Diffstat (limited to 'src')
-rw-r--r-- src/android/camera3_hal.cpp | 33
-rw-r--r-- src/android/camera_buffer.h | 84
-rw-r--r-- src/android/camera_capabilities.cpp | 1644
-rw-r--r-- src/android/camera_capabilities.h | 86
-rw-r--r-- src/android/camera_device.cpp | 2079
-rw-r--r-- src/android/camera_device.h | 109
-rw-r--r-- src/android/camera_hal_config.cpp | 208
-rw-r--r-- src/android/camera_hal_config.h | 38
-rw-r--r-- src/android/camera_hal_manager.cpp | 264
-rw-r--r-- src/android/camera_hal_manager.h | 47
-rw-r--r-- src/android/camera_metadata.cpp | 189
-rw-r--r-- src/android/camera_metadata.h | 98
-rw-r--r-- src/android/camera_ops.cpp | 13
-rw-r--r-- src/android/camera_ops.h | 8
-rw-r--r-- src/android/camera_request.cpp | 194
-rw-r--r-- src/android/camera_request.h | 84
-rw-r--r-- src/android/camera_stream.cpp | 341
-rw-r--r-- src/android/camera_stream.h | 187
-rw-r--r-- src/android/cros/camera3_hal.cpp | 26
-rw-r--r-- src/android/cros/meson.build | 13
-rw-r--r-- src/android/cros_mojo_token.h | 12
-rw-r--r-- src/android/data/nautilus/camera_hal.yaml | 10
-rw-r--r-- src/android/data/soraka/camera_hal.yaml | 10
-rw-r--r-- src/android/frame_buffer_allocator.h | 56
-rw-r--r-- src/android/hal_framebuffer.cpp | 22
-rw-r--r-- src/android/hal_framebuffer.h | 26
-rw-r--r-- src/android/jpeg/encoder.h | 26
-rw-r--r-- src/android/jpeg/encoder_jea.cpp | 56
-rw-r--r-- src/android/jpeg/encoder_jea.h | 31
-rw-r--r-- src/android/jpeg/encoder_libjpeg.cpp | 239
-rw-r--r-- src/android/jpeg/encoder_libjpeg.h | 44
-rw-r--r-- src/android/jpeg/exif.cpp | 522
-rw-r--r-- src/android/jpeg/exif.h | 112
-rw-r--r-- src/android/jpeg/meson.build | 14
-rw-r--r-- src/android/jpeg/post_processor_jpeg.cpp | 223
-rw-r--r-- src/android/jpeg/post_processor_jpeg.h | 38
-rw-r--r-- src/android/jpeg/thumbnailer.cpp | 96
-rw-r--r-- src/android/jpeg/thumbnailer.h | 32
-rw-r--r-- src/android/meson.build | 42
-rw-r--r-- src/android/mm/cros_camera_buffer.cpp | 184
-rw-r--r-- src/android/mm/cros_frame_buffer_allocator.cpp | 88
-rw-r--r-- src/android/mm/generic_camera_buffer.cpp | 199
-rw-r--r-- src/android/mm/generic_frame_buffer_allocator.cpp | 150
-rw-r--r-- src/android/mm/libhardware_stub.c | 17
-rw-r--r-- src/android/mm/meson.build | 19
-rw-r--r-- src/android/post_processor.h | 33
-rw-r--r-- src/android/yuv/post_processor_yuv.cpp | 146
-rw-r--r-- src/android/yuv/post_processor_yuv.h | 35
-rw-r--r-- src/apps/cam/camera_session.cpp | 514
-rw-r--r-- src/apps/cam/camera_session.h | 79
-rw-r--r-- src/apps/cam/capture-script.yaml | 71
-rw-r--r-- src/apps/cam/capture_script.cpp | 662
-rw-r--r-- src/apps/cam/capture_script.h | 73
-rw-r--r-- src/apps/cam/drm.cpp | 717
-rw-r--r-- src/apps/cam/drm.h | 334
-rw-r--r-- src/apps/cam/file_sink.cpp | 184
-rw-r--r-- src/apps/cam/file_sink.h | 57
-rw-r--r-- src/apps/cam/frame_sink.cpp | 67
-rw-r--r-- src/apps/cam/frame_sink.h | 32
-rw-r--r-- src/apps/cam/kms_sink.cpp | 536
-rw-r--r-- src/apps/cam/kms_sink.h | 83
-rw-r--r-- src/apps/cam/main.cpp | 374
-rw-r--r-- src/apps/cam/main.h | 27
-rw-r--r-- src/apps/cam/meson.build | 62
-rw-r--r-- src/apps/cam/sdl_sink.cpp | 215
-rw-r--r-- src/apps/cam/sdl_sink.h | 48
-rw-r--r-- src/apps/cam/sdl_texture.cpp | 36
-rw-r--r-- src/apps/cam/sdl_texture.h | 30
-rw-r--r-- src/apps/cam/sdl_texture_mjpg.cpp | 83
-rw-r--r-- src/apps/cam/sdl_texture_mjpg.h | 23
-rw-r--r-- src/apps/cam/sdl_texture_yuv.cpp | 33
-rw-r--r-- src/apps/cam/sdl_texture_yuv.h | 26
-rw-r--r-- src/apps/common/dng_writer.cpp | 809
-rw-r--r-- src/apps/common/dng_writer.h | 26
-rw-r--r-- src/apps/common/event_loop.cpp | 150
-rw-r--r-- src/apps/common/event_loop.h | 68
-rw-r--r-- src/apps/common/image.cpp | 109
-rw-r--r-- src/apps/common/image.h | 50
-rw-r--r-- src/apps/common/meson.build | 27
-rw-r--r-- src/apps/common/options.cpp | 1143
-rw-r--r-- src/apps/common/options.h | 157
-rw-r--r-- src/apps/common/ppm_writer.cpp | 53
-rw-r--r-- src/apps/common/ppm_writer.h | 20
-rw-r--r-- src/apps/common/stream_options.cpp | 121
-rw-r--r-- src/apps/common/stream_options.h | 29
-rw-r--r-- src/apps/ipa-verify/main.cpp | 64
-rw-r--r-- src/apps/ipa-verify/meson.build | 15
-rw-r--r-- src/apps/lc-compliance/environment.cpp | 22
-rw-r--r-- src/apps/lc-compliance/environment.h | 27
-rw-r--r-- src/apps/lc-compliance/helpers/capture.cpp | 196
-rw-r--r-- src/apps/lc-compliance/helpers/capture.h | 66
-rw-r--r-- src/apps/lc-compliance/main.cpp | 194
-rw-r--r-- src/apps/lc-compliance/meson.build | 37
-rw-r--r-- src/apps/lc-compliance/tests/capture_test.cpp | 136
-rw-r--r-- src/apps/meson.build | 22
-rw-r--r-- src/apps/qcam/assets/feathericons/activity.svg (renamed from src/qcam/assets/feathericons/activity.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/airplay.svg (renamed from src/qcam/assets/feathericons/airplay.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/alert-circle.svg (renamed from src/qcam/assets/feathericons/alert-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/alert-octagon.svg (renamed from src/qcam/assets/feathericons/alert-octagon.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/alert-triangle.svg (renamed from src/qcam/assets/feathericons/alert-triangle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/align-center.svg (renamed from src/qcam/assets/feathericons/align-center.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/align-justify.svg (renamed from src/qcam/assets/feathericons/align-justify.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/align-left.svg (renamed from src/qcam/assets/feathericons/align-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/align-right.svg (renamed from src/qcam/assets/feathericons/align-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/anchor.svg (renamed from src/qcam/assets/feathericons/anchor.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/aperture.svg (renamed from src/qcam/assets/feathericons/aperture.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/archive.svg (renamed from src/qcam/assets/feathericons/archive.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-down-circle.svg (renamed from src/qcam/assets/feathericons/arrow-down-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-down-left.svg (renamed from src/qcam/assets/feathericons/arrow-down-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-down-right.svg (renamed from src/qcam/assets/feathericons/arrow-down-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-down.svg (renamed from src/qcam/assets/feathericons/arrow-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-left-circle.svg (renamed from src/qcam/assets/feathericons/arrow-left-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-left.svg (renamed from src/qcam/assets/feathericons/arrow-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-right-circle.svg (renamed from src/qcam/assets/feathericons/arrow-right-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-right.svg (renamed from src/qcam/assets/feathericons/arrow-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-up-circle.svg (renamed from src/qcam/assets/feathericons/arrow-up-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-up-left.svg (renamed from src/qcam/assets/feathericons/arrow-up-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-up-right.svg (renamed from src/qcam/assets/feathericons/arrow-up-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/arrow-up.svg (renamed from src/qcam/assets/feathericons/arrow-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/at-sign.svg (renamed from src/qcam/assets/feathericons/at-sign.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/award.svg (renamed from src/qcam/assets/feathericons/award.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bar-chart-2.svg (renamed from src/qcam/assets/feathericons/bar-chart-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bar-chart.svg (renamed from src/qcam/assets/feathericons/bar-chart.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/battery-charging.svg (renamed from src/qcam/assets/feathericons/battery-charging.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/battery.svg (renamed from src/qcam/assets/feathericons/battery.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bell-off.svg (renamed from src/qcam/assets/feathericons/bell-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bell.svg (renamed from src/qcam/assets/feathericons/bell.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bluetooth.svg (renamed from src/qcam/assets/feathericons/bluetooth.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bold.svg (renamed from src/qcam/assets/feathericons/bold.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/book-open.svg (renamed from src/qcam/assets/feathericons/book-open.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/book.svg (renamed from src/qcam/assets/feathericons/book.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/bookmark.svg (renamed from src/qcam/assets/feathericons/bookmark.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/box.svg (renamed from src/qcam/assets/feathericons/box.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/briefcase.svg (renamed from src/qcam/assets/feathericons/briefcase.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/calendar.svg (renamed from src/qcam/assets/feathericons/calendar.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/camera-off.svg (renamed from src/qcam/assets/feathericons/camera-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/camera.svg (renamed from src/qcam/assets/feathericons/camera.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cast.svg (renamed from src/qcam/assets/feathericons/cast.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/check-circle.svg (renamed from src/qcam/assets/feathericons/check-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/check-square.svg (renamed from src/qcam/assets/feathericons/check-square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/check.svg (renamed from src/qcam/assets/feathericons/check.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevron-down.svg (renamed from src/qcam/assets/feathericons/chevron-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevron-left.svg (renamed from src/qcam/assets/feathericons/chevron-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevron-right.svg (renamed from src/qcam/assets/feathericons/chevron-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevron-up.svg (renamed from src/qcam/assets/feathericons/chevron-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevrons-down.svg (renamed from src/qcam/assets/feathericons/chevrons-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevrons-left.svg (renamed from src/qcam/assets/feathericons/chevrons-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevrons-right.svg (renamed from src/qcam/assets/feathericons/chevrons-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chevrons-up.svg (renamed from src/qcam/assets/feathericons/chevrons-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/chrome.svg (renamed from src/qcam/assets/feathericons/chrome.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/circle.svg (renamed from src/qcam/assets/feathericons/circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/clipboard.svg (renamed from src/qcam/assets/feathericons/clipboard.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/clock.svg (renamed from src/qcam/assets/feathericons/clock.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud-drizzle.svg (renamed from src/qcam/assets/feathericons/cloud-drizzle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud-lightning.svg (renamed from src/qcam/assets/feathericons/cloud-lightning.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud-off.svg (renamed from src/qcam/assets/feathericons/cloud-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud-rain.svg (renamed from src/qcam/assets/feathericons/cloud-rain.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud-snow.svg (renamed from src/qcam/assets/feathericons/cloud-snow.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cloud.svg (renamed from src/qcam/assets/feathericons/cloud.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/code.svg (renamed from src/qcam/assets/feathericons/code.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/codepen.svg (renamed from src/qcam/assets/feathericons/codepen.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/codesandbox.svg (renamed from src/qcam/assets/feathericons/codesandbox.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/coffee.svg (renamed from src/qcam/assets/feathericons/coffee.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/columns.svg (renamed from src/qcam/assets/feathericons/columns.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/command.svg (renamed from src/qcam/assets/feathericons/command.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/compass.svg (renamed from src/qcam/assets/feathericons/compass.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/copy.svg (renamed from src/qcam/assets/feathericons/copy.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-down-left.svg (renamed from src/qcam/assets/feathericons/corner-down-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-down-right.svg (renamed from src/qcam/assets/feathericons/corner-down-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-left-down.svg (renamed from src/qcam/assets/feathericons/corner-left-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-left-up.svg (renamed from src/qcam/assets/feathericons/corner-left-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-right-down.svg (renamed from src/qcam/assets/feathericons/corner-right-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-right-up.svg (renamed from src/qcam/assets/feathericons/corner-right-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-up-left.svg (renamed from src/qcam/assets/feathericons/corner-up-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/corner-up-right.svg (renamed from src/qcam/assets/feathericons/corner-up-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/cpu.svg (renamed from src/qcam/assets/feathericons/cpu.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/credit-card.svg (renamed from src/qcam/assets/feathericons/credit-card.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/crop.svg (renamed from src/qcam/assets/feathericons/crop.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/crosshair.svg (renamed from src/qcam/assets/feathericons/crosshair.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/database.svg (renamed from src/qcam/assets/feathericons/database.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/delete.svg (renamed from src/qcam/assets/feathericons/delete.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/disc.svg (renamed from src/qcam/assets/feathericons/disc.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/dollar-sign.svg (renamed from src/qcam/assets/feathericons/dollar-sign.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/download-cloud.svg (renamed from src/qcam/assets/feathericons/download-cloud.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/download.svg (renamed from src/qcam/assets/feathericons/download.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/droplet.svg (renamed from src/qcam/assets/feathericons/droplet.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/edit-2.svg (renamed from src/qcam/assets/feathericons/edit-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/edit-3.svg (renamed from src/qcam/assets/feathericons/edit-3.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/edit.svg (renamed from src/qcam/assets/feathericons/edit.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/external-link.svg (renamed from src/qcam/assets/feathericons/external-link.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/eye-off.svg (renamed from src/qcam/assets/feathericons/eye-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/eye.svg (renamed from src/qcam/assets/feathericons/eye.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/facebook.svg (renamed from src/qcam/assets/feathericons/facebook.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/fast-forward.svg (renamed from src/qcam/assets/feathericons/fast-forward.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/feather.svg (renamed from src/qcam/assets/feathericons/feather.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/feathericons.qrc | 11
-rw-r--r-- src/apps/qcam/assets/feathericons/figma.svg (renamed from src/qcam/assets/feathericons/figma.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/file-minus.svg (renamed from src/qcam/assets/feathericons/file-minus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/file-plus.svg (renamed from src/qcam/assets/feathericons/file-plus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/file-text.svg (renamed from src/qcam/assets/feathericons/file-text.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/file.svg (renamed from src/qcam/assets/feathericons/file.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/film.svg (renamed from src/qcam/assets/feathericons/film.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/filter.svg (renamed from src/qcam/assets/feathericons/filter.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/flag.svg (renamed from src/qcam/assets/feathericons/flag.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/folder-minus.svg (renamed from src/qcam/assets/feathericons/folder-minus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/folder-plus.svg (renamed from src/qcam/assets/feathericons/folder-plus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/folder.svg (renamed from src/qcam/assets/feathericons/folder.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/framer.svg (renamed from src/qcam/assets/feathericons/framer.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/frown.svg (renamed from src/qcam/assets/feathericons/frown.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/gift.svg (renamed from src/qcam/assets/feathericons/gift.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/git-branch.svg (renamed from src/qcam/assets/feathericons/git-branch.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/git-commit.svg (renamed from src/qcam/assets/feathericons/git-commit.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/git-merge.svg (renamed from src/qcam/assets/feathericons/git-merge.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/git-pull-request.svg (renamed from src/qcam/assets/feathericons/git-pull-request.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/github.svg (renamed from src/qcam/assets/feathericons/github.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/gitlab.svg (renamed from src/qcam/assets/feathericons/gitlab.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/globe.svg (renamed from src/qcam/assets/feathericons/globe.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/grid.svg (renamed from src/qcam/assets/feathericons/grid.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/hard-drive.svg (renamed from src/qcam/assets/feathericons/hard-drive.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/hash.svg (renamed from src/qcam/assets/feathericons/hash.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/headphones.svg (renamed from src/qcam/assets/feathericons/headphones.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/heart.svg (renamed from src/qcam/assets/feathericons/heart.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/help-circle.svg (renamed from src/qcam/assets/feathericons/help-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/hexagon.svg (renamed from src/qcam/assets/feathericons/hexagon.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/home.svg (renamed from src/qcam/assets/feathericons/home.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/image.svg (renamed from src/qcam/assets/feathericons/image.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/inbox.svg (renamed from src/qcam/assets/feathericons/inbox.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/info.svg (renamed from src/qcam/assets/feathericons/info.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/instagram.svg (renamed from src/qcam/assets/feathericons/instagram.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/italic.svg (renamed from src/qcam/assets/feathericons/italic.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/key.svg (renamed from src/qcam/assets/feathericons/key.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/layers.svg (renamed from src/qcam/assets/feathericons/layers.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/layout.svg (renamed from src/qcam/assets/feathericons/layout.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/life-buoy.svg (renamed from src/qcam/assets/feathericons/life-buoy.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/link-2.svg (renamed from src/qcam/assets/feathericons/link-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/link.svg (renamed from src/qcam/assets/feathericons/link.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/linkedin.svg (renamed from src/qcam/assets/feathericons/linkedin.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/list.svg (renamed from src/qcam/assets/feathericons/list.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/loader.svg (renamed from src/qcam/assets/feathericons/loader.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/lock.svg (renamed from src/qcam/assets/feathericons/lock.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/log-in.svg (renamed from src/qcam/assets/feathericons/log-in.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/log-out.svg (renamed from src/qcam/assets/feathericons/log-out.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/mail.svg (renamed from src/qcam/assets/feathericons/mail.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/map-pin.svg (renamed from src/qcam/assets/feathericons/map-pin.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/map.svg (renamed from src/qcam/assets/feathericons/map.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/maximize-2.svg (renamed from src/qcam/assets/feathericons/maximize-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/maximize.svg (renamed from src/qcam/assets/feathericons/maximize.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/meh.svg (renamed from src/qcam/assets/feathericons/meh.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/menu.svg (renamed from src/qcam/assets/feathericons/menu.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/message-circle.svg (renamed from src/qcam/assets/feathericons/message-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/message-square.svg (renamed from src/qcam/assets/feathericons/message-square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/mic-off.svg (renamed from src/qcam/assets/feathericons/mic-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/mic.svg (renamed from src/qcam/assets/feathericons/mic.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/minimize-2.svg (renamed from src/qcam/assets/feathericons/minimize-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/minimize.svg (renamed from src/qcam/assets/feathericons/minimize.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/minus-circle.svg (renamed from src/qcam/assets/feathericons/minus-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/minus-square.svg (renamed from src/qcam/assets/feathericons/minus-square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/minus.svg (renamed from src/qcam/assets/feathericons/minus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/monitor.svg (renamed from src/qcam/assets/feathericons/monitor.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/moon.svg (renamed from src/qcam/assets/feathericons/moon.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/more-horizontal.svg (renamed from src/qcam/assets/feathericons/more-horizontal.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/more-vertical.svg (renamed from src/qcam/assets/feathericons/more-vertical.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/mouse-pointer.svg (renamed from src/qcam/assets/feathericons/mouse-pointer.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/move.svg (renamed from src/qcam/assets/feathericons/move.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/music.svg (renamed from src/qcam/assets/feathericons/music.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/navigation-2.svg (renamed from src/qcam/assets/feathericons/navigation-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/navigation.svg (renamed from src/qcam/assets/feathericons/navigation.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/octagon.svg (renamed from src/qcam/assets/feathericons/octagon.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/package.svg (renamed from src/qcam/assets/feathericons/package.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/paperclip.svg (renamed from src/qcam/assets/feathericons/paperclip.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/pause-circle.svg (renamed from src/qcam/assets/feathericons/pause-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/pause.svg (renamed from src/qcam/assets/feathericons/pause.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/pen-tool.svg (renamed from src/qcam/assets/feathericons/pen-tool.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/percent.svg (renamed from src/qcam/assets/feathericons/percent.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-call.svg (renamed from src/qcam/assets/feathericons/phone-call.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-forwarded.svg (renamed from src/qcam/assets/feathericons/phone-forwarded.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-incoming.svg (renamed from src/qcam/assets/feathericons/phone-incoming.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-missed.svg (renamed from src/qcam/assets/feathericons/phone-missed.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-off.svg (renamed from src/qcam/assets/feathericons/phone-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone-outgoing.svg (renamed from src/qcam/assets/feathericons/phone-outgoing.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/phone.svg (renamed from src/qcam/assets/feathericons/phone.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/pie-chart.svg (renamed from src/qcam/assets/feathericons/pie-chart.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/play-circle.svg (renamed from src/qcam/assets/feathericons/play-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/play.svg (renamed from src/qcam/assets/feathericons/play.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/plus-circle.svg (renamed from src/qcam/assets/feathericons/plus-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/plus-square.svg (renamed from src/qcam/assets/feathericons/plus-square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/plus.svg (renamed from src/qcam/assets/feathericons/plus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/pocket.svg (renamed from src/qcam/assets/feathericons/pocket.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/power.svg (renamed from src/qcam/assets/feathericons/power.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/printer.svg (renamed from src/qcam/assets/feathericons/printer.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/radio.svg (renamed from src/qcam/assets/feathericons/radio.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/refresh-ccw.svg (renamed from src/qcam/assets/feathericons/refresh-ccw.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/refresh-cw.svg (renamed from src/qcam/assets/feathericons/refresh-cw.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/repeat.svg (renamed from src/qcam/assets/feathericons/repeat.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/rewind.svg (renamed from src/qcam/assets/feathericons/rewind.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/rotate-ccw.svg (renamed from src/qcam/assets/feathericons/rotate-ccw.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/rotate-cw.svg (renamed from src/qcam/assets/feathericons/rotate-cw.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/rss.svg (renamed from src/qcam/assets/feathericons/rss.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/save.svg (renamed from src/qcam/assets/feathericons/save.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/scissors.svg (renamed from src/qcam/assets/feathericons/scissors.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/search.svg (renamed from src/qcam/assets/feathericons/search.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/send.svg (renamed from src/qcam/assets/feathericons/send.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/server.svg (renamed from src/qcam/assets/feathericons/server.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/settings.svg (renamed from src/qcam/assets/feathericons/settings.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/share-2.svg (renamed from src/qcam/assets/feathericons/share-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/share.svg (renamed from src/qcam/assets/feathericons/share.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/shield-off.svg (renamed from src/qcam/assets/feathericons/shield-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/shield.svg (renamed from src/qcam/assets/feathericons/shield.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/shopping-bag.svg (renamed from src/qcam/assets/feathericons/shopping-bag.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/shopping-cart.svg (renamed from src/qcam/assets/feathericons/shopping-cart.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/shuffle.svg (renamed from src/qcam/assets/feathericons/shuffle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/sidebar.svg (renamed from src/qcam/assets/feathericons/sidebar.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/skip-back.svg (renamed from src/qcam/assets/feathericons/skip-back.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/skip-forward.svg (renamed from src/qcam/assets/feathericons/skip-forward.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/slack.svg (renamed from src/qcam/assets/feathericons/slack.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/slash.svg (renamed from src/qcam/assets/feathericons/slash.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/sliders.svg (renamed from src/qcam/assets/feathericons/sliders.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/smartphone.svg (renamed from src/qcam/assets/feathericons/smartphone.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/smile.svg (renamed from src/qcam/assets/feathericons/smile.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/speaker.svg (renamed from src/qcam/assets/feathericons/speaker.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/square.svg (renamed from src/qcam/assets/feathericons/square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/star.svg (renamed from src/qcam/assets/feathericons/star.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/stop-circle.svg (renamed from src/qcam/assets/feathericons/stop-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/sun.svg (renamed from src/qcam/assets/feathericons/sun.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/sunrise.svg (renamed from src/qcam/assets/feathericons/sunrise.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/sunset.svg (renamed from src/qcam/assets/feathericons/sunset.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/tablet.svg (renamed from src/qcam/assets/feathericons/tablet.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/tag.svg (renamed from src/qcam/assets/feathericons/tag.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/target.svg (renamed from src/qcam/assets/feathericons/target.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/terminal.svg (renamed from src/qcam/assets/feathericons/terminal.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/thermometer.svg (renamed from src/qcam/assets/feathericons/thermometer.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/thumbs-down.svg (renamed from src/qcam/assets/feathericons/thumbs-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/thumbs-up.svg (renamed from src/qcam/assets/feathericons/thumbs-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/toggle-left.svg (renamed from src/qcam/assets/feathericons/toggle-left.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/toggle-right.svg (renamed from src/qcam/assets/feathericons/toggle-right.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/tool.svg (renamed from src/qcam/assets/feathericons/tool.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/trash-2.svg (renamed from src/qcam/assets/feathericons/trash-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/trash.svg (renamed from src/qcam/assets/feathericons/trash.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/trello.svg (renamed from src/qcam/assets/feathericons/trello.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/trending-down.svg (renamed from src/qcam/assets/feathericons/trending-down.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/trending-up.svg (renamed from src/qcam/assets/feathericons/trending-up.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/triangle.svg (renamed from src/qcam/assets/feathericons/triangle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/truck.svg (renamed from src/qcam/assets/feathericons/truck.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/tv.svg (renamed from src/qcam/assets/feathericons/tv.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/twitch.svg (renamed from src/qcam/assets/feathericons/twitch.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/twitter.svg (renamed from src/qcam/assets/feathericons/twitter.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/type.svg (renamed from src/qcam/assets/feathericons/type.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/umbrella.svg (renamed from src/qcam/assets/feathericons/umbrella.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/underline.svg (renamed from src/qcam/assets/feathericons/underline.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/unlock.svg (renamed from src/qcam/assets/feathericons/unlock.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/upload-cloud.svg (renamed from src/qcam/assets/feathericons/upload-cloud.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/upload.svg (renamed from src/qcam/assets/feathericons/upload.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/user-check.svg (renamed from src/qcam/assets/feathericons/user-check.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/user-minus.svg (renamed from src/qcam/assets/feathericons/user-minus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/user-plus.svg (renamed from src/qcam/assets/feathericons/user-plus.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/user-x.svg (renamed from src/qcam/assets/feathericons/user-x.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/user.svg (renamed from src/qcam/assets/feathericons/user.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/users.svg (renamed from src/qcam/assets/feathericons/users.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/video-off.svg (renamed from src/qcam/assets/feathericons/video-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/video.svg (renamed from src/qcam/assets/feathericons/video.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/voicemail.svg (renamed from src/qcam/assets/feathericons/voicemail.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/volume-1.svg (renamed from src/qcam/assets/feathericons/volume-1.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/volume-2.svg (renamed from src/qcam/assets/feathericons/volume-2.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/volume-x.svg (renamed from src/qcam/assets/feathericons/volume-x.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/volume.svg (renamed from src/qcam/assets/feathericons/volume.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/watch.svg (renamed from src/qcam/assets/feathericons/watch.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/wifi-off.svg (renamed from src/qcam/assets/feathericons/wifi-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/wifi.svg (renamed from src/qcam/assets/feathericons/wifi.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/wind.svg (renamed from src/qcam/assets/feathericons/wind.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/x-circle.svg (renamed from src/qcam/assets/feathericons/x-circle.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/x-octagon.svg (renamed from src/qcam/assets/feathericons/x-octagon.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/x-square.svg (renamed from src/qcam/assets/feathericons/x-square.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/x.svg (renamed from src/qcam/assets/feathericons/x.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/youtube.svg (renamed from src/qcam/assets/feathericons/youtube.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/zap-off.svg (renamed from src/qcam/assets/feathericons/zap-off.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/zap.svg (renamed from src/qcam/assets/feathericons/zap.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/zoom-in.svg (renamed from src/qcam/assets/feathericons/zoom-in.svg) | 0
-rw-r--r-- src/apps/qcam/assets/feathericons/zoom-out.svg (renamed from src/qcam/assets/feathericons/zoom-out.svg) | 0
-rw-r--r-- src/apps/qcam/assets/shader/RGB.frag | 22
-rw-r--r-- src/apps/qcam/assets/shader/YUV_2_planes.frag | 42
-rw-r--r-- src/apps/qcam/assets/shader/YUV_3_planes.frag | 36
-rw-r--r-- src/apps/qcam/assets/shader/YUV_packed.frag | 83
-rw-r--r-- src/apps/qcam/assets/shader/bayer_1x_packed.frag | 216
-rw-r--r-- src/apps/qcam/assets/shader/bayer_8.frag | 107
-rw-r--r-- src/apps/qcam/assets/shader/bayer_8.vert | 53
-rw-r--r-- src/apps/qcam/assets/shader/identity.vert | 19
-rw-r--r-- src/apps/qcam/assets/shader/shaders.qrc | 13
-rw-r--r-- src/apps/qcam/cam_select_dialog.cpp | 121
-rw-r--r-- src/apps/qcam/cam_select_dialog.h | 47
-rw-r--r-- src/apps/qcam/format_converter.cpp | 359
-rw-r--r-- src/apps/qcam/format_converter.h | 62
-rw-r--r-- src/apps/qcam/main.cpp | 94
-rw-r--r-- src/apps/qcam/main_window.cpp | 776
-rw-r--r-- src/apps/qcam/main_window.h | 133
-rw-r--r-- src/apps/qcam/meson.build | 59
-rw-r--r-- src/apps/qcam/message_handler.cpp | 27
-rw-r--r-- src/apps/qcam/message_handler.h | 24
-rw-r--r-- src/apps/qcam/viewfinder.h | 34
-rw-r--r-- src/apps/qcam/viewfinder_gl.cpp | 847
-rw-r--r-- src/apps/qcam/viewfinder_gl.h | 107
-rw-r--r-- src/apps/qcam/viewfinder_qt.cpp | 218
-rw-r--r-- src/apps/qcam/viewfinder_qt.h | 66
-rw-r--r-- src/cam/buffer_writer.cpp | 88
-rw-r--r-- src/cam/buffer_writer.h | 31
-rw-r--r-- src/cam/capture.cpp | 204
-rw-r--r-- src/cam/capture.h | 44
-rw-r--r-- src/cam/event_loop.cpp | 39
-rw-r--r-- src/cam/event_loop.h | 34
-rw-r--r-- src/cam/main.cpp | 385
-rw-r--r-- src/cam/main.h | 21
-rw-r--r-- src/cam/meson.build | 11
-rw-r--r-- src/cam/options.cpp | 537
-rw-r--r-- src/cam/options.h | 147
-rw-r--r-- src/gstreamer/gstlibcamera-controls.cpp.in | 332
-rw-r--r-- src/gstreamer/gstlibcamera-controls.h | 43
-rw-r--r-- src/gstreamer/gstlibcamera-utils.cpp | 540
-rw-r--r-- src/gstreamer/gstlibcamera-utils.h | 36
-rw-r--r-- src/gstreamer/gstlibcamera.cpp | 4
-rw-r--r-- src/gstreamer/gstlibcameraallocator.cpp | 57
-rw-r--r-- src/gstreamer/gstlibcameraallocator.h | 11
-rw-r--r-- src/gstreamer/gstlibcamerapad.cpp | 61
-rw-r--r-- src/gstreamer/gstlibcamerapad.h | 13
-rw-r--r-- src/gstreamer/gstlibcamerapool.cpp | 56
-rw-r--r-- src/gstreamer/gstlibcamerapool.h | 10
-rw-r--r-- src/gstreamer/gstlibcameraprovider.cpp | 57
-rw-r--r-- src/gstreamer/gstlibcameraprovider.h | 8
-rw-r--r-- src/gstreamer/gstlibcamerasrc.cpp | 758
-rw-r--r-- src/gstreamer/gstlibcamerasrc.h | 7
-rw-r--r-- src/gstreamer/meson.build | 76
-rwxr-xr-x src/ipa/ipa-sign-install.sh | 22
-rwxr-xr-x src/ipa/ipa-sign.sh | 13
-rw-r--r-- src/ipa/ipu3/algorithms/af.cpp | 458
-rw-r--r-- src/ipa/ipu3/algorithms/af.h | 73
-rw-r--r-- src/ipa/ipu3/algorithms/agc.cpp | 255
-rw-r--r-- src/ipa/ipu3/algorithms/agc.h | 61
-rw-r--r-- src/ipa/ipu3/algorithms/algorithm.h | 22
-rw-r--r-- src/ipa/ipu3/algorithms/awb.cpp | 480
-rw-r--r-- src/ipa/ipu3/algorithms/awb.h | 81
-rw-r--r-- src/ipa/ipu3/algorithms/blc.cpp | 71
-rw-r--r-- src/ipa/ipu3/algorithms/blc.h | 28
-rw-r--r-- src/ipa/ipu3/algorithms/meson.build | 9
-rw-r--r-- src/ipa/ipu3/algorithms/tone_mapping.cpp | 120
-rw-r--r-- src/ipa/ipu3/algorithms/tone_mapping.h | 35
-rw-r--r-- src/ipa/ipu3/data/meson.build | 9
-rw-r--r-- src/ipa/ipu3/data/uncalibrated.yaml | 11
-rw-r--r-- src/ipa/ipu3/ipa_context.cpp | 190
-rw-r--r-- src/ipa/ipu3/ipa_context.h | 102
-rw-r--r-- src/ipa/ipu3/ipu3-ipa-design-guide.rst | 162
-rw-r--r-- src/ipa/ipu3/ipu3.cpp | 692
-rw-r--r-- src/ipa/ipu3/meson.build | 31
-rw-r--r-- src/ipa/ipu3/module.h | 27
-rw-r--r-- src/ipa/libipa/agc_mean_luminance.cpp | 578
-rw-r--r-- src/ipa/libipa/agc_mean_luminance.h | 98
-rw-r--r-- src/ipa/libipa/algorithm.cpp | 181
-rw-r--r-- src/ipa/libipa/algorithm.h | 106
-rw-r--r-- src/ipa/libipa/camera_sensor_helper.cpp | 752
-rw-r--r-- src/ipa/libipa/camera_sensor_helper.h | 94
-rw-r--r-- src/ipa/libipa/colours.cpp | 81
-rw-r--r-- src/ipa/libipa/colours.h | 23
-rw-r--r-- src/ipa/libipa/exposure_mode_helper.cpp | 240
-rw-r--r-- src/ipa/libipa/exposure_mode_helper.h | 53
-rw-r--r-- src/ipa/libipa/fc_queue.cpp | 140
-rw-r--r-- src/ipa/libipa/fc_queue.h | 137
-rw-r--r-- src/ipa/libipa/fixedpoint.cpp | 42
-rw-r--r-- src/ipa/libipa/fixedpoint.h | 65
-rw-r--r-- src/ipa/libipa/histogram.cpp | 175
-rw-r--r-- src/ipa/libipa/histogram.h | 51
-rw-r--r-- src/ipa/libipa/interpolator.cpp | 157
-rw-r--r-- src/ipa/libipa/interpolator.h | 131
-rw-r--r-- src/ipa/libipa/ipa_interface_wrapper.cpp | 245
-rw-r--r-- src/ipa/libipa/ipa_interface_wrapper.h | 57
-rw-r--r-- src/ipa/libipa/lsc_polynomial.cpp | 81
-rw-r--r-- src/ipa/libipa/lsc_polynomial.h | 105
-rw-r--r-- src/ipa/libipa/lux.cpp | 181
-rw-r--r-- src/ipa/libipa/lux.h | 42
-rw-r--r-- src/ipa/libipa/meson.build | 40
-rw-r--r-- src/ipa/libipa/module.cpp | 126
-rw-r--r-- src/ipa/libipa/module.h | 124
-rw-r--r-- src/ipa/libipa/pwl.cpp | 457
-rw-r--r-- src/ipa/libipa/pwl.h | 85
-rw-r--r-- src/ipa/libipa/vector.cpp | 351
-rw-r--r-- src/ipa/libipa/vector.h | 370
-rw-r--r-- src/ipa/mali-c55/algorithms/agc.cpp | 410
-rw-r--r-- src/ipa/mali-c55/algorithms/agc.h | 81
-rw-r--r-- src/ipa/mali-c55/algorithms/algorithm.h | 39
-rw-r--r-- src/ipa/mali-c55/algorithms/awb.cpp | 230
-rw-r--r-- src/ipa/mali-c55/algorithms/awb.h | 40
-rw-r--r-- src/ipa/mali-c55/algorithms/blc.cpp | 140
-rw-r--r-- src/ipa/mali-c55/algorithms/blc.h | 42
-rw-r--r-- src/ipa/mali-c55/algorithms/lsc.cpp | 216
-rw-r--r-- src/ipa/mali-c55/algorithms/lsc.h | 45
-rw-r--r-- src/ipa/mali-c55/algorithms/meson.build | 8
-rw-r--r-- src/ipa/mali-c55/data/imx415.yaml | 325
-rw-r--r-- src/ipa/mali-c55/data/meson.build | 9
-rw-r--r-- src/ipa/mali-c55/data/uncalibrated.yaml | 7
-rw-r--r-- src/ipa/mali-c55/ipa_context.cpp | 101
-rw-r--r-- src/ipa/mali-c55/ipa_context.h | 90
-rw-r--r-- src/ipa/mali-c55/mali-c55.cpp | 399
-rw-r--r-- src/ipa/mali-c55/meson.build | 33
-rw-r--r-- src/ipa/mali-c55/module.h | 27
-rw-r--r-- src/ipa/meson.build | 75
-rw-r--r-- src/ipa/rkisp1/algorithms/agc.cpp | 470
-rw-r--r-- src/ipa/rkisp1/algorithms/agc.h | 60
-rw-r--r-- src/ipa/rkisp1/algorithms/algorithm.h | 32
-rw-r--r-- src/ipa/rkisp1/algorithms/awb.cpp | 350
-rw-r--r-- src/ipa/rkisp1/algorithms/awb.h | 46
-rw-r--r-- src/ipa/rkisp1/algorithms/blc.cpp | 189
-rw-r--r-- src/ipa/rkisp1/algorithms/blc.h | 43
-rw-r--r-- src/ipa/rkisp1/algorithms/ccm.cpp | 135
-rw-r--r-- src/ipa/rkisp1/algorithms/ccm.h | 50
-rw-r--r-- src/ipa/rkisp1/algorithms/cproc.cpp | 160
-rw-r--r-- src/ipa/rkisp1/algorithms/cproc.h | 36
-rw-r--r-- src/ipa/rkisp1/algorithms/dpcc.cpp | 249
-rw-r--r-- src/ipa/rkisp1/algorithms/dpcc.h | 32
-rw-r--r-- src/ipa/rkisp1/algorithms/dpf.cpp | 265
-rw-r--r-- src/ipa/rkisp1/algorithms/dpf.h | 38
-rw-r--r-- src/ipa/rkisp1/algorithms/filter.cpp | 214
-rw-r--r-- src/ipa/rkisp1/algorithms/filter.h | 33
-rw-r--r-- src/ipa/rkisp1/algorithms/goc.cpp | 149
-rw-r--r-- src/ipa/rkisp1/algorithms/goc.h | 42
-rw-r--r-- src/ipa/rkisp1/algorithms/gsl.cpp | 142
-rw-r--r-- src/ipa/rkisp1/algorithms/gsl.h | 35
-rw-r--r-- src/ipa/rkisp1/algorithms/lsc.cpp | 438
-rw-r--r-- src/ipa/rkisp1/algorithms/lsc.h | 60
-rw-r--r-- src/ipa/rkisp1/algorithms/lux.cpp | 80
-rw-r--r-- src/ipa/rkisp1/algorithms/lux.h | 36
-rw-r--r-- src/ipa/rkisp1/algorithms/meson.build | 16
-rw-r--r-- src/ipa/rkisp1/data/imx219.yaml | 114
-rw-r--r-- src/ipa/rkisp1/data/imx258.yaml | 55
-rw-r--r-- src/ipa/rkisp1/data/meson.build | 16
-rw-r--r-- src/ipa/rkisp1/data/ov2685.yaml | 41
-rw-r--r-- src/ipa/rkisp1/data/ov4689.yaml | 9
-rw-r--r-- src/ipa/rkisp1/data/ov5640.yaml | 250
-rw-r--r-- src/ipa/rkisp1/data/ov5695.yaml | 41
-rw-r--r-- src/ipa/rkisp1/data/ov8858.yaml | 54
-rw-r--r-- src/ipa/rkisp1/data/uncalibrated.yaml | 9
-rw-r--r-- src/ipa/rkisp1/ipa_context.cpp | 413
-rw-r--r-- src/ipa/rkisp1/ipa_context.h | 201
-rw-r--r-- src/ipa/rkisp1/meson.build | 40
-rw-r--r-- src/ipa/rkisp1/module.h | 28
-rw-r--r-- src/ipa/rkisp1/params.cpp | 222
-rw-r--r-- src/ipa/rkisp1/params.h | 163
-rw-r--r-- src/ipa/rkisp1/rkisp1.cpp | 547
-rw-r--r-- src/ipa/rpi/README.md | 25
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper.cpp | 257
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper.h | 127
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx219.cpp | 115
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx283.cpp | 61
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx290.cpp | 66
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx296.cpp | 72
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx415.cpp | 64
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx477.cpp | 186
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx519.cpp | 185
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_imx708.cpp | 371
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp | 94
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp | 62
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp | 54
-rw-r--r-- src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp | 54
-rw-r--r-- src/ipa/rpi/cam_helper/md_parser.h | 155
-rw-r--r-- src/ipa/rpi/cam_helper/md_parser_smia.cpp | 152
-rw-r--r-- src/ipa/rpi/cam_helper/meson.build | 30
-rw-r--r-- src/ipa/rpi/common/ipa_base.cpp | 1542
-rw-r--r-- src/ipa/rpi/common/ipa_base.h | 143
-rw-r--r-- src/ipa/rpi/common/meson.build | 17
-rw-r--r-- src/ipa/rpi/controller/af_algorithm.h | 76
-rw-r--r-- src/ipa/rpi/controller/af_status.h | 35
-rw-r--r-- src/ipa/rpi/controller/agc_algorithm.h | 38
-rw-r--r-- src/ipa/rpi/controller/agc_status.h | 48
-rw-r--r-- src/ipa/rpi/controller/algorithm.cpp | 56
-rw-r--r-- src/ipa/rpi/controller/algorithm.h | 68
-rw-r--r-- src/ipa/rpi/controller/alsc_status.h | 22
-rw-r--r-- src/ipa/rpi/controller/awb_algorithm.h | 27
-rw-r--r-- src/ipa/rpi/controller/awb_status.h | 20
-rw-r--r-- src/ipa/rpi/controller/black_level_algorithm.h | 23
-rw-r--r-- src/ipa/rpi/controller/black_level_status.h | 15
-rw-r--r-- src/ipa/rpi/controller/cac_status.h | 14
-rw-r--r-- src/ipa/rpi/controller/camera_mode.h | 59
-rw-r--r-- src/ipa/rpi/controller/ccm_algorithm.h | 21
-rw-r--r-- src/ipa/rpi/controller/ccm_status.h | 14
-rw-r--r-- src/ipa/rpi/controller/contrast_algorithm.h | 24
-rw-r--r-- src/ipa/rpi/controller/contrast_status.h | 20
-rw-r--r-- src/ipa/rpi/controller/controller.cpp | 222
-rw-r--r-- src/ipa/rpi/controller/controller.h | 78
-rw-r--r-- src/ipa/rpi/controller/denoise_algorithm.h | 27
-rw-r--r-- src/ipa/rpi/controller/denoise_status.h | 35
-rw-r--r-- src/ipa/rpi/controller/device_status.cpp | 31
-rw-r--r-- src/ipa/rpi/controller/device_status.h | 43
-rw-r--r-- src/ipa/rpi/controller/dpc_status.h | 13
-rw-r--r-- src/ipa/rpi/controller/geq_status.h | 14
-rw-r--r-- src/ipa/rpi/controller/hdr_algorithm.h | 25
-rw-r--r-- src/ipa/rpi/controller/hdr_status.h | 19
-rw-r--r-- src/ipa/rpi/controller/histogram.cpp | 76
-rw-r--r-- src/ipa/rpi/controller/histogram.h | 55
-rw-r--r-- src/ipa/rpi/controller/lux_status.h | 23
-rw-r--r-- src/ipa/rpi/controller/meson.build | 35
-rw-r--r-- src/ipa/rpi/controller/metadata.h | 142
-rw-r--r-- src/ipa/rpi/controller/noise_status.h | 14
-rw-r--r-- src/ipa/rpi/controller/pdaf_data.h | 24
-rw-r--r-- src/ipa/rpi/controller/region_stats.h | 123
-rw-r--r-- src/ipa/rpi/controller/rpi/af.cpp | 797
-rw-r--r-- src/ipa/rpi/controller/rpi/af.h | 166
-rw-r--r-- src/ipa/rpi/controller/rpi/agc.cpp | 338
-rw-r--r-- src/ipa/rpi/controller/rpi/agc.h | 58
-rw-r--r-- src/ipa/rpi/controller/rpi/agc_channel.cpp | 1030
-rw-r--r-- src/ipa/rpi/controller/rpi/agc_channel.h | 154
-rw-r--r-- src/ipa/rpi/controller/rpi/alsc.cpp | 869
-rw-r--r-- src/ipa/rpi/controller/rpi/alsc.h | 174
-rw-r--r-- src/ipa/rpi/controller/rpi/awb.cpp | 797
-rw-r--r-- src/ipa/rpi/controller/rpi/awb.h | 200
-rw-r--r-- src/ipa/rpi/controller/rpi/black_level.cpp | 73
-rw-r--r-- src/ipa/rpi/controller/rpi/black_level.h | 32
-rw-r--r-- src/ipa/rpi/controller/rpi/cac.cpp | 107
-rw-r--r-- src/ipa/rpi/controller/rpi/cac.h | 35
-rw-r--r-- src/ipa/rpi/controller/rpi/ccm.cpp | 184
-rw-r--r-- src/ipa/rpi/controller/rpi/ccm.h | 45
-rw-r--r-- src/ipa/rpi/controller/rpi/contrast.cpp | 198
-rw-r--r-- src/ipa/rpi/controller/rpi/contrast.h | 55
-rw-r--r-- src/ipa/rpi/controller/rpi/denoise.cpp | 198
-rw-r--r-- src/ipa/rpi/controller/rpi/denoise.h | 59
-rw-r--r-- src/ipa/rpi/controller/rpi/dpc.cpp | 59
-rw-r--r-- src/ipa/rpi/controller/rpi/dpc.h | 32
-rw-r--r-- src/ipa/rpi/controller/rpi/focus.h | 28
-rw-r--r-- src/ipa/rpi/controller/rpi/geq.cpp | 88
-rw-r--r-- src/ipa/rpi/controller/rpi/geq.h | 36
-rw-r--r-- src/ipa/rpi/controller/rpi/hdr.cpp | 417
-rw-r--r-- src/ipa/rpi/controller/rpi/hdr.h | 85
-rw-r--r-- src/ipa/rpi/controller/rpi/lux.cpp | 114
-rw-r--r-- src/ipa/rpi/controller/rpi/lux.h | 45
-rw-r--r-- src/ipa/rpi/controller/rpi/noise.cpp | 89
-rw-r--r-- src/ipa/rpi/controller/rpi/noise.h | 32
-rw-r--r-- src/ipa/rpi/controller/rpi/saturation.cpp | 57
-rw-r--r-- src/ipa/rpi/controller/rpi/saturation.h | 32
-rw-r--r-- src/ipa/rpi/controller/rpi/sdn.cpp | 83
-rw-r--r-- src/ipa/rpi/controller/rpi/sdn.h | 32
-rw-r--r-- src/ipa/rpi/controller/rpi/sharpen.cpp | 92
-rw-r--r-- src/ipa/rpi/controller/rpi/sharpen.h | 34
-rw-r--r-- src/ipa/rpi/controller/rpi/tonemap.cpp | 61
-rw-r--r-- src/ipa/rpi/controller/rpi/tonemap.h | 36
-rw-r--r-- src/ipa/rpi/controller/saturation_status.h | 13
-rw-r--r-- src/ipa/rpi/controller/sharpen_algorithm.h | 21
-rw-r--r-- src/ipa/rpi/controller/sharpen_status.h | 20
-rw-r--r-- src/ipa/rpi/controller/statistics.h | 78
-rw-r--r-- src/ipa/rpi/controller/stitch_status.h | 17
-rw-r--r-- src/ipa/rpi/controller/tonemap_status.h | 17
-rw-r--r-- src/ipa/rpi/meson.build | 14
-rw-r--r-- src/ipa/rpi/vc4/data/imx219.json | 695
-rw-r--r-- src/ipa/rpi/vc4/data/imx219_noir.json | 629
-rw-r--r-- src/ipa/rpi/vc4/data/imx283.json | 313
-rw-r--r-- src/ipa/rpi/vc4/data/imx290.json | 214
-rw-r--r-- src/ipa/rpi/vc4/data/imx296.json | 443
-rw-r--r-- src/ipa/rpi/vc4/data/imx296_mono.json | 240
-rw-r--r-- src/ipa/rpi/vc4/data/imx327.json | 215
-rw-r--r-- src/ipa/rpi/vc4/data/imx378.json | 427
-rwxr-xr-x src/ipa/rpi/vc4/data/imx415.json | 413
-rw-r--r-- src/ipa/rpi/vc4/data/imx462.json | 215
-rw-r--r-- src/ipa/rpi/vc4/data/imx477.json | 700
-rw-r--r-- src/ipa/rpi/vc4/data/imx477_noir.json | 656
-rw-r--r-- src/ipa/rpi/vc4/data/imx477_scientific.json | 488
-rw-r--r-- src/ipa/rpi/vc4/data/imx477_v1.json | 525
-rw-r--r-- src/ipa/rpi/vc4/data/imx519.json | 427
-rw-r--r-- src/ipa/rpi/vc4/data/imx708.json | 671
-rw-r--r-- src/ipa/rpi/vc4/data/imx708_noir.json | 770
-rw-r--r-- src/ipa/rpi/vc4/data/imx708_wide.json | 682
-rw-r--r-- src/ipa/rpi/vc4/data/imx708_wide_noir.json | 673
-rw-r--r-- src/ipa/rpi/vc4/data/meson.build | 33
-rw-r--r-- src/ipa/rpi/vc4/data/ov5647.json | 696
-rw-r--r-- src/ipa/rpi/vc4/data/ov5647_noir.json | 412
-rw-r--r-- src/ipa/rpi/vc4/data/ov64a40.json | 422
-rw-r--r-- src/ipa/rpi/vc4/data/ov7251_mono.json | 136
-rw-r--r-- src/ipa/rpi/vc4/data/ov9281_mono.json | 136
-rw-r--r-- src/ipa/rpi/vc4/data/se327m12.json | 432
-rw-r--r-- src/ipa/rpi/vc4/data/uncalibrated.json | 131
-rw-r--r-- src/ipa/rpi/vc4/meson.build | 45
-rw-r--r-- src/ipa/rpi/vc4/vc4.cpp | 597
-rw-r--r-- src/ipa/simple/algorithms/agc.cpp | 139
-rw-r--r-- src/ipa/simple/algorithms/agc.h | 33
-rw-r--r-- src/ipa/simple/algorithms/algorithm.h | 22
-rw-r--r-- src/ipa/simple/algorithms/awb.cpp | 69
-rw-r--r-- src/ipa/simple/algorithms/awb.h | 32
-rw-r--r-- src/ipa/simple/algorithms/blc.cpp | 98
-rw-r--r-- src/ipa/simple/algorithms/blc.h | 40
-rw-r--r-- src/ipa/simple/algorithms/lut.cpp | 123
-rw-r--r-- src/ipa/simple/algorithms/lut.h | 40
-rw-r--r-- src/ipa/simple/algorithms/meson.build | 8
-rw-r--r-- src/ipa/simple/data/meson.build | 10
-rw-r--r-- src/ipa/simple/data/uncalibrated.yaml | 10
-rw-r--r-- src/ipa/simple/ipa_context.cpp | 102
-rw-r--r-- src/ipa/simple/ipa_context.h | 77
-rw-r--r-- src/ipa/simple/meson.build | 31
-rw-r--r-- src/ipa/simple/module.h | 30
-rw-r--r-- src/ipa/simple/soft_simple.cpp | 350
-rw-r--r-- src/ipa/vimc/data/meson.build | 9
-rw-r--r-- src/ipa/vimc/data/vimc.conf | 3
-rw-r--r-- src/ipa/vimc/meson.build | 38
-rw-r--r-- src/ipa/vimc/vimc.cpp | 149
-rw-r--r-- src/libcamera/base/backtrace.cpp | 335
-rw-r--r-- src/libcamera/base/bound_method.cpp | 110
-rw-r--r-- src/libcamera/base/class.cpp | 208
-rw-r--r-- src/libcamera/base/event_dispatcher.cpp | 116
-rw-r--r-- src/libcamera/base/event_dispatcher_poll.cpp | 306
-rw-r--r-- src/libcamera/base/event_notifier.cpp | 147
-rw-r--r-- src/libcamera/base/file.cpp | 478
-rw-r--r-- src/libcamera/base/flags.cpp | 192
-rw-r--r-- src/libcamera/base/log.cpp | 1112
-rw-r--r-- src/libcamera/base/memfd.cpp | 123
-rw-r--r-- src/libcamera/base/meson.build | 81
-rw-r--r-- src/libcamera/base/message.cpp | 166
-rw-r--r-- src/libcamera/base/mutex.cpp | 55
-rw-r--r-- src/libcamera/base/object.cpp | 361
-rw-r--r-- src/libcamera/base/semaphore.cpp | 104
-rw-r--r-- src/libcamera/base/shared_fd.cpp | 291
-rw-r--r-- src/libcamera/base/signal.cpp | 205
-rw-r--r-- src/libcamera/base/thread.cpp | 717
-rw-r--r-- src/libcamera/base/timer.cpp | 171
-rw-r--r-- src/libcamera/base/unique_fd.cpp | 123
-rw-r--r-- src/libcamera/base/utils.cpp | 674
-rw-r--r-- src/libcamera/bayer_format.cpp | 460
-rw-r--r-- src/libcamera/bound_method.cpp | 110
-rw-r--r-- src/libcamera/buffer.cpp | 214
-rw-r--r-- src/libcamera/byte_stream_buffer.cpp | 23
-rw-r--r-- src/libcamera/camera.cpp | 805
-rw-r--r-- src/libcamera/camera_controls.cpp | 6
-rw-r--r-- src/libcamera/camera_lens.cpp | 153
-rw-r--r-- src/libcamera/camera_manager.cpp | 400
-rw-r--r-- src/libcamera/camera_sensor.cpp | 369
-rw-r--r-- src/libcamera/color_space.cpp | 520
-rw-r--r-- src/libcamera/control_ids.cpp.in | 108
-rw-r--r-- src/libcamera/control_ids.yaml | 53
-rw-r--r-- src/libcamera/control_ids_core.yaml | 1052
-rw-r--r-- src/libcamera/control_ids_debug.yaml | 6
-rw-r--r-- src/libcamera/control_ids_draft.yaml | 327
-rw-r--r-- src/libcamera/control_ids_rpi.yaml | 61
-rw-r--r-- src/libcamera/control_ranges.yaml | 20
-rw-r--r-- src/libcamera/control_serializer.cpp | 262
-rw-r--r-- src/libcamera/control_validator.cpp | 6
-rw-r--r-- src/libcamera/controls.cpp | 451
-rw-r--r-- src/libcamera/converter.cpp | 458
-rw-r--r-- src/libcamera/converter/converter_v4l2_m2m.cpp | 751
-rw-r--r-- src/libcamera/converter/meson.build | 5
-rw-r--r-- src/libcamera/debug_controls.cpp | 164
-rw-r--r-- src/libcamera/delayed_controls.cpp | 285
-rw-r--r-- src/libcamera/device_enumerator.cpp | 46
-rw-r--r-- src/libcamera/device_enumerator_sysfs.cpp | 11
-rw-r--r-- src/libcamera/device_enumerator_udev.cpp | 35
-rw-r--r-- src/libcamera/dma_buf_allocator.cpp | 351
-rw-r--r-- src/libcamera/event_dispatcher.cpp | 117
-rw-r--r-- src/libcamera/event_dispatcher_poll.cpp | 308
-rw-r--r-- src/libcamera/event_notifier.cpp | 141
-rw-r--r-- src/libcamera/fence.cpp | 112
-rw-r--r-- src/libcamera/file_descriptor.cpp | 203
-rw-r--r-- src/libcamera/formats.cpp | 1217
-rw-r--r-- src/libcamera/formats.yaml | 212
-rw-r--r-- src/libcamera/framebuffer.cpp | 441
-rw-r--r-- src/libcamera/framebuffer_allocator.cpp | 34
-rwxr-xr-x src/libcamera/gen-controls.py | 173
-rw-r--r-- src/libcamera/geometry.cpp | 684
-rw-r--r-- src/libcamera/include/byte_stream_buffer.h | 89
-rw-r--r-- src/libcamera/include/camera_controls.h | 30
-rw-r--r-- src/libcamera/include/camera_sensor.h | 66
-rw-r--r-- src/libcamera/include/control_serializer.h | 55
-rw-r--r-- src/libcamera/include/control_validator.h | 27
-rw-r--r-- src/libcamera/include/device_enumerator.h | 57
-rw-r--r-- src/libcamera/include/device_enumerator_sysfs.h | 32
-rw-r--r-- src/libcamera/include/device_enumerator_udev.h | 75
-rw-r--r-- src/libcamera/include/event_dispatcher_poll.h | 58
-rw-r--r-- src/libcamera/include/formats.h | 34
-rw-r--r-- src/libcamera/include/ipa_context_wrapper.h | 47
-rw-r--r-- src/libcamera/include/ipa_manager.h | 42
-rw-r--r-- src/libcamera/include/ipa_module.h | 55
-rw-r--r-- src/libcamera/include/ipa_proxy.h | 65
-rw-r--r-- src/libcamera/include/ipc_unixsocket.h | 59
-rw-r--r-- src/libcamera/include/log.h | 130
-rw-r--r-- src/libcamera/include/media_device.h | 93
-rw-r--r-- src/libcamera/include/media_object.h | 124
-rw-r--r-- src/libcamera/include/meson.build | 30
-rw-r--r-- src/libcamera/include/message.h | 70
-rw-r--r-- src/libcamera/include/pipeline_handler.h | 150
-rw-r--r-- src/libcamera/include/process.h | 55
-rw-r--r-- src/libcamera/include/semaphore.h | 34
-rw-r--r-- src/libcamera/include/thread.h | 77
-rw-r--r-- src/libcamera/include/utils.h | 152
-rw-r--r-- src/libcamera/include/v4l2_controls.h | 31
-rw-r--r-- src/libcamera/include/v4l2_device.h | 60
-rw-r--r-- src/libcamera/include/v4l2_subdevice.h | 71
-rw-r--r-- src/libcamera/include/v4l2_videodevice.h | 295
-rw-r--r-- src/libcamera/ipa/meson.build | 14
-rw-r--r-- src/libcamera/ipa_context_wrapper.cpp | 251
-rw-r--r-- src/libcamera/ipa_controls.cpp | 55
-rw-r--r-- src/libcamera/ipa_data_serializer.cpp | 626
-rw-r--r-- src/libcamera/ipa_interface.cpp | 497
-rw-r--r-- src/libcamera/ipa_manager.cpp | 160
-rw-r--r-- src/libcamera/ipa_module.cpp | 296
-rw-r--r-- src/libcamera/ipa_proxy.cpp | 223
-rw-r--r-- src/libcamera/ipa_pub_key.cpp.in | 22
-rw-r--r-- src/libcamera/ipc_pipe.cpp | 227
-rw-r--r-- src/libcamera/ipc_pipe_unixsocket.cpp | 147
-rw-r--r-- src/libcamera/ipc_unixsocket.cpp | 93
-rw-r--r-- src/libcamera/log.cpp | 1051
-rw-r--r-- src/libcamera/mapped_framebuffer.cpp | 243
-rw-r--r-- src/libcamera/matrix.cpp | 145
-rw-r--r-- src/libcamera/media_device.cpp | 181
-rw-r--r-- src/libcamera/media_object.cpp | 148
-rw-r--r-- src/libcamera/meson.build | 233
-rw-r--r-- src/libcamera/message.cpp | 165
-rw-r--r-- src/libcamera/object.cpp | 245
-rw-r--r-- src/libcamera/orientation.cpp | 118
-rw-r--r-- src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | 1116
-rw-r--r-- src/libcamera/pipeline/imx8-isi/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.cpp | 431
-rw-r--r-- src/libcamera/pipeline/ipu3/cio2.h | 79
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.cpp | 144
-rw-r--r-- src/libcamera/pipeline/ipu3/frames.h | 67
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.cpp | 767
-rw-r--r-- src/libcamera/pipeline/ipu3/imgu.h | 124
-rw-r--r-- src/libcamera/pipeline/ipu3/ipu3.cpp | 1767
-rw-r--r-- src/libcamera/pipeline/ipu3/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/mali-c55/mali-c55.cpp | 1755
-rw-r--r-- src/libcamera/pipeline/mali-c55/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/meson.build | 19
-rw-r--r-- src/libcamera/pipeline/rkisp1/meson.build | 6
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1.cpp | 1554
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | 567
-rw-r--r-- src/libcamera/pipeline/rkisp1/rkisp1_path.h | 104
-rw-r--r-- src/libcamera/pipeline/rkisp1/timeline.cpp | 227
-rw-r--r-- src/libcamera/pipeline/rkisp1/timeline.h | 72
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.cpp | 293
-rw-r--r-- src/libcamera/pipeline/rpi/common/delayed_controls.h | 87
-rw-r--r-- src/libcamera/pipeline/rpi/common/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.cpp | 1528
-rw-r--r-- src/libcamera/pipeline/rpi/common/pipeline_base.h | 300
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.cpp | 283
-rw-r--r-- src/libcamera/pipeline/rpi/common/rpi_stream.h | 199
-rw-r--r-- src/libcamera/pipeline/rpi/meson.build | 12
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/example.yaml | 46
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/data/meson.build | 9
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/meson.build | 7
-rw-r--r-- src/libcamera/pipeline/rpi/vc4/vc4.cpp | 1030
-rw-r--r-- src/libcamera/pipeline/simple/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/simple/simple.cpp | 1768
-rw-r--r-- src/libcamera/pipeline/uvcvideo/meson.build | 4
-rw-r--r-- src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | 604
-rw-r--r-- src/libcamera/pipeline/vimc/meson.build | 4
-rw-r--r-- src/libcamera/pipeline/vimc/vimc.cpp | 412
-rw-r--r-- src/libcamera/pipeline/virtual/README.md | 65
-rw-r--r-- src/libcamera/pipeline/virtual/config_parser.cpp | 260
-rw-r--r-- src/libcamera/pipeline/virtual/config_parser.h | 39
-rw-r--r-- src/libcamera/pipeline/virtual/data/virtual.yaml | 36
-rw-r--r-- src/libcamera/pipeline/virtual/frame_generator.h | 29
-rw-r--r-- src/libcamera/pipeline/virtual/image_frame_generator.cpp | 172
-rw-r--r-- src/libcamera/pipeline/virtual/image_frame_generator.h | 49
-rw-r--r-- src/libcamera/pipeline/virtual/meson.build | 13
-rw-r--r-- src/libcamera/pipeline/virtual/test_pattern_generator.cpp | 125
-rw-r--r-- src/libcamera/pipeline/virtual/test_pattern_generator.h | 48
-rw-r--r-- src/libcamera/pipeline/virtual/virtual.cpp | 397
-rw-r--r-- src/libcamera/pipeline/virtual/virtual.h | 61
-rw-r--r-- src/libcamera/pipeline/vivid/meson.build | 5
-rw-r--r-- src/libcamera/pipeline/vivid/vivid.cpp | 409
-rw-r--r-- src/libcamera/pipeline_handler.cpp | 691
-rw-r--r-- src/libcamera/pixel_format.cpp | 155
-rw-r--r-- src/libcamera/pixelformats.cpp | 116
-rw-r--r-- src/libcamera/process.cpp | 100
-rw-r--r-- src/libcamera/property_ids.cpp.in | 43
-rw-r--r-- src/libcamera/property_ids.yaml | 389
-rw-r--r-- src/libcamera/property_ids_core.yaml | 704
-rw-r--r-- src/libcamera/property_ids_draft.yaml | 39
-rw-r--r-- src/libcamera/proxy/ipa_proxy_linux.cpp | 95
-rw-r--r-- src/libcamera/proxy/meson.build | 23
-rw-r--r-- src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp | 90
-rw-r--r-- src/libcamera/proxy/worker/meson.build | 34
-rw-r--r-- src/libcamera/pub_key.cpp | 140
-rw-r--r-- src/libcamera/request.cpp | 485
-rw-r--r-- src/libcamera/semaphore.cpp | 103
-rw-r--r-- src/libcamera/sensor/camera_sensor.cpp | 583
-rw-r--r-- src/libcamera/sensor/camera_sensor_legacy.cpp | 1045
-rw-r--r-- src/libcamera/sensor/camera_sensor_properties.cpp | 473
-rw-r--r-- src/libcamera/sensor/camera_sensor_raw.cpp | 1157
-rw-r--r--src/libcamera/sensor/meson.build8
-rw-r--r--src/libcamera/shared_mem_object.cpp231
-rw-r--r--src/libcamera/signal.cpp179
-rw-r--r--src/libcamera/software_isp/TODO208
-rw-r--r--src/libcamera/software_isp/debayer.cpp127
-rw-r--r--src/libcamera/software_isp/debayer.h54
-rw-r--r--src/libcamera/software_isp/debayer_cpu.cpp835
-rw-r--r--src/libcamera/software_isp/debayer_cpu.h163
-rw-r--r--src/libcamera/software_isp/meson.build15
-rw-r--r--src/libcamera/software_isp/software_isp.cpp370
-rw-r--r--src/libcamera/software_isp/swstats_cpu.cpp434
-rw-r--r--src/libcamera/software_isp/swstats_cpu.h97
-rw-r--r--src/libcamera/source_paths.cpp139
-rw-r--r--src/libcamera/stream.cpp120
-rw-r--r--src/libcamera/sysfs.cpp111
-rw-r--r--src/libcamera/thread.cpp626
-rw-r--r--src/libcamera/timer.cpp185
-rw-r--r--src/libcamera/tracepoints.cpp10
-rw-r--r--src/libcamera/transform.cpp409
-rw-r--r--src/libcamera/utils.cpp374
-rw-r--r--src/libcamera/v4l2_controls.cpp151
-rw-r--r--src/libcamera/v4l2_device.cpp838
-rw-r--r--src/libcamera/v4l2_pixelformat.cpp422
-rw-r--r--src/libcamera/v4l2_subdevice.cpp1656
-rw-r--r--src/libcamera/v4l2_videodevice.cpp1126
-rw-r--r--src/libcamera/version.cpp.in2
-rw-r--r--src/libcamera/yaml_parser.cpp784
-rw-r--r--src/meson.build61
-rwxr-xr-xsrc/py/cam/cam.py472
-rw-r--r--src/py/cam/cam_kms.py184
-rw-r--r--src/py/cam/cam_null.py47
-rw-r--r--src/py/cam/cam_qt.py182
-rw-r--r--src/py/cam/cam_qtgl.py363
-rw-r--r--src/py/cam/gl_helpers.py66
-rw-r--r--src/py/cam/helpers.py158
-rwxr-xr-xsrc/py/examples/simple-cam.py340
-rwxr-xr-xsrc/py/examples/simple-capture.py163
-rwxr-xr-xsrc/py/examples/simple-continuous-capture.py185
-rw-r--r--src/py/libcamera/__init__.py4
-rwxr-xr-xsrc/py/libcamera/gen-py-controls.py111
-rwxr-xr-xsrc/py/libcamera/gen-py-formats.py56
-rw-r--r--src/py/libcamera/meson.build102
-rw-r--r--src/py/libcamera/py_camera_manager.cpp131
-rw-r--r--src/py/libcamera/py_camera_manager.h45
-rw-r--r--src/py/libcamera/py_color_space.cpp72
-rw-r--r--src/py/libcamera/py_controls_generated.cpp.in47
-rw-r--r--src/py/libcamera/py_enums.cpp47
-rw-r--r--src/py/libcamera/py_formats_generated.cpp.in27
-rw-r--r--src/py/libcamera/py_geometry.cpp121
-rw-r--r--src/py/libcamera/py_helpers.cpp101
-rw-r--r--src/py/libcamera/py_helpers.h13
-rw-r--r--src/py/libcamera/py_main.cpp523
-rw-r--r--src/py/libcamera/py_main.h24
-rw-r--r--src/py/libcamera/py_transform.cpp83
-rw-r--r--src/py/libcamera/utils/MappedFrameBuffer.py105
-rw-r--r--src/py/libcamera/utils/__init__.py4
-rw-r--r--src/py/meson.build3
-rw-r--r--src/qcam/assets/feathericons/README.md5
-rw-r--r--src/qcam/assets/feathericons/feathericons.qrc9
-rw-r--r--src/qcam/format_converter.cpp275
-rw-r--r--src/qcam/format_converter.h59
-rw-r--r--src/qcam/main.cpp82
-rw-r--r--src/qcam/main_window.cpp554
-rw-r--r--src/qcam/main_window.h105
-rw-r--r--src/qcam/meson.build44
-rw-r--r--src/qcam/viewfinder.cpp179
-rw-r--r--src/qcam/viewfinder.h71
-rwxr-xr-xsrc/v4l2/libcamerify.in47
-rw-r--r--src/v4l2/meson.build27
-rw-r--r--src/v4l2/v4l2_camera.cpp157
-rw-r--r--src/v4l2/v4l2_camera.h87
-rw-r--r--src/v4l2/v4l2_camera_file.cpp52
-rw-r--r--src/v4l2/v4l2_camera_file.h40
-rw-r--r--src/v4l2/v4l2_camera_proxy.cpp869
-rw-r--r--src/v4l2/v4l2_camera_proxy.h113
-rw-r--r--src/v4l2/v4l2_compat.cpp93
-rw-r--r--src/v4l2/v4l2_compat_manager.cpp129
-rw-r--r--src/v4l2/v4l2_compat_manager.h23
954 files changed, 113049 insertions, 17625 deletions
diff --git a/src/android/camera3_hal.cpp b/src/android/camera3_hal.cpp
index d6fc1ecc..a5ad2374 100644
--- a/src/android/camera3_hal.cpp
+++ b/src/android/camera3_hal.cpp
@@ -1,48 +1,52 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * camera3_hal.cpp - Android Camera HALv3 module
+ * Android Camera HALv3 module
*/
#include <hardware/camera_common.h>
+#include <libcamera/base/log.h>
+
#include "camera_device.h"
#include "camera_hal_manager.h"
-#include "log.h"
using namespace libcamera;
LOG_DEFINE_CATEGORY(HAL)
-static CameraHalManager cameraManager;
-
/*------------------------------------------------------------------------------
* Android Camera HAL callbacks
*/
-static int hal_get_number_of_cameras(void)
+static int hal_get_number_of_cameras()
{
- return cameraManager.numCameras();
+ return CameraHalManager::instance()->numCameras();
}
static int hal_get_camera_info(int id, struct camera_info *info)
{
- return cameraManager.getCameraInfo(id, info);
+ return CameraHalManager::instance()->getCameraInfo(id, info);
}
static int hal_set_callbacks(const camera_module_callbacks_t *callbacks)
{
+ CameraHalManager::instance()->setCallbacks(callbacks);
+
return 0;
}
-static int hal_open_legacy(const struct hw_module_t *module, const char *id,
- uint32_t halVersion, struct hw_device_t **device)
+static int hal_open_legacy([[maybe_unused]] const struct hw_module_t *module,
+ [[maybe_unused]] const char *id,
+ [[maybe_unused]] uint32_t halVersion,
+ [[maybe_unused]] struct hw_device_t **device)
{
return -ENOSYS;
}
-static int hal_set_torch_mode(const char *camera_id, bool enabled)
+static int hal_set_torch_mode([[maybe_unused]] const char *camera_id,
+ [[maybe_unused]] bool enabled)
{
return -ENOSYS;
}
@@ -56,7 +60,7 @@ static int hal_init()
{
LOG(HAL, Info) << "Initialising Android camera HAL";
- cameraManager.init();
+ CameraHalManager::instance()->init();
return 0;
}
@@ -71,11 +75,12 @@ static int hal_dev_open(const hw_module_t *module, const char *name,
LOG(HAL, Debug) << "Open camera " << name;
int id = atoi(name);
- CameraDevice *camera = cameraManager.open(id, module);
+
+ auto [camera, ret] = CameraHalManager::instance()->open(id, module);
if (!camera) {
LOG(HAL, Error)
<< "Failed to open camera module '" << id << "'";
- return -ENODEV;
+ return ret == -EBUSY ? -EUSERS : ret;
}
*device = &camera->camera3Device()->common;
diff --git a/src/android/camera_buffer.h b/src/android/camera_buffer.h
new file mode 100644
index 00000000..96669962
--- /dev/null
+++ b/src/android/camera_buffer.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Frame buffer handling interface definition
+ */
+
+#pragma once
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/span.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+class CameraBuffer final : public libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ CameraBuffer(buffer_handle_t camera3Buffer,
+ libcamera::PixelFormat pixelFormat,
+ const libcamera::Size &size, int flags);
+ ~CameraBuffer();
+
+ bool isValid() const;
+
+ unsigned int numPlanes() const;
+
+ libcamera::Span<const uint8_t> plane(unsigned int plane) const;
+ libcamera::Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+};
+
+#define PUBLIC_CAMERA_BUFFER_IMPLEMENTATION \
+CameraBuffer::CameraBuffer(buffer_handle_t camera3Buffer, \
+ libcamera::PixelFormat pixelFormat, \
+ const libcamera::Size &size, int flags) \
+ : Extensible(std::make_unique<Private>(this, camera3Buffer, \
+ pixelFormat, size, \
+ flags)) \
+{ \
+} \
+CameraBuffer::~CameraBuffer() \
+{ \
+} \
+bool CameraBuffer::isValid() const \
+{ \
+ return _d()->isValid(); \
+} \
+unsigned int CameraBuffer::numPlanes() const \
+{ \
+ return _d()->numPlanes(); \
+} \
+Span<const uint8_t> CameraBuffer::plane(unsigned int plane) const \
+{ \
+ return const_cast<Private *>(_d())->plane(plane); \
+} \
+Span<uint8_t> CameraBuffer::plane(unsigned int plane) \
+{ \
+ return _d()->plane(plane); \
+} \
+unsigned int CameraBuffer::stride(unsigned int plane) const \
+{ \
+ return _d()->stride(plane); \
+} \
+unsigned int CameraBuffer::offset(unsigned int plane) const \
+{ \
+ return _d()->offset(plane); \
+} \
+unsigned int CameraBuffer::size(unsigned int plane) const \
+{ \
+ return _d()->size(plane); \
+} \
+size_t CameraBuffer::jpegBufferSize(size_t maxJpegBufferSize) const \
+{ \
+ return _d()->jpegBufferSize(maxJpegBufferSize); \
+}
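+
+/*
+ * Usage sketch (illustrative, not part of this header): each memory backend
+ * under src/android/mm/ defines the CameraBuffer::Private class and then
+ * instantiates the macro once to emit the public forwarding methods:
+ *
+ * class CameraBuffer::Private : public libcamera::Extensible::Private
+ * {
+ * LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+ * public:
+ * Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+ * libcamera::PixelFormat pixelFormat,
+ * const libcamera::Size &size, int flags);
+ * bool isValid() const;
+ * (and so on for the remaining accessors)
+ * };
+ *
+ * PUBLIC_CAMERA_BUFFER_IMPLEMENTATION
+ */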
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
new file mode 100644
index 00000000..b161bc6b
--- /dev/null
+++ b/src/android/camera_capabilities.cpp
@@ -0,0 +1,1644 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera static properties manager
+ */
+
+#include "camera_capabilities.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <map>
+#include <stdint.h>
+#include <type_traits>
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/formats.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+
+/*
+ * \var camera3Resolutions
+ * \brief The list of image resolutions commonly supported by Android
+ *
+ * The Android Camera3 specification defines the following resolutions as
+ * mandatory: (320x240), (640x480), (1280x720), (1920x1080).
+ *
+ * The following 4:3 resolutions are defined as optional, but are commonly
+ * supported by Android devices: (1280x960), (1600x1200).
+ */
+const std::vector<Size> camera3Resolutions = {
+ { 320, 240 },
+ { 640, 480 },
+ { 1280, 720 },
+ { 1280, 960 },
+ { 1600, 1200 },
+ { 1920, 1080 }
+};
+
+/*
+ * \struct Camera3Format
+ * \brief Data associated with an Android format identifier
+ * \var libcameraFormats List of libcamera pixel formats compatible with the
+ * Android format
+ * \var mandatory True if the format is mandatory for Android Camera3 support
+ * \var name The human-readable representation of the Android format code
+ */
+struct Camera3Format {
+ std::vector<PixelFormat> libcameraFormats;
+ bool mandatory;
+ const char *name;
+};
+
+/*
+ * \var camera3FormatsMap
+ * \brief Associate Android format code with ancillary data
+ */
+const std::map<int, const Camera3Format> camera3FormatsMap = {
+ {
+ HAL_PIXEL_FORMAT_BLOB, {
+ { formats::MJPEG },
+ true,
+ "BLOB"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_YCbCr_420_888, {
+ { formats::NV12, formats::NV21 },
+ true,
+ "YCbCr_420_888"
+ }
+ }, {
+ /*
+ * \todo Translate IMPLEMENTATION_DEFINED by inspecting the gralloc
+ * usage flags. For now, copy the YCbCr_420 configuration.
+ */
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, {
+ { formats::NV12, formats::NV21 },
+ true,
+ "IMPLEMENTATION_DEFINED"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW10, {
+ {
+ formats::SBGGR10_CSI2P,
+ formats::SGBRG10_CSI2P,
+ formats::SGRBG10_CSI2P,
+ formats::SRGGB10_CSI2P
+ },
+ false,
+ "RAW10"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW12, {
+ {
+ formats::SBGGR12_CSI2P,
+ formats::SGBRG12_CSI2P,
+ formats::SGRBG12_CSI2P,
+ formats::SRGGB12_CSI2P
+ },
+ false,
+ "RAW12"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW16, {
+ {
+ formats::SBGGR16,
+ formats::SGBRG16,
+ formats::SGRBG16,
+ formats::SRGGB16
+ },
+ false,
+ "RAW16"
+ }
+ },
+};
+
+const std::map<camera_metadata_enum_android_info_supported_hardware_level, std::string>
+hwLevelStrings = {
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED, "LIMITED" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL, "FULL" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY, "LEGACY" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3, "LEVEL_3" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL, "EXTERNAL" },
+};
+
+enum class ControlRange {
+ Min,
+ Def,
+ Max,
+};
+
+/**
+ * \brief Set Android metadata from libcamera ControlInfo or a default value
+ * \tparam T Type of the control in libcamera
+ * \tparam U Type of the metadata in Android
+ * \param[in] metadata Android metadata pack to add the control value to
+ * \param[in] tag Android metadata tag
+ * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info
+ * \param[in] control libcamera ControlId to find from \a controlsInfo
+ * \param[in] controlRange Whether to use the min, def, or max value from the control info
+ * \param[in] defaultValue The value to set in \a metadata if \a control is not found
+ *
+ * Set the Android metadata entry in \a metadata with tag \a tag based on the
+ * control info found for the libcamera control \a control in the libcamera
+ * ControlInfoMap \a controlsInfo. If no libcamera ControlInfo is found, then
+ * the Android metadata entry is set to \a defaultValue.
+ *
+ * This function is for scalar values.
+ */
+template<typename T, typename U>
+U setMetadata(CameraMetadata *metadata, uint32_t tag,
+ const ControlInfoMap &controlsInfo, const Control<T> &control,
+ enum ControlRange controlRange, const U defaultValue)
+{
+ U value = defaultValue;
+
+ const auto &info = controlsInfo.find(&control);
+ if (info != controlsInfo.end()) {
+ switch (controlRange) {
+ case ControlRange::Min:
+ value = static_cast<U>(info->second.min().template get<T>());
+ break;
+ case ControlRange::Def:
+ value = static_cast<U>(info->second.def().template get<T>());
+ break;
+ case ControlRange::Max:
+ value = static_cast<U>(info->second.max().template get<T>());
+ break;
+ }
+ }
+
+ metadata->addEntry(tag, value);
+ return value;
+}
+
+/**
+ * \brief Set Android metadata from libcamera ControlInfo or a default value
+ * \tparam T Type of the control in libcamera
+ * \tparam U Type of the metadata in Android
+ * \param[in] metadata Android metadata pack to add the control value to
+ * \param[in] tag Android metadata tag
+ * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info
+ * \param[in] control libcamera ControlId to find from \a controlsInfo
+ * \param[in] defaultVector The value to set in \a metadata if \a control is not found
+ *
+ * Set the Android metadata entry in \a metadata with tag \a tag based on the
+ * control info found for the libcamera control \a control in the libcamera
+ * ControlInfoMap \a controlsInfo. If no libcamera ControlInfo is found, then
+ * the Android metadata entry is set to \a defaultVector.
+ *
+ * This function is for vector values.
+ */
+template<typename T, typename U>
+std::vector<U> setMetadata(CameraMetadata *metadata, uint32_t tag,
+ const ControlInfoMap &controlsInfo,
+ const Control<T> &control,
+ const std::vector<U> &defaultVector)
+{
+ const auto &info = controlsInfo.find(&control);
+ if (info == controlsInfo.end()) {
+ metadata->addEntry(tag, defaultVector);
+ return defaultVector;
+ }
+
+ std::vector<U> values;
+ values.reserve(info->second.values().size());
+ for (const auto &value : info->second.values())
+ values.push_back(static_cast<U>(value.template get<T>()));
+ metadata->addEntry(tag, values);
+
+ return values;
+}
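+
+/*
+ * Usage sketch: the scalar overload is used below in the sync static
+ * metadata section, defaulting to UNKNOWN when the camera does not report
+ * controls::draft::MaxLatency:
+ *
+ * setMetadata(staticMetadata_.get(), ANDROID_SYNC_MAX_LATENCY,
+ * controlsInfo, controls::draft::MaxLatency,
+ * ControlRange::Def, ANDROID_SYNC_MAX_LATENCY_UNKNOWN);
+ *
+ * The vector overload follows the same pattern for controls that enumerate
+ * their supported values.
+ */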
+
+} /* namespace */
+
+bool CameraCapabilities::validateManualSensorCapability()
+{
+ const char *noMode = "Manual sensor capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ ANDROID_CONTROL_AE_MODE_OFF)) {
+ LOG(HAL, Info) << noMode << "missing AE mode off";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AE lock";
+ return false;
+ }
+
+ /*
+ * \todo Return true here after we satisfy all the requirements:
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR
+ * Manual frame duration control
+ * android.sensor.frameDuration
+ * android.sensor.info.maxFrameDuration
+ * Manual exposure control
+ * android.sensor.exposureTime
+ * android.sensor.info.exposureTimeRange
+ * Manual sensitivity control
+ * android.sensor.sensitivity
+ * android.sensor.info.sensitivityRange
+ * Manual lens control (if the lens is adjustable)
+ * android.lens.*
+ * Manual flash control (if a flash unit is present)
+ * android.flash.*
+ * Manual black level locking
+ * android.blackLevel.lock
+ * Auto exposure lock
+ * android.control.aeLock
+ */
+ return false;
+}
+
+bool CameraCapabilities::validateManualPostProcessingCapability()
+{
+ const char *noMode = "Manual post processing capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ ANDROID_CONTROL_AWB_MODE_OFF)) {
+ LOG(HAL, Info) << noMode << "missing AWB mode off";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AWB lock";
+ return false;
+ }
+
+ /*
+ * \todo Return true here after we satisfy all the requirements:
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING
+ * Manual tonemap control
+ * android.tonemap.curve
+ * android.tonemap.mode
+ * android.tonemap.maxCurvePoints
+ * android.tonemap.gamma
+ * android.tonemap.presetCurve
+ * Manual white balance control
+ * android.colorCorrection.transform
+ * android.colorCorrection.gains
+ * Manual lens shading map control
+ * android.shading.mode
+ * android.statistics.lensShadingMapMode
+ * android.statistics.lensShadingMap
+ * android.lens.info.shadingMapSize
+ * Manual aberration correction control (if aberration correction is supported)
+ * android.colorCorrection.aberrationMode
+ * android.colorCorrection.availableAberrationModes
+ * Auto white balance lock
+ * android.control.awbLock
+ */
+ return false;
+}
+
+bool CameraCapabilities::validateBurstCaptureCapability()
+{
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
+ const char *noMode = "Burst capture capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AE lock";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AWB lock";
+ return false;
+ }
+
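+ /*
+ * BURST_CAPTURE requires android.sync.maxLatency to be no larger than
+ * 4 frames, with 0 meaning per-frame control; the check below rejects
+ * anything outside that range.
+ */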
+ found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry);
+ if (!found || *entry.data.i32 < 0 || 4 < *entry.data.i32) {
+ LOG(HAL, Info)
+ << noMode << "max sync latency is "
+ << (found ? std::to_string(*entry.data.i32) : "not present");
+ return false;
+ }
+
+ /*
+ * \todo Return true here after we satisfy all the requirements:
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE
+ */
+ return false;
+}
+
+std::set<camera_metadata_enum_android_request_available_capabilities>
+CameraCapabilities::computeCapabilities()
+{
+ std::set<camera_metadata_enum_android_request_available_capabilities>
+ capabilities;
+
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+
+ if (validateManualSensorCapability()) {
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR);
+ /* The requirements for READ_SENSOR_SETTINGS are a subset of MANUAL_SENSOR */
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
+ }
+
+ if (validateManualPostProcessingCapability())
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING);
+
+ if (validateBurstCaptureCapability())
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
+
+ if (rawStreamAvailable_)
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW);
+
+ return capabilities;
+}
+
+void CameraCapabilities::computeHwLevel(
+ const std::set<camera_metadata_enum_android_request_available_capabilities> &caps)
+{
+ const char *noFull = "Hardware level FULL unavailable: ";
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
+ camera_metadata_enum_android_info_supported_hardware_level
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL;
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) {
+ LOG(HAL, Info) << noFull << "missing manual sensor";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING)) {
+ LOG(HAL, Info) << noFull << "missing manual post processing";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE)) {
+ LOG(HAL, Info) << noFull << "missing burst capture";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry);
+ if (!found || *entry.data.i32 != 0) {
+ LOG(HAL, Info) << noFull << "missing or invalid max sync latency";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ hwLevel_ = hwLevel;
+}
+
+int CameraCapabilities::initialize(std::shared_ptr<Camera> camera,
+ int orientation, int facing)
+{
+ camera_ = camera;
+ orientation_ = orientation;
+ facing_ = facing;
+ rawStreamAvailable_ = false;
+ maxFrameDuration_ = 0;
+
+ /* Acquire the camera and initialize available stream configurations. */
+ int ret = camera_->acquire();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to temporarily acquire the camera";
+ return ret;
+ }
+
+ ret = initializeStreamConfigurations();
+ if (ret) {
+ camera_->release();
+ return ret;
+ }
+
+ ret = initializeStaticMetadata();
+ camera_->release();
+ return ret;
+}
+
+std::vector<Size>
+CameraCapabilities::initializeYUVResolutions(const PixelFormat &pixelFormat,
+ const std::vector<Size> &resolutions)
+{
+ std::vector<Size> supportedResolutions;
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Viewfinder });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get supported YUV resolutions";
+ return supportedResolutions;
+ }
+
+ StreamConfiguration &cfg = cameraConfig->at(0);
+
+ for (const Size &res : resolutions) {
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = res;
+
+ CameraConfiguration::Status status = cameraConfig->validate();
+ if (status != CameraConfiguration::Valid) {
+ LOG(HAL, Debug) << cfg.toString() << " not supported";
+ continue;
+ }
+
+ LOG(HAL, Debug) << cfg.toString() << " supported";
+
+ supportedResolutions.push_back(res);
+ }
+
+ return supportedResolutions;
+}
+
+std::vector<Size>
+CameraCapabilities::initializeRawResolutions(const PixelFormat &pixelFormat)
+{
+ std::vector<Size> supportedResolutions;
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Raw });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get supported Raw resolutions";
+ return supportedResolutions;
+ }
+
+ StreamConfiguration &cfg = cameraConfig->at(0);
+ const StreamFormats &formats = cfg.formats();
+ supportedResolutions = formats.sizes(pixelFormat);
+
+ return supportedResolutions;
+}
+
+/*
+ * Initialize the format conversion map to translate from Android format
+ * identifier to libcamera pixel formats and fill in the list of supported
+ * stream configurations to be reported to the Android camera framework through
+ * the camera static metadata.
+ */
+int CameraCapabilities::initializeStreamConfigurations()
+{
+ /*
+ * Get the maximum output resolutions
+ * \todo Get this from the camera properties once defined
+ */
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::StillCapture });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get maximum resolution";
+ return -EINVAL;
+ }
+ StreamConfiguration &cfg = cameraConfig->at(0);
+
+ /*
+ * \todo JPEG - Adjust the maximum available resolution by taking the
+ * JPEG encoder requirements into account (alignment and aspect ratio).
+ */
+ const Size maxRes = cfg.size;
+ LOG(HAL, Debug) << "Maximum supported resolution: " << maxRes;
+
+ /*
+ * Build the list of supported image resolutions.
+ *
+ * The resolutions listed in camera3Resolutions are supported, up to the
+ * camera maximum resolution.
+ *
+ * Augment the list with resolutions derived from the camera's
+ * maximum resolution.
+ */
+ std::vector<Size> cameraResolutions;
+ std::copy_if(camera3Resolutions.begin(), camera3Resolutions.end(),
+ std::back_inserter(cameraResolutions),
+ [&](const Size &res) { return res < maxRes; });
+
+ /*
+ * The Camera3 specification suggests adding 1/2 and 1/4 of the maximum
+ * resolution.
+ */
+ for (unsigned int divider = 2;; divider <<= 1) {
+ Size derivedSize{
+ maxRes.width / divider,
+ maxRes.height / divider,
+ };
+
+ if (derivedSize.width < 320 ||
+ derivedSize.height < 240)
+ break;
+
+ cameraResolutions.push_back(derivedSize);
+ }
+ cameraResolutions.push_back(maxRes);
+
+ /* Remove duplicated entries from the list of supported resolutions. */
+ std::sort(cameraResolutions.begin(), cameraResolutions.end());
+ auto last = std::unique(cameraResolutions.begin(), cameraResolutions.end());
+ cameraResolutions.erase(last, cameraResolutions.end());
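+
+ /*
+ * Worked example (hypothetical 2592x1944 sensor): the halving loop
+ * above derives 1296x972, 648x486 and 324x243; the next division
+ * (162x121) falls below the 320x240 bound and stops the loop.
+ * Together with the camera3Resolutions entries smaller than the
+ * maximum and the maximum itself, sorting and deduplication yield
+ * the final candidate list.
+ */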
+
+ /*
+ * Build the list of supported camera formats.
+ *
+ * Each Android format is associated with a list of compatible
+ * libcamera formats. The first libcamera format that validates
+ * successfully is added to the format translation map used when
+ * configuring the streams. It is then tested against the list of
+ * supported camera resolutions to build the stream configuration map
+ * reported through the camera static metadata.
+ */
+ Size maxJpegSize;
+ for (const auto &format : camera3FormatsMap) {
+ int androidFormat = format.first;
+ const Camera3Format &camera3Format = format.second;
+ const std::vector<PixelFormat> &libcameraFormats =
+ camera3Format.libcameraFormats;
+
+ LOG(HAL, Debug) << "Trying to map Android format "
+ << camera3Format.name;
+
+ /*
+ * JPEG is always supported, either produced directly by the
+ * camera, or encoded in the HAL.
+ */
+ if (androidFormat == HAL_PIXEL_FORMAT_BLOB) {
+ formatsMap_[androidFormat] = formats::MJPEG;
+ LOG(HAL, Debug) << "Mapped Android format "
+ << camera3Format.name << " to "
+ << formats::MJPEG
+ << " (fixed mapping)";
+ continue;
+ }
+
+ /*
+ * Test the libcamera formats that can produce images
+ * compatible with the format defined by Android.
+ */
+ PixelFormat mappedFormat;
+ for (const PixelFormat &pixelFormat : libcameraFormats) {
+
+ LOG(HAL, Debug) << "Testing " << pixelFormat;
+
+ /*
+ * The stream configuration size can be adjusted,
+ * not the pixel format.
+ *
+ * \todo This could be simplified once all pipeline
+ * handlers report the StreamFormats list of supported
+ * formats.
+ */
+ cfg.pixelFormat = pixelFormat;
+
+ CameraConfiguration::Status status = cameraConfig->validate();
+ if (status != CameraConfiguration::Invalid &&
+ cfg.pixelFormat == pixelFormat) {
+ mappedFormat = pixelFormat;
+ break;
+ }
+ }
+
+ if (!mappedFormat.isValid()) {
+ /* If the format is not mandatory, skip it. */
+ if (!camera3Format.mandatory)
+ continue;
+
+ LOG(HAL, Error)
+ << "Failed to map mandatory Android format "
+ << camera3Format.name << " ("
+ << utils::hex(androidFormat) << "): aborting";
+ return -EINVAL;
+ }
+
+ /*
+ * Record the mapping and then proceed to generate the
+ * stream configurations map, by testing the image resolutions.
+ */
+ formatsMap_[androidFormat] = mappedFormat;
+ LOG(HAL, Debug) << "Mapped Android format "
+ << camera3Format.name << " to "
+ << mappedFormat;
+
+ std::vector<Size> resolutions;
+ const PixelFormatInfo &info = PixelFormatInfo::info(mappedFormat);
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ if (info.bitsPerPixel != 16)
+ continue;
+
+ rawStreamAvailable_ = true;
+ resolutions = initializeRawResolutions(mappedFormat);
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * We support enumerating RGB streams here to allow
+ * mapping IMPLEMENTATION_DEFINED format to RGB.
+ */
+ resolutions = initializeYUVResolutions(mappedFormat,
+ cameraResolutions);
+ break;
+ }
+
+ for (const Size &res : resolutions) {
+ /*
+ * Configure the Camera with the collected format and
+ * resolution to get an updated list of controls.
+ *
+ * \todo Avoid the need to configure the camera when
+ * redesigning the configuration API.
+ */
+ cfg.size = res;
+ int ret = camera_->configure(cameraConfig.get());
+ if (ret)
+ return ret;
+
+ const ControlInfoMap &controls = camera_->controls();
+ const auto frameDurations = controls.find(
+ &controls::FrameDurationLimits);
+ if (frameDurations == controls.end()) {
+ LOG(HAL, Error)
+ << "Camera does not report frame durations";
+ return -EINVAL;
+ }
+
+ int64_t minFrameDuration = frameDurations->second.min().get<int64_t>() * 1000;
+ int64_t maxFrameDuration = frameDurations->second.max().get<int64_t>() * 1000;
+
+ /*
+ * Cap min frame duration to 30 FPS with 1% tolerance.
+ *
+ * 30 frames per second has been validated as the most
+ * suitable frame rate for quality tuning and for the
+ * power versus performance budget on Intel IPU3-based
+ * Chromebooks.
+ *
+ * \todo This is a platform-specific decision that needs
+ * to be abstracted and delegated to the configuration
+ * file.
+ *
+ * \todo libcamera only allows controlling the frame
+ * duration through the per-request
+ * controls::FrameDuration control. If we cap the
+ * durations here, we should be able to configure the
+ * camera to operate at that duration without requiring
+ * the FrameDuration control to be specified for each
+ * Request. Defer this to the in-development
+ * configuration API rework.
+ */
+ int64_t minFrameDurationCap = 1e9 / 30.0;
+ if (minFrameDuration < minFrameDurationCap) {
+ float tolerance =
+ (minFrameDurationCap - minFrameDuration) * 100.0 / minFrameDurationCap;
+
+ /*
+ * If the tolerance is less than 1%, do not cap
+ * the frame duration.
+ */
+ if (tolerance > 1.0)
+ minFrameDuration = minFrameDurationCap;
+ }
+
+ /*
+ * Calculate FPS as CTS does and adjust the minimum
+ * frame duration accordingly: see
+ * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
+ */
+ minFrameDuration =
+ 1e9 / static_cast<unsigned int>(floor(1e9 / minFrameDuration + 0.05f));
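+
+ /*
+ * Example with hypothetical numbers: a duration of
+ * 33,366,600 ns is ~29.97 FPS; floor(29.97 + 0.05)
+ * rounds it to 30 FPS, so the duration becomes
+ * 1e9 / 30 = 33,333,333 ns, matching the CTS
+ * computation.
+ */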
+
+ streamConfigurations_.push_back({
+ res, androidFormat, minFrameDuration, maxFrameDuration,
+ });
+
+ /*
+ * If the format is HAL_PIXEL_FORMAT_YCbCr_420_888
+ * from which JPEG is produced, add an entry for
+ * the JPEG stream.
+ *
+ * \todo Wire the JPEG encoder to query the supported
+ * sizes provided a list of formats it can encode.
+ *
+ * \todo Support JPEG streams produced by the camera
+ * natively.
+ *
+ * \todo HAL_PIXEL_FORMAT_BLOB is a 'stalling' format;
+ * its duration should take into account the time
+ * required for the YUV to JPEG encoding. For now
+ * use the same frame durations as collected for
+ * the YUV/RGB streams.
+ */
+ if (androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ streamConfigurations_.push_back({
+ res, HAL_PIXEL_FORMAT_BLOB,
+ minFrameDuration, maxFrameDuration,
+ });
+ maxJpegSize = std::max(maxJpegSize, res);
+ }
+
+ maxFrameDuration_ = std::max(maxFrameDuration_,
+ maxFrameDuration);
+ }
+
+ /*
+ * \todo Calculate the maximum JPEG buffer size by asking the
+ * encoder, given the maximum frame size required.
+ */
+ maxJpegBufferSize_ = maxJpegSize.width * maxJpegSize.height * 1.5;
+ }
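+
+ /*
+ * The 1.5 bytes-per-pixel factor above corresponds to the NV12 frame
+ * size. For a hypothetical 4096x3072 maximum JPEG resolution this
+ * yields a buffer of about 18.9 MB, a conservative upper bound for
+ * the compressed output plus the JPEG blob trailer.
+ */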
+
+ LOG(HAL, Debug) << "Collected stream configuration map: ";
+ for (const auto &entry : streamConfigurations_)
+ LOG(HAL, Debug) << "{ " << entry.resolution << " - "
+ << utils::hex(entry.androidFormat) << " }";
+
+ return 0;
+}
+
+int CameraCapabilities::initializeStaticMetadata()
+{
+ staticMetadata_ = std::make_unique<CameraMetadata>(64, 1024);
+ if (!staticMetadata_->isValid()) {
+ LOG(HAL, Error) << "Failed to allocate static metadata";
+ staticMetadata_.reset();
+ return -EINVAL;
+ }
+
+ /*
+ * Generate and apply a new configuration for the Viewfinder role to
+ * collect control limits and properties from a known state.
+ */
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Viewfinder });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to generate camera configuration";
+ staticMetadata_.reset();
+ return -ENODEV;
+ }
+
+ int ret = camera_->configure(cameraConfig.get());
+ if (ret) {
+ LOG(HAL, Error) << "Failed to initialize the camera state";
+ staticMetadata_.reset();
+ return ret;
+ }
+
+ const ControlInfoMap &controlsInfo = camera_->controls();
+ const ControlList &properties = camera_->properties();
+
+ availableCharacteristicsKeys_ = {
+ ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ ANDROID_CONTROL_AVAILABLE_MODES,
+ ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_MAX_REGIONS,
+ ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+ ANDROID_FLASH_INFO_AVAILABLE,
+ ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+ ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ ANDROID_JPEG_MAX_SIZE,
+ ANDROID_LENS_FACING,
+ ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+ ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+ ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+ ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+ ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+ ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+ ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
+ ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ ANDROID_SCALER_CROPPING_TYPE,
+ ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+ ANDROID_SENSOR_ORIENTATION,
+ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ ANDROID_SYNC_MAX_LATENCY,
+ };
+
+ availableRequestKeys_ = {
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ ANDROID_CONTROL_AE_LOCK,
+ ANDROID_CONTROL_AE_MODE,
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ ANDROID_CONTROL_AF_MODE,
+ ANDROID_CONTROL_AF_TRIGGER,
+ ANDROID_CONTROL_AWB_LOCK,
+ ANDROID_CONTROL_AWB_MODE,
+ ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_EFFECT_MODE,
+ ANDROID_CONTROL_MODE,
+ ANDROID_CONTROL_SCENE_MODE,
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ ANDROID_FLASH_MODE,
+ ANDROID_JPEG_ORIENTATION,
+ ANDROID_JPEG_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_SIZE,
+ ANDROID_LENS_APERTURE,
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_SCALER_CROP_REGION,
+ ANDROID_STATISTICS_FACE_DETECT_MODE
+ };
+
+ availableResultKeys_ = {
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ ANDROID_CONTROL_AE_LOCK,
+ ANDROID_CONTROL_AE_MODE,
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ ANDROID_CONTROL_AE_STATE,
+ ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ ANDROID_CONTROL_AF_MODE,
+ ANDROID_CONTROL_AF_STATE,
+ ANDROID_CONTROL_AF_TRIGGER,
+ ANDROID_CONTROL_AWB_LOCK,
+ ANDROID_CONTROL_AWB_MODE,
+ ANDROID_CONTROL_AWB_STATE,
+ ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_EFFECT_MODE,
+ ANDROID_CONTROL_MODE,
+ ANDROID_CONTROL_SCENE_MODE,
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ ANDROID_FLASH_MODE,
+ ANDROID_FLASH_STATE,
+ ANDROID_JPEG_GPS_COORDINATES,
+ ANDROID_JPEG_GPS_PROCESSING_METHOD,
+ ANDROID_JPEG_GPS_TIMESTAMP,
+ ANDROID_JPEG_ORIENTATION,
+ ANDROID_JPEG_QUALITY,
+ ANDROID_JPEG_SIZE,
+ ANDROID_JPEG_THUMBNAIL_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_SIZE,
+ ANDROID_LENS_APERTURE,
+ ANDROID_LENS_FOCAL_LENGTH,
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ ANDROID_LENS_STATE,
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_REQUEST_PIPELINE_DEPTH,
+ ANDROID_SCALER_CROP_REGION,
+ ANDROID_SENSOR_EXPOSURE_TIME,
+ ANDROID_SENSOR_FRAME_DURATION,
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
+ ANDROID_SENSOR_TEST_PATTERN_MODE,
+ ANDROID_SENSOR_TIMESTAMP,
+ ANDROID_STATISTICS_FACE_DETECT_MODE,
+ ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE,
+ ANDROID_STATISTICS_SCENE_FLICKER,
+ };
+
+ /* Color correction static metadata. */
+ {
+ std::vector<uint8_t> data;
+ data.reserve(3);
+ const auto &infoMap = controlsInfo.find(&controls::draft::ColorCorrectionAberrationMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+ data);
+ }
+
+ /* Control static metadata. */
+ std::vector<uint8_t> aeAvailableAntiBandingModes = {
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ aeAvailableAntiBandingModes);
+
+ std::vector<uint8_t> aeAvailableModes = {
+ ANDROID_CONTROL_AE_MODE_ON,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ aeAvailableModes);
+
+ std::vector<int32_t> aeCompensationRange = {
+ 0, 0,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ aeCompensationRange);
+
+ const camera_metadata_rational_t aeCompensationStep[] = {
+ { 0, 1 }
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ aeCompensationStep);
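+
+ /*
+ * A {0, 0} compensation range combined with a 0/1 step advertises
+ * that exposure compensation is not adjustable.
+ */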
+
+ std::vector<uint8_t> availableAfModes = {
+ ANDROID_CONTROL_AF_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModes);
+
+ std::vector<uint8_t> availableEffects = {
+ ANDROID_CONTROL_EFFECT_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ availableEffects);
+
+ std::vector<uint8_t> availableSceneModes = {
+ ANDROID_CONTROL_SCENE_MODE_DISABLED,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ availableSceneModes);
+
+ std::vector<uint8_t> availableStabilizationModes = {
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ availableStabilizationModes);
+
+ /*
+ * \todo Inspect the camera capabilities to report the available
+ * AWB modes. Default to AUTO as CTS tests require it.
+ */
+ std::vector<uint8_t> availableAwbModes = {
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ availableAwbModes);
+
+ std::vector<int32_t> availableMaxRegions = {
+ 0, 0, 0,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS,
+ availableMaxRegions);
+
+ std::vector<uint8_t> sceneModesOverride = {
+ ANDROID_CONTROL_AE_MODE_ON,
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ ANDROID_CONTROL_AF_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+ sceneModesOverride);
+
+ uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ aeLockAvailable);
+
+ uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ awbLockAvailable);
+
+ uint8_t availableControlModes = ANDROID_CONTROL_MODE_AUTO;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES,
+ availableControlModes);
+
+ /* JPEG static metadata. */
+
+ /*
+ * Create the list of supported thumbnail sizes by inspecting the
+ * available JPEG resolutions collected in streamConfigurations_ and
+ * generating one entry for each aspect ratio.
+ *
+ * The JPEG thumbnailer can freely scale, so pick an arbitrary
+ * (160, 160) size as the bounding rectangle, which is then cropped to
+ * the different supported aspect ratios.
+ */
+ constexpr Size maxJpegThumbnail(160, 160);
+ std::vector<Size> thumbnailSizes;
+ thumbnailSizes.push_back({ 0, 0 });
+ for (const auto &entry : streamConfigurations_) {
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB)
+ continue;
+
+ Size thumbnailSize = maxJpegThumbnail
+ .boundedToAspectRatio({ entry.resolution.width,
+ entry.resolution.height });
+ thumbnailSizes.push_back(thumbnailSize);
+ }
+
+ std::sort(thumbnailSizes.begin(), thumbnailSizes.end());
+ auto last = std::unique(thumbnailSizes.begin(), thumbnailSizes.end());
+ thumbnailSizes.erase(last, thumbnailSizes.end());
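+
+ /*
+ * For example, the 160x160 bound cropped to a 4:3 JPEG resolution
+ * yields 160x120, and to a 16:9 resolution 160x90; the (0, 0) entry
+ * advertises that thumbnail generation can be turned off.
+ */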
+
+ /* Transform the sizes into a list of integers that can be consumed. */
+ std::vector<int32_t> thumbnailEntries;
+ thumbnailEntries.reserve(thumbnailSizes.size() * 2);
+ for (const auto &size : thumbnailSizes) {
+ thumbnailEntries.push_back(size.width);
+ thumbnailEntries.push_back(size.height);
+ }
+ staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ thumbnailEntries);
+
+ staticMetadata_->addEntry(ANDROID_JPEG_MAX_SIZE, maxJpegBufferSize_);
+
+ /* Sensor static metadata. */
+ std::array<int32_t, 2> pixelArraySize;
+ {
+ const Size &size = properties.get(properties::PixelArraySize).value_or(Size{});
+ pixelArraySize[0] = size.width;
+ pixelArraySize[1] = size.height;
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ pixelArraySize);
+ }
+
+ const auto &cellSize = properties.get<Size>(properties::UnitCellSize);
+ if (cellSize) {
+ std::array<float, 2> physicalSize{
+ cellSize->width * pixelArraySize[0] / 1e6f,
+ cellSize->height * pixelArraySize[1] / 1e6f
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ physicalSize);
+ }
+
+ {
+ const Span<const Rectangle> rects =
+ properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
+ std::vector<int32_t> data{
+ static_cast<int32_t>(rects[0].x),
+ static_cast<int32_t>(rects[0].y),
+ static_cast<int32_t>(rects[0].width),
+ static_cast<int32_t>(rects[0].height),
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ data);
+ }
+
+ int32_t sensitivityRange[] = {
+ 32, 2400,
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ sensitivityRange);
+
+ /* Report the color filter arrangement if the camera reports it. */
+ const auto &filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ if (filterArr)
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ *filterArr);
+
+ const auto &exposureInfo = controlsInfo.find(&controls::ExposureTime);
+ if (exposureInfo != controlsInfo.end()) {
+ int64_t exposureTimeRange[2] = {
+ exposureInfo->second.min().get<int32_t>() * 1000LL,
+ exposureInfo->second.max().get<int32_t>() * 1000LL,
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ exposureTimeRange, 2);
+ }
+
+ staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, orientation_);
+
+ std::vector<int32_t> testPatternModes = {
+ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF
+ };
+ const auto &testPatternsInfo =
+ controlsInfo.find(&controls::draft::TestPatternMode);
+ if (testPatternsInfo != controlsInfo.end()) {
+ const auto &values = testPatternsInfo->second.values();
+ ASSERT(!values.empty());
+ for (const auto &value : values) {
+ switch (value.get<int32_t>()) {
+ case controls::draft::TestPatternModeOff:
+ /*
+ * ANDROID_SENSOR_TEST_PATTERN_MODE_OFF is
+ * already in testPatternModes.
+ */
+ break;
+
+ case controls::draft::TestPatternModeSolidColor:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR);
+ break;
+
+ case controls::draft::TestPatternModeColorBars:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS);
+ break;
+
+ case controls::draft::TestPatternModeColorBarsFadeToGray:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY);
+ break;
+
+ case controls::draft::TestPatternModePn9:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_PN9);
+ break;
+
+ case controls::draft::TestPatternModeCustom1:
+ /* We don't support this yet. */
+ break;
+
+ default:
+ LOG(HAL, Error) << "Unknown test pattern mode: "
+ << value.get<int32_t>();
+ continue;
+ }
+ }
+ }
+ staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+ testPatternModes);
+
+ uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+ timestampSource);
+
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ maxFrameDuration_);
+
+ /* Statistics static metadata. */
+ int32_t maxFaceCount = 0;
+ auto iter = camera_->controls().find(controls::draft::FaceDetectMode.id());
+ if (iter != camera_->controls().end()) {
+ const ControlInfo &faceDetectCtrlInfo = iter->second;
+ std::vector<uint8_t> faceDetectModes;
+ bool hasFaceDetection = false;
+
+ for (const auto &value : faceDetectCtrlInfo.values()) {
+ int32_t mode = value.get<int32_t>();
+ uint8_t androidMode = 0;
+
+ switch (mode) {
+ case controls::draft::FaceDetectModeOff:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ break;
+ case controls::draft::FaceDetectModeSimple:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
+ hasFaceDetection = true;
+ break;
+ default:
+ LOG(HAL, Fatal) << "Received invalid face detect mode: " << mode;
+ }
+ faceDetectModes.push_back(androidMode);
+ }
+ if (hasFaceDetection) {
+ /*
+ * \todo Create new libcamera controls to query max
+ * possible faces detected.
+ */
+ maxFaceCount = 10;
+ staticMetadata_->addEntry(
+ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectModes.data(), faceDetectModes.size());
+ }
+ } else {
+ uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectMode);
+ }
+
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ maxFaceCount);
+
+ {
+ std::vector<uint8_t> data;
+ data.reserve(2);
+ const auto &infoMap = controlsInfo.find(&controls::draft::LensShadingMapMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+ data);
+ }
+
+ /* Sync static metadata. */
+ setMetadata(staticMetadata_.get(), ANDROID_SYNC_MAX_LATENCY,
+ controlsInfo, controls::draft::MaxLatency,
+ ControlRange::Def,
+ ANDROID_SYNC_MAX_LATENCY_UNKNOWN);
+
+ /* Flash static metadata. */
+ uint8_t flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE,
+ flashAvailable);
+
+ /* Lens static metadata. */
+ std::vector<float> lensApertures = {
+ 2.53 / 100,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ lensApertures);
+
+ uint8_t lensFacing;
+ switch (facing_) {
+ default:
+ case CAMERA_FACING_FRONT:
+ lensFacing = ANDROID_LENS_FACING_FRONT;
+ break;
+ case CAMERA_FACING_BACK:
+ lensFacing = ANDROID_LENS_FACING_BACK;
+ break;
+ case CAMERA_FACING_EXTERNAL:
+ lensFacing = ANDROID_LENS_FACING_EXTERNAL;
+ break;
+ }
+ staticMetadata_->addEntry(ANDROID_LENS_FACING, lensFacing);
+
+ std::vector<float> lensFocalLengths = {
+ 1,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ lensFocalLengths);
+
+ std::vector<uint8_t> opticalStabilizations = {
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ opticalStabilizations);
+
+ float hyperfocalDistance = 0;
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+ hyperfocalDistance);
+
+ float minFocusDistance = 0;
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ minFocusDistance);
+
+ /* Noise reduction modes. */
+ {
+ std::vector<uint8_t> data;
+ data.reserve(5);
+ const auto &infoMap = controlsInfo.find(&controls::draft::NoiseReductionMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_NOISE_REDUCTION_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+ data);
+ }
+
+ /* Scaler static metadata. */
+
+ /*
+ * \todo The digital zoom factor is a property that depends on the
+ * desired output configuration and the sensor frame size input to the
+ * ISP. This information is not available to the Android HAL, at
+ * least not at initialization time.
+ *
+ * As a workaround rely on pipeline handlers initializing the
+ * ScalerCrop control with the camera default configuration and use the
+ * maximum and minimum crop rectangles to calculate the digital zoom
+ * factor.
+ */
+ float maxZoom = 1.0f;
+ const auto scalerCrop = controlsInfo.find(&controls::ScalerCrop);
+ if (scalerCrop != controlsInfo.end()) {
+ Rectangle min = scalerCrop->second.min().get<Rectangle>();
+ Rectangle max = scalerCrop->second.max().get<Rectangle>();
+ maxZoom = std::min(1.0f * max.width / min.width,
+ 1.0f * max.height / min.height);
+ }
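+ /*
+ * For example, a hypothetical minimum crop of 640x480 with a maximum
+ * crop of 2560x1920 gives a ratio of 4.0 on both axes, so a 4x
+ * maximum digital zoom is reported.
+ */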
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ maxZoom);
+
+ std::vector<uint32_t> availableStreamConfigurations;
+ std::vector<int64_t> minFrameDurations;
+ int maxYUVFps = 0;
+ Size maxYUVSize;
+
+ availableStreamConfigurations.reserve(streamConfigurations_.size() * 4);
+ minFrameDurations.reserve(streamConfigurations_.size() * 4);
+
+ for (const auto &entry : streamConfigurations_) {
+ /*
+ * Filter out YUV streams not capable of running at 30 FPS.
+ *
+ * This requirement comes from CTS RecordingTest failures most
+ * probably related to a requirement of the camcorder video
+ * recording profile. Inspecting the Intel IPU3 HAL
+ * implementation confirms this but no reference has been found
+ * in the metadata documentation.
+ */
+ unsigned int fps =
+ static_cast<unsigned int>(floor(1e9 / entry.minFrameDurationNsec));
+
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB && fps < 30)
+ continue;
+
+ /*
+ * Collect the FPS of the maximum YUV output size to populate
+ * AE_AVAILABLE_TARGET_FPS_RANGE
+ */
+ if (entry.androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888 &&
+ entry.resolution > maxYUVSize) {
+ maxYUVSize = entry.resolution;
+ maxYUVFps = fps;
+ }
+
+ /* Stream configuration map. */
+ availableStreamConfigurations.push_back(entry.androidFormat);
+ availableStreamConfigurations.push_back(entry.resolution.width);
+ availableStreamConfigurations.push_back(entry.resolution.height);
+ availableStreamConfigurations.push_back(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+
+ /* Per-stream durations. */
+ minFrameDurations.push_back(entry.androidFormat);
+ minFrameDurations.push_back(entry.resolution.width);
+ minFrameDurations.push_back(entry.resolution.height);
+ minFrameDurations.push_back(entry.minFrameDurationNsec);
+
+ LOG(HAL, Debug)
+ << "Output Stream: " << utils::hex(entry.androidFormat)
+ << " (" << entry.resolution << ")["
+ << entry.minFrameDurationNsec << "]"
+ << "@" << fps;
+ }
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ availableStreamConfigurations);
+
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ minFrameDurations);
+
+ /*
+ * Register the {min, max} and {max, max} ranges with the camera
+ * service, where 'max' is the maximum frame rate of the largest YUV
+ * stream and 'min' is the global minimum frame rate rounded up to the
+ * next integer, as the camera service expects the camera maximum frame
+ * duration to be smaller than 10^9 / minFps.
+ */
+ int32_t minFps = std::ceil(1e9 / maxFrameDuration_);
+ int32_t availableAeFpsTarget[] = {
+ minFps, maxYUVFps, maxYUVFps, maxYUVFps,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ availableAeFpsTarget);
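+
+ /*
+ * For example, a hypothetical 125,000,000 ns maximum frame duration
+ * gives minFps = ceil(8.0) = 8; with a 30 FPS maximum YUV stream the
+ * advertised ranges are {8, 30} and {30, 30}.
+ */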
+
+ std::vector<int64_t> availableStallDurations;
+ for (const auto &entry : streamConfigurations_) {
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB)
+ continue;
+
+ availableStallDurations.push_back(entry.androidFormat);
+ availableStallDurations.push_back(entry.resolution.width);
+ availableStallDurations.push_back(entry.resolution.height);
+ availableStallDurations.push_back(entry.minFrameDurationNsec);
+ }
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ availableStallDurations);
+
+ uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY;
+ staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, croppingType);
+
+ /* Request static metadata. */
+ int32_t partialResultCount = 1;
+ staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+ partialResultCount);
+
+ {
+ /* Default the value to 2 if not reported by the camera. */
+ uint8_t maxPipelineDepth = 2;
+ const auto &infoMap = controlsInfo.find(&controls::draft::PipelineDepth);
+ if (infoMap != controlsInfo.end())
+ maxPipelineDepth = infoMap->second.max().get<int32_t>();
+ staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
+ maxPipelineDepth);
+ }
+
+ /* LIMITED does not support reprocessing. */
+ uint32_t maxNumInputStreams = 0;
+ staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+ maxNumInputStreams);
+
+ /* Number of { RAW, YUV, JPEG } supported output streams */
+ int32_t numOutStreams[] = { rawStreamAvailable_, 2, 1 };
+ staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+ numOutStreams);
+
+ /* Check capabilities */
+ capabilities_ = computeCapabilities();
+ /* This *must* be uint8_t. */
+ std::vector<uint8_t> capsVec(capabilities_.begin(), capabilities_.end());
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capsVec);
+
+ computeHwLevel(capabilities_);
+ staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, hwLevel_);
+
+ LOG(HAL, Info)
+ << "Hardware level: " << hwLevelStrings.find(hwLevel_)->second;
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+ std::vector<int32_t>(availableCharacteristicsKeys_.begin(),
+ availableCharacteristicsKeys_.end()));
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
+ std::vector<int32_t>(availableRequestKeys_.begin(),
+ availableRequestKeys_.end()));
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+ std::vector<int32_t>(availableResultKeys_.begin(),
+ availableResultKeys_.end()));
+
+ if (!staticMetadata_->isValid()) {
+ LOG(HAL, Error) << "Failed to construct static metadata";
+ staticMetadata_.reset();
+ return -EINVAL;
+ }
+
+ if (staticMetadata_->resized()) {
+ auto [entryCount, dataCount] = staticMetadata_->usage();
+ LOG(HAL, Info)
+ << "Static metadata resized: " << entryCount
+ << " entries and " << dataCount << " bytes used";
+ }
+
+ return 0;
+}
+
+/* Translate Android format code to libcamera pixel format. */
+PixelFormat CameraCapabilities::toPixelFormat(int format) const
+{
+ auto it = formatsMap_.find(format);
+ if (it == formatsMap_.end()) {
+ LOG(HAL, Error) << "Requested format " << utils::hex(format)
+ << " not supported";
+ return PixelFormat();
+ }
+
+ return it->second;
+}
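+
+/*
+ * Example usage, a minimal sketch assuming formatsMap_ maps
+ * HAL_PIXEL_FORMAT_YCbCr_420_888 to a YUV format such as formats::NV12
+ * on this platform:
+ *
+ *   PixelFormat format = capabilities.toPixelFormat(HAL_PIXEL_FORMAT_YCbCr_420_888);
+ *   if (!format.isValid())
+ *           return -EINVAL;
+ */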
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateManual() const
+{
+ if (!capabilities_.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) {
+ LOG(HAL, Error) << "Manual template not supported";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraMetadata> manualTemplate = requestTemplatePreview();
+ if (!manualTemplate)
+ return nullptr;
+
+ return manualTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplatePreview() const
+{
+ /*
+ * Give an initial hint of the number of entries and bytes to be
+ * allocated. The hint is deliberately slightly larger than required,
+ * to avoid resizing the container.
+ *
+ * CameraMetadata is capable of resizing the container on the fly, if
+ * adding a new entry would exceed its capacity.
+ */
+ auto requestTemplate = std::make_unique<CameraMetadata>(22, 38);
+ if (!requestTemplate->isValid()) {
+ return nullptr;
+ }
+
+ /* Get the FPS range registered in the static metadata. */
+ camera_metadata_ro_entry_t entry;
+ bool found = staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ &entry);
+ if (!found) {
+ LOG(HAL, Error) << "Cannot create capture template without FPS range";
+ return nullptr;
+ }
+
+ /*
+ * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata
+ * has been assembled as {{min, max}, {max, max}}.
+ */
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32, 2);
+
+ /*
+ * Get thumbnail sizes from static metadata and add the first non-zero
+ * size to the template.
+ */
+ found = staticMetadata_->getEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ &entry);
+ ASSERT(found && entry.count >= 4);
+ requestTemplate->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE,
+ entry.data.i32 + 2, 2);
+
+ uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE, aeMode);
+
+ int32_t aeExposureCompensation = 0;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ aeExposureCompensation);
+
+ uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ aePrecaptureTrigger);
+
+ uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK, aeLock);
+
+ uint8_t aeAntibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ aeAntibandingMode);
+
+ uint8_t afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AF_MODE, afMode);
+
+ uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER, afTrigger);
+
+ uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE, awbMode);
+
+ uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK, awbLock);
+
+ uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_FLASH_MODE, flashMode);
+
+ uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ faceDetectMode);
+
+ uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE,
+ noiseReduction);
+
+ uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ aberrationMode);
+
+ uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_MODE, controlMode);
+
+ float lensAperture = 2.53 / 100;
+ requestTemplate->addEntry(ANDROID_LENS_APERTURE, lensAperture);
+
+ uint8_t opticalStabilization = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ opticalStabilization);
+
+ uint8_t captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT,
+ captureIntent);
+
+ return requestTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateStill() const
+{
+ std::unique_ptr<CameraMetadata> stillTemplate = requestTemplatePreview();
+ if (!stillTemplate)
+ return nullptr;
+
+ return stillTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateVideo() const
+{
+ std::unique_ptr<CameraMetadata> previewTemplate = requestTemplatePreview();
+ if (!previewTemplate)
+ return nullptr;
+
+ /*
+ * The video template requires a fixed FPS range. Everything else
+ * stays the same as the preview template.
+ */
+ camera_metadata_ro_entry_t entry;
+ staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ &entry);
+
+ /*
+ * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata
+ * has been assembled as {{min, max}, {max, max}}.
+ */
+ previewTemplate->updateEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32 + 2, 2);
+
+ return previewTemplate;
+}
diff --git a/src/android/camera_capabilities.h b/src/android/camera_capabilities.h
new file mode 100644
index 00000000..56ac1efe
--- /dev/null
+++ b/src/android/camera_capabilities.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera static properties manager
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <libcamera/base/class.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+
+#include "camera_metadata.h"
+
+class CameraCapabilities
+{
+public:
+ CameraCapabilities() = default;
+
+ int initialize(std::shared_ptr<libcamera::Camera> camera,
+ int orientation, int facing);
+
+ CameraMetadata *staticMetadata() const { return staticMetadata_.get(); }
+ libcamera::PixelFormat toPixelFormat(int format) const;
+ unsigned int maxJpegBufferSize() const { return maxJpegBufferSize_; }
+
+ std::unique_ptr<CameraMetadata> requestTemplateManual() const;
+ std::unique_ptr<CameraMetadata> requestTemplatePreview() const;
+ std::unique_ptr<CameraMetadata> requestTemplateStill() const;
+ std::unique_ptr<CameraMetadata> requestTemplateVideo() const;
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraCapabilities)
+
+ struct Camera3StreamConfiguration {
+ libcamera::Size resolution;
+ int androidFormat;
+ int64_t minFrameDurationNsec;
+ int64_t maxFrameDurationNsec;
+ };
+
+ bool validateManualSensorCapability();
+ bool validateManualPostProcessingCapability();
+ bool validateBurstCaptureCapability();
+
+ std::set<camera_metadata_enum_android_request_available_capabilities>
+ computeCapabilities();
+
+ void computeHwLevel(
+ const std::set<camera_metadata_enum_android_request_available_capabilities> &caps);
+
+ std::vector<libcamera::Size>
+ initializeYUVResolutions(const libcamera::PixelFormat &pixelFormat,
+ const std::vector<libcamera::Size> &resolutions);
+ std::vector<libcamera::Size>
+ initializeRawResolutions(const libcamera::PixelFormat &pixelFormat);
+ int initializeStreamConfigurations();
+
+ int initializeStaticMetadata();
+
+ std::shared_ptr<libcamera::Camera> camera_;
+
+ int facing_;
+ int orientation_;
+ bool rawStreamAvailable_;
+ int64_t maxFrameDuration_;
+ camera_metadata_enum_android_info_supported_hardware_level hwLevel_;
+ std::set<camera_metadata_enum_android_request_available_capabilities> capabilities_;
+
+ std::vector<Camera3StreamConfiguration> streamConfigurations_;
+ std::map<int, libcamera::PixelFormat> formatsMap_;
+ std::unique_ptr<CameraMetadata> staticMetadata_;
+ unsigned int maxJpegBufferSize_;
+
+ std::set<int32_t> availableCharacteristicsKeys_;
+ std::set<int32_t> availableRequestKeys_;
+ std::set<int32_t> availableResultKeys_;
+};
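+
+/*
+ * Usage sketch (illustrative; assumes a camera obtained from the
+ * CameraManager and orientation/facing values already determined):
+ *
+ *   CameraCapabilities caps;
+ *   if (caps.initialize(camera, orientation, facing))
+ *           return -ENODEV;
+ *
+ *   const camera_metadata_t *metadata = caps.staticMetadata()->getMetadata();
+ *   std::unique_ptr<CameraMetadata> preview = caps.requestTemplatePreview();
+ */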
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index 76af70eb..a038131a 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -2,43 +2,238 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.cpp - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#include "camera_device.h"
-#include "camera_ops.h"
+#include <algorithm>
+#include <fstream>
+#include <set>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/unique_fd.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/fence.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
#include <libcamera/property_ids.h>
-#include "log.h"
-#include "utils.h"
+#include "system/graphics.h"
-#include "camera_metadata.h"
+#include "camera_buffer.h"
+#include "camera_hal_config.h"
+#include "camera_ops.h"
+#include "camera_request.h"
+#include "hal_framebuffer.h"
using namespace libcamera;
-LOG_DECLARE_CATEGORY(HAL);
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
/*
- * \struct Camera3RequestDescriptor
- *
- * A utility structure that groups information about a capture request to be
- * later re-used at request complete time to notify the framework.
+ * \struct Camera3StreamConfig
+ * \brief Data to associate a StreamConfiguration with its camera3_stream(s)
+ * \var streams List of pairs of a stream requested by the Android HAL client
+ * and the CameraStream::Type associated with that stream
+ * \var config The StreamConfiguration for the streams
+ */
+struct Camera3StreamConfig {
+ struct Camera3Stream {
+ camera3_stream_t *stream;
+ CameraStream::Type type;
+ };
+
+ std::vector<Camera3Stream> streams;
+ StreamConfiguration config;
+};
+
+/*
+ * Reorder the configurations to maximize the chance that libcamera::Camera
+ * accepts them. The sort rules are as follows:
+ * 1.) The configuration for the NV12 request with the largest resolution.
+ * 2.) The configuration for the JPEG request.
+ * 3.) All others, with larger resolutions and different formats put earlier.
*/
+void sortCamera3StreamConfigs(std::vector<Camera3StreamConfig> &unsortedConfigs,
+ const camera3_stream_t *jpegStream)
+{
+ const Camera3StreamConfig *jpegConfig = nullptr;
+
+ std::map<PixelFormat, std::vector<const Camera3StreamConfig *>> formatToConfigs;
+ for (const auto &streamConfig : unsortedConfigs) {
+ if (jpegStream && !jpegConfig) {
+ const auto &streams = streamConfig.streams;
+ if (std::find_if(streams.begin(), streams.end(),
+ [jpegStream](const auto &stream) {
+ return stream.stream == jpegStream;
+ }) != streams.end()) {
+ jpegConfig = &streamConfig;
+ continue;
+ }
+ }
+ formatToConfigs[streamConfig.config.pixelFormat].push_back(&streamConfig);
+ }
+
+ if (jpegStream && !jpegConfig)
+ LOG(HAL, Fatal) << "No Camera3StreamConfig is found for JPEG";
+
+ for (auto &fmt : formatToConfigs) {
+ auto &streamConfigs = fmt.second;
+
+ /* Sorted by resolution. Smaller is put first. */
+ std::sort(streamConfigs.begin(), streamConfigs.end(),
+ [](const auto *streamConfigA, const auto *streamConfigB) {
+ const Size &sizeA = streamConfigA->config.size;
+ const Size &sizeB = streamConfigB->config.size;
+ return sizeA < sizeB;
+ });
+ }
+
+ std::vector<Camera3StreamConfig> sortedConfigs;
+ sortedConfigs.reserve(unsortedConfigs.size());
+
+ /*
+ * NV12 is the highest-priority format. Put the NV12 configuration
+ * with the largest resolution first.
+ */
+ const auto nv12It = formatToConfigs.find(formats::NV12);
+ if (nv12It != formatToConfigs.end()) {
+ auto &nv12Configs = nv12It->second;
+ const Camera3StreamConfig *nv12Largest = nv12Configs.back();
+
+ /*
+ * If JPEG will be created from NV12 and its size is larger than
+ * that of the largest NV12 configuration, put the NV12
+ * configuration for JPEG first.
+ */
+ if (jpegConfig && jpegConfig->config.pixelFormat == formats::NV12) {
+ const Size &nv12SizeForJpeg = jpegConfig->config.size;
+ const Size &nv12LargestSize = nv12Largest->config.size;
+
+ if (nv12LargestSize < nv12SizeForJpeg) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
+ }
+
+ LOG(HAL, Debug) << "Insert " << nv12Largest->config.toString();
+ sortedConfigs.push_back(*nv12Largest);
+ nv12Configs.pop_back();
+
+ if (nv12Configs.empty())
+ formatToConfigs.erase(nv12It);
+ }
+
+ /* If a configuration for JPEG is present, insert it now. */
+ if (jpegConfig) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
-CameraDevice::Camera3RequestDescriptor::Camera3RequestDescriptor(
- unsigned int frameNumber, unsigned int numBuffers)
- : frameNumber(frameNumber), numBuffers(numBuffers)
+ /*
+ * Put configurations with different formats and larger resolutions
+ * earlier.
+ */
+ while (!formatToConfigs.empty()) {
+ for (auto it = formatToConfigs.begin(); it != formatToConfigs.end();) {
+ auto &configs = it->second;
+ LOG(HAL, Debug) << "Insert " << configs.back()->config.toString();
+ sortedConfigs.push_back(*configs.back());
+ configs.pop_back();
+
+ if (configs.empty())
+ it = formatToConfigs.erase(it);
+ else
+ it++;
+ }
+ }
+
+ ASSERT(sortedConfigs.size() == unsortedConfigs.size());
+
+ unsortedConfigs = sortedConfigs;
+}
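+
+/*
+ * Illustrative sort result (hypothetical stream set): given requests for
+ * NV12 640x480, NV12 1920x1080 and a JPEG stream encoded from an NV12
+ * 1920x1080 source, the sorted order becomes:
+ *
+ *   1. NV12 1920x1080 (largest NV12 configuration)
+ *   2. NV12 1920x1080 (source configuration for JPEG)
+ *   3. NV12 640x480
+ */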
+
+const char *rotationToString(int rotation)
{
- buffers = new camera3_stream_buffer_t[numBuffers];
+ switch (rotation) {
+ case CAMERA3_STREAM_ROTATION_0:
+ return "0";
+ case CAMERA3_STREAM_ROTATION_90:
+ return "90";
+ case CAMERA3_STREAM_ROTATION_180:
+ return "180";
+ case CAMERA3_STREAM_ROTATION_270:
+ return "270";
+ }
+ return "INVALID";
}
-CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
+const char *directionToString(int stream_type)
{
- delete[] buffers;
+ switch (stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ return "Output";
+ case CAMERA3_STREAM_INPUT:
+ return "Input";
+ case CAMERA3_STREAM_BIDIRECTIONAL:
+ return "Bidirectional";
+ default:
+ LOG(HAL, Warning) << "Unknown stream type: " << stream_type;
+ return "Unknown";
+ }
}
+#if defined(OS_CHROMEOS)
+/*
+ * Check whether the crop_rotate_scale_degrees values for all streams in
+ * the list are valid according to the Chrome OS camera HAL API.
+ */
+bool validateCropRotate(const camera3_stream_configuration_t &streamList)
+{
+ ASSERT(streamList.num_streams > 0);
+ const int cropRotateScaleDegrees =
+ streamList.streams[0]->crop_rotate_scale_degrees;
+ for (unsigned int i = 0; i < streamList.num_streams; ++i) {
+ const camera3_stream_t &stream = *streamList.streams[i];
+
+ switch (stream.crop_rotate_scale_degrees) {
+ case CAMERA3_STREAM_ROTATION_0:
+ case CAMERA3_STREAM_ROTATION_90:
+ case CAMERA3_STREAM_ROTATION_270:
+ break;
+
+ /* 180° rotation is specified by Chrome OS as invalid. */
+ case CAMERA3_STREAM_ROTATION_180:
+ default:
+ LOG(HAL, Error) << "Invalid crop_rotate_scale_degrees: "
+ << stream.crop_rotate_scale_degrees;
+ return false;
+ }
+
+ if (cropRotateScaleDegrees != stream.crop_rotate_scale_degrees) {
+ LOG(HAL, Error) << "crop_rotate_scale_degrees in all "
+ << "streams are not identical";
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+} /* namespace */
+
/*
* \class CameraDevice
*
@@ -52,21 +247,143 @@ CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
* back to the framework using the designated callbacks.
*/
-CameraDevice::CameraDevice(unsigned int id, const std::shared_ptr<Camera> &camera)
- : running_(false), camera_(camera), staticMetadata_(nullptr)
+CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
+ : id_(id), state_(State::Stopped), camera_(std::move(camera)),
+ facing_(CAMERA_FACING_FRONT), orientation_(0)
{
camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
+
+ maker_ = "libcamera";
+ model_ = "cameraModel";
+
+ /* \todo Support getting properties on Android */
+ std::ifstream fstream("/var/cache/camera/camera.prop");
+ if (!fstream.is_open())
+ return;
+
+ std::string line;
+ while (std::getline(fstream, line)) {
+ std::string::size_type delimPos = line.find("=");
+ if (delimPos == std::string::npos)
+ continue;
+ std::string key = line.substr(0, delimPos);
+ std::string val = line.substr(delimPos + 1);
+
+ if (!key.compare("ro.product.model"))
+ model_ = val;
+ else if (!key.compare("ro.product.manufacturer"))
+ maker_ = val;
+ }
}
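+
+/*
+ * A /var/cache/camera/camera.prop sketch (hypothetical values):
+ *
+ *   ro.product.manufacturer=ACME
+ *   ro.product.model=Roadrunner
+ *
+ * would set maker_ to "ACME" and model_ to "Roadrunner".
+ */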
-CameraDevice::~CameraDevice()
+CameraDevice::~CameraDevice() = default;
+
+std::unique_ptr<CameraDevice> CameraDevice::create(unsigned int id,
+ std::shared_ptr<Camera> cam)
+{
+ return std::unique_ptr<CameraDevice>(
+ new CameraDevice(id, std::move(cam)));
+}
+
+/*
+ * Initialize the camera static information retrieved from the
+ * Camera::properties or from the cameraConfigData.
+ *
+ * cameraConfigData is optional for external camera devices and can be
+ * nullptr.
+ *
+ * This function is called before the camera device is opened.
+ */
+int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
{
- if (staticMetadata_)
- delete staticMetadata_;
+ /*
+ * Initialize the orientation and facing side of the camera.
+ *
+ * If the libcamera::Camera provides this information, as retrieved
+ * from firmware, use it; otherwise fall back to the values parsed from
+ * the configuration file. If the configuration file is not available
+ * the camera is external, so its location and rotation can safely be
+ * defaulted.
+ */
+ const ControlList &properties = camera_->properties();
+
+ const auto &location = properties.get(properties::Location);
+ if (location) {
+ switch (*location) {
+ case properties::CameraLocationFront:
+ facing_ = CAMERA_FACING_FRONT;
+ break;
+ case properties::CameraLocationBack:
+ facing_ = CAMERA_FACING_BACK;
+ break;
+ case properties::CameraLocationExternal:
+ /*
+ * If the camera is reported as external, but the
+ * CameraHalManager has overridden it, use what is
+ * reported in the configuration file. This typically
+ * happens for UVC cameras reported as 'External' by
+ * libcamera but installed in a fixed position on the
+ * device.
+ */
+ if (cameraConfigData && cameraConfigData->facing != -1)
+ facing_ = cameraConfigData->facing;
+ else
+ facing_ = CAMERA_FACING_EXTERNAL;
+ break;
+ }
- for (auto &it : requestTemplates_)
- delete it.second;
+ if (cameraConfigData && cameraConfigData->facing != -1 &&
+ facing_ != cameraConfigData->facing) {
+ LOG(HAL, Warning)
+ << "Camera location does not match"
+ << " configuration file. Using " << facing_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->facing == -1) {
+ LOG(HAL, Error)
+ << "Camera facing not in configuration file";
+ return -EINVAL;
+ }
+ facing_ = cameraConfigData->facing;
+ } else {
+ facing_ = CAMERA_FACING_EXTERNAL;
+ }
+
+ /*
+ * The Android orientation metadata specifies its rotation correction
+ * value in the clockwise direction, whereas libcamera specifies its
+ * rotation property in the anticlockwise direction. Read libcamera's
+ * rotation property (anticlockwise) and compute the corresponding
+ * clockwise value required by the Android orientation metadata.
+ */
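+ /*
+ * For example (illustrative value): a libcamera Rotation of 90
+ * (anticlockwise) maps to an Android orientation of
+ * (360 - 90) % 360 = 270 (clockwise).
+ */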
+ const auto &rotation = properties.get(properties::Rotation);
+ if (rotation) {
+ orientation_ = (360 - *rotation) % 360;
+ if (cameraConfigData && cameraConfigData->rotation != -1 &&
+ orientation_ != cameraConfigData->rotation) {
+ LOG(HAL, Warning)
+ << "Camera orientation does not match"
+ << " configuration file. Using " << orientation_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->rotation == -1) {
+ LOG(HAL, Error)
+ << "Camera rotation not in configuration file";
+ return -EINVAL;
+ }
+ orientation_ = cameraConfigData->rotation;
+ } else {
+ orientation_ = 0;
+ }
+
+ return capabilities_.initialize(camera_, orientation_, facing_);
}
+/*
+ * Open a camera device. The static information on the camera shall have been
+ * initialized with a call to CameraDevice::initialize().
+ */
int CameraDevice::open(const hw_module_t *hardwareModule)
{
int ret = camera_->acquire();
@@ -93,452 +410,56 @@ int CameraDevice::open(const hw_module_t *hardwareModule)
void CameraDevice::close()
{
- camera_->stop();
- camera_->release();
+ stop();
- running_ = false;
-}
-
-void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
-{
- callbacks_ = callbacks;
+ camera_->release();
}
-/*
- * Return static information for the camera.
- */
-const camera_metadata_t *CameraDevice::getStaticMetadata()
+void CameraDevice::flush()
{
- if (staticMetadata_)
- return staticMetadata_->get();
-
- const ControlList &properties = camera_->properties();
+ {
+ MutexLocker stateLock(stateMutex_);
+ if (state_ != State::Running)
+ return;
- /*
- * The here reported metadata are enough to implement a basic capture
- * example application, but a real camera implementation will require
- * more.
- */
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 50 entries, 666 bytes
- */
- staticMetadata_ = new CameraMetadata(50, 700);
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
+ state_ = State::Flushing;
}
- /* Color correction static metadata. */
- std::vector<uint8_t> aberrationModes = {
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- aberrationModes.data(),
- aberrationModes.size());
-
- /* Control static metadata. */
- std::vector<uint8_t> aeAvailableAntiBandingModes = {
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- aeAvailableAntiBandingModes.data(),
- aeAvailableAntiBandingModes.size());
-
- std::vector<uint8_t> aeAvailableModes = {
- ANDROID_CONTROL_AE_MODE_ON,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES,
- aeAvailableModes.data(),
- aeAvailableModes.size());
-
- std::vector<int32_t> availableAeFpsTarget = {
- 15, 30,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- availableAeFpsTarget.data(),
- availableAeFpsTarget.size());
-
- std::vector<int32_t> aeCompensationRange = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- aeCompensationRange.data(),
- aeCompensationRange.size());
-
- const camera_metadata_rational_t aeCompensationStep[] = {
- { 0, 1 }
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP,
- aeCompensationStep, 1);
-
- std::vector<uint8_t> availableAfModes = {
- ANDROID_CONTROL_AF_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES,
- availableAfModes.data(),
- availableAfModes.size());
-
- std::vector<uint8_t> availableEffects = {
- ANDROID_CONTROL_EFFECT_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS,
- availableEffects.data(),
- availableEffects.size());
-
- std::vector<uint8_t> availableSceneModes = {
- ANDROID_CONTROL_SCENE_MODE_DISABLED,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- availableSceneModes.data(),
- availableSceneModes.size());
-
- std::vector<uint8_t> availableStabilizationModes = {
- ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- availableStabilizationModes.data(),
- availableStabilizationModes.size());
-
- std::vector<uint8_t> availableAwbModes = {
- ANDROID_CONTROL_AWB_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- availableAwbModes.data(),
- availableAwbModes.size());
-
- std::vector<int32_t> availableMaxRegions = {
- 0, 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS,
- availableMaxRegions.data(),
- availableMaxRegions.size());
-
- std::vector<uint8_t> sceneModesOverride = {
- ANDROID_CONTROL_AE_MODE_ON,
- ANDROID_CONTROL_AWB_MODE_AUTO,
- ANDROID_CONTROL_AF_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- sceneModesOverride.data(),
- sceneModesOverride.size());
-
- uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- &aeLockAvailable, 1);
-
- uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- &awbLockAvailable, 1);
-
- char availableControlModes = ANDROID_CONTROL_MODE_AUTO;
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES,
- &availableControlModes, 1);
-
- /* JPEG static metadata. */
- std::vector<int32_t> availableThumbnailSizes = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- availableThumbnailSizes.data(),
- availableThumbnailSizes.size());
-
- /* Sensor static metadata. */
- int32_t pixelArraySize[] = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- &pixelArraySize, 2);
-
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- &sensorSizes, 4);
-
- int32_t sensitivityRange[] = {
- 32, 2400,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- &sensitivityRange, 2);
-
- uint16_t filterArr = ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- &filterArr, 1);
+ camera_->stop();
- int64_t exposureTimeRange[] = {
- 100000, 200000000,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- &exposureTimeRange, 2);
+ MutexLocker stateLock(stateMutex_);
+ state_ = State::Stopped;
+}
- /*
- * The Android orientation metadata and libcamera rotation property are
- * defined differently but have identical numerical values for Android
- * devices such as phones and tablets.
- */
- int32_t orientation = 0;
- if (properties.contains(properties::Rotation))
- orientation = properties.get(properties::Rotation);
- staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+void CameraDevice::stop()
+{
+ MutexLocker stateLock(stateMutex_);
- std::vector<int32_t> testPatterModes = {
- ANDROID_SENSOR_TEST_PATTERN_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- testPatterModes.data(),
- testPatterModes.size());
+ camera_->stop();
- std::vector<float> physicalSize = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- physicalSize.data(),
- physicalSize.size());
-
- uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- &timestampSource, 1);
-
- /* Statistics static metadata. */
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- &faceDetectMode, 1);
-
- int32_t maxFaceCount = 0;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- &maxFaceCount, 1);
-
- /* Sync static metadata. */
- int32_t maxLatency = ANDROID_SYNC_MAX_LATENCY_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
-
- /* Flash static metadata. */
- char flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE,
- &flashAvailable, 1);
-
- /* Lens static metadata. */
- std::vector<float> lensApertures = {
- 2.53 / 100,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- lensApertures.data(),
- lensApertures.size());
-
- uint8_t lensFacing = ANDROID_LENS_FACING_FRONT;
- if (properties.contains(properties::Location)) {
- int32_t location = properties.get(properties::Location);
- switch (location) {
- case properties::CameraLocationFront:
- lensFacing = ANDROID_LENS_FACING_FRONT;
- break;
- case properties::CameraLocationBack:
- lensFacing = ANDROID_LENS_FACING_BACK;
- break;
- case properties::CameraLocationExternal:
- lensFacing = ANDROID_LENS_FACING_EXTERNAL;
- break;
- }
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_ = {};
}
- staticMetadata_->addEntry(ANDROID_LENS_FACING, &lensFacing, 1);
-
- std::vector<float> lensFocalLenghts = {
- 1,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- lensFocalLenghts.data(),
- lensFocalLenghts.size());
-
- std::vector<uint8_t> opticalStabilizations = {
- ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- opticalStabilizations.data(),
- opticalStabilizations.size());
-
- float hypeFocalDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- &hypeFocalDistance, 1);
-
- float minFocusDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- &minFocusDistance, 1);
-
- /* Noise reduction modes. */
- uint8_t noiseReductionModes = ANDROID_NOISE_REDUCTION_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- &noiseReductionModes, 1);
-
- /* Scaler static metadata. */
- float maxDigitalZoom = 1;
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- &maxDigitalZoom, 1);
-
- std::vector<uint32_t> availableStreamFormats = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_FORMATS,
- availableStreamFormats.data(),
- availableStreamFormats.size());
-
- std::vector<uint32_t> availableStreamConfigurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- availableStreamConfigurations.data(),
- availableStreamConfigurations.size());
-
- std::vector<int64_t> availableStallDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- availableStallDurations.data(),
- availableStallDurations.size());
-
- std::vector<int64_t> minFrameDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- minFrameDurations.data(),
- minFrameDurations.size());
- uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY;
- staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1);
+ streams_.clear();
- /* Info static metadata. */
- uint8_t supportedHWLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
- staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- &supportedHWLevel, 1);
-
- /* Request static metadata. */
- int32_t partialResultCount = 1;
- staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- &partialResultCount, 1);
+ state_ = State::Stopped;
+}
- uint8_t maxPipelineDepth = 2;
- staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- &maxPipelineDepth, 1);
+unsigned int CameraDevice::maxJpegBufferSize() const
+{
+ return capabilities_.maxJpegBufferSize();
+}
- std::vector<uint8_t> availableCapabilities = {
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- availableCapabilities.data(),
- availableCapabilities.size());
-
- std::vector<int32_t> availableCharacteristicsKeys = {
- ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- ANDROID_CONTROL_AE_COMPENSATION_STEP,
- ANDROID_CONTROL_AF_AVAILABLE_MODES,
- ANDROID_CONTROL_AVAILABLE_EFFECTS,
- ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- ANDROID_CONTROL_MAX_REGIONS,
- ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- ANDROID_CONTROL_AVAILABLE_MODES,
- ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- ANDROID_SENSOR_ORIENTATION,
- ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- ANDROID_SYNC_MAX_LATENCY,
- ANDROID_FLASH_INFO_AVAILABLE,
- ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- ANDROID_LENS_FACING,
- ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- ANDROID_SCALER_AVAILABLE_FORMATS,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- ANDROID_SCALER_CROPPING_TYPE,
- ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
- availableCharacteristicsKeys.data(),
- availableCharacteristicsKeys.size());
-
- std::vector<int32_t> availableRequestKeys = {
- ANDROID_CONTROL_AE_MODE,
- ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_TRIGGER,
- ANDROID_CONTROL_AWB_MODE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_FLASH_MODE,
- ANDROID_STATISTICS_FACE_DETECT_MODE,
- ANDROID_NOISE_REDUCTION_MODE,
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- ANDROID_CONTROL_CAPTURE_INTENT,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
- availableRequestKeys.data(),
- availableRequestKeys.size());
-
- std::vector<int32_t> availableResultKeys = {
- ANDROID_CONTROL_AE_STATE,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_STATE,
- ANDROID_CONTROL_AWB_STATE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_LENS_STATE,
- ANDROID_SCALER_CROP_REGION,
- ANDROID_SENSOR_TIMESTAMP,
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- ANDROID_SENSOR_EXPOSURE_TIME,
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- ANDROID_STATISTICS_SCENE_FLICKER,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
- availableResultKeys.data(),
- availableResultKeys.size());
-
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to construct static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
- }
+void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
+{
+ callbacks_ = callbacks;
+}
- return staticMetadata_->get();
+const camera_metadata_t *CameraDevice::getStaticMetadata()
+{
+ return capabilities_.staticMetadata()->getMetadata();
}
/*
@@ -548,100 +469,53 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
{
auto it = requestTemplates_.find(type);
if (it != requestTemplates_.end())
- return it->second->get();
+ return it->second->getMetadata();
/* Use the capture intent matching the requested template type. */
+ std::unique_ptr<CameraMetadata> requestTemplate;
uint8_t captureIntent;
switch (type) {
case CAMERA3_TEMPLATE_PREVIEW:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ requestTemplate = capabilities_.requestTemplatePreview();
break;
case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ /*
+ * Use the preview template for still capture; the two only differ
+ * in the torch mode, which we do not currently support.
+ */
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ requestTemplate = capabilities_.requestTemplateStill();
break;
case CAMERA3_TEMPLATE_VIDEO_RECORD:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
- break;
- case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
- captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_MANUAL:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+ requestTemplate = capabilities_.requestTemplateManual();
break;
+ /* \todo Implement templates generation for the remaining use cases. */
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
default:
- LOG(HAL, Error) << "Invalid template request type: " << type;
- return nullptr;
- }
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 15 bytes
- */
- CameraMetadata *requestTemplate = new CameraMetadata(15, 20);
- if (!requestTemplate->isValid()) {
- LOG(HAL, Error) << "Failed to allocate template metadata";
- delete requestTemplate;
+ LOG(HAL, Error) << "Unsupported template request type: " << type;
return nullptr;
}
- uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE,
- &aeMode, 1);
-
- int32_t aeExposureCompensation = 0;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- &aeExposureCompensation, 1);
-
- uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- &aePrecaptureTrigger, 1);
-
- uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK,
- &aeLock, 1);
-
- uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER,
- &afTrigger, 1);
-
- uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE,
- &awbMode, 1);
-
- uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK,
- &awbLock, 1);
-
- uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
- requestTemplate->addEntry(ANDROID_FLASH_MODE,
- &flashMode, 1);
-
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
- &faceDetectMode, 1);
-
- uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE,
- &noiseReduction, 1);
-
- uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- &aberrationMode, 1);
-
- requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT,
- &captureIntent, 1);
-
- if (!requestTemplate->isValid()) {
+ if (!requestTemplate || !requestTemplate->isValid()) {
LOG(HAL, Error) << "Failed to construct request template";
- delete requestTemplate;
return nullptr;
}
- requestTemplates_[type] = requestTemplate;
- return requestTemplate->get();
+ requestTemplate->updateEntry(ANDROID_CONTROL_CAPTURE_INTENT,
+ captureIntent);
+
+ requestTemplates_[type] = std::move(requestTemplate);
+ return requestTemplates_[type]->getMetadata();
}
/*
@@ -650,217 +524,873 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
*/
int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list)
{
- for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
- camera3_stream_t *stream = stream_list->streams[i];
+ /* Before any configuration attempt, stop the camera. */
+ stop();
- LOG(HAL, Info) << "Stream #" << i
- << ", direction: " << stream->stream_type
- << ", width: " << stream->width
- << ", height: " << stream->height
- << ", format: " << utils::hex(stream->format);
+ if (stream_list->num_streams == 0) {
+ LOG(HAL, Error) << "No streams in configuration";
+ return -EINVAL;
}
- /* Hardcode viewfinder role, collecting sizes from the stream config. */
- if (stream_list->num_streams != 1) {
- LOG(HAL, Error) << "Only one stream supported";
+#if defined(OS_CHROMEOS)
+ if (!validateCropRotate(*stream_list))
return -EINVAL;
- }
+#endif
- StreamRoles roles = { StreamRole::Viewfinder };
- config_ = camera_->generateConfiguration(roles);
- if (!config_ || config_->empty()) {
+ /*
+ * Generate an empty configuration, and construct a StreamConfiguration
+ * for each camera3_stream to add to it.
+ */
+ std::unique_ptr<CameraConfiguration> config = camera_->generateConfiguration();
+ if (!config) {
LOG(HAL, Error) << "Failed to generate camera configuration";
return -EINVAL;
}
- /* Only one stream is supported. */
- camera3_stream_t *camera3Stream = stream_list->streams[0];
- StreamConfiguration *streamConfiguration = &config_->at(0);
- streamConfiguration->size.width = camera3Stream->width;
- streamConfiguration->size.height = camera3Stream->height;
-
/*
- * \todo We'll need to translate from Android defined pixel format codes
- * to the libcamera image format codes. For now, do not change the
- * format returned from Camera::generateConfiguration().
+ * Clear and remove any existing configuration from previous calls, and
+ * ensure the required entries are available without further
+ * reallocation.
*/
+ streams_.clear();
+ streams_.reserve(stream_list->num_streams);
+
+ std::vector<Camera3StreamConfig> streamConfigs;
+ streamConfigs.reserve(stream_list->num_streams);
+
+ /* First handle all non-MJPEG streams. */
+ camera3_stream_t *jpegStream = nullptr;
+ for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
+ camera3_stream_t *stream = stream_list->streams[i];
+ Size size(stream->width, stream->height);
+
+ PixelFormat format = capabilities_.toPixelFormat(stream->format);
+
+ LOG(HAL, Info) << "Stream #" << i
+ << ", direction: " << directionToString(stream->stream_type)
+ << ", width: " << stream->width
+ << ", height: " << stream->height
+ << ", format: " << utils::hex(stream->format)
+ << ", rotation: " << rotationToString(stream->rotation)
+#if defined(OS_CHROMEOS)
+ << ", crop_rotate_scale_degrees: "
+ << rotationToString(stream->crop_rotate_scale_degrees)
+#endif
+ << " (" << format << ")";
+
+ if (!format.isValid())
+ return -EINVAL;
+
+ /* \todo Support rotation. */
+ if (stream->rotation != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#if defined(OS_CHROMEOS)
+ if (stream->crop_rotate_scale_degrees != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#endif
+
+ /* Defer handling of MJPEG streams until all others are known. */
+ if (stream->format == HAL_PIXEL_FORMAT_BLOB) {
+ if (jpegStream) {
+ LOG(HAL, Error)
+ << "Multiple JPEG streams are not supported";
+ return -EINVAL;
+ }
+
+ jpegStream = stream;
+ continue;
+ }
+
+ /*
+ * While gralloc usage flags are supposed to report usage
+ * patterns to select a suitable buffer allocation strategy, in
+ * practice they're also used to make other decisions, such as
+ * selecting the actual format for the IMPLEMENTATION_DEFINED
+ * HAL pixel format. To avoid issues, we thus have to set the
+ * GRALLOC_USAGE_HW_CAMERA_WRITE flag unconditionally, even for
+ * streams that will be produced in software.
+ */
+ stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;
+
+ /*
+ * If a CameraStream with the same size and format as the
+ * current stream has already been requested, associate the two.
+ */
+ auto iter = std::find_if(
+ streamConfigs.begin(), streamConfigs.end(),
+ [&size, &format](const Camera3StreamConfig &streamConfig) {
+ return streamConfig.config.size == size &&
+ streamConfig.config.pixelFormat == format;
+ });
+ if (iter != streamConfigs.end()) {
+ /* Add usage to copy the buffer in streams[0] to stream. */
+ iter->streams[0].stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ stream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+ iter->streams.push_back({ stream, CameraStream::Type::Mapped });
+ continue;
+ }
+
+ Camera3StreamConfig streamConfig;
+ streamConfig.streams = { { stream, CameraStream::Type::Direct } };
+ streamConfig.config.size = size;
+ streamConfig.config.pixelFormat = format;
+ streamConfigs.push_back(std::move(streamConfig));
+ }
+
+ /* Now handle the MJPEG streams, adding a new stream if required. */
+ if (jpegStream) {
+ CameraStream::Type type;
+ int index = -1;
+
+ /* Search for a compatible stream in the non-JPEG ones. */
+ for (size_t i = 0; i < streamConfigs.size(); ++i) {
+ Camera3StreamConfig &streamConfig = streamConfigs[i];
+ const auto &cfg = streamConfig.config;
+
+ /*
+ * \todo The PixelFormat must also be compatible with
+ * the encoder.
+ */
+ if (cfg.size.width != jpegStream->width ||
+ cfg.size.height != jpegStream->height)
+ continue;
+
+ LOG(HAL, Info)
+ << "Android JPEG stream mapped to libcamera stream " << i;
+
+ type = CameraStream::Type::Mapped;
+ index = i;
+
+ /*
+ * The source stream will be read by software to
+ * produce the JPEG stream.
+ */
+ camera3_stream_t *stream = streamConfig.streams[0].stream;
+ stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ break;
+ }
+
+ /*
+ * Without a compatible match for JPEG encoding we must
+ * introduce a new stream to satisfy the request requirements.
+ */
+ if (index < 0) {
+ /*
+ * \todo The pixelFormat should be a 'best-fit' choice
+ * and may require a validation cycle. This is not yet
+ * handled, and should be considered as part of any
+ * stream configuration reworks.
+ */
+ Camera3StreamConfig streamConfig;
+ streamConfig.config.size.width = jpegStream->width;
+ streamConfig.config.size.height = jpegStream->height;
+ streamConfig.config.pixelFormat = formats::NV12;
+
+ LOG(HAL, Info) << "Adding " << streamConfig.config.toString()
+ << " for MJPEG support";
+
+ streamConfigs.push_back(std::move(streamConfig));
+
+ type = CameraStream::Type::Internal;
+ index = streamConfigs.size() - 1;
+ }
- switch (config_->validate()) {
+ /* The JPEG stream will be produced by software. */
+ jpegStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+
+ streamConfigs[index].streams.push_back({ jpegStream, type });
+ }
+
+ sortCamera3StreamConfigs(streamConfigs, jpegStream);
+ for (const auto &streamConfig : streamConfigs) {
+ config->addConfiguration(streamConfig.config);
+
+ CameraStream *sourceStream = nullptr;
+ for (auto &stream : streamConfig.streams) {
+ streams_.emplace_back(this, config.get(), stream.type,
+ stream.stream, sourceStream,
+ config->size() - 1);
+ stream.stream->priv = static_cast<void *>(&streams_.back());
+
+ /*
+ * The streamConfig.streams vector contains as its first
+ * element a Direct (or Internal) stream, and then an
+ * optional set of Mapped streams derived from the
+ * Direct stream. Cache the Direct stream pointer, to
+ * be used when constructing the subsequent mapped
+ * streams.
+ */
+ if (stream.type == CameraStream::Type::Direct)
+ sourceStream = &streams_.back();
+ }
+ }
+
+ switch (config->validate()) {
case CameraConfiguration::Valid:
break;
case CameraConfiguration::Adjusted:
LOG(HAL, Info) << "Camera configuration adjusted";
- config_.reset();
+
+ for (const StreamConfiguration &cfg : *config)
+ LOG(HAL, Info) << " - " << cfg.toString();
+
return -EINVAL;
case CameraConfiguration::Invalid:
LOG(HAL, Info) << "Camera configuration invalid";
- config_.reset();
return -EINVAL;
}
- camera3Stream->max_buffers = streamConfiguration->bufferCount;
-
/*
* Once the CameraConfiguration has been adjusted/validated
* it can be applied to the camera.
*/
- int ret = camera_->configure(config_.get());
+ int ret = camera_->configure(config.get());
if (ret) {
LOG(HAL, Error) << "Failed to configure camera '"
- << camera_->name() << "'";
+ << camera_->id() << "'";
return ret;
}
+ /*
+ * Configure the HAL CameraStream instances using the associated
+ * StreamConfiguration and set the number of required buffers in
+ * the Android camera3_stream_t.
+ */
+ for (CameraStream &cameraStream : streams_) {
+ ret = cameraStream.configure();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to configure camera stream";
+ return ret;
+ }
+ }
+
+ config_ = std::move(config);
return 0;
}
-int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+std::unique_ptr<HALFrameBuffer>
+CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer,
+ PixelFormat pixelFormat, const Size &size)
{
- StreamConfiguration *streamConfiguration = &config_->at(0);
- Stream *stream = streamConfiguration->stream();
+ CameraBuffer buf(camera3buffer, pixelFormat, size, PROT_READ);
+ if (!buf.isValid()) {
+ LOG(HAL, Fatal) << "Failed to create CameraBuffer";
+ return nullptr;
+ }
- if (camera3Request->num_output_buffers != 1) {
- LOG(HAL, Error) << "Invalid number of output buffers: "
- << camera3Request->num_output_buffers;
- return -EINVAL;
+ std::vector<FrameBuffer::Plane> planes(buf.numPlanes());
+ for (size_t i = 0; i < buf.numPlanes(); ++i) {
+ SharedFD fd{ camera3buffer->data[i] };
+ if (!fd.isValid()) {
+ LOG(HAL, Fatal) << "No valid fd";
+ return nullptr;
+ }
+
+ planes[i].fd = fd;
+ planes[i].offset = buf.offset(i);
+ planes[i].length = buf.size(i);
}
- /* Start the camera if that's the first request we handle. */
- if (!running_) {
- int ret = camera_->start();
- if (ret) {
- LOG(HAL, Error) << "Failed to start camera";
- return ret;
+ return std::make_unique<HALFrameBuffer>(planes, camera3buffer);
+}
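+
+/*
+ * Illustrative plane layout (hypothetical two-plane NV12 1920x1080
+ * buffer): plane 0 wraps the Y data at offset 0 with a 1920 * 1080 byte
+ * length, and plane 1 the interleaved UV data at the offset and length
+ * reported by CameraBuffer; each plane references the matching dmabuf
+ * fd from the native handle.
+ */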
+
+int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
+{
+ const CameraMetadata &settings = descriptor->settings_;
+ if (!settings.isValid())
+ return 0;
+
+ /* Translate the Android request settings to libcamera controls. */
+ ControlList &controls = descriptor->request_->controls();
+ camera_metadata_ro_entry_t entry;
+ if (settings.getEntry(ANDROID_SCALER_CROP_REGION, &entry)) {
+ const int32_t *data = entry.data.i32;
+ Rectangle cropRegion{ data[0], data[1],
+ static_cast<unsigned int>(data[2]),
+ static_cast<unsigned int>(data[3]) };
+ controls.set(controls::ScalerCrop, cropRegion);
+ }
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) {
+ const uint8_t *data = entry.data.u8;
+ controls.set(controls::draft::FaceDetectMode, data[0]);
+ }
+
+ if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) {
+ const int32_t data = *entry.data.i32;
+ int32_t testPatternMode = controls::draft::TestPatternModeOff;
+ switch (data) {
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_OFF:
+ testPatternMode = controls::draft::TestPatternModeOff;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR:
+ testPatternMode = controls::draft::TestPatternModeSolidColor;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS:
+ testPatternMode = controls::draft::TestPatternModeColorBars;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY:
+ testPatternMode = controls::draft::TestPatternModeColorBarsFadeToGray;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_PN9:
+ testPatternMode = controls::draft::TestPatternModePn9;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1:
+ testPatternMode = controls::draft::TestPatternModeCustom1;
+ break;
+
+ default:
+ LOG(HAL, Error)
+ << "Unknown test pattern mode: " << data;
+
+ return -EINVAL;
}
- running_ = true;
+ controls.set(controls::draft::TestPatternMode, testPatternMode);
}
- /*
- * Queue a request for the Camera with the provided dmabuf file
- * descriptors.
- */
- const camera3_stream_buffer_t *camera3Buffers =
- camera3Request->output_buffers;
+ return 0;
+}
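+
+/*
+ * Example translation (hypothetical request settings): an Android
+ * request carrying ANDROID_SCALER_CROP_REGION = { 0, 0, 1280, 720 } is
+ * converted to controls::ScalerCrop = Rectangle(0, 0, 1280, 720) on the
+ * libcamera request.
+ */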
+
+void CameraDevice::abortRequest(Camera3RequestDescriptor *descriptor) const
+{
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST);
+
+ for (auto &buffer : descriptor->buffers_)
+ buffer.status = Camera3RequestDescriptor::Status::Error;
+
+ descriptor->status_ = Camera3RequestDescriptor::Status::Error;
+}
+
+bool CameraDevice::isValidRequest(camera3_capture_request_t *camera3Request) const
+{
+ if (!camera3Request) {
+ LOG(HAL, Error) << "No capture request provided";
+ return false;
+ }
+
+ if (!camera3Request->num_output_buffers ||
+ !camera3Request->output_buffers) {
+ LOG(HAL, Error) << "No output buffers provided";
+ return false;
+ }
+
+ /* configureStreams() has not been called or has failed. */
+ if (streams_.empty() || !config_) {
+ LOG(HAL, Error) << "No stream is configured";
+ return false;
+ }
+
+ for (uint32_t i = 0; i < camera3Request->num_output_buffers; i++) {
+ const camera3_stream_buffer_t &outputBuffer =
+ camera3Request->output_buffers[i];
+ if (!outputBuffer.buffer || !(*outputBuffer.buffer)) {
+ LOG(HAL, Error) << "Invalid native handle";
+ return false;
+ }
+
+ const native_handle_t *handle = *outputBuffer.buffer;
+ constexpr int kNativeHandleMaxFds = 1024;
+ if (handle->numFds < 0 || handle->numFds > kNativeHandleMaxFds) {
+ LOG(HAL, Error)
+ << "Invalid number of fds (" << handle->numFds
+ << ") in buffer " << i;
+ return false;
+ }
+
+ constexpr int kNativeHandleMaxInts = 1024;
+ if (handle->numInts < 0 || handle->numInts > kNativeHandleMaxInts) {
+ LOG(HAL, Error)
+ << "Invalid number of ints (" << handle->numInts
+ << ") in buffer " << i;
+ return false;
+ }
+
+ const camera3_stream *camera3Stream = outputBuffer.stream;
+ if (!camera3Stream)
+ return false;
+
+ const CameraStream *cameraStream =
+ static_cast<CameraStream *>(camera3Stream->priv);
+
+ auto found = std::find_if(streams_.begin(), streams_.end(),
+ [cameraStream](const CameraStream &stream) {
+ return &stream == cameraStream;
+ });
+ if (found == streams_.end()) {
+ LOG(HAL, Error)
+ << "No corresponding configured stream found";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+{
+ if (!isValidRequest(camera3Request))
+ return -EINVAL;
/*
* Save the request descriptors for use at completion time.
* The descriptor and the associated memory reserved here are freed
* at request complete time.
*/
- Camera3RequestDescriptor *descriptor =
- new Camera3RequestDescriptor(camera3Request->frame_number,
- camera3Request->num_output_buffers);
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
+ auto descriptor = std::make_unique<Camera3RequestDescriptor>(camera_.get(),
+ camera3Request);
+
+ /*
+	 * \todo The Android request model is incremental: settings passed in
+	 * previous requests remain effective until overridden explicitly in a
+	 * new request. Do we need to cache settings incrementally here, or is
+	 * it handled by the Android camera service?
+ */
+ if (camera3Request->settings)
+ lastSettings_ = camera3Request->settings;
+
+ descriptor->settings_ = lastSettings_;
+
+ LOG(HAL, Debug) << "Queueing request " << descriptor->request_->cookie()
+ << " with " << descriptor->buffers_.size() << " streams";
+
+ /*
+ * Process all the Direct and Internal streams first, they map directly
+ * to a libcamera stream. Streams of type Mapped will be handled later.
+ *
+	 * Collect the CameraStream associated with each requested capture
+	 * stream. Since requestedStreams is a std::set<>, duplicates are
+	 * automatically eliminated.
+ */
+ std::set<CameraStream *> requestedStreams;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ std::stringstream ss;
+ ss << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]";
+
/*
- * Keep track of which stream the request belongs to and store
- * the native buffer handles.
- *
- * \todo Currently we only support one capture buffer. Copy
- * all of them to be ready once we'll support more.
+		 * Inspect the camera stream type, create buffers as needed and
+		 * add them to the Request if required.
*/
- descriptor->buffers[i].stream = camera3Buffers[i].stream;
- descriptor->buffers[i].buffer = camera3Buffers[i].buffer;
+ FrameBuffer *frameBuffer = nullptr;
+ UniqueFD acquireFence;
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+
+ switch (cameraStream->type()) {
+ case CameraStream::Type::Mapped:
+ /* Mapped streams will be handled in the next loop. */
+ continue;
+
+ case CameraStream::Type::Direct:
+ /*
+ * Create a libcamera buffer using the dmabuf
+ * descriptors of the camera3Buffer for each stream and
+ * associate it with the Camera3RequestDescriptor for
+ * lifetime management only.
+ */
+ buffer.frameBuffer =
+ createFrameBuffer(*buffer.camera3Buffer,
+ cameraStream->configuration().pixelFormat,
+ cameraStream->configuration().size);
+ frameBuffer = buffer.frameBuffer.get();
+ acquireFence = std::move(buffer.fence);
+ LOG(HAL, Debug) << ss.str() << " (direct)";
+ break;
+
+ case CameraStream::Type::Internal:
+ /*
+ * Get the frame buffer from the CameraStream internal
+ * buffer pool.
+ *
+ * The buffer has to be returned to the CameraStream
+ * once it has been processed.
+ */
+ frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+ LOG(HAL, Debug) << ss.str() << " (internal)";
+
+ descriptor->pendingStreamsToProcess_.insert(
+ { cameraStream, &buffer });
+ break;
+ }
+
+ if (!frameBuffer) {
+ LOG(HAL, Error) << "Failed to create frame buffer";
+ return -ENOMEM;
+ }
+
+ auto fence = std::make_unique<Fence>(std::move(acquireFence));
+ descriptor->request_->addBuffer(cameraStream->stream(),
+ frameBuffer, std::move(fence));
+
+ requestedStreams.insert(cameraStream);
}
/*
- * Create a libcamera buffer using the dmabuf descriptors of the first
- * and (currently) only supported request buffer.
+ * Now handle the Mapped streams. If no buffer has been added for them
+ * because their corresponding direct source stream is not part of this
+ * particular request, add one here.
*/
- const buffer_handle_t camera3Handle = *camera3Buffers[0].buffer;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ if (cameraStream->type() != CameraStream::Type::Mapped)
+ continue;
+
+ LOG(HAL, Debug) << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]"
+ << " (mapped)";
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+ descriptor->pendingStreamsToProcess_.insert({ cameraStream, &buffer });
+
+ /*
+		 * Make sure the CameraStream this stream is mapped onto has been
+ * added to the request.
+ */
+ CameraStream *sourceStream = cameraStream->sourceStream();
+ ASSERT(sourceStream);
+ if (requestedStreams.find(sourceStream) != requestedStreams.end())
+ continue;
- std::vector<FrameBuffer::Plane> planes;
- for (int i = 0; i < 3; i++) {
- FrameBuffer::Plane plane;
- plane.fd = FileDescriptor(camera3Handle->data[i]);
/*
- * Setting length to zero here is OK as the length is only used
- * to map the memory of the plane. Libcamera do not need to poke
- * at the memory content queued by the HAL.
+ * If that's not the case, we need to add a buffer to the request
+ * for this stream.
*/
- plane.length = 0;
- planes.push_back(std::move(plane));
+ FrameBuffer *frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+
+ descriptor->request_->addBuffer(sourceStream->stream(),
+ frameBuffer, nullptr);
+
+ requestedStreams.insert(sourceStream);
+ }
+
+ /*
+ * Translate controls from Android to libcamera and queue the request
+ * to the camera.
+ */
+ int ret = processControls(descriptor.get());
+ if (ret)
+ return ret;
+
+ /*
+	 * If a flush is in progress, set the request status to error and place
+	 * it on the queue to be completed later. If the camera has been
+	 * stopped, we have to restart it to be able to process the request.
+ */
+ MutexLocker stateLock(stateMutex_);
+
+ if (state_ == State::Flushing) {
+ Camera3RequestDescriptor *rawDescriptor = descriptor.get();
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
+ }
+ abortRequest(rawDescriptor);
+ completeDescriptor(rawDescriptor);
+
+ return 0;
}
- FrameBuffer *buffer = new FrameBuffer(std::move(planes));
- if (!buffer) {
- LOG(HAL, Error) << "Failed to create buffer";
- delete descriptor;
- return -ENOMEM;
+ if (state_ == State::Stopped) {
+ lastSettings_ = {};
+
+ ret = camera_->start();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to start camera";
+ return ret;
+ }
+
+ state_ = State::Running;
}
- Request *request =
- camera_->createRequest(reinterpret_cast<uint64_t>(descriptor));
- request->addBuffer(stream, buffer);
+ Request *request = descriptor->request_.get();
- int ret = camera_->queueRequest(request);
- if (ret) {
- LOG(HAL, Error) << "Failed to queue request";
- delete request;
- delete descriptor;
- return ret;
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
}
+ camera_->queueRequest(request);
+
return 0;
}
void CameraDevice::requestComplete(Request *request)
{
- const std::map<Stream *, FrameBuffer *> &buffers = request->buffers();
- FrameBuffer *buffer = buffers.begin()->second;
- camera3_buffer_status status = CAMERA3_BUFFER_STATUS_OK;
- std::unique_ptr<CameraMetadata> resultMetadata;
+ Camera3RequestDescriptor *descriptor =
+ reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+
+ /*
+ * Prepare the capture result for the Android camera stack.
+ *
+ * The buffer status is set to Success and later changed to Error if
+ * post-processing/compression fails.
+ */
+ for (auto &buffer : descriptor->buffers_) {
+ CameraStream *stream = buffer.stream;
+
+ /*
+ * Streams of type Direct have been queued to the
+ * libcamera::Camera and their acquire fences have
+ * already been waited on by the library.
+ *
+ * Acquire fences of streams of type Internal and Mapped
+ * will be handled during post-processing.
+ */
+ if (stream->type() == CameraStream::Type::Direct) {
+			/* If handling of the fence has failed, restore buffer.fence. */
+ std::unique_ptr<Fence> fence = buffer.frameBuffer->releaseFence();
+ if (fence)
+ buffer.fence = fence->release();
+ }
+ buffer.status = Camera3RequestDescriptor::Status::Success;
+ }
+ /*
+ * If the Request has failed, abort the request by notifying the error
+ * and complete the request with all buffers in error state.
+ */
if (request->status() != Request::RequestComplete) {
- LOG(HAL, Error) << "Request not succesfully completed: "
+ LOG(HAL, Error) << "Request " << request->cookie()
+ << " not successfully completed: "
<< request->status();
- status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ abortRequest(descriptor);
+ completeDescriptor(descriptor);
+
+ return;
}
- /* Prepare to call back the Android camera stack. */
- Camera3RequestDescriptor *descriptor =
- reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+ /*
+ * Notify shutter as soon as we have verified we have a valid request.
+ *
+ * \todo The shutter event notification should be sent to the framework
+ * as soon as possible, earlier than request completion time.
+ */
+ uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
+ .get(controls::SensorTimestamp)
+ .value_or(0));
+ notifyShutter(descriptor->frameNumber_, sensorTimestamp);
+
+ LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
+ << descriptor->request_->buffers().size() << " streams";
+
+ /*
+ * Generate the metadata associated with the captured buffers.
+ *
+ * Notify if the metadata generation has failed, but continue processing
+ * buffers and return an empty metadata pack.
+ */
+ descriptor->resultMetadata_ = getResultMetadata(*descriptor);
+ if (!descriptor->resultMetadata_) {
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_RESULT);
- camera3_capture_result_t captureResult = {};
- captureResult.frame_number = descriptor->frameNumber;
- captureResult.num_output_buffers = descriptor->numBuffers;
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
/*
- * \todo Currently we only support one capture buffer. Prepare
- * all of them to be ready once we'll support more.
+ * The camera framework expects an empty metadata pack on error.
+ *
+ * \todo Check that the post-processor code handles this situation
+ * correctly.
*/
- descriptor->buffers[i].acquire_fence = -1;
- descriptor->buffers[i].release_fence = -1;
- descriptor->buffers[i].status = status;
+ descriptor->resultMetadata_ = std::make_unique<CameraMetadata>(0, 0);
}
- captureResult.output_buffers =
- const_cast<const camera3_stream_buffer_t *>(descriptor->buffers);
- if (status == CAMERA3_BUFFER_STATUS_OK) {
- notifyShutter(descriptor->frameNumber,
- buffer->metadata().timestamp);
+ /* Handle post-processing. */
+ MutexLocker locker(descriptor->streamsProcessMutex_);
- captureResult.partial_result = 1;
- resultMetadata = getResultMetadata(descriptor->frameNumber,
- buffer->metadata().timestamp);
- captureResult.result = resultMetadata->get();
+ /*
+	 * Queue all the post-processing stream requests at once. The completion
+	 * slot streamProcessingComplete() can only execute once we are out of
+	 * this critical section, which makes it possible to handle synchronous
+	 * errors right here.
+ */
+ auto iter = descriptor->pendingStreamsToProcess_.begin();
+ while (iter != descriptor->pendingStreamsToProcess_.end()) {
+ CameraStream *stream = iter->first;
+ Camera3RequestDescriptor::StreamBuffer *buffer = iter->second;
+
+ FrameBuffer *src = request->findBuffer(stream->stream());
+ if (!src) {
+ LOG(HAL, Error) << "Failed to find a source stream buffer";
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ iter = descriptor->pendingStreamsToProcess_.erase(iter);
+ continue;
+ }
+
+ buffer->srcBuffer = src;
+
+ ++iter;
+ int ret = stream->process(buffer);
+ if (ret) {
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ descriptor->pendingStreamsToProcess_.erase(stream);
+
+ /*
+ * If the framebuffer is internal to CameraStream return
+ * it back now that we're done processing it.
+ */
+ if (buffer->internalBuffer)
+ stream->putBuffer(buffer->internalBuffer);
+ }
}
- if (status == CAMERA3_BUFFER_STATUS_ERROR || !captureResult.result) {
- /* \todo Improve error handling. In case we notify an error
- * because the metadata generation fails, a shutter event has
- * already been notified for this frame number before the error
- * is here signalled. Make sure the error path plays well with
- * the camera stack state machine.
- */
- notifyError(descriptor->frameNumber,
- descriptor->buffers[0].stream);
+ if (descriptor->pendingStreamsToProcess_.empty()) {
+ locker.unlock();
+ completeDescriptor(descriptor);
}
+}
- callbacks_->process_capture_result(callbacks_, &captureResult);
+/**
+ * \brief Complete the Camera3RequestDescriptor
+ * \param[in] descriptor The Camera3RequestDescriptor that has completed
+ *
+ * The function marks the Camera3RequestDescriptor as 'complete'. It shall be
+ * called when all the streams in the Camera3RequestDescriptor have completed
+ * capture (or have been generated via post-processing) and the request is ready
+ * to be sent back to the framework.
+ *
+ * \context This function is \threadsafe.
+ */
+void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor)
+{
+ MutexLocker lock(descriptorsMutex_);
+ descriptor->complete_ = true;
- delete descriptor;
- delete buffer;
+ sendCaptureResults();
+}
+
+/**
+ * \brief Sequentially send capture results to the framework
+ *
+ * Iterate over the descriptors queue to send completed descriptors back to the
+ * framework, in the same order as they have been queued. For each complete
+ * descriptor, populate a locally-scoped camera3_capture_result_t from the
+ * descriptor, send the capture result back by calling the
+ * process_capture_result() callback, and remove the descriptor from the queue.
+ * Stop iterating if the descriptor at the front of the queue is not complete.
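+ *
+ * For example, if requests 1, 2 and 3 are queued and request 2 completes
+ * first, nothing is sent until request 1 also completes; the results for
+ * requests 1 and 2 are then sent back-to-back, and request 3 follows once
+ * complete.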
+ *
+ * This function should never be called directly in the codebase. Use
+ * completeDescriptor() instead.
+ */
+void CameraDevice::sendCaptureResults()
+{
+ while (!descriptors_.empty() && !descriptors_.front()->isPending()) {
+ auto descriptor = std::move(descriptors_.front());
+ descriptors_.pop();
+
+ camera3_capture_result_t captureResult = {};
+
+ captureResult.frame_number = descriptor->frameNumber_;
+
+ if (descriptor->resultMetadata_)
+ captureResult.result =
+ descriptor->resultMetadata_->getMetadata();
+
+ std::vector<camera3_stream_buffer_t> resultBuffers;
+ resultBuffers.reserve(descriptor->buffers_.size());
+
+ for (auto &buffer : descriptor->buffers_) {
+ camera3_buffer_status status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ if (buffer.status == Camera3RequestDescriptor::Status::Success)
+ status = CAMERA3_BUFFER_STATUS_OK;
+
+ /*
+ * Pass the buffer fence back to the camera framework as
+ * a release fence. This instructs the framework to wait
+ * on the acquire fence in case we haven't done so
+ * ourselves for any reason.
+ */
+ resultBuffers.push_back({ buffer.stream->camera3Stream(),
+ buffer.camera3Buffer, status,
+ -1, buffer.fence.release() });
+ }
+
+ captureResult.num_output_buffers = resultBuffers.size();
+ captureResult.output_buffers = resultBuffers.data();
+
+ if (descriptor->status_ == Camera3RequestDescriptor::Status::Success)
+ captureResult.partial_result = 1;
+
+ callbacks_->process_capture_result(callbacks_, &captureResult);
+ }
+}
+
+void CameraDevice::setBufferStatus(Camera3RequestDescriptor::StreamBuffer &streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ streamBuffer.status = status;
+ if (status != Camera3RequestDescriptor::Status::Success) {
+ notifyError(streamBuffer.request->frameNumber_,
+ streamBuffer.stream->camera3Stream(),
+ CAMERA3_MSG_ERROR_BUFFER);
+
+ /* Also set error status on entire request descriptor. */
+ streamBuffer.request->status_ =
+ Camera3RequestDescriptor::Status::Error;
+ }
+}
+
+/**
+ * \brief Handle post-processing completion of a stream in a capture request
+ * \param[in] streamBuffer The StreamBuffer for which processing is complete
+ * \param[in] status Stream post-processing status
+ *
+ * This function is called from the post-processor's thread whenever a camera
+ * stream has finished post-processing. The corresponding entry is dropped from
+ * the descriptor's pendingStreamsToProcess_ map.
+ *
+ * If the pendingStreamsToProcess_ map is then empty, all streams that required
+ * post-processing have completed. Mark the descriptor as complete using
+ * completeDescriptor() in that case.
+ */
+void CameraDevice::streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ setBufferStatus(*streamBuffer, status);
+
+ /*
+ * If the framebuffer is internal to CameraStream return it back now
+ * that we're done processing it.
+ */
+ if (streamBuffer->internalBuffer)
+ streamBuffer->stream->putBuffer(streamBuffer->internalBuffer);
+
+ Camera3RequestDescriptor *request = streamBuffer->request;
+
+ {
+ MutexLocker locker(request->streamsProcessMutex_);
+
+ request->pendingStreamsToProcess_.erase(streamBuffer->stream);
+ if (!request->pendingStreamsToProcess_.empty())
+ return;
+ }
+
+ completeDescriptor(streamBuffer->request);
+}
+
+std::string CameraDevice::logPrefix() const
+{
+ return "'" + camera_->id() + "'";
}
void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
@@ -874,14 +1404,15 @@ void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
callbacks_->notify(callbacks_, &notify);
}
-void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
+void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream,
+ camera3_error_msg_code code) const
{
camera3_notify_msg_t notify = {};
notify.type = CAMERA3_MSG_ERROR;
notify.message.error.error_stream = stream;
notify.message.error.frame_number = frameNumber;
- notify.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+ notify.message.error.error_code = code;
callbacks_->notify(callbacks_, &notify);
}
@@ -889,63 +1420,242 @@ void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
/*
* Produce a set of fixed result metadata.
*/
-std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number,
- int64_t timestamp)
+std::unique_ptr<CameraMetadata>
+CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) const
{
+ const ControlList &metadata = descriptor.request_->metadata();
+ const CameraMetadata &settings = descriptor.settings_;
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
/*
* \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 36 bytes
+ * Currently: 40 entries, 156 bytes
+ *
+ * Reserve more space for the JPEG metadata set by the post-processor.
+ * Currently:
+ * ANDROID_JPEG_GPS_COORDINATES (double x 3) = 24 bytes
+ * ANDROID_JPEG_GPS_PROCESSING_METHOD (byte x 32) = 32 bytes
+ * ANDROID_JPEG_GPS_TIMESTAMP (int64) = 8 bytes
+ * ANDROID_JPEG_SIZE (int32_t) = 4 bytes
+ * ANDROID_JPEG_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_ORIENTATION (int32_t) = 4 bytes
+ * ANDROID_JPEG_THUMBNAIL_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_THUMBNAIL_SIZE (int32 x 2) = 8 bytes
+ * Total bytes for JPEG metadata: 82
*/
std::unique_ptr<CameraMetadata> resultMetadata =
- std::make_unique<CameraMetadata>(15, 50);
+ std::make_unique<CameraMetadata>(88, 166);
if (!resultMetadata->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
+ LOG(HAL, Error) << "Failed to allocate result metadata";
return nullptr;
}
- const uint8_t ae_state = ANDROID_CONTROL_AE_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, &ae_state, 1);
+ /*
+	 * \todo The values of the result metadata currently copied from the
+	 * settings will have to be passed to the libcamera::Camera and
+	 * extracted from libcamera::Request::metadata.
+ */
- const uint8_t ae_lock = ANDROID_CONTROL_AE_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, &ae_lock, 1);
+ uint8_t value = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ value);
- uint8_t af_state = ANDROID_CONTROL_AF_STATE_INACTIVE;
- resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, &af_state, 1);
+ value = ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE, value);
- const uint8_t awb_state = ANDROID_CONTROL_AWB_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, &awb_state, 1);
+ int32_t value32 = 0;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ value32);
- const uint8_t awb_lock = ANDROID_CONTROL_AWB_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, &awb_lock, 1);
+ value = ANDROID_CONTROL_AE_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, value);
- const uint8_t lens_state = ANDROID_LENS_STATE_STATIONARY;
- resultMetadata->addEntry(ANDROID_LENS_STATE, &lens_state, 1);
+ value = ANDROID_CONTROL_AE_MODE_ON;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_MODE, value);
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, sensorSizes, 4);
+ if (settings.getEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry))
+ /*
+ * \todo Retrieve the AE FPS range from the libcamera metadata.
+		 * As libcamera does not support that control yet, return what
+		 * the framework asked for as a temporary workaround.
+ */
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32, 2);
+
+ found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry);
+ value = found ? *entry.data.u8 :
+ (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AE_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, value);
+
+ value = ANDROID_CONTROL_AF_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_MODE, value);
+
+ value = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, value);
+
+ value = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AWB_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_MODE, value);
+
+ value = ANDROID_CONTROL_AWB_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, value);
+
+ value = ANDROID_CONTROL_AWB_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, value);
+
+ value = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ resultMetadata->addEntry(ANDROID_CONTROL_CAPTURE_INTENT, value);
+
+ value = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_EFFECT_MODE, value);
+
+ value = ANDROID_CONTROL_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_MODE, value);
- resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
+ value = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+ resultMetadata->addEntry(ANDROID_CONTROL_SCENE_MODE, value);
+
+ value = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, value);
+
+ value = ANDROID_FLASH_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_FLASH_MODE, value);
+
+ value = ANDROID_FLASH_STATE_UNAVAILABLE;
+ resultMetadata->addEntry(ANDROID_FLASH_STATE, value);
+
+ if (settings.getEntry(ANDROID_LENS_APERTURE, &entry))
+ resultMetadata->addEntry(ANDROID_LENS_APERTURE, entry.data.f, 1);
+
+ float focal_length = 1.0;
+ resultMetadata->addEntry(ANDROID_LENS_FOCAL_LENGTH, focal_length);
+
+ value = ANDROID_LENS_STATE_STATIONARY;
+ resultMetadata->addEntry(ANDROID_LENS_STATE, value);
+
+ value = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ value);
+
+ value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32);
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry))
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ entry.data.u8, 1);
+
+ value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ value);
+
+ value = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, value);
+
+ value = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
+ resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER, value);
+
+ value = ANDROID_NOISE_REDUCTION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_NOISE_REDUCTION_MODE, value);
/* 33.3 msec */
const int64_t rolling_shutter_skew = 33300000;
resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- &rolling_shutter_skew, 1);
+ rolling_shutter_skew);
+
+ /* Add metadata tags reported by libcamera. */
+ const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
+ resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
+
+ const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+ if (pipelineDepth)
+ resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
+ *pipelineDepth);
+
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+ *exposureTime * 1000ULL);
+
+ const auto &frameDuration = metadata.get(controls::FrameDuration);
+ if (frameDuration)
+ resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
+ *frameDuration * 1000);
+
+ const auto &faceDetectRectangles =
+ metadata.get(controls::draft::FaceDetectFaceRectangles);
+ if (faceDetectRectangles) {
+ std::vector<int32_t> flatRectangles;
+ for (const Rectangle &rect : *faceDetectRectangles) {
+ flatRectangles.push_back(rect.x);
+ flatRectangles.push_back(rect.y);
+ flatRectangles.push_back(rect.x + rect.width);
+ flatRectangles.push_back(rect.y + rect.height);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
+ }
- /* 16.6 msec */
- const int64_t exposure_time = 16600000;
- resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
- &exposure_time, 1);
+ const auto &faceDetectFaceScores =
+ metadata.get(controls::draft::FaceDetectFaceScores);
+ if (faceDetectRectangles && faceDetectFaceScores) {
+ if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceScores->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
+ *faceDetectFaceScores);
+ }
- const uint8_t lens_shading_map_mode =
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
- resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- &lens_shading_map_mode, 1);
+ const auto &faceDetectFaceLandmarks =
+ metadata.get(controls::draft::FaceDetectFaceLandmarks);
+ if (faceDetectRectangles && faceDetectFaceLandmarks) {
+ size_t expectedLandmarks = faceDetectRectangles->size() * 3;
+ if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
+ << "Expected: " << expectedLandmarks
+ << ", got: " << faceDetectFaceLandmarks->size();
+ }
- const uint8_t scene_flicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
- resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER,
- &scene_flicker, 1);
+ std::vector<int32_t> androidLandmarks;
+ for (const Point &landmark : *faceDetectFaceLandmarks) {
+ androidLandmarks.push_back(landmark.x);
+ androidLandmarks.push_back(landmark.y);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
+ }
+
+ const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
+ if (faceDetectRectangles && faceDetectFaceIds) {
+ if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceIds->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
+ }
+
+ const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+ if (scalerCrop) {
+ const Rectangle &crop = *scalerCrop;
+ int32_t cropRect[] = {
+ crop.x, crop.y, static_cast<int32_t>(crop.width),
+ static_cast<int32_t>(crop.height),
+ };
+ resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
+ }
+
+ const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+ if (testPatternMode)
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ *testPatternMode);
/*
 	 * Return the result metadata pack even if it is not valid: get() will return
@@ -955,5 +1665,12 @@ std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number
LOG(HAL, Error) << "Failed to construct result metadata";
}
+ if (resultMetadata->resized()) {
+ auto [entryCount, dataCount] = resultMetadata->usage();
+ LOG(HAL, Info)
+ << "Result metadata resized: " << entryCount
+ << " entries and " << dataCount << " bytes used";
+ }
+
return resultMetadata;
}
diff --git a/src/android/camera_device.h b/src/android/camera_device.h
index 55eac317..194ca303 100644
--- a/src/android/camera_device.h
+++ b/src/android/camera_device.h
@@ -2,35 +2,62 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.h - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
-#ifndef __ANDROID_CAMERA_DEVICE_H__
-#define __ANDROID_CAMERA_DEVICE_H__
+#pragma once
+
+#include <map>
#include <memory>
+#include <queue>
+#include <vector>
#include <hardware/camera3.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "message.h"
+#include "camera_capabilities.h"
+#include "camera_metadata.h"
+#include "camera_stream.h"
+#include "hal_framebuffer.h"
+#include "jpeg/encoder.h"
-class CameraMetadata;
+class Camera3RequestDescriptor;
+struct CameraConfigData;
-class CameraDevice
+class CameraDevice : protected libcamera::Loggable
{
public:
- CameraDevice(unsigned int id, const std::shared_ptr<libcamera::Camera> &camera);
+ static std::unique_ptr<CameraDevice> create(unsigned int id,
+ std::shared_ptr<libcamera::Camera> cam);
~CameraDevice();
+ int initialize(const CameraConfigData *cameraConfigData);
+
int open(const hw_module_t *hardwareModule);
void close();
+ void flush();
unsigned int id() const { return id_; }
camera3_device_t *camera3Device() { return &camera3Device_; }
+ const CameraCapabilities *capabilities() const { return &capabilities_; }
+ const std::shared_ptr<libcamera::Camera> &camera() const { return camera_; }
+
+ const std::string &maker() const { return maker_; }
+ const std::string &model() const { return model_; }
+ int facing() const { return facing_; }
+ int orientation() const { return orientation_; }
+ unsigned int maxJpegBufferSize() const;
void setCallbacks(const camera3_callback_ops_t *callbacks);
const camera_metadata_t *getStaticMetadata();
@@ -38,33 +65,67 @@ public:
int configureStreams(camera3_stream_configuration_t *stream_list);
int processCaptureRequest(camera3_capture_request_t *request);
void requestComplete(libcamera::Request *request);
+ void streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *bufferStream,
+ Camera3RequestDescriptor::Status status);
+
+protected:
+ std::string logPrefix() const override;
private:
- struct Camera3RequestDescriptor {
- Camera3RequestDescriptor(unsigned int frameNumber,
- unsigned int numBuffers);
- ~Camera3RequestDescriptor();
-
- uint32_t frameNumber;
- uint32_t numBuffers;
- camera3_stream_buffer_t *buffers;
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraDevice)
+
+ CameraDevice(unsigned int id, std::shared_ptr<libcamera::Camera> camera);
+
+ enum class State {
+ Stopped,
+ Flushing,
+ Running,
};
+ void stop() LIBCAMERA_TSA_EXCLUDES(stateMutex_);
+
+ std::unique_ptr<HALFrameBuffer>
+ createFrameBuffer(const buffer_handle_t camera3buffer,
+ libcamera::PixelFormat pixelFormat,
+ const libcamera::Size &size);
+ void abortRequest(Camera3RequestDescriptor *descriptor) const;
+ bool isValidRequest(camera3_capture_request_t *request) const;
void notifyShutter(uint32_t frameNumber, uint64_t timestamp);
- void notifyError(uint32_t frameNumber, camera3_stream_t *stream);
- std::unique_ptr<CameraMetadata> getResultMetadata(int frame_number,
- int64_t timestamp);
+ void notifyError(uint32_t frameNumber, camera3_stream_t *stream,
+ camera3_error_msg_code code) const;
+ int processControls(Camera3RequestDescriptor *descriptor);
+ void completeDescriptor(Camera3RequestDescriptor *descriptor)
+ LIBCAMERA_TSA_EXCLUDES(descriptorsMutex_);
+ void sendCaptureResults() LIBCAMERA_TSA_REQUIRES(descriptorsMutex_);
+ void setBufferStatus(Camera3RequestDescriptor::StreamBuffer &buffer,
+ Camera3RequestDescriptor::Status status);
+ std::unique_ptr<CameraMetadata> getResultMetadata(
+ const Camera3RequestDescriptor &descriptor) const;
unsigned int id_;
camera3_device_t camera3Device_;
- bool running_;
+ libcamera::Mutex stateMutex_; /* Protects access to the camera state. */
+ State state_ LIBCAMERA_TSA_GUARDED_BY(stateMutex_);
+
std::shared_ptr<libcamera::Camera> camera_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
+ CameraCapabilities capabilities_;
- CameraMetadata *staticMetadata_;
- std::map<unsigned int, CameraMetadata *> requestTemplates_;
+ std::map<unsigned int, std::unique_ptr<CameraMetadata>> requestTemplates_;
const camera3_callback_ops_t *callbacks_;
-};
-#endif /* __ANDROID_CAMERA_DEVICE_H__ */
+ std::vector<CameraStream> streams_;
+
+ libcamera::Mutex descriptorsMutex_ LIBCAMERA_TSA_ACQUIRED_AFTER(stateMutex_);
+ std::queue<std::unique_ptr<Camera3RequestDescriptor>> descriptors_
+ LIBCAMERA_TSA_GUARDED_BY(descriptorsMutex_);
+
+ std::string maker_;
+ std::string model_;
+
+ int facing_;
+ int orientation_;
+
+ CameraMetadata lastSettings_;
+};
diff --git a/src/android/camera_hal_config.cpp b/src/android/camera_hal_config.cpp
new file mode 100644
index 00000000..7ef451ef
--- /dev/null
+++ b/src/android/camera_hal_config.cpp
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera HAL configuration file manager
+ */
+#include "camera_hal_config.h"
+
+#include <stdlib.h>
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include <hardware/camera3.h>
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(HALConfig)
+
+class CameraHalConfig::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraHalConfig)
+
+public:
+ Private();
+
+ int parseConfigFile(File &file, std::map<std::string, CameraConfigData> *cameras);
+
+private:
+ int parseCameraConfigData(const std::string &cameraId, const YamlObject &);
+ int parseLocation(const YamlObject &, CameraConfigData &cameraConfigData);
+ int parseRotation(const YamlObject &, CameraConfigData &cameraConfigData);
+
+ std::map<std::string, CameraConfigData> *cameras_;
+};
+
+CameraHalConfig::Private::Private()
+{
+}
+
+int CameraHalConfig::Private::parseConfigFile(File &file,
+ std::map<std::string, CameraConfigData> *cameras)
+{
+ /*
+ * Parse the HAL properties.
+ *
+ * Each camera properties block is a list of properties associated
+ * with the ID (as assembled by CameraSensor::generateId()) of the
+ * camera they refer to.
+ *
+ * cameras:
+ * "camera0 id":
+ * location: value
+ * rotation: value
+ * ...
+ *
+ * "camera1 id":
+ * location: value
+ * rotation: value
+ * ...
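+	 *
+	 * For example (the camera ID shown is a made-up illustration):
+	 *
+	 * cameras:
+	 *   "\\_SB_.PCI0.I2C2.CAM0":
+	 *     location: front
+	 *     rotation: 90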
+ */
+
+ cameras_ = cameras;
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root)
+ return -EINVAL;
+
+ if (!root->isDictionary())
+ return -EINVAL;
+
+ /* Parse property "cameras" */
+ if (!root->contains("cameras"))
+ return -EINVAL;
+
+ const YamlObject &yamlObjectCameras = (*root)["cameras"];
+
+ if (!yamlObjectCameras.isDictionary())
+ return -EINVAL;
+
+ for (const auto &[cameraId, configData] : yamlObjectCameras.asDict()) {
+ if (parseCameraConfigData(cameraId, configData))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseCameraConfigData(const std::string &cameraId,
+ const YamlObject &cameraObject)
+
+{
+ if (!cameraObject.isDictionary())
+ return -EINVAL;
+
+ CameraConfigData &cameraConfigData = (*cameras_)[cameraId];
+
+ /* Parse property "location" */
+ if (parseLocation(cameraObject, cameraConfigData))
+ return -EINVAL;
+
+ /* Parse property "rotation" */
+ if (parseRotation(cameraObject, cameraConfigData))
+ return -EINVAL;
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseLocation(const YamlObject &cameraObject,
+ CameraConfigData &cameraConfigData)
+{
+ if (!cameraObject.contains("location"))
+ return -EINVAL;
+
+ std::string location = cameraObject["location"].get<std::string>("");
+
+ if (location == "front")
+ cameraConfigData.facing = CAMERA_FACING_FRONT;
+ else if (location == "back")
+ cameraConfigData.facing = CAMERA_FACING_BACK;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseRotation(const YamlObject &cameraObject,
+ CameraConfigData &cameraConfigData)
+{
+ if (!cameraObject.contains("rotation"))
+ return -EINVAL;
+
+ int32_t rotation = cameraObject["rotation"].get<int32_t>(-1);
+
+ if (rotation < 0 || rotation >= 360) {
+ LOG(HALConfig, Error)
+ << "Unknown rotation: " << rotation;
+ return -EINVAL;
+ }
+
+ cameraConfigData.rotation = rotation;
+ return 0;
+}
+
+CameraHalConfig::CameraHalConfig()
+ : Extensible(std::make_unique<Private>()), exists_(false), valid_(false)
+{
+ parseConfigurationFile();
+}
+
+/*
+ * Open the HAL configuration file and validate its content.
+ * Return 0 on success, a negative error code otherwise
+ * \retval -ENOENT The configuration file is not available
+ * \retval -EINVAL The configuration file is available but not valid
+ */
+int CameraHalConfig::parseConfigurationFile()
+{
+ std::string filePath = LIBCAMERA_SYSCONF_DIR "/camera_hal.yaml";
+
+ File file(filePath);
+ if (!file.exists()) {
+ LOG(HALConfig, Debug)
+ << "Configuration file: \"" << filePath << "\" not found";
+ return -ENOENT;
+ }
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(HALConfig, Error) << "Failed to open configuration file "
+ << filePath << ": " << strerror(-ret);
+ return ret;
+ }
+
+ exists_ = true;
+
+ int ret = _d()->parseConfigFile(file, &cameras_);
+ if (ret)
+ return -EINVAL;
+
+ valid_ = true;
+
+ for (const auto &c : cameras_) {
+ const std::string &cameraId = c.first;
+ const CameraConfigData &camera = c.second;
+ LOG(HALConfig, Debug) << "'" << cameraId << "' "
+ << "(" << camera.facing << ")["
+ << camera.rotation << "]";
+ }
+
+ return 0;
+}
+
+const CameraConfigData *CameraHalConfig::cameraConfigData(const std::string &cameraId) const
+{
+ const auto &it = cameras_.find(cameraId);
+ if (it == cameras_.end()) {
+ LOG(HALConfig, Error)
+ << "Camera '" << cameraId
+ << "' not described in the HAL configuration file";
+ return nullptr;
+ }
+
+ return &it->second;
+}
diff --git a/src/android/camera_hal_config.h b/src/android/camera_hal_config.h
new file mode 100644
index 00000000..a4bedb6e
--- /dev/null
+++ b/src/android/camera_hal_config.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera HAL configuration file manager
+ */
+
+#pragma once
+
+#include <map>
+#include <string>
+
+#include <libcamera/base/class.h>
+
+struct CameraConfigData {
+ int facing = -1;
+ int rotation = -1;
+};
+
+class CameraHalConfig final : public libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ CameraHalConfig();
+
+ bool exists() const { return exists_; }
+ bool isValid() const { return valid_; }
+
+ const CameraConfigData *cameraConfigData(const std::string &cameraId) const;
+
+private:
+ bool exists_;
+ bool valid_;
+ std::map<std::string, CameraConfigData> cameras_;
+
+ int parseConfigurationFile();
+};
diff --git a/src/android/camera_hal_manager.cpp b/src/android/camera_hal_manager.cpp
index 5bd3bdba..7500c749 100644
--- a/src/android/camera_hal_manager.cpp
+++ b/src/android/camera_hal_manager.cpp
@@ -2,20 +2,21 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.cpp - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#include "camera_hal_manager.h"
-#include <libcamera/camera.h>
+#include <libcamera/base/log.h>
-#include "log.h"
+#include <libcamera/camera.h>
+#include <libcamera/property_ids.h>
#include "camera_device.h"
using namespace libcamera;
-LOG_DECLARE_CATEGORY(HAL);
+LOG_DECLARE_CATEGORY(HAL)
/*
* \class CameraHalManager
@@ -28,71 +29,224 @@ LOG_DECLARE_CATEGORY(HAL);
*/
CameraHalManager::CameraHalManager()
- : cameraManager_(nullptr)
+ : cameraManager_(nullptr), callbacks_(nullptr), numInternalCameras_(0),
+ nextExternalCameraId_(firstExternalCameraId_)
{
}
-CameraHalManager::~CameraHalManager()
-{
- cameras_.clear();
+/* CameraManager calls stop() in the destructor. */
+CameraHalManager::~CameraHalManager() = default;
- if (cameraManager_) {
- cameraManager_->stop();
- delete cameraManager_;
- cameraManager_ = nullptr;
- }
+/* static */
+CameraHalManager *CameraHalManager::instance()
+{
+ static CameraHalManager *cameraHalManager = new CameraHalManager;
+ return cameraHalManager;
}
int CameraHalManager::init()
{
- cameraManager_ = new CameraManager();
+ cameraManager_ = std::make_unique<CameraManager>();
+
+ /*
+ * If the configuration file is not available the HAL only supports
+ * external cameras. If it exists but it's not valid then error out.
+ */
+ if (halConfig_.exists() && !halConfig_.isValid()) {
+ LOG(HAL, Error) << "HAL configuration file is not valid";
+ return -EINVAL;
+ }
+
+ /* Support camera hotplug. */
+ cameraManager_->cameraAdded.connect(this, &CameraHalManager::cameraAdded);
+ cameraManager_->cameraRemoved.connect(this, &CameraHalManager::cameraRemoved);
int ret = cameraManager_->start();
if (ret) {
LOG(HAL, Error) << "Failed to start camera manager: "
<< strerror(-ret);
- delete cameraManager_;
- cameraManager_ = nullptr;
+ cameraManager_.reset();
return ret;
}
+ return 0;
+}
+
+std::tuple<CameraDevice *, int>
+CameraHalManager::open(unsigned int id, const hw_module_t *hardwareModule)
+{
+ MutexLocker locker(mutex_);
+
+ if (!callbacks_) {
+ LOG(HAL, Error) << "Can't open camera before callbacks are set";
+ return { nullptr, -ENODEV };
+ }
+
+ CameraDevice *camera = cameraDeviceFromHalId(id);
+ if (!camera) {
+ LOG(HAL, Error) << "Invalid camera id '" << id << "'";
+ return { nullptr, -ENODEV };
+ }
+
+ int ret = camera->open(hardwareModule);
+ if (ret)
+ return { nullptr, ret };
+
+ LOG(HAL, Info) << "Open camera '" << id << "'";
+
+ return { camera, 0 };
+}
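+
+/*
+ * Callers can unpack the returned tuple with structured bindings, e.g.:
+ *
+ *   auto [camera, ret] = CameraHalManager::instance()->open(id, module);
+ *   if (ret)
+ *           return ret;
+ */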
+
+void CameraHalManager::cameraAdded(std::shared_ptr<Camera> cam)
+{
+ unsigned int id;
+ bool isCameraExternal = false;
+ bool isCameraNew = false;
+
+ MutexLocker locker(mutex_);
+
/*
- * For each Camera registered in the system, a CameraDevice
- * gets created here to wraps a libcamera Camera instance.
+ * Each camera is assigned a unique integer ID when it is seen for the
+ * first time. If the camera has been seen before, the previous ID is
+ * re-used.
*
- * \todo Support camera hotplug.
+	 * IDs start from '0' for internal cameras and '1000' for external
+ * cameras.
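+	 *
+	 * For example, on a device with two internal cameras and one USB
+	 * camera, the internal cameras get IDs 0 and 1 and the USB camera
+	 * gets ID 1000, which it keeps across unplug and replug cycles.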
*/
- unsigned int index = 0;
- for (auto &cam : cameraManager_->cameras()) {
- CameraDevice *camera = new CameraDevice(index, cam);
- cameras_.emplace_back(camera);
+ auto iter = cameraIdsMap_.find(cam->id());
+ if (iter != cameraIdsMap_.end()) {
+ id = iter->second;
+ if (id >= firstExternalCameraId_)
+ isCameraExternal = true;
+ } else {
+ isCameraNew = true;
- ++index;
+ /*
+		 * Now check if this is an external camera and assign
+		 * its ID accordingly.
+ */
+ if (cameraLocation(cam.get()) == properties::CameraLocationExternal) {
+ isCameraExternal = true;
+ id = nextExternalCameraId_;
+ } else {
+ id = numInternalCameras_;
+ }
}
- return 0;
+ /*
+	 * The configuration file must be valid and contain an entry for each
+	 * internal camera. External cameras can be initialized without a
+	 * configuration file.
+ */
+ if (!isCameraExternal && !halConfig_.exists()) {
+ LOG(HAL, Error)
+ << "HAL configuration file is mandatory for internal cameras."
+ << " Camera " << cam->id() << " failed to load";
+ return;
+ }
+
+ const CameraConfigData *cameraConfigData = halConfig_.cameraConfigData(cam->id());
+
+ /*
+ * Some cameras whose location is reported by libcamera as external may
+ * actually be internal to the device. This is common with UVC cameras
+ * that are integrated in a laptop. In that case the real location
+ * should be specified in the configuration file.
+ *
+ * If the camera location is external and a configuration entry exists
+ * for it, override its location.
+ */
+ if (isCameraNew && isCameraExternal) {
+ if (cameraConfigData && cameraConfigData->facing != -1) {
+ isCameraExternal = false;
+ id = numInternalCameras_;
+ }
+ }
+
+ if (!isCameraExternal && !cameraConfigData) {
+ LOG(HAL, Error)
+ << "HAL configuration entry for internal camera "
+ << cam->id() << " is missing";
+ return;
+ }
+
+ /* Create a CameraDevice instance to wrap the libcamera Camera. */
+ std::unique_ptr<CameraDevice> camera = CameraDevice::create(id, cam);
+
+ int ret = camera->initialize(cameraConfigData);
+ if (ret) {
+ LOG(HAL, Error) << "Failed to initialize camera: " << cam->id();
+ return;
+ }
+
+ if (isCameraNew) {
+ cameraIdsMap_.emplace(cam->id(), id);
+
+ if (isCameraExternal)
+ nextExternalCameraId_++;
+ else
+ numInternalCameras_++;
+ }
+
+ cameras_.emplace_back(std::move(camera));
+
+ if (callbacks_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_PRESENT);
+
+ LOG(HAL, Debug) << "Camera ID: " << id << " added successfully.";
}
-CameraDevice *CameraHalManager::open(unsigned int id,
- const hw_module_t *hardwareModule)
+void CameraHalManager::cameraRemoved(std::shared_ptr<Camera> cam)
{
- if (id >= numCameras()) {
- LOG(HAL, Error) << "Invalid camera id '" << id << "'";
- return nullptr;
- }
+ MutexLocker locker(mutex_);
- CameraDevice *camera = cameras_[id].get();
- if (camera->open(hardwareModule))
- return nullptr;
+ auto iter = std::find_if(cameras_.begin(), cameras_.end(),
+ [&cam](const std::unique_ptr<CameraDevice> &camera) {
+ return cam == camera->camera();
+ });
+ if (iter == cameras_.end())
+ return;
- LOG(HAL, Info) << "Open camera '" << id << "'";
+ /*
+ * CAMERA_DEVICE_STATUS_NOT_PRESENT should be set for external cameras
+ * only.
+ */
+ unsigned int id = (*iter)->id();
+ if (id >= firstExternalCameraId_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_NOT_PRESENT);
+
+ /*
+ * \todo Check if the camera is already open and running.
+ * Inform the framework about its absence before deleting its
+ * reference here.
+ */
+ cameras_.erase(iter);
+
+ LOG(HAL, Debug) << "Camera ID: " << id << " removed successfully.";
+}
+
+int32_t CameraHalManager::cameraLocation(const Camera *cam)
+{
+ return cam->properties().get(properties::Location).value_or(-1);
+}
- return camera;
+CameraDevice *CameraHalManager::cameraDeviceFromHalId(unsigned int id)
+{
+ auto iter = std::find_if(cameras_.begin(), cameras_.end(),
+ [id](const std::unique_ptr<CameraDevice> &camera) {
+ return camera->id() == id;
+ });
+ if (iter == cameras_.end())
+ return nullptr;
+
+ return iter->get();
}
unsigned int CameraHalManager::numCameras() const
{
- return cameraManager_->cameras().size();
+ return numInternalCameras_;
}
int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
@@ -100,17 +254,17 @@ int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
if (!info)
return -EINVAL;
- if (id >= numCameras()) {
+ MutexLocker locker(mutex_);
+
+ CameraDevice *camera = cameraDeviceFromHalId(id);
+ if (!camera) {
LOG(HAL, Error) << "Invalid camera id '" << id << "'";
return -EINVAL;
}
- CameraDevice *camera = cameras_[id].get();
-
- /* \todo Get these info dynamically inspecting the camera module. */
- info->facing = id ? CAMERA_FACING_FRONT : CAMERA_FACING_BACK;
- info->orientation = 0;
- info->device_version = 0;
+ info->facing = camera->facing();
+ info->orientation = camera->orientation();
+ info->device_version = CAMERA_DEVICE_API_VERSION_3_3;
info->resource_cost = 0;
info->static_camera_characteristics = camera->getStaticMetadata();
info->conflicting_devices = nullptr;
@@ -118,3 +272,25 @@ int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
return 0;
}
+
+void CameraHalManager::setCallbacks(const camera_module_callbacks_t *callbacks)
+{
+ callbacks_ = callbacks;
+
+ MutexLocker locker(mutex_);
+
+ /*
+ * Some external cameras may have been identified before the callbacks_
+	 * were set. Iterate over all existing external cameras and mark them as
+ * CAMERA_DEVICE_STATUS_PRESENT explicitly.
+ *
+ * Internal cameras are already assumed to be present at module load
+ * time by the Android framework.
+ */
+ for (const std::unique_ptr<CameraDevice> &camera : cameras_) {
+ unsigned int id = camera->id();
+ if (id >= firstExternalCameraId_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_PRESENT);
+ }
+}
diff --git a/src/android/camera_hal_manager.h b/src/android/camera_hal_manager.h
index 94d8f005..836a8daf 100644
--- a/src/android/camera_hal_manager.h
+++ b/src/android/camera_hal_manager.h
@@ -2,40 +2,67 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.h - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
-#ifndef __ANDROID_CAMERA_MANAGER_H__
-#define __ANDROID_CAMERA_MANAGER_H__
+#pragma once
+
+#include <map>
#include <stddef.h>
+#include <tuple>
#include <vector>
+#include <hardware/camera_common.h>
#include <hardware/hardware.h>
#include <system/camera_metadata.h>
+#include <libcamera/base/class.h>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera_manager.h>
+#include "camera_hal_config.h"
+
class CameraDevice;
class CameraHalManager
{
public:
- CameraHalManager();
~CameraHalManager();
+ static CameraHalManager *instance();
+
int init();
- CameraDevice *open(unsigned int id, const hw_module_t *module);
+ std::tuple<CameraDevice *, int>
+ open(unsigned int id, const hw_module_t *module);
unsigned int numCameras() const;
int getCameraInfo(unsigned int id, struct camera_info *info);
+ void setCallbacks(const camera_module_callbacks_t *callbacks);
private:
- camera_metadata_t *getStaticMetadata(unsigned int id);
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraHalManager)
- libcamera::CameraManager *cameraManager_;
+ static constexpr unsigned int firstExternalCameraId_ = 1000;
- std::vector<std::unique_ptr<CameraDevice>> cameras_;
-};
+ CameraHalManager();
-#endif /* __ANDROID_CAMERA_MANAGER_H__ */
+ static int32_t cameraLocation(const libcamera::Camera *cam);
+
+ void cameraAdded(std::shared_ptr<libcamera::Camera> cam);
+ void cameraRemoved(std::shared_ptr<libcamera::Camera> cam);
+
+ CameraDevice *cameraDeviceFromHalId(unsigned int id) LIBCAMERA_TSA_REQUIRES(mutex_);
+
+ std::unique_ptr<libcamera::CameraManager> cameraManager_;
+ CameraHalConfig halConfig_;
+
+ const camera_module_callbacks_t *callbacks_;
+ std::vector<std::unique_ptr<CameraDevice>> cameras_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ std::map<std::string, unsigned int> cameraIdsMap_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ libcamera::Mutex mutex_;
+
+ unsigned int numInternalCameras_;
+ unsigned int nextExternalCameraId_;
+};
diff --git a/src/android/camera_metadata.cpp b/src/android/camera_metadata.cpp
index 76965108..99f033f9 100644
--- a/src/android/camera_metadata.cpp
+++ b/src/android/camera_metadata.cpp
@@ -2,34 +2,157 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.cpp - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#include "camera_metadata.h"
-#include "log.h"
+#include <libcamera/base/log.h>
using namespace libcamera;
-LOG_DEFINE_CATEGORY(CameraMetadata);
+LOG_DEFINE_CATEGORY(CameraMetadata)
+
+CameraMetadata::CameraMetadata()
+ : metadata_(nullptr), valid_(false), resized_(false)
+{
+}
CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity)
+ : resized_(false)
{
metadata_ = allocate_camera_metadata(entryCapacity, dataCapacity);
valid_ = metadata_ != nullptr;
}
+CameraMetadata::CameraMetadata(const camera_metadata_t *metadata)
+ : resized_(false)
+{
+ metadata_ = clone_camera_metadata(metadata);
+ valid_ = metadata_ != nullptr;
+}
+
+CameraMetadata::CameraMetadata(const CameraMetadata &other)
+ : CameraMetadata(other.getMetadata())
+{
+}
+
CameraMetadata::~CameraMetadata()
{
if (metadata_)
free_camera_metadata(metadata_);
}
-bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count)
+CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other)
+{
+ if (this == &other)
+ return *this;
+
+ if (metadata_)
+ free_camera_metadata(metadata_);
+
+ metadata_ = clone_camera_metadata(other.getMetadata());
+ valid_ = metadata_ != nullptr;
+
+ return *this;
+}
+
+std::tuple<size_t, size_t> CameraMetadata::usage() const
+{
+ size_t currentEntryCount = get_camera_metadata_entry_count(metadata_);
+ size_t currentDataCount = get_camera_metadata_data_count(metadata_);
+
+ return { currentEntryCount, currentDataCount };
+}
+
+bool CameraMetadata::getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const
+{
+ if (find_camera_metadata_ro_entry(metadata_, tag, entry))
+ return false;
+
+ return true;
+}
+
+/*
+ * \brief Resize the metadata container, if necessary
+ * \param[in] count Number of entries to add to the container
+ * \param[in] size Total size of entries to add, in bytes
+ * \return True if resize was successful or unnecessary, false otherwise
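+ *
+ * Capacities grow by doubling. For example, with an entry capacity of 88 and
+ * 88 entries in use, adding one more entry reallocates the container with an
+ * entry capacity of 176 and copies the existing entries over.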
+ */
+bool CameraMetadata::resize(size_t count, size_t size)
+{
+ if (!valid_)
+ return false;
+
+ if (!count && !size)
+ return true;
+
+ size_t currentEntryCount = get_camera_metadata_entry_count(metadata_);
+ size_t currentEntryCapacity = get_camera_metadata_entry_capacity(metadata_);
+ size_t newEntryCapacity = currentEntryCapacity < currentEntryCount + count ?
+ currentEntryCapacity * 2 : currentEntryCapacity;
+
+ size_t currentDataCount = get_camera_metadata_data_count(metadata_);
+ size_t currentDataCapacity = get_camera_metadata_data_capacity(metadata_);
+ size_t newDataCapacity = currentDataCapacity < currentDataCount + size ?
+ currentDataCapacity * 2 : currentDataCapacity;
+
+ if (newEntryCapacity > currentEntryCapacity ||
+ newDataCapacity > currentDataCapacity) {
+ camera_metadata_t *oldMetadata = metadata_;
+ metadata_ = allocate_camera_metadata(newEntryCapacity, newDataCapacity);
+ if (!metadata_) {
+ metadata_ = oldMetadata;
+ return false;
+ }
+
+ LOG(CameraMetadata, Info)
+ << "Resized: old entry capacity " << currentEntryCapacity
+ << ", old data capacity " << currentDataCapacity
+ << ", new entry capacity " << newEntryCapacity
+ << ", new data capacity " << newDataCapacity;
+
+ append_camera_metadata(metadata_, oldMetadata);
+ free_camera_metadata(oldMetadata);
+
+ resized_ = true;
+ }
+
+ return true;
+}
+
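+/*
+ * Only a uint8_t specialization is provided here; additional types can be
+ * specialized in the same way when needed.
+ */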
+template<> bool CameraMetadata::entryContains(uint32_t tag, uint8_t value) const
+{
+ camera_metadata_ro_entry_t entry;
+ if (!getEntry(tag, &entry))
+ return false;
+
+ for (unsigned int i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == value)
+ return true;
+ }
+
+ return false;
+}
+
+bool CameraMetadata::hasEntry(uint32_t tag) const
+{
+ camera_metadata_ro_entry_t entry;
+ return getEntry(tag, &entry);
+}
+
+bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize)
{
if (!valid_)
return false;
+ if (!resize(1, count * elementSize)) {
+ LOG(CameraMetadata, Error) << "Failed to resize";
+ valid_ = false;
+ return false;
+ }
+
if (!add_camera_metadata_entry(metadata_, tag, data, count))
return true;
@@ -46,7 +169,63 @@ bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count)
return false;
}
-camera_metadata_t *CameraMetadata::get()
+bool CameraMetadata::updateEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize)
+{
+ if (!valid_)
+ return false;
+
+ camera_metadata_entry_t entry;
+ int ret = find_camera_metadata_entry(metadata_, tag, &entry);
+ if (ret) {
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Error)
+ << "Failed to update tag "
+ << (name ? name : "<unknown>") << ": not present";
+ return false;
+ }
+
+ if (camera_metadata_type_size[entry.type] != elementSize) {
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Fatal)
+ << "Invalid element size for tag "
+ << (name ? name : "<unknown>");
+ return false;
+ }
+
+ size_t oldSize =
+ calculate_camera_metadata_entry_data_size(entry.type,
+ entry.count);
+ size_t newSize =
+ calculate_camera_metadata_entry_data_size(entry.type,
+ count);
+	size_t sizeIncrement = newSize > oldSize ? newSize - oldSize : 0;
+ if (!resize(0, sizeIncrement)) {
+ LOG(CameraMetadata, Error) << "Failed to resize";
+ valid_ = false;
+ return false;
+ }
+
+ ret = update_camera_metadata_entry(metadata_, entry.index, data,
+ count, nullptr);
+ if (!ret)
+ return true;
+
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Error)
+ << "Failed to update tag " << (name ? name : "<unknown>");
+
+ valid_ = false;
+
+ return false;
+}
+
+camera_metadata_t *CameraMetadata::getMetadata()
+{
+ return valid_ ? metadata_ : nullptr;
+}
+
+const camera_metadata_t *CameraMetadata::getMetadata() const
{
return valid_ ? metadata_ : nullptr;
}
diff --git a/src/android/camera_metadata.h b/src/android/camera_metadata.h
index 75a9d706..474f280c 100644
--- a/src/android/camera_metadata.h
+++ b/src/android/camera_metadata.h
@@ -2,29 +2,111 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.h - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
-#ifndef __ANDROID_CAMERA_METADATA_H__
-#define __ANDROID_CAMERA_METADATA_H__
+
+#pragma once
#include <stdint.h>
+#include <tuple>
+#include <type_traits>
+#include <vector>
#include <system/camera_metadata.h>
class CameraMetadata
{
public:
+ CameraMetadata();
CameraMetadata(size_t entryCapacity, size_t dataCapacity);
+ CameraMetadata(const camera_metadata_t *metadata);
+ CameraMetadata(const CameraMetadata &other);
~CameraMetadata();
- bool isValid() { return valid_; }
- bool addEntry(uint32_t tag, const void *data, size_t data_count);
+ CameraMetadata &operator=(const CameraMetadata &other);
+
+ std::tuple<size_t, size_t> usage() const;
+ bool resized() const { return resized_; }
+
+ bool isValid() const { return valid_; }
+ bool getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const;
+
+ template<typename T> bool entryContains(uint32_t tag, T value) const;
+
+ bool hasEntry(uint32_t tag) const;
+
+ template<typename T,
+ std::enable_if_t<std::is_arithmetic_v<T> ||
+ std::is_enum_v<T>> * = nullptr>
+ bool setEntry(uint32_t tag, const T &data)
+ {
+ if (hasEntry(tag))
+ return updateEntry(tag, &data, 1, sizeof(T));
+ else
+ return addEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T,
+ std::enable_if_t<std::is_arithmetic_v<T> ||
+ std::is_enum_v<T>> * = nullptr>
+ bool addEntry(uint32_t tag, const T &data)
+ {
+ return addEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T, size_t size>
+ bool addEntry(uint32_t tag, const T (&data)[size])
+ {
+ return addEntry(tag, data, size, sizeof(T));
+ }
- camera_metadata_t *get();
+ template<typename S,
+ typename T = typename S::value_type>
+ bool addEntry(uint32_t tag, const S &data)
+ {
+ return addEntry(tag, data.data(), data.size(), sizeof(T));
+ }
+
+ template<typename T>
+ bool addEntry(uint32_t tag, const T *data, size_t count)
+ {
+ return addEntry(tag, data, count, sizeof(T));
+ }
+
+ template<typename T>
+ bool updateEntry(uint32_t tag, const T &data)
+ {
+ return updateEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T, size_t size>
+ bool updateEntry(uint32_t tag, const T (&data)[size])
+ {
+ return updateEntry(tag, data, size, sizeof(T));
+ }
+
+ template<typename S,
+ typename T = typename S::value_type>
+ bool updateEntry(uint32_t tag, const S &data)
+ {
+ return updateEntry(tag, data.data(), data.size(), sizeof(T));
+ }
+
+ template<typename T>
+ bool updateEntry(uint32_t tag, const T *data, size_t count)
+ {
+ return updateEntry(tag, data, count, sizeof(T));
+ }
+
+ camera_metadata_t *getMetadata();
+ const camera_metadata_t *getMetadata() const;
private:
+ bool resize(size_t count, size_t size);
+ bool addEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize);
+ bool updateEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize);
+
camera_metadata_t *metadata_;
bool valid_;
+ bool resized_;
};
-
-#endif /* __ANDROID_CAMERA_METADATA_H__ */
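Taken together, the templated helpers give type-safe front-ends to the void-pointer core. A minimal usage sketch, assuming the standard Android tag and enum definitions from <system/camera_metadata_tags.h> and arbitrary capacities:

    CameraMetadata metadata(64, 1024);

    /* Scalar: setEntry() adds the tag if absent, updates it otherwise. */
    metadata.setEntry(ANDROID_CONTROL_MODE,
                      static_cast<uint8_t>(ANDROID_CONTROL_MODE_AUTO));

    /* C array: the element count is deduced from the array type. */
    const int32_t fpsRange[2] = { 15, 30 };
    metadata.addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, fpsRange);

    /* Container: any type exposing data(), size() and value_type. */
    std::vector<uint8_t> modes = { ANDROID_CONTROL_AE_MODE_OFF,
                                   ANDROID_CONTROL_AE_MODE_ON };
    metadata.addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES, modes);

    /* updateEntry() requires the tag to already be present. */
    metadata.updateEntry(ANDROID_CONTROL_MODE,
                         static_cast<uint8_t>(ANDROID_CONTROL_MODE_OFF));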
diff --git a/src/android/camera_ops.cpp b/src/android/camera_ops.cpp
index 9dfc2e65..ecaac5a3 100644
--- a/src/android/camera_ops.cpp
+++ b/src/android/camera_ops.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#include "camera_ops.h"
@@ -14,7 +14,7 @@
using namespace libcamera;
/*
- * Translatation layer between the Android Camera HAL device operations and the
+ * Translation layer between the Android Camera HAL device operations and the
* CameraDevice.
*/
@@ -61,12 +61,19 @@ static int hal_dev_process_capture_request(const struct camera3_device *dev,
return camera->processCaptureRequest(request);
}
-static void hal_dev_dump(const struct camera3_device *dev, int fd)
+static void hal_dev_dump([[maybe_unused]] const struct camera3_device *dev,
+ [[maybe_unused]] int fd)
{
}
static int hal_dev_flush(const struct camera3_device *dev)
{
+ if (!dev)
+ return -EINVAL;
+
+ CameraDevice *camera = reinterpret_cast<CameraDevice *>(dev->priv);
+ camera->flush();
+
return 0;
}
diff --git a/src/android/camera_ops.h b/src/android/camera_ops.h
index 304e7b85..750dc945 100644
--- a/src/android/camera_ops.h
+++ b/src/android/camera_ops.h
@@ -2,14 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
-#ifndef __ANDROID_CAMERA_OPS_H__
-#define __ANDROID_CAMERA_OPS_H__
+
+#pragma once
#include <hardware/camera3.h>
int hal_dev_close(hw_device_t *hw_device);
extern camera3_device_ops hal_dev_ops;
-
-#endif /* __ANDROID_CAMERA_OPS_H__ */
diff --git a/src/android/camera_request.cpp b/src/android/camera_request.cpp
new file mode 100644
index 00000000..0d45960d
--- /dev/null
+++ b/src/android/camera_request.cpp
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2021, Google Inc.
+ *
+ * libcamera Android Camera Request Descriptor
+ */
+
+#include "camera_request.h"
+
+#include <libcamera/base/span.h>
+
+#include "camera_buffer.h"
+
+using namespace libcamera;
+
+/*
+ * \class Camera3RequestDescriptor
+ *
+ * A utility class that groups information about a capture request to be later
+ * reused at request complete time to notify the framework.
+ *
+ *******************************************************************************
+ * Lifetime of a Camera3RequestDescriptor tracking a capture request placed by
+ * Android Framework
+ *******************************************************************************
+ *
+ *
+ * Android Framework
+ * │
+ * │ ┌──────────────────────────────────┐
+ * │ │camera3_capture_request_t │
+ * │ │ │
+ * │ │Requested output streams │
+ * │ │ stream1 stream2 stream3 ... │
+ * │ └──────────────────────────────────┘
+ * ▼
+ * ┌─────────────────────────────────────────────────────────────┐
+ * │ libcamera HAL │
+ * ├─────────────────────────────────────────────────────────────┤
+ * │ CameraDevice │
+ * │ │
+ * │ processCaptureRequest(camera3_capture_request_t request) │
+ * │ │
+ * │ - Create Camera3RequestDescriptor tracking this request │
+ * │ - Streams requiring post-processing are stored in the │
+ * │ pendingStreamsToProcess map │
+ * │ - Add this Camera3RequestDescriptor to descriptors' queue │
+ * │ CameraDevice::descriptors_ │
+ * │ │ ┌─────────────────────────┐
+ * │ - Queue the capture request to libcamera core ────────────┤►│libcamera core │
+ * │ │ ├─────────────────────────┤
+ * │ │ │- Capture from Camera │
+ * │ │ │ │
+ * │ │ │- Emit │
+ * │ │ │ Camera::requestComplete│
+ * │ requestCompleted(Request *request) ◄───────────────────────┼─┼──── │
+ * │ │ │ │
+ * │ - Check request completion status │ └─────────────────────────┘
+ * │ │
+ * │ - if (pendingStreamsToProcess > 0) │
+ * │ Queue all entries from pendingStreamsToProcess │
+ * │ else │ │
+ * │ completeDescriptor() │ └──────────────────────┐
+ * │ │ │
+ * │ ┌──────────────────────────┴───┬──────────────────┐ │
+ * │ │ │ │ │
+ * │ ┌──────────▼────────────┐ ┌───────────▼─────────┐ ▼ │
+ * │ │CameraStream1 │ │CameraStream2 │ .... │
+ * │ ├┬───┬───┬──────────────┤ ├┬───┬───┬────────────┤ │
+ * │ ││ │ │ │ ││ │ │ │ │
+ * │ │▼───▼───▼──────────────┤ │▼───▼───▼────────────┤ │
+ * │ │PostProcessorWorker │ │PostProcessorWorker │ │
+ * │ │ │ │ │ │
+ * │ │ +------------------+ │ │ +------------------+│ │
+ * │ │ | PostProcessor | │ │ | PostProcessor |│ │
+ * │ │ | process() | │ │ | process() |│ │
+ * │ │ | | │ │ | |│ │
+ * │ │ | Emit | │ │ | Emit |│ │
+ * │ │ | processComplete | │ │ | processComplete |│ │
+ * │ │ | | │ │ | |│ │
+ * │ │ +--------------│---+ │ │ +--------------│---+│ │
+ * │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │
+ * │ └────────────────┼──────┘ └────────────────┼────┘ │
+ * │ │ │ │
+ * │ │ │ │
+ * │ │ │ │
+ * │ ▼ ▼ │
+ * │ +---------------------------------------+ +--------------+ │
+ * │ | CameraDevice | | | │
+ * │ | | | | │
+ * │ | streamProcessingComplete() | | | │
+ * │ | | | | │
+ * │ | - Check and set buffer status | | .... | │
+ * | | - Remove post-processing entry | | | │
+ * │ | from pendingStreamsToProcess | | | │
+ * │ | | | | │
+ * │ | - if (pendingStreamsToProcess.empty())| | | │
+ * │ | completeDescriptor | | | │
+ * │ | | | | │
+ * │ +---------------------------------------+ +--------------+ │
+ * │ │
+ * └────────────────────────────────────────────────────────────────────────────────────┘
+ *
+ * +-------------+
+ * | | - PostProcessorWorker's thread
+ * | |
+ * +-------------+
+ */
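In code, the completion accounting at the heart of the diagram reduces to a handful of operations. The following toy model is illustrative only, with every type reduced to a stub; the real logic lives in CameraDevice:

    #include <map>

    struct Stream {};
    struct Buffer {};

    struct Descriptor {
        std::map<Stream *, Buffer *> pendingStreamsToProcess;
        bool complete = false;
    };

    /* Mirrors streamProcessingComplete() in the diagram: the descriptor
     * completes once every post-processing entry has been removed. */
    void streamProcessingComplete(Descriptor &d, Stream *s)
    {
        d.pendingStreamsToProcess.erase(s);
        if (d.pendingStreamsToProcess.empty())
            d.complete = true; /* completeDescriptor() */
    }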
+
+Camera3RequestDescriptor::Camera3RequestDescriptor(
+ Camera *camera, const camera3_capture_request_t *camera3Request)
+{
+ frameNumber_ = camera3Request->frame_number;
+
+ /* Copy the camera3 request stream information for later access. */
+ const Span<const camera3_stream_buffer_t> buffers{
+ camera3Request->output_buffers,
+ camera3Request->num_output_buffers
+ };
+
+ buffers_.reserve(buffers.size());
+
+ for (const camera3_stream_buffer_t &buffer : buffers) {
+ CameraStream *stream =
+ static_cast<CameraStream *>(buffer.stream->priv);
+
+ buffers_.emplace_back(stream, buffer, this);
+ }
+
+ /* Clone the controls associated with the camera3 request. */
+ settings_ = CameraMetadata(camera3Request->settings);
+
+ /*
+ * Create the CaptureRequest, stored as a unique_ptr<> to tie its
+ * lifetime to the descriptor.
+ */
+ request_ = camera->createRequest(reinterpret_cast<uint64_t>(this));
+}
+
+Camera3RequestDescriptor::~Camera3RequestDescriptor() = default;
+
+/**
+ * \struct Camera3RequestDescriptor::StreamBuffer
+ * \brief Group information for a per-stream buffer of a Camera3RequestDescriptor
+ *
+ * A capture request placed to the libcamera HAL can contain multiple streams.
+ * Each stream will have an associated buffer to be filled. StreamBuffer
+ * tracks this buffer with contextual information which aids in the stream's
+ * generation. The generation of the stream will depend on its type (refer to
+ * the CameraStream::Type documentation).
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::stream
+ * \brief Pointer to the corresponding CameraStream
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::camera3Buffer
+ * \brief Native handle to the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::frameBuffer
+ * \brief Encapsulate the dmabuf handle inside a libcamera::FrameBuffer for
+ * direct streams
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::fence
+ * \brief Acquire fence of the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::status
+ * \brief Track the status of the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::internalBuffer
+ * \brief Pointer to a buffer internally handled by CameraStream (if any)
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::srcBuffer
+ * \brief Pointer to the source frame buffer used for post-processing
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::dstBuffer
+ * \brief Pointer to the destination frame buffer used for post-processing
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::request
+ * \brief Back pointer to the Camera3RequestDescriptor to which the StreamBuffer belongs
+ */
+Camera3RequestDescriptor::StreamBuffer::StreamBuffer(
+ CameraStream *cameraStream, const camera3_stream_buffer_t &buffer,
+ Camera3RequestDescriptor *requestDescriptor)
+ : stream(cameraStream), camera3Buffer(buffer.buffer),
+ fence(buffer.acquire_fence), request(requestDescriptor)
+{
+}
+
+Camera3RequestDescriptor::StreamBuffer::~StreamBuffer() = default;
+
+Camera3RequestDescriptor::StreamBuffer::StreamBuffer(StreamBuffer &&) = default;
+
+Camera3RequestDescriptor::StreamBuffer &
+Camera3RequestDescriptor::StreamBuffer::operator=(Camera3RequestDescriptor::StreamBuffer &&) = default;
diff --git a/src/android/camera_request.h b/src/android/camera_request.h
new file mode 100644
index 00000000..5b479180
--- /dev/null
+++ b/src/android/camera_request.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2021, Google Inc.
+ *
+ * libcamera Android Camera Request Descriptor
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/unique_fd.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+
+#include <hardware/camera3.h>
+
+#include "camera_metadata.h"
+#include "hal_framebuffer.h"
+
+class CameraBuffer;
+class CameraStream;
+
+class Camera3RequestDescriptor
+{
+public:
+ enum class Status {
+ Success,
+ Error,
+ };
+
+ struct StreamBuffer {
+ StreamBuffer(CameraStream *stream,
+ const camera3_stream_buffer_t &buffer,
+ Camera3RequestDescriptor *request);
+ ~StreamBuffer();
+
+ StreamBuffer(StreamBuffer &&);
+ StreamBuffer &operator=(StreamBuffer &&);
+
+ CameraStream *stream;
+ buffer_handle_t *camera3Buffer;
+ std::unique_ptr<HALFrameBuffer> frameBuffer;
+ libcamera::UniqueFD fence;
+ Status status = Status::Success;
+ libcamera::FrameBuffer *internalBuffer = nullptr;
+ const libcamera::FrameBuffer *srcBuffer = nullptr;
+ std::unique_ptr<CameraBuffer> dstBuffer;
+ Camera3RequestDescriptor *request;
+
+ private:
+ LIBCAMERA_DISABLE_COPY(StreamBuffer)
+ };
+
+ /* Keeps track of streams requiring post-processing. */
+ std::map<CameraStream *, StreamBuffer *> pendingStreamsToProcess_
+ LIBCAMERA_TSA_GUARDED_BY(streamsProcessMutex_);
+ libcamera::Mutex streamsProcessMutex_;
+
+ Camera3RequestDescriptor(libcamera::Camera *camera,
+ const camera3_capture_request_t *camera3Request);
+ ~Camera3RequestDescriptor();
+
+ bool isPending() const { return !complete_; }
+
+ uint32_t frameNumber_ = 0;
+
+ std::vector<StreamBuffer> buffers_;
+
+ CameraMetadata settings_;
+ std::unique_ptr<libcamera::Request> request_;
+ std::unique_ptr<CameraMetadata> resultMetadata_;
+
+ bool complete_ = false;
+ Status status_ = Status::Success;
+
+private:
+ LIBCAMERA_DISABLE_COPY(Camera3RequestDescriptor)
+};
diff --git a/src/android/camera_stream.cpp b/src/android/camera_stream.cpp
new file mode 100644
index 00000000..1d68540d
--- /dev/null
+++ b/src/android/camera_stream.cpp
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Camera HAL stream
+ */
+
+#include "camera_stream.h"
+
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <unistd.h>
+
+#include <libcamera/formats.h>
+
+#include "jpeg/post_processor_jpeg.h"
+#include "yuv/post_processor_yuv.h"
+
+#include "camera_buffer.h"
+#include "camera_capabilities.h"
+#include "camera_device.h"
+#include "camera_metadata.h"
+#include "frame_buffer_allocator.h"
+#include "post_processor.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+/*
+ * \class CameraStream
+ * \brief Map a camera3_stream_t to a StreamConfiguration
+ *
+ * The CameraStream class maps a camera3_stream_t provided by Android
+ * camera framework to a libcamera::StreamConfiguration.
+ *
+ * The StreamConfiguration is represented by its index as recorded in the
+ * CameraConfiguration and not by pointer, as StreamConfiguration instances
+ * are subject to relocation.
+ *
+ * A single StreamConfiguration may be used to deliver one or more streams to
+ * the Android framework. The mapping type between a camera3 stream to a
+ * StreamConfiguration is described by the CameraStream::Type.
+ *
+ * CameraStream handles all the aspects of producing a stream with the size
+ * and format requested by the camera3 stream from the data produced by
+ * the associated libcamera::Stream, including the creation of the encoder
+ * and buffer allocation.
+ */
+
+CameraStream::CameraStream(CameraDevice *const cameraDevice,
+ CameraConfiguration *config, Type type,
+ camera3_stream_t *camera3Stream,
+ CameraStream *const sourceStream, unsigned int index)
+ : cameraDevice_(cameraDevice), config_(config), type_(type),
+ camera3Stream_(camera3Stream), sourceStream_(sourceStream),
+ index_(index)
+{
+}
+
+CameraStream::CameraStream(CameraStream &&other) = default;
+
+CameraStream::~CameraStream()
+{
+ /*
+ * Manually delete buffers and then the allocator to make sure buffers
+ * are released while the allocator is still valid.
+ */
+ allocatedBuffers_.clear();
+ allocator_.reset();
+}
+
+const StreamConfiguration &CameraStream::configuration() const
+{
+ return config_->at(index_);
+}
+
+Stream *CameraStream::stream() const
+{
+ return configuration().stream();
+}
+
+int CameraStream::configure()
+{
+ if (type_ == Type::Internal || type_ == Type::Mapped) {
+ const PixelFormat outFormat =
+ cameraDevice_->capabilities()->toPixelFormat(camera3Stream_->format);
+ StreamConfiguration output = configuration();
+ output.pixelFormat = outFormat;
+ output.size.width = camera3Stream_->width;
+ output.size.height = camera3Stream_->height;
+
+ switch (outFormat) {
+ case formats::NV12:
+ postProcessor_ = std::make_unique<PostProcessorYuv>();
+ break;
+
+ case formats::MJPEG:
+ postProcessor_ = std::make_unique<PostProcessorJpeg>(cameraDevice_);
+ break;
+
+ default:
+ LOG(HAL, Error) << "Unsupported format: " << outFormat;
+ return -EINVAL;
+ }
+
+ int ret = postProcessor_->configure(configuration(), output);
+ if (ret)
+ return ret;
+
+ worker_ = std::make_unique<PostProcessorWorker>(postProcessor_.get());
+ postProcessor_->processComplete.connect(
+ this, [&](Camera3RequestDescriptor::StreamBuffer *streamBuffer,
+ PostProcessor::Status status) {
+ Camera3RequestDescriptor::Status bufferStatus;
+
+ if (status == PostProcessor::Status::Success)
+ bufferStatus = Camera3RequestDescriptor::Status::Success;
+ else
+ bufferStatus = Camera3RequestDescriptor::Status::Error;
+
+ cameraDevice_->streamProcessingComplete(streamBuffer,
+ bufferStatus);
+ });
+
+ worker_->start();
+ }
+
+ allocator_ = std::make_unique<PlatformFrameBufferAllocator>(cameraDevice_);
+ mutex_ = std::make_unique<Mutex>();
+
+ camera3Stream_->max_buffers = configuration().bufferCount;
+
+ return 0;
+}
+
+int CameraStream::waitFence(int fence)
+{
+ /*
+ * \todo The implementation here is copied from camera_worker.cpp
+ * and both should be removed once libcamera is instrumented to handle
+ * fences waiting in the core.
+ *
+ * \todo Better characterize the timeout. Currently equal to the one
+ * used by the Rockchip Camera HAL on ChromeOS.
+ */
+ constexpr unsigned int timeoutMs = 300;
+ struct pollfd fds = { fence, POLLIN, 0 };
+
+ do {
+ int ret = poll(&fds, 1, timeoutMs);
+ if (ret == 0)
+ return -ETIME;
+
+ if (ret > 0) {
+ if (fds.revents & (POLLERR | POLLNVAL))
+ return -EINVAL;
+
+ return 0;
+ }
+ } while (errno == EINTR || errno == EAGAIN);
+
+ return -errno;
+}
+
+int CameraStream::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+ ASSERT(type_ != Type::Direct);
+
+ /* Handle waiting on fences on the destination buffer. */
+ if (streamBuffer->fence.isValid()) {
+ int ret = waitFence(streamBuffer->fence.get());
+ if (ret < 0) {
+ LOG(HAL, Error) << "Failed waiting for fence: "
+ << streamBuffer->fence.get() << ": "
+ << strerror(-ret);
+ return ret;
+ }
+
+ streamBuffer->fence.reset();
+ }
+
+ const StreamConfiguration &output = configuration();
+ streamBuffer->dstBuffer = std::make_unique<CameraBuffer>(
+ *streamBuffer->camera3Buffer, output.pixelFormat, output.size,
+ PROT_READ | PROT_WRITE);
+ if (!streamBuffer->dstBuffer->isValid()) {
+ LOG(HAL, Error) << "Failed to create destination buffer";
+ return -EINVAL;
+ }
+
+ worker_->queueRequest(streamBuffer);
+
+ return 0;
+}
+
+void CameraStream::flush()
+{
+ if (!postProcessor_)
+ return;
+
+ worker_->flush();
+}
+
+FrameBuffer *CameraStream::getBuffer()
+{
+ if (!allocator_)
+ return nullptr;
+
+ MutexLocker locker(*mutex_);
+
+ if (buffers_.empty()) {
+ /*
+ * Use HAL_PIXEL_FORMAT_YCBCR_420_888 unconditionally.
+ *
+ * YCBCR_420 is the source format for both the JPEG and the YUV
+ * post-processors.
+ *
+ * \todo Store a reference to the format of the source stream
+ * instead of hardcoding.
+ */
+ auto frameBuffer = allocator_->allocate(HAL_PIXEL_FORMAT_YCBCR_420_888,
+ configuration().size,
+ camera3Stream_->usage);
+ allocatedBuffers_.push_back(std::move(frameBuffer));
+ buffers_.emplace_back(allocatedBuffers_.back().get());
+ }
+
+ FrameBuffer *buffer = buffers_.back();
+ buffers_.pop_back();
+
+ return buffer;
+}
+
+void CameraStream::putBuffer(FrameBuffer *buffer)
+{
+ if (!allocator_)
+ return;
+
+ MutexLocker locker(*mutex_);
+
+ buffers_.push_back(buffer);
+}
+
+/**
+ * \class CameraStream::PostProcessorWorker
+ * \brief Post-process a CameraStream in an internal thread
+ *
+ * If the association between CameraStream and camera3_stream_t dictated by
+ * CameraStream::Type is internal or mapped, the stream is generated by post
+ * processing of a libcamera stream. Such a request is queued to a
+ * PostProcessorWorker in CameraStream::process(). A queue of post-processing
+ * requests is maintained by the PostProcessorWorker and it will run the
+ * post-processing on an internal thread as soon as any request is available on
+ * its queue.
+ */
+CameraStream::PostProcessorWorker::PostProcessorWorker(PostProcessor *postProcessor)
+ : postProcessor_(postProcessor)
+{
+}
+
+CameraStream::PostProcessorWorker::~PostProcessorWorker()
+{
+ {
+ MutexLocker lock(mutex_);
+ state_ = State::Stopped;
+ }
+
+ cv_.notify_one();
+ wait();
+}
+
+void CameraStream::PostProcessorWorker::start()
+{
+ {
+ MutexLocker lock(mutex_);
+ ASSERT(state_ != State::Running);
+ state_ = State::Running;
+ }
+
+ Thread::start();
+}
+
+void CameraStream::PostProcessorWorker::queueRequest(Camera3RequestDescriptor::StreamBuffer *dest)
+{
+ {
+ MutexLocker lock(mutex_);
+ ASSERT(state_ == State::Running);
+ requests_.push(dest);
+ }
+
+ cv_.notify_one();
+}
+
+void CameraStream::PostProcessorWorker::run()
+{
+ MutexLocker locker(mutex_);
+
+ while (1) {
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return state_ != State::Running || !requests_.empty();
+ });
+
+ if (state_ != State::Running)
+ break;
+
+ Camera3RequestDescriptor::StreamBuffer *streamBuffer = requests_.front();
+ requests_.pop();
+ locker.unlock();
+
+ postProcessor_->process(streamBuffer);
+
+ locker.lock();
+ }
+
+ if (state_ == State::Flushing) {
+ std::queue<Camera3RequestDescriptor::StreamBuffer *> requests =
+ std::move(requests_);
+ locker.unlock();
+
+ while (!requests.empty()) {
+ postProcessor_->processComplete.emit(
+ requests.front(), PostProcessor::Status::Error);
+ requests.pop();
+ }
+
+ locker.lock();
+ state_ = State::Stopped;
+ }
+}
+
+void CameraStream::PostProcessorWorker::flush()
+{
+ MutexLocker lock(mutex_);
+ state_ = State::Flushing;
+ lock.unlock();
+
+ cv_.notify_one();
+}
diff --git a/src/android/camera_stream.h b/src/android/camera_stream.h
new file mode 100644
index 00000000..395552da
--- /dev/null
+++ b/src/android/camera_stream.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Camera HAL stream
+ */
+
+#pragma once
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "camera_request.h"
+#include "post_processor.h"
+
+class CameraDevice;
+class PlatformFrameBufferAllocator;
+
+class CameraStream
+{
+public:
+ /*
+ * Enumeration of CameraStream types.
+ *
+ * A camera stream associates an Android stream to a libcamera stream.
+ * This enumeration describes how the two streams are associated and how
+ * and where data produced from libcamera are delivered to the
+ * Android framework.
+ *
+ * Direct:
+ *
+ * The Android stream is directly mapped onto a libcamera stream: frames
+ * are delivered by the library directly in the memory location
+ * specified by the Android stream (buffer_handle_t->data) and provided
+ * to the framework as they are. The Android stream characteristics are
+ * directly translated to the libcamera stream configuration.
+ *
+ * +-----+ +-----+
+ * | A | | L |
+ * +-----+ +-----+
+ * | |
+ * V V
+ * +-----+ +------+
+ * | B |<---------------| FB |
+ * +-----+ +------+
+ *
+ *
+ * Internal:
+ *
+ * Data for the Android stream is produced by processing a libcamera
+ * stream created by the HAL for that purpose. The libcamera stream
+ * needs to be supplied with intermediate buffers where the library
+ * delivers frames to be processed and then provided to the framework.
+ * The libcamera stream configuration is not a direct translation of the
+ * Android stream characteristics, but it describes the format and size
+ * required for the processing procedure to produce frames in the
+ * Android required format.
+ *
+ * +-----+ +-----+
+ * | A | | L |
+ * +-----+ +-----+
+ * | |
+ * V V
+ * +-----+ +------+
+ * | B | | FB |
+ * +-----+ +------+
+ * ^ |
+ * |-------Processing------|
+ *
+ *
+ * Mapped:
+ *
+ * Data for the Android stream is produced by processing a libcamera
+ * stream associated with another CameraStream. Mapped camera streams do
+ * not need any memory to be reserved for them as they process data
+ * produced by libcamera for a different stream whose format and size
+ * are compatible with the processing procedure requirements to produce
+ * frames in the format required by Android.
+ *
+ * +-----+ +-----+ +-----+
+ * | A | | A' | | L |
+ * +-----+ +-----+ +-----+
+ * | | |
+ * V V V
+ * +-----+ +-----+ +------+
+ * | B | | B' |<---------| FB |
+ * +-----+ +-----+ +------+
+ * ^ |
+ * |--Processing--|
+ *
+ *
+ * --------------------------------------------------------------------
+ * A = Android stream
+ * L = libcamera stream
+ * B = memory buffer
+ * FB = libcamera FrameBuffer
+ * "Processing" = Frame processing procedure (Encoding, scaling etc)
+ */
+ enum class Type {
+ Direct,
+ Internal,
+ Mapped,
+ };
+ CameraStream(CameraDevice *const cameraDevice,
+ libcamera::CameraConfiguration *config, Type type,
+ camera3_stream_t *camera3Stream,
+ CameraStream *const sourceStream,
+ unsigned int index);
+ CameraStream(CameraStream &&other);
+ ~CameraStream();
+
+ Type type() const { return type_; }
+ camera3_stream_t *camera3Stream() const { return camera3Stream_; }
+ const libcamera::StreamConfiguration &configuration() const;
+ libcamera::Stream *stream() const;
+ CameraStream *sourceStream() const { return sourceStream_; }
+
+ int configure();
+ int process(Camera3RequestDescriptor::StreamBuffer *streamBuffer);
+ libcamera::FrameBuffer *getBuffer();
+ void putBuffer(libcamera::FrameBuffer *buffer);
+ void flush();
+
+private:
+ class PostProcessorWorker : public libcamera::Thread
+ {
+ public:
+ enum class State {
+ Stopped,
+ Running,
+ Flushing,
+ };
+
+ PostProcessorWorker(PostProcessor *postProcessor);
+ ~PostProcessorWorker();
+
+ void start();
+ void queueRequest(Camera3RequestDescriptor::StreamBuffer *request);
+ void flush();
+
+ protected:
+ void run() override;
+
+ private:
+ PostProcessor *postProcessor_;
+
+ libcamera::Mutex mutex_;
+ libcamera::ConditionVariable cv_;
+
+ std::queue<Camera3RequestDescriptor::StreamBuffer *> requests_
+ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+
+ State state_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = State::Stopped;
+ };
+
+ int waitFence(int fence);
+
+ CameraDevice *const cameraDevice_;
+ const libcamera::CameraConfiguration *config_;
+ const Type type_;
+ camera3_stream_t *camera3Stream_;
+ CameraStream *const sourceStream_;
+ const unsigned int index_;
+
+ std::unique_ptr<PlatformFrameBufferAllocator> allocator_;
+ std::vector<std::unique_ptr<libcamera::FrameBuffer>> allocatedBuffers_;
+ std::vector<libcamera::FrameBuffer *> buffers_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ /*
+ * The class has to be MoveConstructible as instances are stored in
+ * an std::vector in CameraDevice.
+ */
+ std::unique_ptr<libcamera::Mutex> mutex_;
+ std::unique_ptr<PostProcessor> postProcessor_;
+
+ std::unique_ptr<PostProcessorWorker> worker_;
+};
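How the Type for a given Android stream gets chosen is the CameraDevice's responsibility when it builds the camera configuration. A plausible decision sketch, in which both predicates are hypothetical placeholders rather than real HAL functions:

    /* Hypothetical selection logic; the real rules live in CameraDevice. */
    CameraStream::Type chooseType(bool nativelyProducible, bool reusableSource)
    {
        if (nativelyProducible)
            return CameraStream::Type::Direct;   /* zero-copy delivery */
        if (reusableSource)
            return CameraStream::Type::Mapped;   /* no extra buffers */
        return CameraStream::Type::Internal;     /* dedicated stream */
    }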
diff --git a/src/android/cros/camera3_hal.cpp b/src/android/cros/camera3_hal.cpp
new file mode 100644
index 00000000..6010a5ad
--- /dev/null
+++ b/src/android/cros/camera3_hal.cpp
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * cros-specific components of Android Camera HALv3 module
+ */
+
+#include <cros-camera/cros_camera_hal.h>
+
+#include "../camera_hal_manager.h"
+#include "../cros_mojo_token.h"
+
+static void set_up(cros::CameraMojoChannelManagerToken *token)
+{
+ gCrosMojoToken = token;
+}
+
+static void tear_down()
+{
+ delete CameraHalManager::instance();
+}
+
+cros::cros_camera_hal_t CROS_CAMERA_EXPORT CROS_CAMERA_HAL_INFO_SYM = {
+ .set_up = set_up,
+ .tear_down = tear_down
+};
diff --git a/src/android/cros/meson.build b/src/android/cros/meson.build
new file mode 100644
index 00000000..35995dd8
--- /dev/null
+++ b/src/android/cros/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+
+if get_option('android_platform') != 'cros'
+ subdir_done()
+endif
+
+android_hal_sources += files([
+ 'camera3_hal.cpp',
+])
+
+android_deps += dependency('libcros_camera')
+
+android_cpp_args += ['-DOS_CHROMEOS']
diff --git a/src/android/cros_mojo_token.h b/src/android/cros_mojo_token.h
new file mode 100644
index 00000000..d0baa80f
--- /dev/null
+++ b/src/android/cros_mojo_token.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * cros-specific mojo token
+ */
+
+#pragma once
+
+#include <cros-camera/cros_camera_hal.h>
+
+inline cros::CameraMojoChannelManagerToken *gCrosMojoToken = nullptr;
diff --git a/src/android/data/nautilus/camera_hal.yaml b/src/android/data/nautilus/camera_hal.yaml
new file mode 100644
index 00000000..2105fcca
--- /dev/null
+++ b/src/android/data/nautilus/camera_hal.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+cameras:
+ "\\_SB_.PCI0.I2C2.CAM0":
+ location: back
+ rotation: 0
+
+ "\\_SB_.PCI0.XHCI.RHUB.HS09-9:1.0-04f2:b647":
+ location: front
+ rotation: 0
diff --git a/src/android/data/soraka/camera_hal.yaml b/src/android/data/soraka/camera_hal.yaml
new file mode 100644
index 00000000..d886af06
--- /dev/null
+++ b/src/android/data/soraka/camera_hal.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+cameras:
+ "\\_SB_.PCI0.I2C4.CAM1":
+ location: front
+ rotation: 0
+
+ "\\_SB_.PCI0.I2C2.CAM0":
+ location: back
+ rotation: 0
diff --git a/src/android/frame_buffer_allocator.h b/src/android/frame_buffer_allocator.h
new file mode 100644
index 00000000..3e68641c
--- /dev/null
+++ b/src/android/frame_buffer_allocator.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Interface definition to allocate frame buffers in a
+ * platform-dependent way.
+ */
+#ifndef __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
+#define __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
+
+#include <memory>
+
+#include <libcamera/base/class.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/geometry.h>
+
+#include "hal_framebuffer.h"
+
+class CameraDevice;
+
+class PlatformFrameBufferAllocator : libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ explicit PlatformFrameBufferAllocator(CameraDevice *const cameraDevice);
+ ~PlatformFrameBufferAllocator();
+
+ /*
+ * FrameBuffer owns the underlying buffer. Returns nullptr on failure.
+ * Note: The returned FrameBuffer needs to be destroyed before
+ * PlatformFrameBufferAllocator is destroyed.
+ */
+ std::unique_ptr<HALFrameBuffer> allocate(
+ int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+};
+
+#define PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION \
+PlatformFrameBufferAllocator::PlatformFrameBufferAllocator( \
+ CameraDevice *const cameraDevice) \
+ : Extensible(std::make_unique<Private>(cameraDevice)) \
+{ \
+} \
+PlatformFrameBufferAllocator::~PlatformFrameBufferAllocator() \
+{ \
+} \
+std::unique_ptr<HALFrameBuffer> \
+PlatformFrameBufferAllocator::allocate(int halPixelFormat, \
+ const libcamera::Size &size, \
+ uint32_t usage) \
+{ \
+ return _d()->allocate(halPixelFormat, size, usage); \
+}
+
+#endif /* __ANDROID_FRAME_BUFFER_ALLOCATOR_H__ */
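Each memory backend supplies the Private class and then expands the macro above. A skeletal backend, loosely modelled on the in-tree allocators with all allocation details elided, could look like this:

    class PlatformFrameBufferAllocator::Private
        : public libcamera::Extensible::Private
    {
        LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator)

    public:
        Private(CameraDevice *const cameraDevice)
            : cameraDevice_(cameraDevice)
        {
        }

        std::unique_ptr<HALFrameBuffer> allocate(int halPixelFormat,
                                                 const libcamera::Size &size,
                                                 uint32_t usage)
        {
            /* Platform-specific buffer allocation would go here. */
            return nullptr;
        }

    private:
        CameraDevice *const cameraDevice_;
    };

    PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION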
diff --git a/src/android/hal_framebuffer.cpp b/src/android/hal_framebuffer.cpp
new file mode 100644
index 00000000..d4899f45
--- /dev/null
+++ b/src/android/hal_framebuffer.cpp
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#include "hal_framebuffer.h"
+
+#include <hardware/camera3.h>
+
+HALFrameBuffer::HALFrameBuffer(std::unique_ptr<Private> d,
+ buffer_handle_t handle)
+ : FrameBuffer(std::move(d)), handle_(handle)
+{
+}
+
+HALFrameBuffer::HALFrameBuffer(const std::vector<Plane> &planes,
+ buffer_handle_t handle)
+ : FrameBuffer(planes), handle_(handle)
+{
+}
diff --git a/src/android/hal_framebuffer.h b/src/android/hal_framebuffer.h
new file mode 100644
index 00000000..cea49e2d
--- /dev/null
+++ b/src/android/hal_framebuffer.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#pragma once
+
+#include "libcamera/internal/framebuffer.h"
+
+#include <hardware/camera3.h>
+
+class HALFrameBuffer final : public libcamera::FrameBuffer
+{
+public:
+ HALFrameBuffer(std::unique_ptr<Private> d,
+ buffer_handle_t handle);
+ HALFrameBuffer(const std::vector<Plane> &planes,
+ buffer_handle_t handle);
+
+ buffer_handle_t handle() const { return handle_; }
+
+private:
+ buffer_handle_t handle_;
+};
diff --git a/src/android/jpeg/encoder.h b/src/android/jpeg/encoder.h
new file mode 100644
index 00000000..ed033c19
--- /dev/null
+++ b/src/android/jpeg/encoder.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image encoding interface
+ */
+
+#pragma once
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+#include "../camera_request.h"
+
+class Encoder
+{
+public:
+ virtual ~Encoder() = default;
+
+ virtual int configure(const libcamera::StreamConfiguration &cfg) = 0;
+ virtual int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality) = 0;
+};
diff --git a/src/android/jpeg/encoder_jea.cpp b/src/android/jpeg/encoder_jea.cpp
new file mode 100644
index 00000000..25dc4317
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#include "encoder_jea.h"
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <cros-camera/camera_mojo_channel_manager_token.h>
+
+#include "../cros_mojo_token.h"
+#include "../hal_framebuffer.h"
+
+EncoderJea::EncoderJea() = default;
+
+EncoderJea::~EncoderJea() = default;
+
+int EncoderJea::configure(const libcamera::StreamConfiguration &cfg)
+{
+ size_ = cfg.size;
+
+ if (jpegCompressor_)
+ return 0;
+
+ if (gCrosMojoToken == nullptr)
+ return -ENOTSUP;
+
+ jpegCompressor_ = cros::JpegCompressor::GetInstance(gCrosMojoToken);
+
+ return 0;
+}
+
+int EncoderJea::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality)
+{
+ if (!jpegCompressor_)
+ return -ENOTSUP;
+
+ uint32_t outDataSize = 0;
+ const HALFrameBuffer *fb =
+ dynamic_cast<const HALFrameBuffer *>(buffer->srcBuffer);
+
+ if (!jpegCompressor_->CompressImageFromHandle(fb->handle(),
+ *buffer->camera3Buffer,
+ size_.width, size_.height,
+ quality, exifData.data(),
+ exifData.size(),
+ &outDataSize))
+ return -EBUSY;
+
+ return outDataSize;
+}
diff --git a/src/android/jpeg/encoder_jea.h b/src/android/jpeg/encoder_jea.h
new file mode 100644
index 00000000..91115d2e
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#pragma once
+
+#include <libcamera/geometry.h>
+
+#include <cros-camera/jpeg_compressor.h>
+
+#include "encoder.h"
+
+class EncoderJea : public Encoder
+{
+public:
+ EncoderJea();
+ ~EncoderJea();
+
+ int configure(const libcamera::StreamConfiguration &cfg) override;
+ int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality) override;
+
+private:
+ libcamera::Size size_;
+
+ std::unique_ptr<cros::JpegCompressor> jpegCompressor_;
+};
diff --git a/src/android/jpeg/encoder_libjpeg.cpp b/src/android/jpeg/encoder_libjpeg.cpp
new file mode 100644
index 00000000..cb242b5e
--- /dev/null
+++ b/src/android/jpeg/encoder_libjpeg.cpp
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG encoding using libjpeg native API
+ */
+
+#include "encoder_libjpeg.h"
+
+#include <fcntl.h>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "../camera_buffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(JPEG)
+
+namespace {
+
+struct JPEGPixelFormatInfo {
+ J_COLOR_SPACE colorSpace;
+ const PixelFormatInfo &pixelFormatInfo;
+ bool nvSwap;
+};
+
+const std::map<PixelFormat, JPEGPixelFormatInfo> pixelInfo{
+ { formats::R8, { JCS_GRAYSCALE, PixelFormatInfo::info(formats::R8), false } },
+
+ { formats::RGB888, { JCS_EXT_BGR, PixelFormatInfo::info(formats::RGB888), false } },
+ { formats::BGR888, { JCS_EXT_RGB, PixelFormatInfo::info(formats::BGR888), false } },
+
+ { formats::NV12, { JCS_YCbCr, PixelFormatInfo::info(formats::NV12), false } },
+ { formats::NV21, { JCS_YCbCr, PixelFormatInfo::info(formats::NV21), true } },
+ { formats::NV16, { JCS_YCbCr, PixelFormatInfo::info(formats::NV16), false } },
+ { formats::NV61, { JCS_YCbCr, PixelFormatInfo::info(formats::NV61), true } },
+ { formats::NV24, { JCS_YCbCr, PixelFormatInfo::info(formats::NV24), false } },
+ { formats::NV42, { JCS_YCbCr, PixelFormatInfo::info(formats::NV42), true } },
+};
+
+const struct JPEGPixelFormatInfo &findPixelInfo(const PixelFormat &format)
+{
+ static const struct JPEGPixelFormatInfo invalidPixelFormat {
+ JCS_UNKNOWN, PixelFormatInfo(), false
+ };
+
+ const auto iter = pixelInfo.find(format);
+ if (iter == pixelInfo.end()) {
+ LOG(JPEG, Error) << "Unsupported pixel format for JPEG encoder: "
+ << format;
+ return invalidPixelFormat;
+ }
+
+ return iter->second;
+}
+
+} /* namespace */
+
+EncoderLibJpeg::EncoderLibJpeg()
+{
+ /* \todo Expand error handling coverage with a custom handler. */
+ compress_.err = jpeg_std_error(&jerr_);
+
+ jpeg_create_compress(&compress_);
+}
+
+EncoderLibJpeg::~EncoderLibJpeg()
+{
+ jpeg_destroy_compress(&compress_);
+}
+
+int EncoderLibJpeg::configure(const StreamConfiguration &cfg)
+{
+ const struct JPEGPixelFormatInfo info = findPixelInfo(cfg.pixelFormat);
+ if (info.colorSpace == JCS_UNKNOWN)
+ return -ENOTSUP;
+
+ compress_.image_width = cfg.size.width;
+ compress_.image_height = cfg.size.height;
+ compress_.in_color_space = info.colorSpace;
+
+ compress_.input_components = info.colorSpace == JCS_GRAYSCALE ? 1 : 3;
+
+ jpeg_set_defaults(&compress_);
+
+ pixelFormatInfo_ = &info.pixelFormatInfo;
+
+ nv_ = pixelFormatInfo_->numPlanes() == 2;
+ nvSwap_ = info.nvSwap;
+
+ return 0;
+}
+
+void EncoderLibJpeg::compressRGB(const std::vector<Span<uint8_t>> &planes)
+{
+ unsigned char *src = const_cast<unsigned char *>(planes[0].data());
+ /* \todo Stride information should come from buffer configuration. */
+ unsigned int stride = pixelFormatInfo_->stride(compress_.image_width, 0);
+
+ JSAMPROW row_pointer[1];
+
+ while (compress_.next_scanline < compress_.image_height) {
+ row_pointer[0] = &src[compress_.next_scanline * stride];
+ jpeg_write_scanlines(&compress_, row_pointer, 1);
+ }
+}
+
+/*
+ * Compress the incoming buffer from a supported NV format.
+ * This naively unpacks the semi-planar NV12 to a YUV888 format for libjpeg.
+ */
+void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes)
+{
+ std::vector<uint8_t> tmprowbuf(compress_.image_width * 3);
+
+ /*
+ * \todo Use the raw api, and only unpack the cb/cr samples to new line
+ * buffers. If possible, see if we can set appropriate pixel strides
+ * too to save even that copy.
+ *
+ * Possible hints at:
+ * https://sourceforge.net/p/libjpeg/mailman/message/30815123/
+ */
+ unsigned int y_stride = pixelFormatInfo_->stride(compress_.image_width, 0);
+ unsigned int c_stride = pixelFormatInfo_->stride(compress_.image_width, 1);
+
+ unsigned int horzSubSample = 2 * compress_.image_width / c_stride;
+ unsigned int vertSubSample = pixelFormatInfo_->planes[1].verticalSubSampling;
+
+ unsigned int c_inc = horzSubSample == 1 ? 2 : 0;
+ unsigned int cb_pos = nvSwap_ ? 1 : 0;
+ unsigned int cr_pos = nvSwap_ ? 0 : 1;
+
+ const unsigned char *src = planes[0].data();
+ const unsigned char *src_c = planes[1].data();
+
+ JSAMPROW row_pointer[1];
+ row_pointer[0] = tmprowbuf.data();
+
+ for (unsigned int y = 0; y < compress_.image_height; y++) {
+ unsigned char *dst = tmprowbuf.data();
+
+ const unsigned char *src_y = src + y * y_stride;
+ const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos;
+ const unsigned char *src_cr = src_c + (y / vertSubSample) * c_stride + cr_pos;
+
+ for (unsigned int x = 0; x < compress_.image_width; x += 2) {
+ dst[0] = *src_y;
+ dst[1] = *src_cb;
+ dst[2] = *src_cr;
+ src_y++;
+ src_cb += c_inc;
+ src_cr += c_inc;
+ dst += 3;
+
+ dst[0] = *src_y;
+ dst[1] = *src_cb;
+ dst[2] = *src_cr;
+ src_y++;
+ src_cb += 2;
+ src_cr += 2;
+ dst += 3;
+ }
+
+ jpeg_write_scanlines(&compress_, row_pointer, 1);
+ }
+}
+
+int EncoderLibJpeg::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality)
+{
+ MappedFrameBuffer frame(buffer->srcBuffer,
+ MappedFrameBuffer::MapFlag::Read);
+ if (!frame.isValid()) {
+ LOG(JPEG, Error) << "Failed to map FrameBuffer : "
+ << strerror(frame.error());
+ return frame.error();
+ }
+
+ return encode(frame.planes(), buffer->dstBuffer->plane(0),
+ exifData, quality);
+}
+
+int EncoderLibJpeg::encode(const std::vector<Span<uint8_t>> &src,
+ Span<uint8_t> dest, Span<const uint8_t> exifData,
+ unsigned int quality)
+{
+ unsigned char *destination = dest.data();
+ unsigned long size = dest.size();
+
+ jpeg_set_quality(&compress_, quality, TRUE);
+
+ /*
+ * jpeg_mem_dest() will reallocate the output memory if the required
+ * size is not sufficient, in which case the output is written to the
+ * reallocated memory instead of the buffer provided by the caller.
+ *
+ * \todo Implement our own custom memory destination to prevent
+ * reallocation and prefer failure with correct reporting.
+ */
+ jpeg_mem_dest(&compress_, &destination, &size);
+
+ jpeg_start_compress(&compress_, TRUE);
+
+ if (exifData.size())
+ /* Store Exif data in the JPEG_APP1 data block. */
+ jpeg_write_marker(&compress_, JPEG_APP0 + 1,
+ static_cast<const JOCTET *>(exifData.data()),
+ exifData.size());
+
+ LOG(JPEG, Debug) << "JPEG Encode Starting:" << compress_.image_width
+ << "x" << compress_.image_height;
+
+ ASSERT(src.size() == pixelFormatInfo_->numPlanes());
+
+ if (nv_)
+ compressNV(src);
+ else
+ compressRGB(src);
+
+ jpeg_finish_compress(&compress_);
+
+ return size;
+}
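A condensed sketch of driving the planes-based overload directly; the frame geometry is arbitrary, and planes, dest and exifData are assumed to be provided by the caller (mapped NV12 source planes, a caller-allocated output buffer and an optional Exif payload):

    /* Illustrative only; inputs are the caller's responsibility. */
    int encodeNV12Frame(const std::vector<libcamera::Span<uint8_t>> &planes,
                        libcamera::Span<uint8_t> dest,
                        libcamera::Span<const uint8_t> exifData)
    {
        EncoderLibJpeg encoder;

        libcamera::StreamConfiguration cfg;
        cfg.size = { 640, 480 };
        cfg.pixelFormat = libcamera::formats::NV12;

        int ret = encoder.configure(cfg);
        if (ret < 0)
            return ret;

        /* Returns the compressed size in bytes on success. */
        return encoder.encode(planes, dest, exifData, 95);
    }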
diff --git a/src/android/jpeg/encoder_libjpeg.h b/src/android/jpeg/encoder_libjpeg.h
new file mode 100644
index 00000000..4ac85c22
--- /dev/null
+++ b/src/android/jpeg/encoder_libjpeg.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG encoding using libjpeg
+ */
+
+#pragma once
+
+#include "encoder.h"
+
+#include <vector>
+
+#include "libcamera/internal/formats.h"
+
+#include <jpeglib.h>
+
+class EncoderLibJpeg : public Encoder
+{
+public:
+ EncoderLibJpeg();
+ ~EncoderLibJpeg();
+
+ int configure(const libcamera::StreamConfiguration &cfg) override;
+ int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality) override;
+ int encode(const std::vector<libcamera::Span<uint8_t>> &planes,
+ libcamera::Span<uint8_t> destination,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality);
+
+private:
+ void compressRGB(const std::vector<libcamera::Span<uint8_t>> &planes);
+ void compressNV(const std::vector<libcamera::Span<uint8_t>> &planes);
+
+ struct jpeg_compress_struct compress_;
+ struct jpeg_error_mgr jerr_;
+
+ const libcamera::PixelFormatInfo *pixelFormatInfo_;
+
+ bool nv_;
+ bool nvSwap_;
+};
diff --git a/src/android/jpeg/exif.cpp b/src/android/jpeg/exif.cpp
new file mode 100644
index 00000000..b8c871df
--- /dev/null
+++ b/src/android/jpeg/exif.cpp
@@ -0,0 +1,522 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * EXIF tag creation using libexif
+ */
+
+#include "exif.h"
+
+#include <cmath>
+#include <iomanip>
+#include <map>
+#include <sstream>
+#include <tuple>
+#include <uchar.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(EXIF)
+
+/*
+ * List of EXIF tags that we set directly because they are not supported
+ * by libexif version 0.6.21.
+ */
+enum class _ExifTag {
+ OFFSET_TIME = 0x9010,
+ OFFSET_TIME_ORIGINAL = 0x9011,
+ OFFSET_TIME_DIGITIZED = 0x9012,
+};
+
+/*
+ * The Exif class should be instantiated and specific properties set
+ * through the exposed public API.
+ *
+ * Once all desired properties have been set, the user shall call
+ * generate() to process the entries and generate the Exif data.
+ *
+ * Calls to generate() must check the return code to determine if any error
+ * occurred during the construction of the Exif data, and if successful the
+ * data can be obtained using the data() function.
+ */
+Exif::Exif()
+ : valid_(false), data_(nullptr), order_(EXIF_BYTE_ORDER_INTEL),
+ exifData_(0), size_(0)
+{
+ /* Create an ExifMem allocator to construct entries. */
+ mem_ = exif_mem_new_default();
+ if (!mem_) {
+ LOG(EXIF, Error) << "Failed to allocate ExifMem Allocator";
+ return;
+ }
+
+ data_ = exif_data_new_mem(mem_);
+ if (!data_) {
+ LOG(EXIF, Error) << "Failed to allocate an ExifData structure";
+ return;
+ }
+
+ valid_ = true;
+
+ exif_data_set_option(data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(data_, EXIF_DATA_TYPE_COMPRESSED);
+
+ /*
+ * Big-Endian: EXIF_BYTE_ORDER_MOTOROLA
+ * Little-Endian: EXIF_BYTE_ORDER_INTEL
+ */
+ exif_data_set_byte_order(data_, order_);
+
+ setString(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION,
+ EXIF_FORMAT_UNDEFINED, "0231");
+
+ /* Create the mandatory EXIF fields with default data. */
+ exif_data_fix(data_);
+}
+
+Exif::~Exif()
+{
+ if (exifData_)
+ free(exifData_);
+
+ if (data_) {
+ /*
+ * Reset thumbnail data to avoid getting double-freed by
+ * libexif. It is owned by the caller (i.e. PostProcessorJpeg).
+ */
+ data_->data = nullptr;
+ data_->size = 0;
+
+ exif_data_unref(data_);
+ }
+
+ if (mem_)
+ exif_mem_unref(mem_);
+}
+
+ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag)
+{
+ ExifContent *content = data_->ifd[ifd];
+ ExifEntry *entry = exif_content_get_entry(content, tag);
+
+ if (entry) {
+ exif_entry_ref(entry);
+ return entry;
+ }
+
+ entry = exif_entry_new_mem(mem_);
+ if (!entry) {
+ LOG(EXIF, Error) << "Failed to allocated new entry";
+ valid_ = false;
+ return nullptr;
+ }
+
+ exif_content_add_entry(content, entry);
+ exif_entry_initialize(entry, tag);
+
+ return entry;
+}
+
+ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ unsigned long components, unsigned int size)
+{
+ ExifContent *content = data_->ifd[ifd];
+
+ /* Replace any existing entry with the same tag. */
+ ExifEntry *existing = exif_content_get_entry(content, tag);
+ exif_content_remove_entry(content, existing);
+
+ ExifEntry *entry = exif_entry_new_mem(mem_);
+ if (!entry) {
+ LOG(EXIF, Error) << "Failed to allocated new entry";
+ valid_ = false;
+ return nullptr;
+ }
+
+ void *buffer = exif_mem_alloc(mem_, size);
+ if (!buffer) {
+ LOG(EXIF, Error) << "Failed to allocate buffer for variable entry";
+ exif_mem_unref(mem_);
+ valid_ = false;
+ return nullptr;
+ }
+
+ entry->data = static_cast<unsigned char *>(buffer);
+ entry->components = components;
+ entry->format = format;
+ entry->size = size;
+ entry->tag = tag;
+
+ exif_content_add_entry(content, entry);
+
+ return entry;
+}
+
+void Exif::setByte(ExifIfd ifd, ExifTag tag, uint8_t item)
+{
+ ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_BYTE, 1, 1);
+ if (!entry)
+ return;
+
+ entry->data[0] = item;
+ exif_entry_unref(entry);
+}
+
+void Exif::setShort(ExifIfd ifd, ExifTag tag, uint16_t item)
+{
+ ExifEntry *entry = createEntry(ifd, tag);
+ if (!entry)
+ return;
+
+ exif_set_short(entry->data, order_, item);
+ exif_entry_unref(entry);
+}
+
+void Exif::setLong(ExifIfd ifd, ExifTag tag, uint32_t item)
+{
+ ExifEntry *entry = createEntry(ifd, tag);
+ if (!entry)
+ return;
+
+ exif_set_long(entry->data, order_, item);
+ exif_entry_unref(entry);
+}
+
+void Exif::setRational(ExifIfd ifd, ExifTag tag, ExifRational item)
+{
+ setRational(ifd, tag, { &item, 1 });
+}
+
+void Exif::setRational(ExifIfd ifd, ExifTag tag, Span<const ExifRational> items)
+{
+ ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_RATIONAL,
+ items.size(),
+ items.size() * sizeof(ExifRational));
+ if (!entry)
+ return;
+
+ for (size_t i = 0; i < items.size(); i++)
+ exif_set_rational(entry->data + i * sizeof(ExifRational),
+ order_, items[i]);
+ exif_entry_unref(entry);
+}
+
+static const std::map<Exif::StringEncoding, std::array<uint8_t, 8>> stringEncodingCodes = {
+ { Exif::ASCII, { 0x41, 0x53, 0x43, 0x49, 0x49, 0x00, 0x00, 0x00 } },
+ { Exif::Unicode, { 0x55, 0x4e, 0x49, 0x43, 0x4f, 0x44, 0x45, 0x00 } },
+};
+
+void Exif::setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string &item, StringEncoding encoding)
+{
+ std::string ascii;
+ size_t length;
+ const char *str;
+ std::vector<uint8_t> buf;
+
+ if (format == EXIF_FORMAT_ASCII) {
+ ascii = utils::toAscii(item);
+ str = ascii.c_str();
+
+ /* Pad 1 extra byte to null-terminate the ASCII string. */
+ length = ascii.length() + 1;
+ } else {
+ std::u16string u16str;
+
+ auto encodingString = stringEncodingCodes.find(encoding);
+ if (encodingString != stringEncodingCodes.end()) {
+ buf = {
+ encodingString->second.begin(),
+ encodingString->second.end()
+ };
+ }
+
+ switch (encoding) {
+ case Unicode:
+ u16str = utf8ToUtf16(item);
+
+ buf.resize(8 + u16str.size() * 2);
+ for (size_t i = 0; i < u16str.size(); i++) {
+ if (order_ == EXIF_BYTE_ORDER_INTEL) {
+ buf[8 + 2 * i] = u16str[i] & 0xff;
+ buf[8 + 2 * i + 1] = (u16str[i] >> 8) & 0xff;
+ } else {
+ buf[8 + 2 * i] = (u16str[i] >> 8) & 0xff;
+ buf[8 + 2 * i + 1] = u16str[i] & 0xff;
+ }
+ }
+
+ break;
+
+ case ASCII:
+ case NoEncoding:
+ buf.insert(buf.end(), item.begin(), item.end());
+ break;
+ }
+
+ str = reinterpret_cast<const char *>(buf.data());
+
+ /*
+ * Strings stored in different formats (EXIF_FORMAT_UNDEFINED)
+ * are not null-terminated.
+ */
+ length = buf.size();
+ }
+
+ ExifEntry *entry = createEntry(ifd, tag, format, length, length);
+ if (!entry)
+ return;
+
+ memcpy(entry->data, str, length);
+ exif_entry_unref(entry);
+}
+
+void Exif::setMake(const std::string &make)
+{
+ setString(EXIF_IFD_0, EXIF_TAG_MAKE, EXIF_FORMAT_ASCII, make);
+}
+
+void Exif::setModel(const std::string &model)
+{
+ setString(EXIF_IFD_0, EXIF_TAG_MODEL, EXIF_FORMAT_ASCII, model);
+}
+
+void Exif::setSize(const Size &size)
+{
+ setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, size.height);
+ setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, size.width);
+}
+
+void Exif::setTimestamp(time_t timestamp, std::chrono::milliseconds msec)
+{
+ struct tm tm;
+ localtime_r(&timestamp, &tm);
+
+ char str[20];
+ strftime(str, sizeof(str), "%Y:%m:%d %H:%M:%S", &tm);
+ std::string ts(str);
+
+ setString(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, ts);
+ setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, ts);
+ setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, ts);
+
+ /* Query and set timezone information if available. */
+ int r = strftime(str, sizeof(str), "%z", &tm);
+ if (r <= 0)
+ return;
+
+ std::string tz(str);
+ tz.insert(3, 1, ':');
+ setString(EXIF_IFD_EXIF,
+ static_cast<ExifTag>(_ExifTag::OFFSET_TIME),
+ EXIF_FORMAT_ASCII, tz);
+ setString(EXIF_IFD_EXIF,
+ static_cast<ExifTag>(_ExifTag::OFFSET_TIME_ORIGINAL),
+ EXIF_FORMAT_ASCII, tz);
+ setString(EXIF_IFD_EXIF,
+ static_cast<ExifTag>(_ExifTag::OFFSET_TIME_DIGITIZED),
+ EXIF_FORMAT_ASCII, tz);
+
+ std::stringstream sstr;
+ sstr << std::setfill('0') << std::setw(3) << msec.count();
+ std::string subsec = sstr.str();
+
+ setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME,
+ EXIF_FORMAT_ASCII, subsec);
+ setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL,
+ EXIF_FORMAT_ASCII, subsec);
+ setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED,
+ EXIF_FORMAT_ASCII, subsec);
+}
+
+void Exif::setGPSDateTimestamp(time_t timestamp)
+{
+ struct tm tm;
+ gmtime_r(&timestamp, &tm);
+
+ char str[11];
+ strftime(str, sizeof(str), "%Y:%m:%d", &tm);
+ std::string tsStr(str);
+
+ setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP),
+ EXIF_FORMAT_ASCII, tsStr);
+
+ /* Set GPS_TIME_STAMP */
+ ExifRational ts[] = {
+ { static_cast<ExifLong>(tm.tm_hour), 1 },
+ { static_cast<ExifLong>(tm.tm_min), 1 },
+ { static_cast<ExifLong>(tm.tm_sec), 1 },
+ };
+
+ setRational(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP),
+ ts);
+}
+
+std::tuple<int, int, int> Exif::degreesToDMS(double decimalDegrees)
+{
+ int degrees = std::trunc(decimalDegrees);
+ double minutes = std::abs((decimalDegrees - degrees) * 60);
+ double seconds = (minutes - std::trunc(minutes)) * 60;
+
+ return { degrees, std::trunc(minutes), std::round(seconds) };
+}
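As a worked example, 37.7749 degrees decomposes as degrees = 37, minutes = 0.7749 x 60 = 46.494 (truncated to 46) and seconds = 0.494 x 60 = 29.64 (rounded to 30), so degreesToDMS(37.7749) yields { 37, 46, 30 }.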
+
+void Exif::setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec)
+{
+ ExifRational coords[] = {
+ { static_cast<ExifLong>(deg), 1 },
+ { static_cast<ExifLong>(min), 1 },
+ { static_cast<ExifLong>(sec), 1 },
+ };
+
+ setRational(ifd, tag, coords);
+}
+
+/*
+ * \brief Set GPS location (lat, long, alt)
+ * \param[in] coords Pointer to coordinates latitude, longitude, and altitude,
+ * first two in degrees, the third in meters
+ */
+void Exif::setGPSLocation(const double *coords)
+{
+ int deg, min, sec;
+
+ std::tie<int, int, int>(deg, min, sec) = degreesToDMS(coords[0]);
+ setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF),
+ EXIF_FORMAT_ASCII, deg >= 0 ? "N" : "S");
+ setGPSDMS(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE),
+ std::abs(deg), min, sec);
+
+ std::tie<int, int, int>(deg, min, sec) = degreesToDMS(coords[1]);
+ setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF),
+ EXIF_FORMAT_ASCII, deg >= 0 ? "E" : "W");
+ setGPSDMS(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE),
+ std::abs(deg), min, sec);
+
+ setByte(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF),
+ coords[2] >= 0 ? 0 : 1);
+ setRational(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE),
+ ExifRational{ static_cast<ExifLong>(std::abs(coords[2])), 1 });
+}
+
+void Exif::setGPSMethod(const std::string &method)
+{
+ setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD),
+ EXIF_FORMAT_UNDEFINED, method, NoEncoding);
+}
+
+void Exif::setOrientation(int orientation)
+{
+ int value;
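+	/*
+	 * Map the rotation angle to the EXIF orientation values: 1 is
+	 * upright, 6 is rotated 90 degrees clockwise, 3 is rotated 180
+	 * degrees and 8 is rotated 270 degrees clockwise.
+	 */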
+ switch (orientation) {
+ case 0:
+ default:
+ value = 1;
+ break;
+ case 90:
+ value = 6;
+ break;
+ case 180:
+ value = 3;
+ break;
+ case 270:
+ value = 8;
+ break;
+ }
+
+ setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
+}
+
+void Exif::setThumbnail(std::vector<unsigned char> &&thumbnail,
+ Compression compression)
+{
+ thumbnailData_ = std::move(thumbnail);
+
+ data_->data = thumbnailData_.data();
+ data_->size = thumbnailData_.size();
+
+ setShort(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression);
+}
+
+void Exif::setFocalLength(float length)
+{
+ ExifRational rational = { static_cast<ExifLong>(length * 1000), 1000 };
+ setRational(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, rational);
+}
+
+void Exif::setExposureTime(uint64_t nsec)
+{
+ ExifRational rational = { static_cast<ExifLong>(nsec), 1000000000 };
+ setRational(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME, rational);
+}
+
+void Exif::setAperture(float size)
+{
+ ExifRational rational = { static_cast<ExifLong>(size * 10000), 10000 };
+ setRational(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER, rational);
+}
+
+void Exif::setISO(uint16_t iso)
+{
+ setShort(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso);
+}
+
+void Exif::setFlash(Flash flash)
+{
+ setShort(EXIF_IFD_EXIF, EXIF_TAG_FLASH, static_cast<ExifShort>(flash));
+}
+
+void Exif::setWhiteBalance(WhiteBalance wb)
+{
+ setShort(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, static_cast<ExifShort>(wb));
+}
+
+/**
+ * \brief Convert UTF-8 string to UTF-16 string
+ * \param[in] str String to convert
+ *
+ * \return \a str in UTF-16
+ */
+std::u16string Exif::utf8ToUtf16(const std::string &str)
+{
+ mbstate_t state{};
+ char16_t c16;
+ const char *ptr = str.data();
+ const char *end = ptr + str.size();
+
+ std::u16string ret;
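+	/*
+	 * Pass one byte more than the string length so that mbrtoc16() sees
+	 * the terminating NUL, for which it returns 0 and ends the loop.
+	 */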
+ while (size_t rc = mbrtoc16(&c16, ptr, end - ptr + 1, &state)) {
+ if (rc == static_cast<size_t>(-2) ||
+ rc == static_cast<size_t>(-1))
+ break;
+
+ ret.push_back(c16);
+
+		/*
+		 * A return value of (size_t)-3 flags a trailing surrogate
+		 * for which no input bytes were consumed.
+		 */
+		if (rc != static_cast<size_t>(-3))
+			ptr += rc;
+ }
+
+ return ret;
+}
+
+[[nodiscard]] int Exif::generate()
+{
+ if (exifData_) {
+ free(exifData_);
+ exifData_ = nullptr;
+ }
+
+ if (!valid_) {
+		LOG(EXIF, Error) << "EXIF data is invalid, not generating";
+ return -1;
+ }
+
+ exif_data_save_data(data_, &exifData_, &size_);
+
+ LOG(EXIF, Debug) << "Created EXIF instance (" << size_ << " bytes)";
+
+ return 0;
+}
diff --git a/src/android/jpeg/exif.h b/src/android/jpeg/exif.h
new file mode 100644
index 00000000..446d53f3
--- /dev/null
+++ b/src/android/jpeg/exif.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * EXIF tag creator using libexif
+ */
+
+#pragma once
+
+#include <chrono>
+#include <string>
+#include <time.h>
+#include <vector>
+
+#include <libexif/exif-data.h>
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/geometry.h>
+
+class Exif
+{
+public:
+ Exif();
+ ~Exif();
+
+ enum Compression {
+ None = 1,
+ JPEG = 6,
+ };
+
+ enum Flash {
+ /* bit 0 */
+ Fired = 0x01,
+ /* bits 1 and 2 */
+ StrobeDetected = 0x04,
+ StrobeNotDetected = 0x06,
+ /* bits 3 and 4 */
+ ModeCompulsoryFiring = 0x08,
+ ModeCompulsorySuppression = 0x10,
+ ModeAuto = 0x18,
+ /* bit 5 */
+ FlashNotPresent = 0x20,
+ /* bit 6 */
+ RedEye = 0x40,
+ };
+
+ enum WhiteBalance {
+ Auto = 0,
+ Manual = 1,
+ };
+
+ enum StringEncoding {
+ NoEncoding = 0,
+ ASCII = 1,
+ Unicode = 2,
+ };
+
+ void setMake(const std::string &make);
+ void setModel(const std::string &model);
+
+ void setOrientation(int orientation);
+ void setSize(const libcamera::Size &size);
+ void setThumbnail(std::vector<unsigned char> &&thumbnail,
+ Compression compression);
+ void setTimestamp(time_t timestamp, std::chrono::milliseconds msec);
+
+ void setGPSDateTimestamp(time_t timestamp);
+ void setGPSLocation(const double *coords);
+ void setGPSMethod(const std::string &method);
+
+ void setFocalLength(float length);
+ void setExposureTime(uint64_t nsec);
+ void setAperture(float size);
+ void setISO(uint16_t iso);
+ void setFlash(Flash flash);
+ void setWhiteBalance(WhiteBalance wb);
+
+ libcamera::Span<const uint8_t> data() const { return { exifData_, size_ }; }
+ [[nodiscard]] int generate();
+
+private:
+ ExifEntry *createEntry(ExifIfd ifd, ExifTag tag);
+ ExifEntry *createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ unsigned long components, unsigned int size);
+
+ void setByte(ExifIfd ifd, ExifTag tag, uint8_t item);
+ void setShort(ExifIfd ifd, ExifTag tag, uint16_t item);
+ void setLong(ExifIfd ifd, ExifTag tag, uint32_t item);
+ void setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string &item,
+ StringEncoding encoding = NoEncoding);
+ void setRational(ExifIfd ifd, ExifTag tag, ExifRational item);
+ void setRational(ExifIfd ifd, ExifTag tag,
+ libcamera::Span<const ExifRational> items);
+
+ std::tuple<int, int, int> degreesToDMS(double decimalDegrees);
+ void setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec);
+
+ std::u16string utf8ToUtf16(const std::string &str);
+
+ bool valid_;
+
+ ExifData *data_;
+ ExifMem *mem_;
+ ExifByteOrder order_;
+
+ unsigned char *exifData_;
+ unsigned int size_;
+
+ std::vector<unsigned char> thumbnailData_;
+};
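+
+/*
+ * Usage sketch (illustrative only; "Acme" and consume() are hypothetical):
+ *
+ *   Exif exif;
+ *   exif.setMake("Acme");
+ *   exif.setSize(libcamera::Size(1920, 1080));
+ *   exif.setTimestamp(std::time(nullptr), std::chrono::milliseconds(0));
+ *   if (!exif.generate())
+ *           consume(exif.data());
+ */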
diff --git a/src/android/jpeg/meson.build b/src/android/jpeg/meson.build
new file mode 100644
index 00000000..3402e614
--- /dev/null
+++ b/src/android/jpeg/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+android_hal_sources += files([
+ 'encoder_libjpeg.cpp',
+ 'exif.cpp',
+ 'post_processor_jpeg.cpp',
+ 'thumbnailer.cpp'
+])
+
+platform = get_option('android_platform')
+if platform == 'cros'
+ android_hal_sources += files(['encoder_jea.cpp'])
+ android_deps += [dependency('libcros_camera')]
+endif
diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
new file mode 100644
index 00000000..89b8a401
--- /dev/null
+++ b/src/android/jpeg/post_processor_jpeg.cpp
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG Post Processor
+ */
+
+#include "post_processor_jpeg.h"
+
+#include <chrono>
+
+#include "../camera_device.h"
+#include "../camera_metadata.h"
+#include "../camera_request.h"
+#if defined(OS_CHROMEOS)
+#include "encoder_jea.h"
+#else /* !defined(OS_CHROMEOS) */
+#include "encoder_libjpeg.h"
+#endif
+#include "exif.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+
+using namespace libcamera;
+using namespace std::chrono_literals;
+
+LOG_DEFINE_CATEGORY(JPEG)
+
+PostProcessorJpeg::PostProcessorJpeg(CameraDevice *const device)
+ : cameraDevice_(device)
+{
+}
+
+int PostProcessorJpeg::configure(const StreamConfiguration &inCfg,
+ const StreamConfiguration &outCfg)
+{
+ if (inCfg.size != outCfg.size) {
+ LOG(JPEG, Error) << "Mismatch of input and output stream sizes";
+ return -EINVAL;
+ }
+
+ if (outCfg.pixelFormat != formats::MJPEG) {
+ LOG(JPEG, Error) << "Output stream pixel format is not JPEG";
+ return -EINVAL;
+ }
+
+ streamSize_ = outCfg.size;
+
+ thumbnailer_.configure(inCfg.size, inCfg.pixelFormat);
+
+#if defined(OS_CHROMEOS)
+ encoder_ = std::make_unique<EncoderJea>();
+#else /* !defined(OS_CHROMEOS) */
+ encoder_ = std::make_unique<EncoderLibJpeg>();
+#endif
+
+ return encoder_->configure(inCfg);
+}
+
+void PostProcessorJpeg::generateThumbnail(const FrameBuffer &source,
+ const Size &targetSize,
+ unsigned int quality,
+ std::vector<unsigned char> *thumbnail)
+{
+ /* Stores the raw scaled-down thumbnail bytes. */
+ std::vector<unsigned char> rawThumbnail;
+
+ thumbnailer_.createThumbnail(source, targetSize, &rawThumbnail);
+
+ StreamConfiguration thCfg;
+ thCfg.size = targetSize;
+ thCfg.pixelFormat = thumbnailer_.pixelFormat();
+ int ret = thumbnailEncoder_.configure(thCfg);
+
+ if (!rawThumbnail.empty() && !ret) {
+ /*
+ * \todo Avoid value-initialization of all elements of the
+ * vector.
+ */
+ thumbnail->resize(rawThumbnail.size());
+
+ /*
+ * Split planes manually as the encoder expects a vector of
+ * planes.
+ *
+ * \todo Pass a vector of planes directly to
+	 * Thumbnailer::createThumbnail() above and remove the manual
+ * planes split from here.
+ */
+ std::vector<Span<uint8_t>> thumbnailPlanes;
+ const PixelFormatInfo &formatNV12 = PixelFormatInfo::info(formats::NV12);
+ size_t yPlaneSize = formatNV12.planeSize(targetSize, 0);
+ size_t uvPlaneSize = formatNV12.planeSize(targetSize, 1);
+ thumbnailPlanes.push_back({ rawThumbnail.data(), yPlaneSize });
+ thumbnailPlanes.push_back({ rawThumbnail.data() + yPlaneSize, uvPlaneSize });
+
+ int jpeg_size = thumbnailEncoder_.encode(thumbnailPlanes,
+ *thumbnail, {}, quality);
+ thumbnail->resize(jpeg_size);
+
+ LOG(JPEG, Debug)
+ << "Thumbnail compress returned "
+ << jpeg_size << " bytes";
+ }
+}
+
+void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+ ASSERT(encoder_);
+
+ const FrameBuffer &source = *streamBuffer->srcBuffer;
+ CameraBuffer *destination = streamBuffer->dstBuffer.get();
+
+ ASSERT(destination->numPlanes() == 1);
+
+ const CameraMetadata &requestMetadata = streamBuffer->request->settings_;
+ CameraMetadata *resultMetadata = streamBuffer->request->resultMetadata_.get();
+ camera_metadata_ro_entry_t entry;
+ int ret;
+
+ /* Set EXIF metadata for various tags. */
+ Exif exif;
+ exif.setMake(cameraDevice_->maker());
+ exif.setModel(cameraDevice_->model());
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_ORIENTATION, &entry);
+
+ const uint32_t jpegOrientation = ret ? *entry.data.i32 : 0;
+ resultMetadata->addEntry(ANDROID_JPEG_ORIENTATION, jpegOrientation);
+ exif.setOrientation(jpegOrientation);
+
+ exif.setSize(streamSize_);
+ /*
+	 * Set the frame's EXIF timestamp to the time of encoding. The
+	 * EXIF timestamp only needs one-second precision, so this is
+	 * good enough.
+ */
+ exif.setTimestamp(std::time(nullptr), 0ms);
+
+ ret = resultMetadata->getEntry(ANDROID_SENSOR_EXPOSURE_TIME, &entry);
+ exif.setExposureTime(ret ? *entry.data.i64 : 0);
+ ret = requestMetadata.getEntry(ANDROID_LENS_APERTURE, &entry);
+ if (ret)
+ exif.setAperture(*entry.data.f);
+
+ ret = resultMetadata->getEntry(ANDROID_SENSOR_SENSITIVITY, &entry);
+ exif.setISO(ret ? *entry.data.i32 : 100);
+
+ exif.setFlash(Exif::Flash::FlashNotPresent);
+ exif.setWhiteBalance(Exif::WhiteBalance::Auto);
+
+ exif.setFocalLength(1.0);
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_TIMESTAMP, &entry);
+ if (ret) {
+ exif.setGPSDateTimestamp(*entry.data.i64);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_TIMESTAMP,
+ *entry.data.i64);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_SIZE, &entry);
+ if (ret) {
+ const int32_t *data = entry.data.i32;
+ Size thumbnailSize = { static_cast<uint32_t>(data[0]),
+ static_cast<uint32_t>(data[1]) };
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, &entry);
+ uint8_t quality = ret ? *entry.data.u8 : 95;
+ resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, quality);
+
+ if (thumbnailSize != Size(0, 0)) {
+ std::vector<unsigned char> thumbnail;
+ generateThumbnail(source, thumbnailSize, quality, &thumbnail);
+ if (!thumbnail.empty())
+ exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG);
+ }
+
+ resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, data, 2);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_COORDINATES, &entry);
+ if (ret) {
+ exif.setGPSLocation(entry.data.d);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_COORDINATES,
+ entry.data.d, 3);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD, &entry);
+ if (ret) {
+ std::string method(entry.data.u8, entry.data.u8 + entry.count);
+ exif.setGPSMethod(method);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD,
+ entry.data.u8, entry.count);
+ }
+
+ if (exif.generate() != 0)
+ LOG(JPEG, Error) << "Failed to generate valid EXIF data";
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_QUALITY, &entry);
+ const uint8_t quality = ret ? *entry.data.u8 : 95;
+ resultMetadata->addEntry(ANDROID_JPEG_QUALITY, quality);
+
+ int jpeg_size = encoder_->encode(streamBuffer, exif.data(), quality);
+ if (jpeg_size < 0) {
+ LOG(JPEG, Error) << "Failed to encode stream image";
+ processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+ return;
+ }
+
+ /* Fill in the JPEG blob header. */
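+	/*
+	 * Android locates the encoded image through the camera3_jpeg_blob
+	 * structure placed at the very end of the buffer, which carries the
+	 * blob ID and the actual JPEG size.
+	 */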
+ uint8_t *resultPtr = destination->plane(0).data()
+ + destination->jpegBufferSize(cameraDevice_->maxJpegBufferSize())
+ - sizeof(struct camera3_jpeg_blob);
+ auto *blob = reinterpret_cast<struct camera3_jpeg_blob *>(resultPtr);
+ blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+ blob->jpeg_size = jpeg_size;
+
+ /* Update the JPEG result Metadata. */
+ resultMetadata->addEntry(ANDROID_JPEG_SIZE, jpeg_size);
+ processComplete.emit(streamBuffer, PostProcessor::Status::Success);
+}
diff --git a/src/android/jpeg/post_processor_jpeg.h b/src/android/jpeg/post_processor_jpeg.h
new file mode 100644
index 00000000..6fe21457
--- /dev/null
+++ b/src/android/jpeg/post_processor_jpeg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG Post Processor
+ */
+
+#pragma once
+
+#include "../post_processor.h"
+#include "encoder_libjpeg.h"
+#include "thumbnailer.h"
+
+#include <libcamera/geometry.h>
+
+class CameraDevice;
+
+class PostProcessorJpeg : public PostProcessor
+{
+public:
+ PostProcessorJpeg(CameraDevice *const device);
+
+ int configure(const libcamera::StreamConfiguration &incfg,
+ const libcamera::StreamConfiguration &outcfg) override;
+ void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override;
+
+private:
+ void generateThumbnail(const libcamera::FrameBuffer &source,
+ const libcamera::Size &targetSize,
+ unsigned int quality,
+ std::vector<unsigned char> *thumbnail);
+
+ CameraDevice *const cameraDevice_;
+ std::unique_ptr<Encoder> encoder_;
+ libcamera::Size streamSize_;
+ EncoderLibJpeg thumbnailEncoder_;
+ Thumbnailer thumbnailer_;
+};
diff --git a/src/android/jpeg/thumbnailer.cpp b/src/android/jpeg/thumbnailer.cpp
new file mode 100644
index 00000000..adafc468
--- /dev/null
+++ b/src/android/jpeg/thumbnailer.cpp
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Simple image thumbnailer
+ */
+
+#include "thumbnailer.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(Thumbnailer)
+
+Thumbnailer::Thumbnailer()
+ : valid_(false)
+{
+}
+
+void Thumbnailer::configure(const Size &sourceSize, PixelFormat pixelFormat)
+{
+ sourceSize_ = sourceSize;
+ pixelFormat_ = pixelFormat;
+
+ if (pixelFormat_ != formats::NV12) {
+ LOG(Thumbnailer, Error)
+ << "Failed to configure: Pixel Format "
+ << pixelFormat_ << " unsupported.";
+ return;
+ }
+
+ valid_ = true;
+}
+
+void Thumbnailer::createThumbnail(const FrameBuffer &source,
+ const Size &targetSize,
+ std::vector<unsigned char> *destination)
+{
+ MappedFrameBuffer frame(&source, MappedFrameBuffer::MapFlag::Read);
+ if (!frame.isValid()) {
+ LOG(Thumbnailer, Error)
+ << "Failed to map FrameBuffer : "
+ << strerror(frame.error());
+ return;
+ }
+
+ if (!valid_) {
+		LOG(Thumbnailer, Error) << "Thumbnailer configuration is missing or invalid";
+ return;
+ }
+
+ const unsigned int sw = sourceSize_.width;
+ const unsigned int sh = sourceSize_.height;
+ const unsigned int tw = targetSize.width;
+ const unsigned int th = targetSize.height;
+
+ ASSERT(frame.planes().size() == 2);
+ ASSERT(tw % 2 == 0 && th % 2 == 0);
+
+ /* Image scaling block implementing nearest-neighbour algorithm. */
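+	/*
+	 * NV12 stores luma in the first plane and interleaved Cb/Cr samples
+	 * in the second. For each output line pair, (sh * y + th / 2) / th
+	 * selects the nearest source line with rounding, and the columns are
+	 * mapped the same way below.
+	 */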
+ unsigned char *src = frame.planes()[0].data();
+ unsigned char *srcC = frame.planes()[1].data();
+ unsigned char *srcCb, *srcCr;
+ unsigned char *dstY, *srcY;
+
+ size_t dstSize = (th * tw) + ((th / 2) * tw);
+ destination->resize(dstSize);
+ unsigned char *dst = destination->data();
+ unsigned char *dstC = dst + th * tw;
+
+ for (unsigned int y = 0; y < th; y += 2) {
+ unsigned int sourceY = (sh * y + th / 2) / th;
+
+ dstY = dst + y * tw;
+ srcY = src + sw * sourceY;
+ srcCb = srcC + (sourceY / 2) * sw + 0;
+ srcCr = srcC + (sourceY / 2) * sw + 1;
+
+ for (unsigned int x = 0; x < tw; x += 2) {
+ unsigned int sourceX = (sw * x + tw / 2) / tw;
+
+ dstY[x] = srcY[sourceX];
+ dstY[tw + x] = srcY[sw + sourceX];
+ dstY[x + 1] = srcY[sourceX + 1];
+ dstY[tw + x + 1] = srcY[sw + sourceX + 1];
+
+ dstC[(y / 2) * tw + x + 0] = srcCb[(sourceX / 2) * 2];
+ dstC[(y / 2) * tw + x + 1] = srcCr[(sourceX / 2) * 2];
+ }
+ }
+}
diff --git a/src/android/jpeg/thumbnailer.h b/src/android/jpeg/thumbnailer.h
new file mode 100644
index 00000000..1b836e59
--- /dev/null
+++ b/src/android/jpeg/thumbnailer.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Simple image thumbnailer
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/formats.h"
+
+class Thumbnailer
+{
+public:
+ Thumbnailer();
+
+ void configure(const libcamera::Size &sourceSize,
+ libcamera::PixelFormat pixelFormat);
+ void createThumbnail(const libcamera::FrameBuffer &source,
+ const libcamera::Size &targetSize,
+ std::vector<unsigned char> *dest);
+ const libcamera::PixelFormat &pixelFormat() const { return pixelFormat_; }
+
+private:
+ libcamera::PixelFormat pixelFormat_;
+ libcamera::Size sourceSize_;
+
+ bool valid_;
+};
diff --git a/src/android/meson.build b/src/android/meson.build
index 5a5a332e..7b226a4b 100644
--- a/src/android/meson.build
+++ b/src/android/meson.build
@@ -1,15 +1,55 @@
+# SPDX-License-Identifier: CC0-1.0
+
+android_deps = [
+ dependency('libexif', required : get_option('android')),
+ dependency('libjpeg', required : get_option('android')),
+ libcamera_private,
+ libyuv_dep,
+]
+
+android_enabled = true
+
+foreach dep : android_deps
+ if not dep.found()
+ android_enabled = false
+ subdir_done()
+ endif
+endforeach
+
android_hal_sources = files([
'camera3_hal.cpp',
- 'camera_hal_manager.cpp',
+ 'camera_capabilities.cpp',
'camera_device.cpp',
+ 'camera_hal_config.cpp',
+ 'camera_hal_manager.cpp',
'camera_metadata.cpp',
'camera_ops.cpp',
+ 'camera_request.cpp',
+ 'camera_stream.cpp',
+ 'hal_framebuffer.cpp',
+ 'yuv/post_processor_yuv.cpp'
])
+android_cpp_args = []
+
+subdir('cros')
+subdir('jpeg')
+subdir('mm')
+
android_camera_metadata_sources = files([
'metadata/camera_metadata.c',
])
android_camera_metadata = static_library('camera_metadata',
android_camera_metadata_sources,
+ c_args : '-Wno-shadow',
include_directories : android_includes)
+
+libcamera_hal = shared_library('libcamera-hal',
+ android_hal_sources,
+ name_prefix : '',
+ link_with : android_camera_metadata,
+ install : true,
+ cpp_args : android_cpp_args,
+ include_directories : android_includes,
+ dependencies : android_deps)
diff --git a/src/android/mm/cros_camera_buffer.cpp b/src/android/mm/cros_camera_buffer.cpp
new file mode 100644
index 00000000..e2a44a2a
--- /dev/null
+++ b/src/android/mm/cros_camera_buffer.cpp
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Chromium OS buffer backend using CameraBufferManager
+ */
+
+#include "../camera_buffer.h"
+
+#include <libcamera/base/log.h>
+
+#include "cros-camera/camera_buffer_manager.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+class CameraBuffer::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+ Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat, const Size &size,
+ int flags);
+ ~Private();
+
+ bool isValid() const { return registered_; }
+
+ unsigned int numPlanes() const;
+
+ Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+ void map();
+
+ cros::CameraBufferManager *bufferManager_;
+ buffer_handle_t handle_;
+ unsigned int numPlanes_;
+ bool mapped_;
+ bool registered_;
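+	/*
+	 * Single-planar buffers are mapped through Lock() into mem.addr,
+	 * while multi-planar buffers are mapped through LockYCbCr() into
+	 * mem.ycbcr. The valid member is determined by numPlanes_.
+	 */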
+ union {
+ void *addr;
+ android_ycbcr ycbcr;
+ } mem;
+};
+
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+ buffer_handle_t camera3Buffer,
+ [[maybe_unused]] PixelFormat pixelFormat,
+ [[maybe_unused]] const Size &size,
+ [[maybe_unused]] int flags)
+ : handle_(camera3Buffer), numPlanes_(0), mapped_(false),
+ registered_(false)
+{
+ bufferManager_ = cros::CameraBufferManager::GetInstance();
+ if (!bufferManager_) {
+ LOG(HAL, Fatal)
+ << "Failed to get cros CameraBufferManager instance";
+ return;
+ }
+
+ int ret = bufferManager_->Register(camera3Buffer);
+ if (ret) {
+ LOG(HAL, Error) << "Failed registering a buffer: " << ret;
+ return;
+ }
+
+ registered_ = true;
+ numPlanes_ = bufferManager_->GetNumPlanes(camera3Buffer);
+}
+
+CameraBuffer::Private::~Private()
+{
+ int ret;
+ if (mapped_) {
+ ret = bufferManager_->Unlock(handle_);
+ if (ret != 0)
+ LOG(HAL, Error) << "Failed to unlock buffer: "
+ << strerror(-ret);
+ }
+
+ if (registered_) {
+ ret = bufferManager_->Deregister(handle_);
+ if (ret != 0)
+ LOG(HAL, Error) << "Failed to deregister buffer: "
+ << strerror(-ret);
+ }
+}
+
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+ return bufferManager_->GetNumPlanes(handle_);
+}
+
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+ if (!mapped_)
+ map();
+ if (!mapped_)
+ return {};
+
+ void *addr;
+
+ switch (numPlanes()) {
+ case 1:
+ addr = mem.addr;
+ break;
+ default:
+ switch (plane) {
+ case 0:
+ addr = mem.ycbcr.y;
+ break;
+ case 1:
+ addr = mem.ycbcr.cb;
+ break;
+ case 2:
+ addr = mem.ycbcr.cr;
+ break;
+ }
+ }
+
+ return { static_cast<uint8_t *>(addr),
+ bufferManager_->GetPlaneSize(handle_, plane) };
+}
+
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneStride(handle_, plane);
+}
+
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneOffset(handle_, plane);
+}
+
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneSize(handle_, plane);
+}
+
+size_t CameraBuffer::Private::jpegBufferSize([[maybe_unused]] size_t maxJpegBufferSize) const
+{
+ return bufferManager_->GetPlaneSize(handle_, 0);
+}
+
+void CameraBuffer::Private::map()
+{
+ int ret;
+ switch (numPlanes_) {
+ case 1: {
+ ret = bufferManager_->Lock(handle_, 0, 0, 0, 0, 0, &mem.addr);
+ if (ret) {
+ LOG(HAL, Error) << "Single plane buffer mapping failed";
+ return;
+ }
+ break;
+ }
+ case 2:
+ case 3: {
+ ret = bufferManager_->LockYCbCr(handle_, 0, 0, 0, 0, 0,
+ &mem.ycbcr);
+ if (ret) {
+ LOG(HAL, Error) << "YCbCr buffer mapping failed";
+ return;
+ }
+ break;
+ }
+ default:
+ LOG(HAL, Error) << "Invalid number of planes: " << numPlanes_;
+ return;
+ }
+
+ mapped_ = true;
+}
+
+PUBLIC_CAMERA_BUFFER_IMPLEMENTATION
diff --git a/src/android/mm/cros_frame_buffer_allocator.cpp b/src/android/mm/cros_frame_buffer_allocator.cpp
new file mode 100644
index 00000000..264c0d48
--- /dev/null
+++ b/src/android/mm/cros_frame_buffer_allocator.cpp
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Allocate FrameBuffer for Chromium OS using CameraBufferManager
+ */
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include "libcamera/internal/framebuffer.h"
+
+#include "../camera_device.h"
+#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
+#include "cros-camera/camera_buffer_manager.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+class CrosFrameBufferData : public FrameBuffer::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
+
+public:
+ CrosFrameBufferData(cros::ScopedBufferHandle scopedHandle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), scopedHandle_(std::move(scopedHandle))
+ {
+ }
+
+private:
+ cros::ScopedBufferHandle scopedHandle_;
+};
+} /* namespace */
+
+class PlatformFrameBufferAllocator::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator)
+
+public:
+ Private([[maybe_unused]] CameraDevice *const cameraDevice)
+ {
+ }
+
+ std::unique_ptr<HALFrameBuffer>
+ allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+};
+
+std::unique_ptr<HALFrameBuffer>
+PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
+ const libcamera::Size &size,
+ uint32_t usage)
+{
+ cros::ScopedBufferHandle scopedHandle =
+ cros::CameraBufferManager::AllocateScopedBuffer(
+ size.width, size.height, halPixelFormat, usage);
+ if (!scopedHandle) {
+ LOG(HAL, Error) << "Failed to allocate buffer handle";
+ return nullptr;
+ }
+
+ buffer_handle_t handle = *scopedHandle;
+ SharedFD fd{ handle->data[0] };
+ if (!fd.isValid()) {
+ LOG(HAL, Fatal) << "Invalid fd";
+ return nullptr;
+ }
+
+ /* This code assumes all the planes are located in the same buffer. */
+ const size_t numPlanes = cros::CameraBufferManager::GetNumPlanes(handle);
+ std::vector<FrameBuffer::Plane> planes(numPlanes);
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ plane.fd = fd;
+ plane.offset = cros::CameraBufferManager::GetPlaneOffset(handle, i);
+ plane.length = cros::CameraBufferManager::GetPlaneSize(handle, i);
+ }
+
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<CrosFrameBufferData>(std::move(scopedHandle), planes), handle);
+}
+
+PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
diff --git a/src/android/mm/generic_camera_buffer.cpp b/src/android/mm/generic_camera_buffer.cpp
new file mode 100644
index 00000000..0ffcb445
--- /dev/null
+++ b/src/android/mm/generic_camera_buffer.cpp
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Generic Android frame buffer backend
+ */
+
+#include "../camera_buffer.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+class CameraBuffer::Private : public Extensible::Private,
+ public MappedBuffer
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+ Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat, const Size &size, int flags);
+ ~Private();
+
+ unsigned int numPlanes() const;
+
+ Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+ struct PlaneInfo {
+ unsigned int stride;
+ unsigned int offset;
+ unsigned int size;
+ };
+
+ void map();
+
+ int fd_;
+ int flags_;
+ off_t bufferLength_;
+ bool mapped_;
+ std::vector<PlaneInfo> planeInfo_;
+};
+
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+ buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat,
+ const Size &size, int flags)
+ : fd_(-1), flags_(flags), bufferLength_(-1), mapped_(false)
+{
+ error_ = 0;
+
+ const auto &info = PixelFormatInfo::info(pixelFormat);
+ if (!info.isValid()) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "Invalid pixel format: " << pixelFormat;
+ return;
+ }
+
+ /*
+ * As Android doesn't offer an API to query buffer layouts, assume for
+ * now that the buffer is backed by a single dmabuf, with planes being
+ * stored contiguously.
+ */
+ for (int i = 0; i < camera3Buffer->numFds; i++) {
+ if (camera3Buffer->data[i] == -1 || camera3Buffer->data[i] == fd_)
+ continue;
+
+ if (fd_ != -1) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "Discontiguous planes are not supported";
+ return;
+ }
+
+ fd_ = camera3Buffer->data[i];
+ }
+
+ if (fd_ == -1) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "No valid file descriptor";
+ return;
+ }
+
+ bufferLength_ = lseek(fd_, 0, SEEK_END);
+ if (bufferLength_ < 0) {
+ error_ = -errno;
+ LOG(HAL, Error) << "Failed to get buffer length";
+ return;
+ }
+
+ const unsigned int numPlanes = info.numPlanes();
+ planeInfo_.resize(numPlanes);
+
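+	/*
+	 * Compute plane offsets assuming tightly packed, contiguous planes.
+	 * As an illustration (assumed layout), a 1920x1080 NV12 buffer gets
+	 * a 2073600-byte luma plane at offset 0 followed by a 1036800-byte
+	 * CbCr plane at offset 2073600.
+	 */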
+ unsigned int offset = 0;
+ for (unsigned int i = 0; i < numPlanes; ++i) {
+ const unsigned int planeSize = info.planeSize(size, i);
+
+ planeInfo_[i].stride = info.stride(size.width, i, 1u);
+ planeInfo_[i].offset = offset;
+ planeInfo_[i].size = planeSize;
+
+ if (bufferLength_ < offset + planeSize) {
+ LOG(HAL, Error) << "Plane " << i << " is out of buffer:"
+ << " plane offset=" << offset
+ << ", plane size=" << planeSize
+ << ", buffer length=" << bufferLength_;
+ return;
+ }
+
+ offset += planeSize;
+ }
+}
+
+CameraBuffer::Private::~Private()
+{
+}
+
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+ return planeInfo_.size();
+}
+
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+ if (!mapped_)
+ map();
+ if (!mapped_)
+ return {};
+
+ return planes_[plane];
+}
+
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].stride;
+}
+
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].offset;
+}
+
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].size;
+}
+
+size_t CameraBuffer::Private::jpegBufferSize(size_t maxJpegBufferSize) const
+{
+ ASSERT(bufferLength_ >= 0);
+
+ return std::min<unsigned int>(bufferLength_, maxJpegBufferSize);
+}
+
+void CameraBuffer::Private::map()
+{
+ ASSERT(fd_ != -1);
+ ASSERT(bufferLength_ >= 0);
+
+ void *address = mmap(nullptr, bufferLength_, flags_, MAP_SHARED, fd_, 0);
+ if (address == MAP_FAILED) {
+ error_ = -errno;
+ LOG(HAL, Error) << "Failed to mmap plane";
+ return;
+ }
+ maps_.emplace_back(static_cast<uint8_t *>(address), bufferLength_);
+
+ planes_.reserve(planeInfo_.size());
+ for (const auto &info : planeInfo_) {
+ planes_.emplace_back(
+ static_cast<uint8_t *>(address) + info.offset, info.size);
+ }
+
+ mapped_ = true;
+}
+
+PUBLIC_CAMERA_BUFFER_IMPLEMENTATION
diff --git a/src/android/mm/generic_frame_buffer_allocator.cpp b/src/android/mm/generic_frame_buffer_allocator.cpp
new file mode 100644
index 00000000..79625a9a
--- /dev/null
+++ b/src/android/mm/generic_frame_buffer_allocator.cpp
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Allocate FrameBuffer using gralloc API
+ */
+
+#include <dlfcn.h>
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+
+#include <hardware/camera3.h>
+#include <hardware/gralloc.h>
+#include <hardware/hardware.h>
+
+#include "../camera_device.h"
+#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+class GenericFrameBufferData : public FrameBuffer::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
+
+public:
+ GenericFrameBufferData(struct alloc_device_t *allocDevice,
+ buffer_handle_t handle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), allocDevice_(allocDevice),
+ handle_(handle)
+ {
+ ASSERT(allocDevice_);
+ ASSERT(handle_);
+ }
+
+ ~GenericFrameBufferData() override
+ {
+ /*
+		 * allocDevice_ is used to destroy handle_. allocDevice_ is
+		 * owned by PlatformFrameBufferAllocator::Private, so this
+		 * GenericFrameBufferData instance must be destroyed before
+		 * the allocator is.
+		 *
+		 * \todo Consider managing alloc_device_t with std::shared_ptr
+		 * if this is difficult to maintain.
+		 *
+		 * \todo Thread safety of alloc_device_t is not documented.
+		 * Is it safe to call alloc() and free() in parallel?
+ */
+ allocDevice_->free(allocDevice_, handle_);
+ }
+
+private:
+ struct alloc_device_t *allocDevice_;
+ const buffer_handle_t handle_;
+};
+} /* namespace */
+
+class PlatformFrameBufferAllocator::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator)
+
+public:
+ Private(CameraDevice *const cameraDevice)
+ : cameraDevice_(cameraDevice),
+ hardwareModule_(nullptr),
+ allocDevice_(nullptr)
+ {
+ hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hardwareModule_);
+ ASSERT(hardwareModule_);
+ }
+
+ ~Private() override;
+
+ std::unique_ptr<HALFrameBuffer>
+ allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+
+private:
+ const CameraDevice *const cameraDevice_;
+ const struct hw_module_t *hardwareModule_;
+ struct alloc_device_t *allocDevice_;
+};
+
+PlatformFrameBufferAllocator::Private::~Private()
+{
+ if (allocDevice_)
+ gralloc_close(allocDevice_);
+ dlclose(hardwareModule_->dso);
+}
+
+std::unique_ptr<HALFrameBuffer>
+PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
+ const libcamera::Size &size,
+ uint32_t usage)
+{
+ if (!allocDevice_) {
+ int ret = gralloc_open(hardwareModule_, &allocDevice_);
+ if (ret) {
+ LOG(HAL, Fatal) << "gralloc_open() failed: " << ret;
+ return nullptr;
+ }
+ }
+
+ int stride = 0;
+ buffer_handle_t handle = nullptr;
+ int ret = allocDevice_->alloc(allocDevice_, size.width, size.height,
+ halPixelFormat, usage, &handle, &stride);
+ if (ret) {
+		LOG(HAL, Error) << "Failed buffer allocation: " << ret;
+ return nullptr;
+ }
+ if (!handle) {
+		LOG(HAL, Fatal) << "Invalid buffer_handle_t";
+ return nullptr;
+ }
+
+ /* This code assumes the planes are mapped consecutively. */
+ const libcamera::PixelFormat pixelFormat =
+ cameraDevice_->capabilities()->toPixelFormat(halPixelFormat);
+ const auto &info = PixelFormatInfo::info(pixelFormat);
+ std::vector<FrameBuffer::Plane> planes(info.numPlanes());
+
+ SharedFD fd{ handle->data[0] };
+ size_t offset = 0;
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ const size_t planeSize = info.planeSize(size.height, i, stride);
+
+ plane.fd = fd;
+ plane.offset = offset;
+ plane.length = planeSize;
+ offset += planeSize;
+ }
+
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<GenericFrameBufferData>(
+ allocDevice_, handle, planes),
+ handle);
+}
+
+PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
diff --git a/src/android/mm/libhardware_stub.c b/src/android/mm/libhardware_stub.c
new file mode 100644
index 00000000..28faa638
--- /dev/null
+++ b/src/android/mm/libhardware_stub.c
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright (C) 2023, Ideas on Board
+ *
+ * Android libhardware stub for test compilation
+ */
+
+#include <errno.h>
+
+#include <hardware/hardware.h>
+
+int hw_get_module(const char *id __attribute__((__unused__)),
+ const struct hw_module_t **module)
+{
+ *module = NULL;
+ return -ENOTSUP;
+}
diff --git a/src/android/mm/meson.build b/src/android/mm/meson.build
new file mode 100644
index 00000000..e3e0484c
--- /dev/null
+++ b/src/android/mm/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: CC0-1.0
+
+platform = get_option('android_platform')
+if platform == 'generic'
+ android_hal_sources += files(['generic_camera_buffer.cpp',
+ 'generic_frame_buffer_allocator.cpp'])
+ android_deps += [libdl]
+
+ libhardware = dependency('libhardware', required : false)
+ if libhardware.found()
+ android_deps += [libhardware]
+ else
+ android_hal_sources += files(['libhardware_stub.c'])
+ endif
+elif platform == 'cros'
+ android_hal_sources += files(['cros_camera_buffer.cpp',
+ 'cros_frame_buffer_allocator.cpp'])
+ android_deps += [dependency('libcros_camera')]
+endif
diff --git a/src/android/post_processor.h b/src/android/post_processor.h
new file mode 100644
index 00000000..b504a379
--- /dev/null
+++ b/src/android/post_processor.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * CameraStream Post Processing Interface
+ */
+
+#pragma once
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+#include "camera_buffer.h"
+#include "camera_request.h"
+
+class PostProcessor
+{
+public:
+ enum class Status {
+ Error,
+ Success
+ };
+
+ virtual ~PostProcessor() = default;
+
+ virtual int configure(const libcamera::StreamConfiguration &inCfg,
+ const libcamera::StreamConfiguration &outCfg) = 0;
+ virtual void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) = 0;
+
+ libcamera::Signal<Camera3RequestDescriptor::StreamBuffer *, Status> processComplete;
+};
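+
+/*
+ * Usage sketch, assuming a hypothetical Owner class with a
+ * bufferDone(streamBuffer, status) member function (illustrative only):
+ *
+ *   processor->processComplete.connect(owner, &Owner::bufferDone);
+ *   processor->configure(inCfg, outCfg);
+ *   processor->process(streamBuffer);
+ *
+ * Completion is signalled through processComplete once processing
+ * finishes, with Status::Success or Status::Error.
+ */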
diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
new file mode 100644
index 00000000..c998807b
--- /dev/null
+++ b/src/android/yuv/post_processor_yuv.cpp
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Post Processor using libyuv
+ */
+
+#include "post_processor_yuv.h"
+
+#include <libyuv/scale.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(YUV)
+
+int PostProcessorYuv::configure(const StreamConfiguration &inCfg,
+ const StreamConfiguration &outCfg)
+{
+ if (inCfg.pixelFormat != outCfg.pixelFormat) {
+ LOG(YUV, Error) << "Pixel format conversion is not supported"
+ << " (from " << inCfg.pixelFormat
+ << " to " << outCfg.pixelFormat << ")";
+ return -EINVAL;
+ }
+
+ if (inCfg.size < outCfg.size) {
+ LOG(YUV, Error) << "Up-scaling is not supported"
+ << " (from " << inCfg.size
+ << " to " << outCfg.size << ")";
+ return -EINVAL;
+ }
+
+ if (inCfg.pixelFormat != formats::NV12) {
+ LOG(YUV, Error) << "Unsupported format " << inCfg.pixelFormat
+ << " (only NV12 is supported)";
+ return -EINVAL;
+ }
+
+ calculateLengths(inCfg, outCfg);
+ return 0;
+}
+
+void PostProcessorYuv::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+ const FrameBuffer &source = *streamBuffer->srcBuffer;
+ CameraBuffer *destination = streamBuffer->dstBuffer.get();
+
+ if (!isValidBuffers(source, *destination)) {
+ processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+ return;
+ }
+
+ const MappedFrameBuffer sourceMapped(&source, MappedFrameBuffer::MapFlag::Read);
+ if (!sourceMapped.isValid()) {
+ LOG(YUV, Error) << "Failed to mmap camera frame buffer";
+ processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+ return;
+ }
+
+ int ret = libyuv::NV12Scale(sourceMapped.planes()[0].data(),
+ sourceStride_[0],
+ sourceMapped.planes()[1].data(),
+ sourceStride_[1],
+ sourceSize_.width, sourceSize_.height,
+ destination->plane(0).data(),
+ destinationStride_[0],
+ destination->plane(1).data(),
+ destinationStride_[1],
+ destinationSize_.width,
+ destinationSize_.height,
+ libyuv::FilterMode::kFilterBilinear);
+ if (ret) {
+ LOG(YUV, Error) << "Failed NV12 scaling: " << ret;
+ processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+ return;
+ }
+
+ processComplete.emit(streamBuffer, PostProcessor::Status::Success);
+}
+
+bool PostProcessorYuv::isValidBuffers(const FrameBuffer &source,
+ const CameraBuffer &destination) const
+{
+ if (source.planes().size() != 2) {
+ LOG(YUV, Error) << "Invalid number of source planes: "
+ << source.planes().size();
+ return false;
+ }
+ if (destination.numPlanes() != 2) {
+ LOG(YUV, Error) << "Invalid number of destination planes: "
+ << destination.numPlanes();
+ return false;
+ }
+
+ if (source.planes()[0].length < sourceLength_[0] ||
+ source.planes()[1].length < sourceLength_[1]) {
+ LOG(YUV, Error)
+			<< "The source plane lengths are too small, actual size: {"
+ << source.planes()[0].length << ", "
+ << source.planes()[1].length
+ << "}, expected size: {"
+ << sourceLength_[0] << ", "
+ << sourceLength_[1] << "}";
+ return false;
+ }
+ if (destination.plane(0).size() < destinationLength_[0] ||
+ destination.plane(1).size() < destinationLength_[1]) {
+ LOG(YUV, Error)
+			<< "The destination plane lengths are too small, actual size: {"
+			<< destination.plane(0).size() << ", "
+			<< destination.plane(1).size()
+			<< "}, expected size: {"
+			<< destinationLength_[0] << ", "
+			<< destinationLength_[1] << "}";
+ return false;
+ }
+
+ return true;
+}
+
+void PostProcessorYuv::calculateLengths(const StreamConfiguration &inCfg,
+ const StreamConfiguration &outCfg)
+{
+ sourceSize_ = inCfg.size;
+ destinationSize_ = outCfg.size;
+
+ const PixelFormatInfo &nv12Info = PixelFormatInfo::info(formats::NV12);
+ for (unsigned int i = 0; i < 2; i++) {
+ sourceStride_[i] = inCfg.stride;
+ destinationStride_[i] = nv12Info.stride(destinationSize_.width, i, 1);
+
+ sourceLength_[i] = nv12Info.planeSize(sourceSize_.height, i,
+ sourceStride_[i]);
+ destinationLength_[i] = nv12Info.planeSize(destinationSize_.height, i,
+ destinationStride_[i]);
+ }
+}
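+
+/*
+ * As an illustration (assumed values, not taken from any particular
+ * pipeline): scaling NV12 from 1920x1080 with a 1920-byte stride down to
+ * 1280x720 gives source plane lengths of 2073600 and 1036800 bytes, a
+ * 1280-byte destination stride for both planes, and destination plane
+ * lengths of 921600 and 460800 bytes.
+ */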
diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
new file mode 100644
index 00000000..ed7bb1fb
--- /dev/null
+++ b/src/android/yuv/post_processor_yuv.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Post Processor using libyuv
+ */
+
+#pragma once
+
+#include "../post_processor.h"
+
+#include <libcamera/geometry.h>
+
+class PostProcessorYuv : public PostProcessor
+{
+public:
+ PostProcessorYuv() = default;
+
+ int configure(const libcamera::StreamConfiguration &incfg,
+ const libcamera::StreamConfiguration &outcfg) override;
+ void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override;
+
+private:
+ bool isValidBuffers(const libcamera::FrameBuffer &source,
+ const CameraBuffer &destination) const;
+ void calculateLengths(const libcamera::StreamConfiguration &inCfg,
+ const libcamera::StreamConfiguration &outCfg);
+
+ libcamera::Size sourceSize_;
+ libcamera::Size destinationSize_;
+ unsigned int sourceLength_[2] = {};
+ unsigned int destinationLength_[2] = {};
+ unsigned int sourceStride_[2] = {};
+ unsigned int destinationStride_[2] = {};
+};
diff --git a/src/apps/cam/camera_session.cpp b/src/apps/cam/camera_session.cpp
new file mode 100644
index 00000000..9e934827
--- /dev/null
+++ b/src/apps/cam/camera_session.cpp
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Camera capture session
+ */
+
+#include <iomanip>
+#include <iostream>
+#include <limits.h>
+#include <sstream>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "../common/event_loop.h"
+#include "../common/stream_options.h"
+
+#include "camera_session.h"
+#include "capture_script.h"
+#include "file_sink.h"
+#ifdef HAVE_KMS
+#include "kms_sink.h"
+#endif
+#include "main.h"
+#ifdef HAVE_SDL
+#include "sdl_sink.h"
+#endif
+
+using namespace libcamera;
+
+CameraSession::CameraSession(CameraManager *cm,
+ const std::string &cameraId,
+ unsigned int cameraIndex,
+ const OptionsParser::Options &options)
+ : options_(options), cameraIndex_(cameraIndex), last_(0),
+ queueCount_(0), captureCount_(0), captureLimit_(0),
+ printMetadata_(false)
+{
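+	/*
+	 * Select the camera by 1-based numerical index if the ID parses as
+	 * a number, and fall back to a lookup by ID string otherwise.
+	 */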
+ char *endptr;
+ unsigned long index = strtoul(cameraId.c_str(), &endptr, 10);
+
+ if (*endptr == '\0' && index > 0) {
+ auto cameras = cm->cameras();
+ if (index <= cameras.size())
+ camera_ = cameras[index - 1];
+ }
+
+ if (!camera_)
+ camera_ = cm->get(cameraId);
+
+ if (!camera_) {
+ std::cerr << "Camera " << cameraId << " not found" << std::endl;
+ return;
+ }
+
+ if (camera_->acquire()) {
+ std::cerr << "Failed to acquire camera " << cameraId
+ << std::endl;
+ return;
+ }
+
+ std::vector<StreamRole> roles = StreamKeyValueParser::roles(options_[OptStream]);
+
+ std::unique_ptr<CameraConfiguration> config =
+ camera_->generateConfiguration(roles);
+ if (!config || config->size() != roles.size()) {
+ std::cerr << "Failed to get default stream configuration"
+ << std::endl;
+ return;
+ }
+
+ if (options_.isSet(OptOrientation)) {
+ std::string orientOpt = options_[OptOrientation].toString();
+ static const std::map<std::string, libcamera::Orientation> orientations{
+ { "rot0", libcamera::Orientation::Rotate0 },
+ { "rot180", libcamera::Orientation::Rotate180 },
+ { "mirror", libcamera::Orientation::Rotate0Mirror },
+ { "flip", libcamera::Orientation::Rotate180Mirror },
+ };
+
+ auto orientation = orientations.find(orientOpt);
+ if (orientation == orientations.end()) {
+ std::cerr << "Invalid orientation " << orientOpt << std::endl;
+ return;
+ }
+
+ config->orientation = orientation->second;
+ }
+
+ /* Apply configuration if explicitly requested. */
+ if (StreamKeyValueParser::updateConfiguration(config.get(),
+ options_[OptStream])) {
+ std::cerr << "Failed to update configuration" << std::endl;
+ return;
+ }
+
+ bool strictFormats = options_.isSet(OptStrictFormats);
+
+#ifdef HAVE_KMS
+ if (options_.isSet(OptDisplay)) {
+ if (options_.isSet(OptFile)) {
+ std::cerr << "--display and --file options are mutually exclusive"
+ << std::endl;
+ return;
+ }
+
+ if (roles.size() != 1) {
+ std::cerr << "Display doesn't support multiple streams"
+ << std::endl;
+ return;
+ }
+
+ if (roles[0] != StreamRole::Viewfinder) {
+ std::cerr << "Display requires a viewfinder stream"
+ << std::endl;
+ return;
+ }
+ }
+#endif
+
+ if (options_.isSet(OptCaptureScript)) {
+ std::string scriptName = options_[OptCaptureScript].toString();
+ script_ = std::make_unique<CaptureScript>(camera_, scriptName);
+ if (!script_->valid()) {
+ std::cerr << "Invalid capture script '" << scriptName
+ << "'" << std::endl;
+ return;
+ }
+ }
+
+ switch (config->validate()) {
+ case CameraConfiguration::Valid:
+ break;
+
+ case CameraConfiguration::Adjusted:
+ if (strictFormats) {
+ std::cout << "Adjusting camera configuration disallowed by --strict-formats argument"
+ << std::endl;
+ return;
+ }
+ std::cout << "Camera configuration adjusted" << std::endl;
+ break;
+
+ case CameraConfiguration::Invalid:
+ std::cout << "Camera configuration invalid" << std::endl;
+ return;
+ }
+
+ config_ = std::move(config);
+}
+
+CameraSession::~CameraSession()
+{
+ if (camera_)
+ camera_->release();
+}
+
+void CameraSession::listControls() const
+{
+ for (const auto &[id, info] : camera_->controls()) {
+ std::stringstream io;
+ io << "["
+ << (id->isInput() ? "in" : " ")
+ << (id->isOutput() ? "out" : " ")
+ << "] ";
+
+ if (info.values().empty()) {
+ std::cout << "Control: " << io.str()
+ << id->vendor() << "::" << id->name() << ": "
+ << info.toString() << std::endl;
+ } else {
+ std::cout << "Control: " << io.str()
+ << id->vendor() << "::" << id->name() << ":"
+ << std::endl;
+ for (const auto &value : info.values()) {
+ int32_t val = value.get<int32_t>();
+ const auto &it = id->enumerators().find(val);
+
+ std::cout << " - ";
+ if (it == id->enumerators().end())
+ std::cout << "UNKNOWN";
+ else
+ std::cout << it->second;
+ std::cout << " (" << val << ")" << std::endl;
+ }
+ }
+
+ if (id->isArray()) {
+ std::size_t size = id->size();
+
+ std::cout << " Size: ";
+ if (size == std::numeric_limits<std::size_t>::max())
+ std::cout << "n";
+ else
+ std::cout << std::to_string(size);
+ std::cout << std::endl;
+ }
+ }
+}
+
+void CameraSession::listProperties() const
+{
+ for (const auto &[key, value] : camera_->properties()) {
+ const ControlId *id = properties::properties.at(key);
+
+ std::cout << "Property: " << id->name() << " = "
+ << value.toString() << std::endl;
+ }
+}
+
+void CameraSession::infoConfiguration() const
+{
+ unsigned int index = 0;
+ for (const StreamConfiguration &cfg : *config_) {
+ std::cout << index << ": " << cfg.toString() << std::endl;
+
+ const StreamFormats &formats = cfg.formats();
+ for (PixelFormat pixelformat : formats.pixelformats()) {
+ std::cout << " * Pixelformat: "
+ << pixelformat << " "
+ << formats.range(pixelformat).toString()
+ << std::endl;
+
+ for (const Size &size : formats.sizes(pixelformat))
+ std::cout << " - " << size << std::endl;
+ }
+
+ index++;
+ }
+}
+
+int CameraSession::start()
+{
+ int ret;
+
+ queueCount_ = 0;
+ captureCount_ = 0;
+ captureLimit_ = options_[OptCapture].toInteger();
+ printMetadata_ = options_.isSet(OptMetadata);
+
+ ret = camera_->configure(config_.get());
+ if (ret < 0) {
+ std::cout << "Failed to configure camera" << std::endl;
+ return ret;
+ }
+
+ streamNames_.clear();
+ for (unsigned int index = 0; index < config_->size(); ++index) {
+ StreamConfiguration &cfg = config_->at(index);
+ streamNames_[cfg.stream()] = "cam" + std::to_string(cameraIndex_)
+ + "-stream" + std::to_string(index);
+ }
+
+ camera_->requestCompleted.connect(this, &CameraSession::requestComplete);
+
+#ifdef HAVE_KMS
+ if (options_.isSet(OptDisplay))
+ sink_ = std::make_unique<KMSSink>(options_[OptDisplay].toString());
+#endif
+
+#ifdef HAVE_SDL
+ if (options_.isSet(OptSDL))
+ sink_ = std::make_unique<SDLSink>();
+#endif
+
+ if (options_.isSet(OptFile)) {
+ std::unique_ptr<FileSink> sink =
+ std::make_unique<FileSink>(camera_.get(), streamNames_);
+
+ if (!options_[OptFile].toString().empty()) {
+ ret = sink->setFilePattern(options_[OptFile]);
+ if (ret)
+ return ret;
+ }
+
+ sink_ = std::move(sink);
+ }
+
+ if (sink_) {
+ ret = sink_->configure(*config_);
+ if (ret < 0) {
+ std::cout << "Failed to configure frame sink"
+ << std::endl;
+ return ret;
+ }
+
+ sink_->requestProcessed.connect(this, &CameraSession::sinkRelease);
+ }
+
+ allocator_ = std::make_unique<FrameBufferAllocator>(camera_);
+
+ return startCapture();
+}
+
+void CameraSession::stop()
+{
+ int ret = camera_->stop();
+ if (ret)
+ std::cout << "Failed to stop capture" << std::endl;
+
+ if (sink_) {
+ ret = sink_->stop();
+ if (ret)
+ std::cout << "Failed to stop frame sink" << std::endl;
+ }
+
+ sink_.reset();
+
+ requests_.clear();
+
+ allocator_.reset();
+}
+
+int CameraSession::startCapture()
+{
+ int ret;
+
+ /* Identify the stream with the least number of buffers. */
+ unsigned int nbuffers = UINT_MAX;
+ for (StreamConfiguration &cfg : *config_) {
+ ret = allocator_->allocate(cfg.stream());
+ if (ret < 0) {
+ std::cerr << "Can't allocate buffers" << std::endl;
+ return -ENOMEM;
+ }
+
+ unsigned int allocated = allocator_->buffers(cfg.stream()).size();
+ nbuffers = std::min(nbuffers, allocated);
+ }
+
+ /*
+	 * TODO: Make the cam tool smarter to support still capture, for
+	 * example by pushing a button. For now run all streams all the time.
+ */
+
+ for (unsigned int i = 0; i < nbuffers; i++) {
+ std::unique_ptr<Request> request = camera_->createRequest();
+ if (!request) {
+ std::cerr << "Can't create request" << std::endl;
+ return -ENOMEM;
+ }
+
+ for (StreamConfiguration &cfg : *config_) {
+ Stream *stream = cfg.stream();
+ const std::vector<std::unique_ptr<FrameBuffer>> &buffers =
+ allocator_->buffers(stream);
+ const std::unique_ptr<FrameBuffer> &buffer = buffers[i];
+
+ ret = request->addBuffer(stream, buffer.get());
+ if (ret < 0) {
+ std::cerr << "Can't set buffer for request"
+ << std::endl;
+ return ret;
+ }
+
+ if (sink_)
+ sink_->mapBuffer(buffer.get());
+ }
+
+ requests_.push_back(std::move(request));
+ }
+
+ if (sink_) {
+ ret = sink_->start();
+ if (ret) {
+ std::cout << "Failed to start frame sink" << std::endl;
+ return ret;
+ }
+ }
+
+ ret = camera_->start();
+ if (ret) {
+ std::cout << "Failed to start capture" << std::endl;
+ if (sink_)
+ sink_->stop();
+ return ret;
+ }
+
+ for (std::unique_ptr<Request> &request : requests_) {
+ ret = queueRequest(request.get());
+ if (ret < 0) {
+ std::cerr << "Can't queue request" << std::endl;
+ camera_->stop();
+ if (sink_)
+ sink_->stop();
+ return ret;
+ }
+ }
+
+ if (captureLimit_)
+ std::cout << "cam" << cameraIndex_
+ << ": Capture " << captureLimit_ << " frames"
+ << std::endl;
+ else
+ std::cout << "cam" << cameraIndex_
+ << ": Capture until user interrupts by SIGINT"
+ << std::endl;
+
+ return 0;
+}
+
+int CameraSession::queueRequest(Request *request)
+{
+ if (captureLimit_ && queueCount_ >= captureLimit_)
+ return 0;
+
+ if (script_)
+ request->controls() = script_->frameControls(queueCount_);
+
+ queueCount_++;
+
+ return camera_->queueRequest(request);
+}
+
+void CameraSession::requestComplete(Request *request)
+{
+ if (request->status() == Request::RequestCancelled)
+ return;
+
+ /*
+ * Defer processing of the completed request to the event loop, to avoid
+ * blocking the camera manager thread.
+ */
+ EventLoop::instance()->callLater([this, request]() { processRequest(request); });
+}
+
+void CameraSession::processRequest(Request *request)
+{
+ /*
+ * If we've reached the capture limit, we're done. This doesn't
+ * duplicate the check below that emits the captureDone signal, as this
+ * function will be called for each request still in flight after the
+ * capture limit is reached and we don't want to emit the signal every
+ * single time.
+ */
+ if (captureLimit_ && captureCount_ >= captureLimit_)
+ return;
+
+ const Request::BufferMap &buffers = request->buffers();
+
+ /*
+ * Compute the frame rate. The timestamp is arbitrarily retrieved from
+ * the first buffer, as all buffers should have matching timestamps.
+ */
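+	/* Timestamps are in nanoseconds: a 33 ms frame interval is ~30 fps. */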
+ uint64_t ts = buffers.begin()->second->metadata().timestamp;
+ double fps = ts - last_;
+ fps = last_ != 0 && fps ? 1000000000.0 / fps : 0.0;
+ last_ = ts;
+
+ bool requeue = true;
+
+ std::stringstream info;
+ info << ts / 1000000000 << "."
+ << std::setw(6) << std::setfill('0') << ts / 1000 % 1000000
+ << " (" << std::fixed << std::setprecision(2) << fps << " fps)";
+
+ for (const auto &[stream, buffer] : buffers) {
+ const FrameMetadata &metadata = buffer->metadata();
+
+ info << " " << streamNames_[stream]
+ << " seq: " << std::setw(6) << std::setfill('0') << metadata.sequence
+ << " bytesused: ";
+
+ unsigned int nplane = 0;
+ for (const FrameMetadata::Plane &plane : metadata.planes()) {
+ info << plane.bytesused;
+ if (++nplane < metadata.planes().size())
+ info << "/";
+ }
+ }
+
+ if (sink_) {
+ if (!sink_->processRequest(request))
+ requeue = false;
+ }
+
+ std::cout << info.str() << std::endl;
+
+ if (printMetadata_) {
+ const ControlList &requestMetadata = request->metadata();
+ for (const auto &[key, value] : requestMetadata) {
+ const ControlId *id = controls::controls.at(key);
+ std::cout << "\t" << id->name() << " = "
+ << value.toString() << std::endl;
+ }
+ }
+
+ /*
+ * Notify the user that capture is complete if the limit has just been
+ * reached.
+ */
+ captureCount_++;
+ if (captureLimit_ && captureCount_ >= captureLimit_) {
+ captureDone.emit();
+ return;
+ }
+
+ /*
+	 * If the frame sink holds on to the request, we'll requeue it later
+	 * in the complete handler.
+ */
+ if (!requeue)
+ return;
+
+ request->reuse(Request::ReuseBuffers);
+ queueRequest(request);
+}
+
+void CameraSession::sinkRelease(Request *request)
+{
+ request->reuse(Request::ReuseBuffers);
+ queueRequest(request);
+}
diff --git a/src/apps/cam/camera_session.h b/src/apps/cam/camera_session.h
new file mode 100644
index 00000000..4442fd9b
--- /dev/null
+++ b/src/apps/cam/camera_session.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Camera capture session
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/framebuffer_allocator.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include "../common/options.h"
+
+class CaptureScript;
+class FrameSink;
+
+class CameraSession
+{
+public:
+ CameraSession(libcamera::CameraManager *cm,
+ const std::string &cameraId, unsigned int cameraIndex,
+ const OptionsParser::Options &options);
+ ~CameraSession();
+
+ bool isValid() const { return config_ != nullptr; }
+ const OptionsParser::Options &options() { return options_; }
+
+ libcamera::Camera *camera() { return camera_.get(); }
+ libcamera::CameraConfiguration *config() { return config_.get(); }
+
+ void listControls() const;
+ void listProperties() const;
+ void infoConfiguration() const;
+
+ int start();
+ void stop();
+
+ libcamera::Signal<> captureDone;
+
+private:
+ int startCapture();
+
+ int queueRequest(libcamera::Request *request);
+ void requestComplete(libcamera::Request *request);
+ void processRequest(libcamera::Request *request);
+ void sinkRelease(libcamera::Request *request);
+
+ const OptionsParser::Options &options_;
+ std::shared_ptr<libcamera::Camera> camera_;
+ std::unique_ptr<libcamera::CameraConfiguration> config_;
+
+ std::unique_ptr<CaptureScript> script_;
+
+ std::map<const libcamera::Stream *, std::string> streamNames_;
+ std::unique_ptr<FrameSink> sink_;
+ unsigned int cameraIndex_;
+
+ uint64_t last_;
+
+ unsigned int queueCount_;
+ unsigned int captureCount_;
+ unsigned int captureLimit_;
+ bool printMetadata_;
+
+ std::unique_ptr<libcamera::FrameBufferAllocator> allocator_;
+ std::vector<std::unique_ptr<libcamera::Request>> requests_;
+};
diff --git a/src/apps/cam/capture-script.yaml b/src/apps/cam/capture-script.yaml
new file mode 100644
index 00000000..7118865e
--- /dev/null
+++ b/src/apps/cam/capture-script.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: CC0-1.0
+
+# Capture script example
+#
+# A capture script allows a list of controls and their values to be
+# associated with frame numbers.
+#
+# The script allows defining a list of frames associated with controls
+# and an optional list of properties that can control the script behaviour.
+
+# properties:
+# # Repeat the controls every 'idx' frames.
+# - loop: idx
+#
+# # List of frame numbers, each with an associated list of controls to be applied
+# frames:
+# - frame-number:
+# Control1: value1
+# Control2: value2
+
+# \todo Formally define the capture script structure with a schema
+
+# Notes:
+# - Controls have to be specified by name, as defined in the
+# libcamera::controls:: enumeration
+# - Controls not supported by the camera in use cause the script to be
+#   rejected
+# - Frame numbers shall be monotonically increasing, gaps are allowed
+# - If a loop limit is specified, frame numbers in the 'frames' list shall be
+#   less than the loop limit
+
+# Example: Turn brightness up and down every 460 frames
+
+properties:
+ - loop: 460
+
+frames:
+ - 0:
+ Brightness: 0.0
+
+ - 40:
+ Brightness: 0.2
+
+ - 80:
+ Brightness: 0.4
+
+ - 120:
+ Brightness: 0.8
+
+ - 160:
+ Brightness: 0.4
+
+ - 200:
+ Brightness: 0.2
+
+ - 240:
+ Brightness: 0.0
+
+ - 280:
+ Brightness: -0.2
+
+ - 300:
+ Brightness: -0.4
+
+ - 340:
+ Brightness: -0.8
+
+ - 380:
+ Brightness: -0.4
+
+ - 420:
+ Brightness: -0.2
diff --git a/src/apps/cam/capture_script.cpp b/src/apps/cam/capture_script.cpp
new file mode 100644
index 00000000..fc1dfa75
--- /dev/null
+++ b/src/apps/cam/capture_script.cpp
@@ -0,0 +1,662 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * Capture session configuration script
+ */
+
+#include "capture_script.h"
+
+#include <iostream>
+#include <stdio.h>
+#include <stdlib.h>
+
+using namespace libcamera;
+
+CaptureScript::CaptureScript(std::shared_ptr<Camera> camera,
+ const std::string &fileName)
+ : camera_(camera), loop_(0), valid_(false)
+{
+ FILE *fh = fopen(fileName.c_str(), "r");
+ if (!fh) {
+ int ret = -errno;
+ std::cerr << "Failed to open capture script " << fileName
+ << ": " << strerror(-ret) << std::endl;
+ return;
+ }
+
+ /*
+ * Map the camera's controls to their name so that they can be
+ * easily identified when parsing the script file.
+ */
+ for (const auto &[control, info] : camera_->controls())
+ controls_[control->name()] = control;
+
+ int ret = parseScript(fh);
+ fclose(fh);
+ if (ret)
+ return;
+
+ valid_ = true;
+}
+
+/* Retrieve the control list associated with a frame number. */
+const ControlList &CaptureScript::frameControls(unsigned int frame)
+{
+ static ControlList controls{};
+ unsigned int idx = frame;
+
+ /* If we loop, repeat the controls every 'loop_' frames. */
+ if (loop_)
+ idx = frame % loop_;
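+ /* For instance, with a loop limit of 460, frame 500 maps to index 40. */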
+
+ auto it = frameControls_.find(idx);
+ if (it == frameControls_.end())
+ return controls;
+
+ return it->second;
+}
+
+CaptureScript::EventPtr CaptureScript::nextEvent(yaml_event_type_t expectedType)
+{
+ EventPtr event(new yaml_event_t);
+
+ if (!yaml_parser_parse(&parser_, event.get()))
+ return nullptr;
+
+ if (expectedType != YAML_NO_EVENT && !checkEvent(event, expectedType))
+ return nullptr;
+
+ return event;
+}
+
+bool CaptureScript::checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const
+{
+ if (event->type != expectedType) {
+ std::cerr << "Capture script error on line " << event->start_mark.line
+ << " column " << event->start_mark.column << ": "
+ << "Expected " << eventTypeName(expectedType)
+ << " event, got " << eventTypeName(event->type)
+ << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+std::string CaptureScript::eventScalarValue(const EventPtr &event)
+{
+ return std::string(reinterpret_cast<char *>(event->data.scalar.value),
+ event->data.scalar.length);
+}
+
+std::string CaptureScript::eventTypeName(yaml_event_type_t type)
+{
+ static const std::map<yaml_event_type_t, std::string> typeNames = {
+ { YAML_STREAM_START_EVENT, "stream-start" },
+ { YAML_STREAM_END_EVENT, "stream-end" },
+ { YAML_DOCUMENT_START_EVENT, "document-start" },
+ { YAML_DOCUMENT_END_EVENT, "document-end" },
+ { YAML_ALIAS_EVENT, "alias" },
+ { YAML_SCALAR_EVENT, "scalar" },
+ { YAML_SEQUENCE_START_EVENT, "sequence-start" },
+ { YAML_SEQUENCE_END_EVENT, "sequence-end" },
+ { YAML_MAPPING_START_EVENT, "mapping-start" },
+ { YAML_MAPPING_END_EVENT, "mapping-end" },
+ };
+
+ auto it = typeNames.find(type);
+ if (it == typeNames.end())
+ return "[type " + std::to_string(type) + "]";
+
+ return it->second;
+}
+
+int CaptureScript::parseScript(FILE *script)
+{
+ int ret = yaml_parser_initialize(&parser_);
+ if (!ret) {
+ std::cerr << "Failed to initialize yaml parser" << std::endl;
+ return -EINVAL;
+ }
+
+ /* Delete the parser upon function exit. */
+ struct ParserDeleter {
+ ParserDeleter(yaml_parser_t *parser) : parser_(parser) { }
+ ~ParserDeleter() { yaml_parser_delete(parser_); }
+ yaml_parser_t *parser_;
+ } deleter(&parser_);
+
+ yaml_parser_set_input_file(&parser_, script);
+
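+ /*
+ * A script like the capture-script.yaml example produces the event
+ * prologue stream-start, document-start and mapping-start before the
+ * top-level sections appear as scalar keys.
+ */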
+ EventPtr event = nextEvent(YAML_STREAM_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ event = nextEvent(YAML_DOCUMENT_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_MAPPING_END_EVENT)
+ return 0;
+
+ if (!checkEvent(event, YAML_SCALAR_EVENT))
+ return -EINVAL;
+
+ std::string section = eventScalarValue(event);
+
+ if (section == "properties") {
+ ret = parseProperties();
+ if (ret)
+ return ret;
+ } else if (section == "frames") {
+ ret = parseFrames();
+ if (ret)
+ return ret;
+ } else {
+ std::cerr << "Unsupported section '" << section << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+ }
+}
+
+int CaptureScript::parseProperty()
+{
+ EventPtr event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ std::string prop = parseScalar();
+ if (prop.empty())
+ return -EINVAL;
+
+ if (prop == "loop") {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return -EINVAL;
+
+ loop_ = atoi(value.c_str());
+ if (!loop_) {
+ std::cerr << "Invalid loop limit '" << loop_ << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+ } else {
+ std::cerr << "Unsupported property '" << prop << "'" << std::endl;
+ return -EINVAL;
+ }
+
+ event = nextEvent(YAML_MAPPING_END_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ return 0;
+}
+
+int CaptureScript::parseProperties()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ if (event->type == YAML_SEQUENCE_END_EVENT)
+ return 0;
+
+ int ret = parseProperty();
+ if (ret)
+ return ret;
+
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+ }
+}
+
+int CaptureScript::parseFrames()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_SEQUENCE_END_EVENT)
+ return 0;
+
+ int ret = parseFrame(std::move(event));
+ if (ret)
+ return ret;
+ }
+}
+
+int CaptureScript::parseFrame(EventPtr event)
+{
+ if (!checkEvent(event, YAML_MAPPING_START_EVENT))
+ return -EINVAL;
+
+ std::string key = parseScalar();
+ if (key.empty())
+ return -EINVAL;
+
+ unsigned int frameId = atoi(key.c_str());
+ if (loop_ && frameId >= loop_) {
+ std::cerr
+ << "Frame id (" << frameId << ") shall be smaller than"
+ << "loop limit (" << loop_ << ")" << std::endl;
+ return -EINVAL;
+ }
+
+ event = nextEvent(YAML_MAPPING_START_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ ControlList controls{};
+
+ while (1) {
+ event = nextEvent();
+ if (!event)
+ return -EINVAL;
+
+ if (event->type == YAML_MAPPING_END_EVENT)
+ break;
+
+ int ret = parseControl(std::move(event), controls);
+ if (ret)
+ return ret;
+ }
+
+ frameControls_[frameId] = std::move(controls);
+
+ event = nextEvent(YAML_MAPPING_END_EVENT);
+ if (!event)
+ return -EINVAL;
+
+ return 0;
+}
+
+int CaptureScript::parseControl(EventPtr event, ControlList &controls)
+{
+ /* We expect a value after a key. */
+ std::string name = eventScalarValue(event);
+ if (name.empty())
+ return -EINVAL;
+
+ /* Fail if the camera does not support the control. */
+ auto it = controls_.find(name);
+ if (it == controls_.end()) {
+ std::cerr << "Unsupported control '" << name << "'" << std::endl;
+ return -EINVAL;
+ }
+
+ const ControlId *controlId = it->second;
+
+ ControlValue val = unpackControl(controlId);
+ if (val.isNone()) {
+ std::cerr << "Error unpacking control '" << name << "'"
+ << std::endl;
+ return -EINVAL;
+ }
+
+ controls.set(controlId->id(), val);
+
+ return 0;
+}
+
+std::string CaptureScript::parseScalar()
+{
+ EventPtr event = nextEvent(YAML_SCALAR_EVENT);
+ if (!event)
+ return "";
+
+ return eventScalarValue(event);
+}
+
+ControlValue CaptureScript::parseRectangles()
+{
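+ /* Rectangles are expressed as [x, y, width, height], e.g. [0, 0, 640, 480]. */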
+ std::vector<libcamera::Rectangle> rectangles;
+
+ std::vector<std::vector<std::string>> arrays = parseArrays();
+ if (arrays.empty())
+ return {};
+
+ for (const std::vector<std::string> &values : arrays) {
+ if (values.size() != 4) {
+ std::cerr << "Error parsing Rectangle: expected "
+ << "array with 4 parameters" << std::endl;
+ return {};
+ }
+
+ Rectangle rect = unpackRectangle(values);
+ rectangles.push_back(rect);
+ }
+
+ ControlValue controlValue;
+ if (rectangles.size() == 1)
+ controlValue.set(rectangles.at(0));
+ else
+ controlValue.set(Span<const Rectangle>(rectangles));
+
+ return controlValue;
+}
+
+std::vector<std::vector<std::string>> CaptureScript::parseArrays()
+{
+ EventPtr event = nextEvent(YAML_SEQUENCE_START_EVENT);
+ if (!event)
+ return {};
+
+ event = nextEvent();
+ if (!event)
+ return {};
+
+ std::vector<std::vector<std::string>> valueArrays;
+
+ /* Parse single array. */
+ if (event->type == YAML_SCALAR_EVENT) {
+ std::string firstValue = eventScalarValue(event);
+ if (firstValue.empty())
+ return {};
+
+ std::vector<std::string> remaining = parseSingleArray();
+
+ std::vector<std::string> values = { firstValue };
+ values.insert(std::end(values),
+ std::begin(remaining), std::end(remaining));
+ valueArrays.push_back(values);
+
+ return valueArrays;
+ }
+
+ /* Parse array of arrays. */
+ while (1) {
+ switch (event->type) {
+ case YAML_SEQUENCE_START_EVENT: {
+ std::vector<std::string> values = parseSingleArray();
+ valueArrays.push_back(values);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return valueArrays;
+ default:
+ return {};
+ }
+
+ event = nextEvent();
+ if (!event)
+ return {};
+ }
+}
+
+std::vector<std::string> CaptureScript::parseSingleArray()
+{
+ std::vector<std::string> values;
+
+ while (1) {
+ EventPtr event = nextEvent();
+ if (!event)
+ return {};
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT: {
+ std::string value = eventScalarValue(event);
+ if (value.empty())
+ return {};
+ values.push_back(value);
+ break;
+ }
+ case YAML_SEQUENCE_END_EVENT:
+ return values;
+ default:
+ return {};
+ }
+ }
+}
+
+void CaptureScript::unpackFailure(const ControlId *id, const std::string &repr)
+{
+ static const std::map<unsigned int, const char *> typeNames = {
+ { ControlTypeNone, "none" },
+ { ControlTypeBool, "bool" },
+ { ControlTypeByte, "byte" },
+ { ControlTypeInteger32, "int32" },
+ { ControlTypeInteger64, "int64" },
+ { ControlTypeFloat, "float" },
+ { ControlTypeString, "string" },
+ { ControlTypeRectangle, "Rectangle" },
+ { ControlTypeSize, "Size" },
+ };
+
+ const char *typeName;
+ auto it = typeNames.find(id->type());
+ if (it != typeNames.end())
+ typeName = it->second;
+ else
+ typeName = "unknown";
+
+ std::cerr << "Unsupported control '" << repr << "' for "
+ << typeName << " control " << id->name() << std::endl;
+}
+
+ControlValue CaptureScript::parseScalarControl(const ControlId *id,
+ const std::string repr)
+{
+ ControlValue value{};
+
+ switch (id->type()) {
+ case ControlTypeNone:
+ break;
+ case ControlTypeBool: {
+ bool val;
+
+ if (repr == "true") {
+ val = true;
+ } else if (repr == "false") {
+ val = false;
+ } else {
+ unpackFailure(id, repr);
+ return value;
+ }
+
+ value.set<bool>(val);
+ break;
+ }
+ case ControlTypeByte: {
+ uint8_t val = strtol(repr.c_str(), NULL, 10);
+ value.set<uint8_t>(val);
+ break;
+ }
+ case ControlTypeInteger32: {
+ int32_t val = strtol(repr.c_str(), NULL, 10);
+ value.set<int32_t>(val);
+ break;
+ }
+ case ControlTypeInteger64: {
+ int64_t val = strtoll(repr.c_str(), NULL, 10);
+ value.set<int64_t>(val);
+ break;
+ }
+ case ControlTypeFloat: {
+ float val = strtof(repr.c_str(), NULL);
+ value.set<float>(val);
+ break;
+ }
+ case ControlTypeString: {
+ value.set<std::string>(repr);
+ break;
+ }
+ default:
+ std::cerr << "Unsupported control type" << std::endl;
+ break;
+ }
+
+ return value;
+}
+
+ControlValue CaptureScript::parseArrayControl(const ControlId *id,
+ const std::vector<std::string> &repr)
+{
+ ControlValue value{};
+
+ switch (id->type()) {
+ case ControlTypeNone:
+ break;
+ case ControlTypeBool: {
+ /*
+ * This is unpleasant, but we cannot use a std::vector<>, as its
+ * boolean specialization does not allow access to the raw data:
+ * boolean values are stored in a bitmask for efficiency.
+ *
+ * As we need a contiguous memory region to wrap in a Span<>,
+ * use an array instead but be strict about not overflowing it
+ * by limiting the number of controls we can store.
+ *
+ * Be loud but do not fail, as the issue would present at
+ * runtime and it's not fatal.
+ */
+ static constexpr unsigned int kMaxNumBooleanControls = 1024;
+ std::array<bool, kMaxNumBooleanControls> values;
+ unsigned int idx = 0;
+
+ for (const std::string &s : repr) {
+ bool val;
+
+ if (s == "true") {
+ val = true;
+ } else if (s == "false") {
+ val = false;
+ } else {
+ unpackFailure(id, s);
+ return value;
+ }
+
+ if (idx == kMaxNumBooleanControls) {
+ std::cerr << "Cannot parse more than "
+ << kMaxNumBooleanControls
+ << " boolean controls" << std::endl;
+ break;
+ }
+
+ values[idx++] = val;
+ }
+
+ value = Span<bool>(values.data(), idx);
+ break;
+ }
+ case ControlTypeByte: {
+ std::vector<uint8_t> values;
+ for (const std::string &s : repr) {
+ uint8_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const uint8_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeInteger32: {
+ std::vector<int32_t> values;
+ for (const std::string &s : repr) {
+ int32_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const int32_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeInteger64: {
+ std::vector<int64_t> values;
+ for (const std::string &s : repr) {
+ int64_t val = strtoll(s.c_str(), NULL, 10);
+ values.push_back(val);
+ }
+
+ value = Span<const int64_t>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeFloat: {
+ std::vector<float> values;
+ for (const std::string &s : repr)
+ values.push_back(strtof(s.c_str(), NULL));
+
+ value = Span<const float>(values.data(), values.size());
+ break;
+ }
+ case ControlTypeString: {
+ value = Span<const std::string>(repr.data(), repr.size());
+ break;
+ }
+ default:
+ std::cerr << "Unsupported control type" << std::endl;
+ break;
+ }
+
+ return value;
+}
+
+ControlValue CaptureScript::unpackControl(const ControlId *id)
+{
+ /* Parse complex types. */
+ switch (id->type()) {
+ case ControlTypeRectangle:
+ return parseRectangles();
+ case ControlTypeSize:
+ /* \todo Parse Sizes. */
+ return {};
+ default:
+ break;
+ }
+
+ /* Check if the control has a single scalar value or is an array. */
+ EventPtr event = nextEvent();
+ if (!event)
+ return {};
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT: {
+ const std::string repr = eventScalarValue(event);
+ if (repr.empty())
+ return {};
+
+ return parseScalarControl(id, repr);
+ }
+ case YAML_SEQUENCE_START_EVENT: {
+ std::vector<std::string> array = parseSingleArray();
+ if (array.empty())
+ return {};
+
+ return parseArrayControl(id, array);
+ }
+ default:
+ std::cerr << "Unexpected event type: " << event->type << std::endl;
+ return {};
+ }
+}
+
+libcamera::Rectangle CaptureScript::unpackRectangle(const std::vector<std::string> &strVec)
+{
+ int x = strtol(strVec[0].c_str(), NULL, 10);
+ int y = strtol(strVec[1].c_str(), NULL, 10);
+ unsigned int width = strtoul(strVec[2].c_str(), NULL, 10);
+ unsigned int height = strtoul(strVec[3].c_str(), NULL, 10);
+
+ return Rectangle(x, y, width, height);
+}
diff --git a/src/apps/cam/capture_script.h b/src/apps/cam/capture_script.h
new file mode 100644
index 00000000..294b9203
--- /dev/null
+++ b/src/apps/cam/capture_script.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * Capture session configuration script
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include <libcamera/camera.h>
+#include <libcamera/controls.h>
+
+#include <yaml.h>
+
+class CaptureScript
+{
+public:
+ CaptureScript(std::shared_ptr<libcamera::Camera> camera,
+ const std::string &fileName);
+
+ bool valid() const { return valid_; }
+
+ const libcamera::ControlList &frameControls(unsigned int frame);
+
+private:
+ struct EventDeleter {
+ void operator()(yaml_event_t *event) const
+ {
+ yaml_event_delete(event);
+ delete event;
+ }
+ };
+ using EventPtr = std::unique_ptr<yaml_event_t, EventDeleter>;
+
+ std::map<std::string, const libcamera::ControlId *> controls_;
+ std::map<unsigned int, libcamera::ControlList> frameControls_;
+ std::shared_ptr<libcamera::Camera> camera_;
+ yaml_parser_t parser_;
+ unsigned int loop_;
+ bool valid_;
+
+ EventPtr nextEvent(yaml_event_type_t expectedType = YAML_NO_EVENT);
+ bool checkEvent(const EventPtr &event, yaml_event_type_t expectedType) const;
+ static std::string eventScalarValue(const EventPtr &event);
+ static std::string eventTypeName(yaml_event_type_t type);
+
+ int parseScript(FILE *script);
+
+ int parseProperties();
+ int parseProperty();
+ int parseFrames();
+ int parseFrame(EventPtr event);
+ int parseControl(EventPtr event, libcamera::ControlList &controls);
+
+ libcamera::ControlValue parseScalarControl(const libcamera::ControlId *id,
+ const std::string repr);
+ libcamera::ControlValue parseArrayControl(const libcamera::ControlId *id,
+ const std::vector<std::string> &repr);
+
+ std::string parseScalar();
+ libcamera::ControlValue parseRectangles();
+ std::vector<std::vector<std::string>> parseArrays();
+ std::vector<std::string> parseSingleArray();
+
+ void unpackFailure(const libcamera::ControlId *id,
+ const std::string &repr);
+ libcamera::ControlValue unpackControl(const libcamera::ControlId *id);
+ libcamera::Rectangle unpackRectangle(const std::vector<std::string> &strVec);
+};
diff --git a/src/apps/cam/drm.cpp b/src/apps/cam/drm.cpp
new file mode 100644
index 00000000..47bbb6b0
--- /dev/null
+++ b/src/apps/cam/drm.cpp
@@ -0,0 +1,717 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * DRM/KMS Helpers
+ */
+
+#include "drm.h"
+
+#include <algorithm>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <iostream>
+#include <set>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include <libdrm/drm_mode.h>
+
+#include "../common/event_loop.h"
+
+namespace DRM {
+
+Object::Object(Device *dev, uint32_t id, Type type)
+ : id_(id), dev_(dev), type_(type)
+{
+ /* Retrieve properties from the objects that support them. */
+ if (type != TypeConnector && type != TypeCrtc &&
+ type != TypeEncoder && type != TypePlane)
+ return;
+
+ /*
+ * We can't distinguish between failures due to the object having no
+ * properties and failures due to other conditions. Assume we use the
+ * API correctly and consider that the object has no properties.
+ */
+ drmModeObjectProperties *properties = drmModeObjectGetProperties(dev->fd(), id, type);
+ if (!properties)
+ return;
+
+ properties_.reserve(properties->count_props);
+ for (uint32_t i = 0; i < properties->count_props; ++i)
+ properties_.emplace_back(properties->props[i],
+ properties->prop_values[i]);
+
+ drmModeFreeObjectProperties(properties);
+}
+
+Object::~Object()
+{
+}
+
+const Property *Object::property(const std::string &name) const
+{
+ for (const PropertyValue &pv : properties_) {
+ const Property *property = static_cast<const Property *>(dev_->object(pv.id()));
+ if (property && property->name() == name)
+ return property;
+ }
+
+ return nullptr;
+}
+
+const PropertyValue *Object::propertyValue(const std::string &name) const
+{
+ for (const PropertyValue &pv : properties_) {
+ const Property *property = static_cast<const Property *>(dev_->object(pv.id()));
+ if (property && property->name() == name)
+ return &pv;
+ }
+
+ return nullptr;
+}
+
+Property::Property(Device *dev, drmModePropertyRes *property)
+ : Object(dev, property->prop_id, TypeProperty),
+ name_(property->name), flags_(property->flags),
+ values_(property->values, property->values + property->count_values),
+ blobs_(property->blob_ids, property->blob_ids + property->count_blobs)
+{
+ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE))
+ type_ = TypeRange;
+ else if (drm_property_type_is(property, DRM_MODE_PROP_ENUM))
+ type_ = TypeEnum;
+ else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+ type_ = TypeBlob;
+ else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK))
+ type_ = TypeBitmask;
+ else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT))
+ type_ = TypeObject;
+ else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE))
+ type_ = TypeSignedRange;
+ else
+ type_ = TypeUnknown;
+
+ for (int i = 0; i < property->count_enums; ++i)
+ enums_[property->enums[i].value] = property->enums[i].name;
+}
+
+Blob::Blob(Device *dev, const libcamera::Span<const uint8_t> &data)
+ : Object(dev, 0, Object::TypeBlob)
+{
+ drmModeCreatePropertyBlob(dev->fd(), data.data(), data.size(), &id_);
+}
+
+Blob::~Blob()
+{
+ if (isValid())
+ drmModeDestroyPropertyBlob(device()->fd(), id());
+}
+
+Mode::Mode(const drmModeModeInfo &mode)
+ : drmModeModeInfo(mode)
+{
+}
+
+std::unique_ptr<Blob> Mode::toBlob(Device *dev) const
+{
+ libcamera::Span<const uint8_t> data{ reinterpret_cast<const uint8_t *>(this),
+ sizeof(*this) };
+ return std::make_unique<Blob>(dev, data);
+}
+
+Crtc::Crtc(Device *dev, const drmModeCrtc *crtc, unsigned int index)
+ : Object(dev, crtc->crtc_id, Object::TypeCrtc), index_(index)
+{
+}
+
+Encoder::Encoder(Device *dev, const drmModeEncoder *encoder)
+ : Object(dev, encoder->encoder_id, Object::TypeEncoder),
+ type_(encoder->encoder_type)
+{
+ const std::list<Crtc> &crtcs = dev->crtcs();
+ possibleCrtcs_.reserve(crtcs.size());
+
+ for (const Crtc &crtc : crtcs) {
+ if (encoder->possible_crtcs & (1 << crtc.index()))
+ possibleCrtcs_.push_back(&crtc);
+ }
+
+ possibleCrtcs_.shrink_to_fit();
+}
+
+namespace {
+
+const std::map<uint32_t, const char *> connectorTypeNames{
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { DRM_MODE_CONNECTOR_VGA, "VGA" },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+ { DRM_MODE_CONNECTOR_Composite, "Composite" },
+ { DRM_MODE_CONNECTOR_SVIDEO, "S-Video" },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+ { DRM_MODE_CONNECTOR_Component, "Component" },
+ { DRM_MODE_CONNECTOR_9PinDIN, "9-Pin-DIN" },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+ { DRM_MODE_CONNECTOR_TV, "TV" },
+ { DRM_MODE_CONNECTOR_eDP, "eDP" },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+ { DRM_MODE_CONNECTOR_DSI, "DSI" },
+ { DRM_MODE_CONNECTOR_DPI, "DPI" },
+};
+
+} /* namespace */
+
+Connector::Connector(Device *dev, const drmModeConnector *connector)
+ : Object(dev, connector->connector_id, Object::TypeConnector),
+ type_(connector->connector_type)
+{
+ auto typeName = connectorTypeNames.find(connector->connector_type);
+ if (typeName == connectorTypeNames.end()) {
+ std::cerr
+ << "Invalid connector type "
+ << connector->connector_type << std::endl;
+ typeName = connectorTypeNames.find(DRM_MODE_CONNECTOR_Unknown);
+ }
+
+ name_ = std::string(typeName->second) + "-"
+ + std::to_string(connector->connector_type_id);
+
+ switch (connector->connection) {
+ case DRM_MODE_CONNECTED:
+ status_ = Status::Connected;
+ break;
+
+ case DRM_MODE_DISCONNECTED:
+ status_ = Status::Disconnected;
+ break;
+
+ case DRM_MODE_UNKNOWNCONNECTION:
+ default:
+ status_ = Status::Unknown;
+ break;
+ }
+
+ const std::list<Encoder> &encoders = dev->encoders();
+
+ encoders_.reserve(connector->count_encoders);
+
+ for (int i = 0; i < connector->count_encoders; ++i) {
+ uint32_t encoderId = connector->encoders[i];
+ auto encoder = std::find_if(encoders.begin(), encoders.end(),
+ [=](const Encoder &e) {
+ return e.id() == encoderId;
+ });
+ if (encoder == encoders.end()) {
+ std::cerr
+ << "Encoder " << encoderId << " not found"
+ << std::endl;
+ continue;
+ }
+
+ encoders_.push_back(&*encoder);
+ }
+
+ encoders_.shrink_to_fit();
+
+ modes_ = { connector->modes, connector->modes + connector->count_modes };
+}
+
+Plane::Plane(Device *dev, const drmModePlane *plane)
+ : Object(dev, plane->plane_id, Object::TypePlane),
+ possibleCrtcsMask_(plane->possible_crtcs)
+{
+ formats_ = { plane->formats, plane->formats + plane->count_formats };
+
+ const std::list<Crtc> &crtcs = dev->crtcs();
+ possibleCrtcs_.reserve(crtcs.size());
+
+ for (const Crtc &crtc : crtcs) {
+ if (plane->possible_crtcs & (1 << crtc.index()))
+ possibleCrtcs_.push_back(&crtc);
+ }
+
+ possibleCrtcs_.shrink_to_fit();
+}
+
+bool Plane::supportsFormat(const libcamera::PixelFormat &format) const
+{
+ return std::find(formats_.begin(), formats_.end(), format.fourcc())
+ != formats_.end();
+}
+
+int Plane::setup()
+{
+ const PropertyValue *pv = propertyValue("type");
+ if (!pv)
+ return -EINVAL;
+
+ switch (pv->value()) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ type_ = TypeOverlay;
+ break;
+
+ case DRM_PLANE_TYPE_PRIMARY:
+ type_ = TypePrimary;
+ break;
+
+ case DRM_PLANE_TYPE_CURSOR:
+ type_ = TypeCursor;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+FrameBuffer::FrameBuffer(Device *dev)
+ : Object(dev, 0, Object::TypeFb)
+{
+}
+
+FrameBuffer::~FrameBuffer()
+{
+ for (const auto &plane : planes_) {
+ struct drm_gem_close gem_close = {
+ .handle = plane.second.handle,
+ .pad = 0,
+ };
+ int ret;
+
+ do {
+ ret = ioctl(device()->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ if (ret == -1) {
+ ret = -errno;
+ std::cerr
+ << "Failed to close GEM object: "
+ << strerror(-ret) << std::endl;
+ }
+ }
+
+ drmModeRmFB(device()->fd(), id());
+}
+
+AtomicRequest::AtomicRequest(Device *dev)
+ : dev_(dev), valid_(true)
+{
+ request_ = drmModeAtomicAlloc();
+ if (!request_)
+ valid_ = false;
+}
+
+AtomicRequest::~AtomicRequest()
+{
+ if (request_)
+ drmModeAtomicFree(request_);
+}
+
+int AtomicRequest::addProperty(const Object *object, const std::string &property,
+ uint64_t value)
+{
+ if (!valid_)
+ return -EINVAL;
+
+ const Property *prop = object->property(property);
+ if (!prop) {
+ valid_ = false;
+ return -EINVAL;
+ }
+
+ return addProperty(object->id(), prop->id(), value);
+}
+
+int AtomicRequest::addProperty(const Object *object, const std::string &property,
+ std::unique_ptr<Blob> blob)
+{
+ if (!valid_)
+ return -EINVAL;
+
+ const Property *prop = object->property(property);
+ if (!prop) {
+ valid_ = false;
+ return -EINVAL;
+ }
+
+ int ret = addProperty(object->id(), prop->id(), blob->id());
+ if (ret < 0)
+ return ret;
+
+ blobs_.emplace_back(std::move(blob));
+
+ return 0;
+}
+
+int AtomicRequest::addProperty(uint32_t object, uint32_t property, uint64_t value)
+{
+ int ret = drmModeAtomicAddProperty(request_, object, property, value);
+ if (ret < 0) {
+ valid_ = false;
+ return ret;
+ }
+
+ return 0;
+}
+
+int AtomicRequest::commit(unsigned int flags)
+{
+ if (!valid_)
+ return -EINVAL;
+
+ uint32_t drmFlags = 0;
+ if (flags & FlagAllowModeset)
+ drmFlags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
+ if (flags & FlagAsync)
+ drmFlags |= DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
+ if (flags & FlagTestOnly)
+ drmFlags |= DRM_MODE_ATOMIC_TEST_ONLY;
+
+ return drmModeAtomicCommit(dev_->fd(), request_, drmFlags, this);
+}
+
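+/*
+ * A typical use, as a sketch (dev, plane, fb and crtc are assumed to have
+ * been looked up beforehand; "FB_ID" and "CRTC_ID" are standard KMS plane
+ * properties, and a full commit would also set the source and destination
+ * geometry properties):
+ *
+ *	DRM::AtomicRequest request(&dev);
+ *	request.addProperty(plane, "FB_ID", fb->id());
+ *	request.addProperty(plane, "CRTC_ID", crtc->id());
+ *	request.commit(DRM::AtomicRequest::FlagAsync);
+ */
+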
+Device::Device()
+ : fd_(-1)
+{
+}
+
+Device::~Device()
+{
+ if (fd_ != -1)
+ drmClose(fd_);
+}
+
+int Device::init()
+{
+ int ret = openCard();
+ if (ret < 0) {
+ std::cerr << "Failed to open any DRM/KMS device: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ /*
+ * Enable the atomic APIs. This also automatically enables the
+ * universal planes API.
+ */
+ ret = drmSetClientCap(fd_, DRM_CLIENT_CAP_ATOMIC, 1);
+ if (ret < 0) {
+ ret = -errno;
+ std::cerr
+ << "Failed to enable atomic capability: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ /* List all the resources. */
+ ret = getResources();
+ if (ret < 0)
+ return ret;
+
+ EventLoop::instance()->addFdEvent(fd_, EventLoop::Read,
+ std::bind(&Device::drmEvent, this));
+
+ return 0;
+}
+
+int Device::openCard()
+{
+ const std::string dirName = "/dev/dri/";
+ bool found = false;
+ int ret;
+
+ /*
+ * Open the first DRM/KMS device beginning with /dev/dri/card. The
+ * libdrm drmOpen*() functions require either a module name or a bus ID,
+ * which we don't have, so bypass them. The automatic module loading and
+ * device node creation from drmOpen() is of no practical use as any
+ * modern system will handle that through udev or an equivalent
+ * component.
+ */
+ DIR *folder = opendir(dirName.c_str());
+ if (!folder) {
+ ret = -errno;
+ std::cerr << "Failed to open " << dirName
+ << " directory: " << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ for (struct dirent *res; (res = readdir(folder));) {
+ uint64_t cap;
+
+ if (strncmp(res->d_name, "card", 4))
+ continue;
+
+ const std::string devName = dirName + res->d_name;
+ fd_ = open(devName.c_str(), O_RDWR | O_CLOEXEC);
+ if (fd_ < 0) {
+ ret = -errno;
+ std::cerr << "Failed to open DRM/KMS device " << devName << ": "
+ << strerror(-ret) << std::endl;
+ continue;
+ }
+
+ /*
+ * Skip devices that don't support the modeset API, to avoid
+ * selecting a DRM device corresponding to a GPU. There is no
+ * modeset capability, but the kernel returns an error for most
+ * caps if mode setting isn't supported by the driver. The
+ * DRM_CAP_DUMB_BUFFER capability is one of those; others would
+ * do as well. The capability value itself isn't relevant.
+ */
+ ret = drmGetCap(fd_, DRM_CAP_DUMB_BUFFER, &cap);
+ if (ret < 0) {
+ drmClose(fd_);
+ fd_ = -1;
+ continue;
+ }
+
+ found = true;
+ break;
+ }
+
+ closedir(folder);
+
+ return found ? 0 : -ENOENT;
+}
+
+int Device::getResources()
+{
+ int ret;
+
+ std::unique_ptr<drmModeRes, decltype(&drmModeFreeResources)> resources{
+ drmModeGetResources(fd_),
+ &drmModeFreeResources
+ };
+ if (!resources) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get DRM/KMS resources: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ for (int i = 0; i < resources->count_crtcs; ++i) {
+ drmModeCrtc *crtc = drmModeGetCrtc(fd_, resources->crtcs[i]);
+ if (!crtc) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get CRTC: " << strerror(-ret)
+ << std::endl;
+ return ret;
+ }
+
+ crtcs_.emplace_back(this, crtc, i);
+ drmModeFreeCrtc(crtc);
+
+ Crtc &obj = crtcs_.back();
+ objects_[obj.id()] = &obj;
+ }
+
+ for (int i = 0; i < resources->count_encoders; ++i) {
+ drmModeEncoder *encoder =
+ drmModeGetEncoder(fd_, resources->encoders[i]);
+ if (!encoder) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get encoder: " << strerror(-ret)
+ << std::endl;
+ return ret;
+ }
+
+ encoders_.emplace_back(this, encoder);
+ drmModeFreeEncoder(encoder);
+
+ Encoder &obj = encoders_.back();
+ objects_[obj.id()] = &obj;
+ }
+
+ for (int i = 0; i < resources->count_connectors; ++i) {
+ drmModeConnector *connector =
+ drmModeGetConnector(fd_, resources->connectors[i]);
+ if (!connector) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get connector: " << strerror(-ret)
+ << std::endl;
+ return ret;
+ }
+
+ connectors_.emplace_back(this, connector);
+ drmModeFreeConnector(connector);
+
+ Connector &obj = connectors_.back();
+ objects_[obj.id()] = &obj;
+ }
+
+ std::unique_ptr<drmModePlaneRes, decltype(&drmModeFreePlaneResources)> planes{
+ drmModeGetPlaneResources(fd_),
+ &drmModeFreePlaneResources
+ };
+ if (!planes) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get DRM/KMS planes: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ for (uint32_t i = 0; i < planes->count_planes; ++i) {
+ drmModePlane *plane =
+ drmModeGetPlane(fd_, planes->planes[i]);
+ if (!plane) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get plane: " << strerror(-ret)
+ << std::endl;
+ return ret;
+ }
+
+ planes_.emplace_back(this, plane);
+ drmModeFreePlane(plane);
+
+ Plane &obj = planes_.back();
+ objects_[obj.id()] = &obj;
+ }
+
+ /* Set the possible planes for each CRTC. */
+ for (Crtc &crtc : crtcs_) {
+ for (const Plane &plane : planes_) {
+ if (plane.possibleCrtcsMask_ & (1 << crtc.index()))
+ crtc.planes_.push_back(&plane);
+ }
+ }
+
+ /* Collect all property IDs and create Property instances. */
+ std::set<uint32_t> properties;
+ for (const auto &object : objects_) {
+ for (const PropertyValue &value : object.second->properties())
+ properties.insert(value.id());
+ }
+
+ for (uint32_t id : properties) {
+ drmModePropertyRes *property = drmModeGetProperty(fd_, id);
+ if (!property) {
+ ret = -errno;
+ std::cerr
+ << "Failed to get property: " << strerror(-ret)
+ << std::endl;
+ continue;
+ }
+
+ properties_.emplace_back(this, property);
+ drmModeFreeProperty(property);
+
+ Property &obj = properties_.back();
+ objects_[obj.id()] = &obj;
+ }
+
+ /* Finally, perform all delayed setup of mode objects. */
+ for (auto &object : objects_) {
+ ret = object.second->setup();
+ if (ret < 0) {
+ std::cerr
+ << "Failed to setup object " << object.second->id()
+ << ": " << strerror(-ret) << std::endl;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+const Object *Device::object(uint32_t id)
+{
+ const auto iter = objects_.find(id);
+ if (iter == objects_.end())
+ return nullptr;
+
+ return iter->second;
+}
+
+std::unique_ptr<FrameBuffer> Device::createFrameBuffer(
+ const libcamera::FrameBuffer &buffer,
+ const libcamera::PixelFormat &format,
+ const libcamera::Size &size,
+ const std::array<uint32_t, 4> &strides)
+{
+ std::unique_ptr<FrameBuffer> fb{ new FrameBuffer(this) };
+
+ uint32_t handles[4] = {};
+ uint32_t offsets[4] = {};
+ int ret;
+
+ const std::vector<libcamera::FrameBuffer::Plane> &planes = buffer.planes();
+
+ unsigned int i = 0;
+ for (const libcamera::FrameBuffer::Plane &plane : planes) {
+ int fd = plane.fd.get();
+ uint32_t handle;
+
+ auto iter = fb->planes_.find(fd);
+ if (iter == fb->planes_.end()) {
+ ret = drmPrimeFDToHandle(fd_, plane.fd.get(), &handle);
+ if (ret < 0) {
+ ret = -errno;
+ std::cerr
+ << "Unable to import framebuffer dmabuf: "
+ << strerror(-ret) << std::endl;
+ return nullptr;
+ }
+
+ fb->planes_[fd] = { handle };
+ } else {
+ handle = iter->second.handle;
+ }
+
+ handles[i] = handle;
+ offsets[i] = plane.offset;
+ ++i;
+ }
+
+ ret = drmModeAddFB2(fd_, size.width, size.height, format.fourcc(), handles,
+ strides.data(), offsets, &fb->id_, 0);
+ if (ret < 0) {
+ ret = -errno;
+ std::cerr
+ << "Failed to add framebuffer: "
+ << strerror(-ret) << std::endl;
+ return nullptr;
+ }
+
+ return fb;
+}
+
+void Device::drmEvent()
+{
+ drmEventContext ctx{};
+ ctx.version = DRM_EVENT_CONTEXT_VERSION;
+ ctx.page_flip_handler = &Device::pageFlipComplete;
+
+ drmHandleEvent(fd_, &ctx);
+}
+
+void Device::pageFlipComplete([[maybe_unused]] int fd,
+ [[maybe_unused]] unsigned int sequence,
+ [[maybe_unused]] unsigned int tv_sec,
+ [[maybe_unused]] unsigned int tv_usec,
+ void *user_data)
+{
+ AtomicRequest *request = static_cast<AtomicRequest *>(user_data);
+ request->device()->requestComplete.emit(request);
+}
+
+} /* namespace DRM */
diff --git a/src/apps/cam/drm.h b/src/apps/cam/drm.h
new file mode 100644
index 00000000..1ba83b6e
--- /dev/null
+++ b/src/apps/cam/drm.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * DRM/KMS Helpers
+ */
+
+#pragma once
+
+#include <array>
+#include <list>
+#include <map>
+#include <memory>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+#include <libcamera/base/span.h>
+
+#include <libdrm/drm.h>
+#include <xf86drm.h>
+#include <xf86drmMode.h>
+
+namespace libcamera {
+class FrameBuffer;
+class PixelFormat;
+class Size;
+} /* namespace libcamera */
+
+namespace DRM {
+
+class Device;
+class Plane;
+class Property;
+class PropertyValue;
+
+class Object
+{
+public:
+ enum Type {
+ TypeCrtc = DRM_MODE_OBJECT_CRTC,
+ TypeConnector = DRM_MODE_OBJECT_CONNECTOR,
+ TypeEncoder = DRM_MODE_OBJECT_ENCODER,
+ TypeMode = DRM_MODE_OBJECT_MODE,
+ TypeProperty = DRM_MODE_OBJECT_PROPERTY,
+ TypeFb = DRM_MODE_OBJECT_FB,
+ TypeBlob = DRM_MODE_OBJECT_BLOB,
+ TypePlane = DRM_MODE_OBJECT_PLANE,
+ TypeAny = DRM_MODE_OBJECT_ANY,
+ };
+
+ Object(Device *dev, uint32_t id, Type type);
+ virtual ~Object();
+
+ Device *device() const { return dev_; }
+ uint32_t id() const { return id_; }
+ Type type() const { return type_; }
+
+ const Property *property(const std::string &name) const;
+ const PropertyValue *propertyValue(const std::string &name) const;
+ const std::vector<PropertyValue> &properties() const { return properties_; }
+
+protected:
+ virtual int setup()
+ {
+ return 0;
+ }
+
+ uint32_t id_;
+
+private:
+ friend Device;
+
+ Device *dev_;
+ Type type_;
+ std::vector<PropertyValue> properties_;
+};
+
+class Property : public Object
+{
+public:
+ enum Type {
+ TypeUnknown = 0,
+ TypeRange,
+ TypeEnum,
+ TypeBlob,
+ TypeBitmask,
+ TypeObject,
+ TypeSignedRange,
+ };
+
+ Property(Device *dev, drmModePropertyRes *property);
+
+ Type type() const { return type_; }
+ const std::string &name() const { return name_; }
+
+ bool isImmutable() const { return flags_ & DRM_MODE_PROP_IMMUTABLE; }
+
+ const std::vector<uint64_t> &values() const { return values_; }
+ const std::map<uint32_t, std::string> &enums() const { return enums_; }
+ const std::vector<uint32_t> &blobs() const { return blobs_; }
+
+private:
+ Type type_;
+ std::string name_;
+ uint32_t flags_;
+ std::vector<uint64_t> values_;
+ std::map<uint32_t, std::string> enums_;
+ std::vector<uint32_t> blobs_;
+};
+
+class PropertyValue
+{
+public:
+ PropertyValue(uint32_t id, uint64_t value)
+ : id_(id), value_(value)
+ {
+ }
+
+ uint32_t id() const { return id_; }
+ uint64_t value() const { return value_; }
+
+private:
+ uint32_t id_;
+ uint64_t value_;
+};
+
+class Blob : public Object
+{
+public:
+ Blob(Device *dev, const libcamera::Span<const uint8_t> &data);
+ ~Blob();
+
+ bool isValid() const { return id() != 0; }
+};
+
+class Mode : public drmModeModeInfo
+{
+public:
+ Mode(const drmModeModeInfo &mode);
+
+ std::unique_ptr<Blob> toBlob(Device *dev) const;
+};
+
+class Crtc : public Object
+{
+public:
+ Crtc(Device *dev, const drmModeCrtc *crtc, unsigned int index);
+
+ unsigned int index() const { return index_; }
+ const std::vector<const Plane *> &planes() const { return planes_; }
+
+private:
+ friend Device;
+
+ unsigned int index_;
+ std::vector<const Plane *> planes_;
+};
+
+class Encoder : public Object
+{
+public:
+ Encoder(Device *dev, const drmModeEncoder *encoder);
+
+ uint32_t type() const { return type_; }
+
+ const std::vector<const Crtc *> &possibleCrtcs() const { return possibleCrtcs_; }
+
+private:
+ uint32_t type_;
+ std::vector<const Crtc *> possibleCrtcs_;
+};
+
+class Connector : public Object
+{
+public:
+ enum Status {
+ Connected,
+ Disconnected,
+ Unknown,
+ };
+
+ Connector(Device *dev, const drmModeConnector *connector);
+
+ uint32_t type() const { return type_; }
+ const std::string &name() const { return name_; }
+
+ Status status() const { return status_; }
+
+ const std::vector<const Encoder *> &encoders() const { return encoders_; }
+ const std::vector<Mode> &modes() const { return modes_; }
+
+private:
+ uint32_t type_;
+ std::string name_;
+ Status status_;
+ std::vector<const Encoder *> encoders_;
+ std::vector<Mode> modes_;
+};
+
+class Plane : public Object
+{
+public:
+ enum Type {
+ TypeOverlay,
+ TypePrimary,
+ TypeCursor,
+ };
+
+ Plane(Device *dev, const drmModePlane *plane);
+
+ Type type() const { return type_; }
+ const std::vector<uint32_t> &formats() const { return formats_; }
+ const std::vector<const Crtc *> &possibleCrtcs() const { return possibleCrtcs_; }
+
+ bool supportsFormat(const libcamera::PixelFormat &format) const;
+
+protected:
+ int setup() override;
+
+private:
+ friend class Device;
+
+ Type type_;
+ std::vector<uint32_t> formats_;
+ std::vector<const Crtc *> possibleCrtcs_;
+ uint32_t possibleCrtcsMask_;
+};
+
+class FrameBuffer : public Object
+{
+public:
+ struct Plane {
+ uint32_t handle;
+ };
+
+ ~FrameBuffer();
+
+private:
+ friend class Device;
+
+ FrameBuffer(Device *dev);
+
+ std::map<int, Plane> planes_;
+};
+
+class AtomicRequest
+{
+public:
+ enum Flags {
+ FlagAllowModeset = (1 << 0),
+ FlagAsync = (1 << 1),
+ FlagTestOnly = (1 << 2),
+ };
+
+ AtomicRequest(Device *dev);
+ ~AtomicRequest();
+
+ Device *device() const { return dev_; }
+ bool isValid() const { return valid_; }
+
+ int addProperty(const Object *object, const std::string &property,
+ uint64_t value);
+ int addProperty(const Object *object, const std::string &property,
+ std::unique_ptr<Blob> blob);
+ int commit(unsigned int flags = 0);
+
+private:
+ AtomicRequest(const AtomicRequest &) = delete;
+ AtomicRequest(const AtomicRequest &&) = delete;
+ AtomicRequest &operator=(const AtomicRequest &) = delete;
+ AtomicRequest &operator=(const AtomicRequest &&) = delete;
+
+ int addProperty(uint32_t object, uint32_t property, uint64_t value);
+
+ Device *dev_;
+ bool valid_;
+ drmModeAtomicReq *request_;
+ std::list<std::unique_ptr<Blob>> blobs_;
+};
+
+class Device
+{
+public:
+ Device();
+ ~Device();
+
+ int init();
+
+ int fd() const { return fd_; }
+
+ const std::list<Crtc> &crtcs() const { return crtcs_; }
+ const std::list<Encoder> &encoders() const { return encoders_; }
+ const std::list<Connector> &connectors() const { return connectors_; }
+ const std::list<Plane> &planes() const { return planes_; }
+ const std::list<Property> &properties() const { return properties_; }
+
+ const Object *object(uint32_t id);
+
+ std::unique_ptr<FrameBuffer> createFrameBuffer(
+ const libcamera::FrameBuffer &buffer,
+ const libcamera::PixelFormat &format,
+ const libcamera::Size &size,
+ const std::array<uint32_t, 4> &strides);
+
+ libcamera::Signal<AtomicRequest *> requestComplete;
+
+private:
+ Device(const Device &) = delete;
+ Device(const Device &&) = delete;
+ Device &operator=(const Device &) = delete;
+ Device &operator=(const Device &&) = delete;
+
+ int openCard();
+ int getResources();
+
+ void drmEvent();
+ static void pageFlipComplete(int fd, unsigned int sequence,
+ unsigned int tv_sec, unsigned int tv_usec,
+ void *user_data);
+
+ int fd_;
+
+ std::list<Crtc> crtcs_;
+ std::list<Encoder> encoders_;
+ std::list<Connector> connectors_;
+ std::list<Plane> planes_;
+ std::list<Property> properties_;
+
+ std::map<uint32_t, Object *> objects_;
+};
+
+} /* namespace DRM */
diff --git a/src/apps/cam/file_sink.cpp b/src/apps/cam/file_sink.cpp
new file mode 100644
index 00000000..76e21db9
--- /dev/null
+++ b/src/apps/cam/file_sink.cpp
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * File Sink
+ */
+
+#include <array>
+#include <assert.h>
+#include <fcntl.h>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string.h>
+#include <unistd.h>
+#include <utility>
+
+#include <libcamera/camera.h>
+
+#include "../common/dng_writer.h"
+#include "../common/image.h"
+#include "../common/ppm_writer.h"
+
+#include "file_sink.h"
+
+using namespace libcamera;
+
+FileSink::FileSink([[maybe_unused]] const libcamera::Camera *camera,
+ const std::map<const libcamera::Stream *, std::string> &streamNames)
+ :
+#ifdef HAVE_TIFF
+ camera_(camera),
+#endif
+ pattern_(kDefaultFilePattern), fileType_(FileType::Binary),
+ streamNames_(streamNames)
+{
+}
+
+FileSink::~FileSink()
+{
+}
+
+int FileSink::setFilePattern(const std::string &pattern)
+{
+ static const std::array<std::pair<std::string, FileType>, 2> types{{
+ { ".dng", FileType::Dng },
+ { ".ppm", FileType::Ppm },
+ }};
+
+ pattern_ = pattern;
+
+ if (pattern_.empty() || pattern_.back() == '/')
+ pattern_ += kDefaultFilePattern;
+
+ fileType_ = FileType::Binary;
+
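+ /* The file name suffix selects the writer: e.g. "frame-#.dng" enables DNG output. */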
+ for (const auto &type : types) {
+ if (pattern_.size() < type.first.size())
+ continue;
+
+ if (pattern_.find(type.first, pattern_.size() - type.first.size()) !=
+ std::string::npos) {
+ fileType_ = type.second;
+ break;
+ }
+ }
+
+#ifndef HAVE_TIFF
+ if (fileType_ == FileType::Dng) {
+ std::cerr << "DNG support not available" << std::endl;
+ return -EINVAL;
+ }
+#endif /* HAVE_TIFF */
+
+ return 0;
+}
+
+int FileSink::configure(const libcamera::CameraConfiguration &config)
+{
+ int ret = FrameSink::configure(config);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void FileSink::mapBuffer(FrameBuffer *buffer)
+{
+ std::unique_ptr<Image> image =
+ Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly);
+ assert(image != nullptr);
+
+ mappedBuffers_[buffer] = std::move(image);
+}
+
+bool FileSink::processRequest(Request *request)
+{
+ for (auto [stream, buffer] : request->buffers())
+ writeBuffer(stream, buffer, request->metadata());
+
+ return true;
+}
+
+void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer,
+ [[maybe_unused]] const ControlList &metadata)
+{
+ std::string filename = pattern_;
+ size_t pos;
+ int fd, ret = 0;
+
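+ /* Replace the '#' placeholder with "<stream name>-<sequence>". */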
+ pos = filename.find_first_of('#');
+ if (pos != std::string::npos) {
+ std::stringstream ss;
+ ss << streamNames_[stream] << "-" << std::setw(6)
+ << std::setfill('0') << buffer->metadata().sequence;
+ filename.replace(pos, 1, ss.str());
+ }
+
+ Image *image = mappedBuffers_[buffer].get();
+
+#ifdef HAVE_TIFF
+ if (fileType_ == FileType::Dng) {
+ ret = DNGWriter::write(filename.c_str(), camera_,
+ stream->configuration(), metadata,
+ buffer, image->data(0).data());
+ if (ret < 0)
+ std::cerr << "failed to write DNG file `" << filename
+ << "'" << std::endl;
+
+ return;
+ }
+#endif /* HAVE_TIFF */
+ if (fileType_ == FileType::Ppm) {
+ ret = PPMWriter::write(filename.c_str(), stream->configuration(),
+ image->data(0));
+ if (ret < 0)
+ std::cerr << "failed to write PPM file `" << filename
+ << "'" << std::endl;
+
+ return;
+ }
+
+ fd = open(filename.c_str(), O_CREAT | O_WRONLY |
+ (pos == std::string::npos ? O_APPEND : O_TRUNC),
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+ if (fd == -1) {
+ ret = -errno;
+ std::cerr << "failed to open file " << filename << ": "
+ << strerror(-ret) << std::endl;
+ return;
+ }
+
+ for (unsigned int i = 0; i < buffer->planes().size(); ++i) {
+ /*
+ * This was formerly a local "const FrameMetadata::Plane &",
+ * but that caused a false-positive warning for dangling
+ * references on gcc 13.
+ */
+ const unsigned int bytesused = buffer->metadata().planes()[i].bytesused;
+
+ Span<uint8_t> data = image->data(i);
+ const unsigned int length = std::min<unsigned int>(bytesused, data.size());
+
+ if (bytesused > data.size())
+ std::cerr << "payload size " << bytesused
+ << " larger than plane size " << data.size()
+ << std::endl;
+
+ ret = ::write(fd, data.data(), length);
+ if (ret < 0) {
+ ret = -errno;
+ std::cerr << "write error: " << strerror(-ret)
+ << std::endl;
+ break;
+ } else if (ret != (int)length) {
+ std::cerr << "write error: only " << ret
+ << " bytes written instead of "
+ << length << std::endl;
+ break;
+ }
+ }
+
+ close(fd);
+}
diff --git a/src/apps/cam/file_sink.h b/src/apps/cam/file_sink.h
new file mode 100644
index 00000000..71b7fe0f
--- /dev/null
+++ b/src/apps/cam/file_sink.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * File Sink
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include <libcamera/stream.h>
+
+#include "frame_sink.h"
+
+class Image;
+
+class FileSink : public FrameSink
+{
+public:
+ FileSink(const libcamera::Camera *camera,
+ const std::map<const libcamera::Stream *, std::string> &streamNames);
+ ~FileSink();
+
+ int setFilePattern(const std::string &pattern);
+
+ int configure(const libcamera::CameraConfiguration &config) override;
+
+ void mapBuffer(libcamera::FrameBuffer *buffer) override;
+
+ bool processRequest(libcamera::Request *request) override;
+
+private:
+ static constexpr const char *kDefaultFilePattern = "frame-#.bin";
+
+ enum class FileType {
+ Binary,
+ Dng,
+ Ppm,
+ };
+
+ void writeBuffer(const libcamera::Stream *stream,
+ libcamera::FrameBuffer *buffer,
+ const libcamera::ControlList &metadata);
+
+#ifdef HAVE_TIFF
+ const libcamera::Camera *camera_;
+#endif
+
+ std::string pattern_;
+ FileType fileType_;
+
+ std::map<const libcamera::Stream *, std::string> streamNames_;
+ std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>> mappedBuffers_;
+};
diff --git a/src/apps/cam/frame_sink.cpp b/src/apps/cam/frame_sink.cpp
new file mode 100644
index 00000000..68d6f2c1
--- /dev/null
+++ b/src/apps/cam/frame_sink.cpp
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Base Frame Sink Class
+ */
+
+#include "frame_sink.h"
+
+/**
+ * \class FrameSink
+ * \brief Abstract class to model a consumer of frames
+ *
+ * The FrameSink class models the consumer that processes frames after a request
+ * completes. It receives requests through processRequest(), and processes them
+ * synchronously or asynchronously. This allows frame sinks to hold onto frames
+ * for an extended period of time, for instance to display them until a new
+ * frame arrives.
+ *
+ * A frame sink processes whole requests, and is solely responsible for deciding
+ * how to handle different frame buffers in case multiple streams are captured.
+ */
+
+FrameSink::~FrameSink()
+{
+}
+
+int FrameSink::configure([[maybe_unused]] const libcamera::CameraConfiguration &config)
+{
+ return 0;
+}
+
+void FrameSink::mapBuffer([[maybe_unused]] libcamera::FrameBuffer *buffer)
+{
+}
+
+int FrameSink::start()
+{
+ return 0;
+}
+
+int FrameSink::stop()
+{
+ return 0;
+}
+
+/**
+ * \fn FrameSink::processRequest()
+ * \param[in] request The request
+ *
+ * This function is called to instruct the sink to process a request. The sink
+ * may process the request synchronously or queue it for asynchronous
+ * processing.
+ *
+ * When the request is processed synchronously, this function shall return true.
+ * The \a request shall not be accessed by the FrameSink after the function
+ * returns.
+ *
+ * When the request is processed asynchronously, the FrameSink temporarily takes
+ * ownership of the \a request. The function shall return false, and the
+ * FrameSink shall emit the requestProcessed signal when the request processing
+ * completes. If the stop() function is called before the request processing
+ * completes, the FrameSink shall release the request synchronously.
+ *
+ * \return True if the request has been processed synchronously, false if
+ * processing has been queued
+ */
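+
+/*
+ * As a sketch, a minimal synchronous sink (the class name is hypothetical)
+ * only needs to override processRequest() and return true:
+ *
+ *	class NullSink : public FrameSink
+ *	{
+ *	public:
+ *		bool processRequest([[maybe_unused]] libcamera::Request *request) override
+ *		{
+ *			return true;
+ *		}
+ *	};
+ */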
diff --git a/src/apps/cam/frame_sink.h b/src/apps/cam/frame_sink.h
new file mode 100644
index 00000000..11105c6c
--- /dev/null
+++ b/src/apps/cam/frame_sink.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Base Frame Sink Class
+ */
+
+#pragma once
+
+#include <libcamera/base/signal.h>
+
+namespace libcamera {
+class CameraConfiguration;
+class FrameBuffer;
+class Request;
+} /* namespace libcamera */
+
+class FrameSink
+{
+public:
+ virtual ~FrameSink();
+
+ virtual int configure(const libcamera::CameraConfiguration &config);
+
+ virtual void mapBuffer(libcamera::FrameBuffer *buffer);
+
+ virtual int start();
+ virtual int stop();
+
+ virtual bool processRequest(libcamera::Request *request) = 0;
+ libcamera::Signal<libcamera::Request *> requestProcessed;
+};
diff --git a/src/apps/cam/kms_sink.cpp b/src/apps/cam/kms_sink.cpp
new file mode 100644
index 00000000..672c985a
--- /dev/null
+++ b/src/apps/cam/kms_sink.cpp
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * KMS Sink
+ */
+
+#include "kms_sink.h"
+
+#include <algorithm>
+#include <array>
+#include <assert.h>
+#include <iostream>
+#include <limits.h>
+#include <memory>
+#include <stdint.h>
+#include <string.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+#include "drm.h"
+
+KMSSink::KMSSink(const std::string &connectorName)
+ : connector_(nullptr), crtc_(nullptr), plane_(nullptr), mode_(nullptr)
+{
+ int ret = dev_.init();
+ if (ret < 0)
+ return;
+
+ /*
+ * Find the requested connector. If no specific connector is requested,
+ * pick the first connected connector or, if no connector is connected,
+ * the first connector with unknown status.
+ */
+ for (const DRM::Connector &conn : dev_.connectors()) {
+ if (!connectorName.empty()) {
+ if (conn.name() != connectorName)
+ continue;
+
+ connector_ = &conn;
+ break;
+ }
+
+ if (conn.status() == DRM::Connector::Connected) {
+ connector_ = &conn;
+ break;
+ }
+
+ if (!connector_ && conn.status() == DRM::Connector::Unknown)
+ connector_ = &conn;
+ }
+
+ if (!connector_) {
+ if (!connectorName.empty())
+ std::cerr
+ << "Connector " << connectorName << " not found"
+ << std::endl;
+ else
+ std::cerr << "No connected connector found" << std::endl;
+ return;
+ }
+
+ dev_.requestComplete.connect(this, &KMSSink::requestComplete);
+}
+
+void KMSSink::mapBuffer(libcamera::FrameBuffer *buffer)
+{
+ std::array<uint32_t, 4> strides = {};
+
+ /* \todo Should libcamera report per-plane strides? */
+ unsigned int uvStrideMultiplier;
+
+ switch (format_) {
+ case libcamera::formats::NV24:
+ case libcamera::formats::NV42:
+ uvStrideMultiplier = 4;
+ break;
+ case libcamera::formats::YUV420:
+ case libcamera::formats::YVU420:
+ case libcamera::formats::YUV422:
+ uvStrideMultiplier = 1;
+ break;
+ default:
+ uvStrideMultiplier = 2;
+ break;
+ }
+
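+ /*
+ * For example, NV12 takes the default multiplier of 2 and gets a chroma
+ * stride equal to the luma stride, while YUV420 (multiplier 1) gets half
+ * the luma stride.
+ */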
+ strides[0] = stride_;
+ for (unsigned int i = 1; i < buffer->planes().size(); ++i)
+ strides[i] = stride_ * uvStrideMultiplier / 2;
+
+ std::unique_ptr<DRM::FrameBuffer> drmBuffer =
+ dev_.createFrameBuffer(*buffer, format_, size_, strides);
+ if (!drmBuffer)
+ return;
+
+ buffers_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(buffer),
+ std::forward_as_tuple(std::move(drmBuffer)));
+}
+
+int KMSSink::configure(const libcamera::CameraConfiguration &config)
+{
+ if (!connector_)
+ return -EINVAL;
+
+ crtc_ = nullptr;
+ plane_ = nullptr;
+ mode_ = nullptr;
+
+ const libcamera::StreamConfiguration &cfg = config.at(0);
+
+ /* Find the best mode for the stream size. */
+ const std::vector<DRM::Mode> &modes = connector_->modes();
+
+ unsigned int cfgArea = cfg.size.width * cfg.size.height;
+ unsigned int bestDistance = UINT_MAX;
+
+ for (const DRM::Mode &mode : modes) {
+ unsigned int modeArea = mode.hdisplay * mode.vdisplay;
+ unsigned int distance = modeArea > cfgArea ? modeArea - cfgArea
+ : cfgArea - modeArea;
+
+ if (distance < bestDistance) {
+ mode_ = &mode;
+ bestDistance = distance;
+
+ /*
+ * If the sizes match exactly, there will be no better
+ * match.
+ */
+ if (distance == 0)
+ break;
+ }
+ }
+
+ if (!mode_) {
+		std::cerr << "No modes available on connector" << std::endl;
+ return -EINVAL;
+ }
+
+ int ret = configurePipeline(cfg.pixelFormat);
+ if (ret < 0)
+ return ret;
+
+ size_ = cfg.size;
+ stride_ = cfg.stride;
+
+ /* Configure color space. */
+ colorEncoding_ = std::nullopt;
+ colorRange_ = std::nullopt;
+
+ if (cfg.colorSpace->ycbcrEncoding == libcamera::ColorSpace::YcbcrEncoding::None)
+ return 0;
+
+ /*
+ * The encoding and range enums are defined in the kernel but not
+ * exposed in public headers.
+ */
+ enum drm_color_encoding {
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_BT2020,
+ };
+
+ enum drm_color_range {
+ DRM_COLOR_YCBCR_LIMITED_RANGE,
+ DRM_COLOR_YCBCR_FULL_RANGE,
+ };
+
+ const DRM::Property *colorEncoding = plane_->property("COLOR_ENCODING");
+ const DRM::Property *colorRange = plane_->property("COLOR_RANGE");
+
+ if (colorEncoding) {
+ drm_color_encoding encoding;
+
+ switch (cfg.colorSpace->ycbcrEncoding) {
+ case libcamera::ColorSpace::YcbcrEncoding::Rec601:
+ default:
+ encoding = DRM_COLOR_YCBCR_BT601;
+ break;
+ case libcamera::ColorSpace::YcbcrEncoding::Rec709:
+ encoding = DRM_COLOR_YCBCR_BT709;
+ break;
+ case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
+ encoding = DRM_COLOR_YCBCR_BT2020;
+ break;
+ }
+
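+		/* Use the encoding only if the plane property exposes it. */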
+ for (const auto &[id, name] : colorEncoding->enums()) {
+ if (id == encoding) {
+ colorEncoding_ = encoding;
+ break;
+ }
+ }
+ }
+
+ if (colorRange) {
+ drm_color_range range;
+
+ switch (cfg.colorSpace->range) {
+ case libcamera::ColorSpace::Range::Limited:
+ default:
+ range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ break;
+ case libcamera::ColorSpace::Range::Full:
+ range = DRM_COLOR_YCBCR_FULL_RANGE;
+ break;
+ }
+
+ for (const auto &[id, name] : colorRange->enums()) {
+ if (id == range) {
+ colorRange_ = range;
+ break;
+ }
+ }
+ }
+
+ if (!colorEncoding_ || !colorRange_)
+ std::cerr << "Color space " << cfg.colorSpace->toString()
+ << " not supported by the display device."
+ << " Colors may be wrong." << std::endl;
+
+ return 0;
+}
+
+int KMSSink::selectPipeline(const libcamera::PixelFormat &format)
+{
+ /*
+ * If the requested format has an alpha channel, also consider the X
+ * variant.
+ */
+ libcamera::PixelFormat xFormat;
+
+ switch (format) {
+ case libcamera::formats::ABGR8888:
+ xFormat = libcamera::formats::XBGR8888;
+ break;
+ case libcamera::formats::ARGB8888:
+ xFormat = libcamera::formats::XRGB8888;
+ break;
+ case libcamera::formats::BGRA8888:
+ xFormat = libcamera::formats::BGRX8888;
+ break;
+ case libcamera::formats::RGBA8888:
+ xFormat = libcamera::formats::RGBX8888;
+ break;
+ }
+
+ /*
+	 * Find a CRTC and plane suitable for the requested format and the
+ * connector at the end of the pipeline. Restrict the search to primary
+ * planes for now.
+ */
+ for (const DRM::Encoder *encoder : connector_->encoders()) {
+ for (const DRM::Crtc *crtc : encoder->possibleCrtcs()) {
+ for (const DRM::Plane *plane : crtc->planes()) {
+ if (plane->type() != DRM::Plane::TypePrimary)
+ continue;
+
+ if (plane->supportsFormat(format)) {
+ crtc_ = crtc;
+ plane_ = plane;
+ format_ = format;
+ return 0;
+ }
+
+ if (plane->supportsFormat(xFormat)) {
+ crtc_ = crtc;
+ plane_ = plane;
+ format_ = xFormat;
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EPIPE;
+}
+
+int KMSSink::configurePipeline(const libcamera::PixelFormat &format)
+{
+ const int ret = selectPipeline(format);
+ if (ret) {
+ std::cerr
+ << "Unable to find display pipeline for format "
+ << format << std::endl;
+
+ return ret;
+ }
+
+ std::cout
+ << "Using KMS plane " << plane_->id() << ", CRTC " << crtc_->id()
+ << ", connector " << connector_->name()
+ << " (" << connector_->id() << "), mode " << mode_->hdisplay
+ << "x" << mode_->vdisplay << "@" << mode_->vrefresh << std::endl;
+
+ return 0;
+}
+
+int KMSSink::start()
+{
+ int ret = FrameSink::start();
+ if (ret < 0)
+ return ret;
+
+ /* Disable all CRTCs and planes to start from a known valid state. */
+ DRM::AtomicRequest request(&dev_);
+
+ for (const DRM::Crtc &crtc : dev_.crtcs())
+ request.addProperty(&crtc, "ACTIVE", 0);
+
+ for (const DRM::Plane &plane : dev_.planes()) {
+ request.addProperty(&plane, "CRTC_ID", 0);
+ request.addProperty(&plane, "FB_ID", 0);
+ }
+
+ ret = request.commit(DRM::AtomicRequest::FlagAllowModeset);
+ if (ret < 0) {
+ std::cerr
+ << "Failed to disable CRTCs and planes: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ return 0;
+}
+
+int KMSSink::stop()
+{
+	/* Disable the display pipeline. */
+ DRM::AtomicRequest request(&dev_);
+
+ request.addProperty(connector_, "CRTC_ID", 0);
+ request.addProperty(crtc_, "ACTIVE", 0);
+ request.addProperty(crtc_, "MODE_ID", 0);
+ request.addProperty(plane_, "CRTC_ID", 0);
+ request.addProperty(plane_, "FB_ID", 0);
+
+ int ret = request.commit(DRM::AtomicRequest::FlagAllowModeset);
+ if (ret < 0) {
+ std::cerr
+ << "Failed to stop display pipeline: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ /* Free all buffers. */
+ pending_.reset();
+ queued_.reset();
+ active_.reset();
+ buffers_.clear();
+
+ return FrameSink::stop();
+}
+
+bool KMSSink::testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst)
+{
+ DRM::AtomicRequest drmRequest{ &dev_ };
+
+ drmRequest.addProperty(connector_, "CRTC_ID", crtc_->id());
+
+ drmRequest.addProperty(crtc_, "ACTIVE", 1);
+ drmRequest.addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
+
+ drmRequest.addProperty(plane_, "CRTC_ID", crtc_->id());
+ drmRequest.addProperty(plane_, "FB_ID", drmBuffer->id());
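+	/* The SRC_* plane properties use Q16.16 fixed point, hence the shifts. */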
+ drmRequest.addProperty(plane_, "SRC_X", src.x << 16);
+ drmRequest.addProperty(plane_, "SRC_Y", src.y << 16);
+ drmRequest.addProperty(plane_, "SRC_W", src.width << 16);
+ drmRequest.addProperty(plane_, "SRC_H", src.height << 16);
+ drmRequest.addProperty(plane_, "CRTC_X", dst.x);
+ drmRequest.addProperty(plane_, "CRTC_Y", dst.y);
+ drmRequest.addProperty(plane_, "CRTC_W", dst.width);
+ drmRequest.addProperty(plane_, "CRTC_H", dst.height);
+
+ return !drmRequest.commit(DRM::AtomicRequest::FlagAllowModeset |
+ DRM::AtomicRequest::FlagTestOnly);
+}
+
+bool KMSSink::setupComposition(DRM::FrameBuffer *drmBuffer)
+{
+ /*
+ * Test composition options, from most to least desirable, to select the
+ * best one.
+ */
+ const libcamera::Rectangle framebuffer{ size_ };
+ const libcamera::Rectangle display{ 0, 0, mode_->hdisplay, mode_->vdisplay };
+
+ /* 1. Scale the frame buffer to full screen, preserving aspect ratio. */
+ libcamera::Rectangle src = framebuffer;
+ libcamera::Rectangle dst = display.size().boundedToAspectRatio(framebuffer.size())
+ .centeredTo(display.center());
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /*
+ * 2. Scale the frame buffer to full screen, without preserving aspect
+ * ratio.
+ */
+ src = framebuffer;
+ dst = display;
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: full-screen scaled output, non-square pixels"
+ << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 3. Center the frame buffer on the display. */
+ src = display.size().centeredTo(framebuffer.center()).boundedTo(framebuffer);
+ dst = framebuffer.size().centeredTo(display.center()).boundedTo(display);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: centered output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ /* 4. Align the frame buffer on the top-left of the display. */
+ src = framebuffer.boundedTo(display);
+ dst = display.boundedTo(framebuffer);
+
+ if (testModeSet(drmBuffer, src, dst)) {
+ std::cout << "KMS: top-left aligned output" << std::endl;
+ src_ = src;
+ dst_ = dst;
+ return true;
+ }
+
+ return false;
+}
+
+bool KMSSink::processRequest(libcamera::Request *camRequest)
+{
+ /*
+ * Perform a very crude rate adaptation by simply dropping the request
+ * if the display queue is full.
+ */
+ if (pending_)
+ return true;
+
+ libcamera::FrameBuffer *buffer = camRequest->buffers().begin()->second;
+ auto iter = buffers_.find(buffer);
+ if (iter == buffers_.end())
+ return true;
+
+ DRM::FrameBuffer *drmBuffer = iter->second.get();
+
+ unsigned int flags = DRM::AtomicRequest::FlagAsync;
+ std::unique_ptr<DRM::AtomicRequest> drmRequest =
+ std::make_unique<DRM::AtomicRequest>(&dev_);
+ drmRequest->addProperty(plane_, "FB_ID", drmBuffer->id());
+
+ if (!active_ && !queued_) {
+ /* Enable the display pipeline on the first frame. */
+ if (!setupComposition(drmBuffer)) {
+ std::cerr << "Failed to setup composition" << std::endl;
+ return true;
+ }
+
+ drmRequest->addProperty(connector_, "CRTC_ID", crtc_->id());
+
+ drmRequest->addProperty(crtc_, "ACTIVE", 1);
+ drmRequest->addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));
+
+ drmRequest->addProperty(plane_, "CRTC_ID", crtc_->id());
+ drmRequest->addProperty(plane_, "SRC_X", src_.x << 16);
+ drmRequest->addProperty(plane_, "SRC_Y", src_.y << 16);
+ drmRequest->addProperty(plane_, "SRC_W", src_.width << 16);
+ drmRequest->addProperty(plane_, "SRC_H", src_.height << 16);
+ drmRequest->addProperty(plane_, "CRTC_X", dst_.x);
+ drmRequest->addProperty(plane_, "CRTC_Y", dst_.y);
+ drmRequest->addProperty(plane_, "CRTC_W", dst_.width);
+ drmRequest->addProperty(plane_, "CRTC_H", dst_.height);
+
+ if (colorEncoding_)
+ drmRequest->addProperty(plane_, "COLOR_ENCODING", *colorEncoding_);
+ if (colorRange_)
+ drmRequest->addProperty(plane_, "COLOR_RANGE", *colorRange_);
+
+ flags |= DRM::AtomicRequest::FlagAllowModeset;
+ }
+
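+	/*
+	 * Requests move through three stages: pending_ (created but not yet
+	 * committed), queued_ (committed to DRM, awaiting the page flip) and
+	 * active_ (currently displayed). requestComplete() shifts them along
+	 * on every flip completion.
+	 */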
+ pending_ = std::make_unique<Request>(std::move(drmRequest), camRequest);
+
+ std::lock_guard<std::mutex> lock(lock_);
+
+ if (!queued_) {
+ int ret = pending_->drmRequest_->commit(flags);
+ if (ret < 0) {
+ std::cerr
+ << "Failed to commit atomic request: "
+ << strerror(-ret) << std::endl;
+ /* \todo Implement error handling */
+ }
+
+ queued_ = std::move(pending_);
+ }
+
+ return false;
+}
+
+void KMSSink::requestComplete([[maybe_unused]] DRM::AtomicRequest *request)
+{
+ std::lock_guard<std::mutex> lock(lock_);
+
+ assert(queued_ && queued_->drmRequest_.get() == request);
+
+ /* Complete the active request, if any. */
+ if (active_)
+ requestProcessed.emit(active_->camRequest_);
+
+ /* The queued request becomes active. */
+ active_ = std::move(queued_);
+
+ /* Queue the pending request, if any. */
+ if (pending_) {
+ pending_->drmRequest_->commit(DRM::AtomicRequest::FlagAsync);
+ queued_ = std::move(pending_);
+ }
+}
diff --git a/src/apps/cam/kms_sink.h b/src/apps/cam/kms_sink.h
new file mode 100644
index 00000000..4b7b4c26
--- /dev/null
+++ b/src/apps/cam/kms_sink.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * KMS Sink
+ */
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <string>
+#include <utility>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "drm.h"
+#include "frame_sink.h"
+
+class KMSSink : public FrameSink
+{
+public:
+ KMSSink(const std::string &connectorName);
+
+ void mapBuffer(libcamera::FrameBuffer *buffer) override;
+
+ int configure(const libcamera::CameraConfiguration &config) override;
+ int start() override;
+ int stop() override;
+
+ bool processRequest(libcamera::Request *request) override;
+
+private:
+ class Request
+ {
+ public:
+ Request(std::unique_ptr<DRM::AtomicRequest> drmRequest,
+ libcamera::Request *camRequest)
+ : drmRequest_(std::move(drmRequest)), camRequest_(camRequest)
+ {
+ }
+
+ std::unique_ptr<DRM::AtomicRequest> drmRequest_;
+ libcamera::Request *camRequest_;
+ };
+
+ int selectPipeline(const libcamera::PixelFormat &format);
+ int configurePipeline(const libcamera::PixelFormat &format);
+ bool testModeSet(DRM::FrameBuffer *drmBuffer,
+ const libcamera::Rectangle &src,
+ const libcamera::Rectangle &dst);
+ bool setupComposition(DRM::FrameBuffer *drmBuffer);
+
+ void requestComplete(DRM::AtomicRequest *request);
+
+ DRM::Device dev_;
+
+ const DRM::Connector *connector_;
+ const DRM::Crtc *crtc_;
+ const DRM::Plane *plane_;
+ const DRM::Mode *mode_;
+
+ libcamera::PixelFormat format_;
+ libcamera::Size size_;
+ unsigned int stride_;
+ std::optional<unsigned int> colorEncoding_;
+ std::optional<unsigned int> colorRange_;
+
+ libcamera::Rectangle src_;
+ libcamera::Rectangle dst_;
+
+ std::map<libcamera::FrameBuffer *, std::unique_ptr<DRM::FrameBuffer>> buffers_;
+
+ std::mutex lock_;
+ std::unique_ptr<Request> pending_;
+ std::unique_ptr<Request> queued_;
+ std::unique_ptr<Request> active_;
+};
diff --git a/src/apps/cam/main.cpp b/src/apps/cam/main.cpp
new file mode 100644
index 00000000..460dbc81
--- /dev/null
+++ b/src/apps/cam/main.cpp
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - The libcamera swiss army knife
+ */
+
+#include <atomic>
+#include <iomanip>
+#include <iostream>
+#include <signal.h>
+#include <string.h>
+
+#include <libcamera/libcamera.h>
+#include <libcamera/property_ids.h>
+
+#include "../common/event_loop.h"
+#include "../common/options.h"
+#include "../common/stream_options.h"
+
+#include "camera_session.h"
+#include "main.h"
+
+using namespace libcamera;
+
+class CamApp
+{
+public:
+ CamApp();
+
+ static CamApp *instance();
+
+ int init(int argc, char **argv);
+ void cleanup();
+
+ int exec();
+ void quit();
+
+private:
+ void cameraAdded(std::shared_ptr<Camera> cam);
+ void cameraRemoved(std::shared_ptr<Camera> cam);
+ void captureDone();
+ int parseOptions(int argc, char *argv[]);
+ int run();
+
+ static std::string cameraName(const Camera *camera);
+
+ static CamApp *app_;
+ OptionsParser::Options options_;
+
+ std::unique_ptr<CameraManager> cm_;
+
+ std::atomic_uint loopUsers_;
+ EventLoop loop_;
+};
+
+CamApp *CamApp::app_ = nullptr;
+
+CamApp::CamApp()
+ : loopUsers_(0)
+{
+ CamApp::app_ = this;
+}
+
+CamApp *CamApp::instance()
+{
+ return CamApp::app_;
+}
+
+int CamApp::init(int argc, char **argv)
+{
+ int ret;
+
+ ret = parseOptions(argc, argv);
+ if (ret < 0)
+ return ret;
+
+ cm_ = std::make_unique<CameraManager>();
+
+ ret = cm_->start();
+ if (ret) {
+ std::cout << "Failed to start camera manager: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ return 0;
+}
+
+void CamApp::cleanup()
+{
+ cm_->stop();
+}
+
+int CamApp::exec()
+{
+ int ret;
+
+ ret = run();
+ cleanup();
+
+ return ret;
+}
+
+void CamApp::quit()
+{
+ loop_.exit();
+}
+
+int CamApp::parseOptions(int argc, char *argv[])
+{
+ StreamKeyValueParser streamKeyValue;
+
+ OptionsParser parser;
+ parser.addOption(OptCamera, OptionString,
+ "Specify which camera to operate on, by id or by index", "camera",
+ ArgumentRequired, "camera", true);
+ parser.addOption(OptHelp, OptionNone, "Display this help message",
+ "help");
+ parser.addOption(OptInfo, OptionNone,
+ "Display information about stream(s)", "info");
+ parser.addOption(OptList, OptionNone, "List all cameras", "list");
+	parser.addOption(OptListControls, OptionNone, "List camera controls",
+			 "list-controls");
+	parser.addOption(OptListProperties, OptionNone, "List camera properties",
+			 "list-properties");
+ parser.addOption(OptMonitor, OptionNone,
+ "Monitor for hotplug and unplug camera events",
+ "monitor");
+
+ /* Sub-options of OptCamera: */
+ parser.addOption(OptCapture, OptionInteger,
+ "Capture until interrupted by user or until <count> frames captured",
+ "capture", ArgumentOptional, "count", false,
+ OptCamera);
+
+ parser.addOption(OptOrientation, OptionString,
+ "Desired image orientation (rot0, rot180, mirror, flip)",
+ "orientation", ArgumentRequired, "orientation", false,
+ OptCamera);
+#ifdef HAVE_KMS
+ parser.addOption(OptDisplay, OptionString,
+ "Display viewfinder through DRM/KMS on specified connector",
+ "display", ArgumentOptional, "connector", false,
+ OptCamera);
+#endif
+ parser.addOption(OptFile, OptionString,
+ "Write captured frames to disk\n"
+ "If the file name ends with a '/', it sets the directory in which\n"
+ "to write files, using the default file name. Otherwise it sets the\n"
+ "full file path and name. The first '#' character in the file name\n"
+ "is expanded to the camera index, stream name and frame sequence number.\n"
+#ifdef HAVE_TIFF
+ "If the file name ends with '.dng', then the frame will be written to\n"
+ "the output file(s) in DNG format.\n"
+#endif
+ "If the file name ends with '.ppm', then the frame will be written to\n"
+ "the output file(s) in PPM format.\n"
+ "The default file name is 'frame-#.bin'.",
+ "file", ArgumentOptional, "filename", false,
+ OptCamera);
+#ifdef HAVE_SDL
+ parser.addOption(OptSDL, OptionNone, "Display viewfinder through SDL",
+ "sdl", ArgumentNone, "", false, OptCamera);
+#endif
+ parser.addOption(OptStream, &streamKeyValue,
+ "Set configuration of a camera stream", "stream", true,
+ OptCamera);
+ parser.addOption(OptStrictFormats, OptionNone,
+ "Do not allow requested stream format(s) to be adjusted",
+ "strict-formats", ArgumentNone, nullptr, false,
+ OptCamera);
+ parser.addOption(OptMetadata, OptionNone,
+ "Print the metadata for completed requests",
+ "metadata", ArgumentNone, nullptr, false,
+ OptCamera);
+ parser.addOption(OptCaptureScript, OptionString,
+ "Load a capture session configuration script from a file",
+ "script", ArgumentRequired, "script", false,
+ OptCamera);
+
+ options_ = parser.parse(argc, argv);
+ if (!options_.valid())
+ return -EINVAL;
+
+ if (options_.empty() || options_.isSet(OptHelp)) {
+ parser.usage();
+ return options_.empty() ? -EINVAL : -EINTR;
+ }
+
+ return 0;
+}
+
+void CamApp::cameraAdded(std::shared_ptr<Camera> cam)
+{
+ std::cout << "Camera Added: " << cam->id() << std::endl;
+}
+
+void CamApp::cameraRemoved(std::shared_ptr<Camera> cam)
+{
+ std::cout << "Camera Removed: " << cam->id() << std::endl;
+}
+
+void CamApp::captureDone()
+{
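+	/* Exit the event loop once the last capture session completes. */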
+ if (--loopUsers_ == 0)
+ EventLoop::instance()->exit(0);
+}
+
+int CamApp::run()
+{
+ int ret;
+
+ /* 1. List all cameras. */
+ if (options_.isSet(OptList)) {
+ std::cout << "Available cameras:" << std::endl;
+
+ unsigned int index = 1;
+ for (const std::shared_ptr<Camera> &cam : cm_->cameras()) {
+ std::cout << index << ": " << cameraName(cam.get()) << std::endl;
+ index++;
+ }
+ }
+
+ /* 2. Create the camera sessions. */
+ std::vector<std::unique_ptr<CameraSession>> sessions;
+
+ if (options_.isSet(OptCamera)) {
+ unsigned int index = 0;
+
+ for (const OptionValue &camera : options_[OptCamera].toArray()) {
+ std::unique_ptr<CameraSession> session =
+ std::make_unique<CameraSession>(cm_.get(),
+ camera.toString(),
+ index,
+ camera.children());
+ if (!session->isValid()) {
+ std::cout << "Failed to create camera session" << std::endl;
+ return -EINVAL;
+ }
+
+ std::cout << "Using camera " << session->camera()->id()
+ << " as cam" << index << std::endl;
+
+ session->captureDone.connect(this, &CamApp::captureDone);
+
+ sessions.push_back(std::move(session));
+ index++;
+ }
+ }
+
+ /* 3. Print camera information. */
+ if (options_.isSet(OptListControls) ||
+ options_.isSet(OptListProperties) ||
+ options_.isSet(OptInfo)) {
+ for (const auto &session : sessions) {
+ if (options_.isSet(OptListControls))
+ session->listControls();
+ if (options_.isSet(OptListProperties))
+ session->listProperties();
+ if (options_.isSet(OptInfo))
+ session->infoConfiguration();
+ }
+ }
+
+ /* 4. Start capture. */
+ for (const auto &session : sessions) {
+ if (!session->options().isSet(OptCapture))
+ continue;
+
+ ret = session->start();
+ if (ret) {
+ std::cout << "Failed to start camera session" << std::endl;
+ return ret;
+ }
+
+ loopUsers_++;
+ }
+
+ /* 5. Enable hotplug monitoring. */
+ if (options_.isSet(OptMonitor)) {
+		std::cout << "Monitoring camera hotplug and unplug events" << std::endl;
+ std::cout << "Press Ctrl-C to interrupt" << std::endl;
+
+ cm_->cameraAdded.connect(this, &CamApp::cameraAdded);
+ cm_->cameraRemoved.connect(this, &CamApp::cameraRemoved);
+
+ loopUsers_++;
+ }
+
+ if (loopUsers_)
+ loop_.exec();
+
+ /* 6. Stop capture. */
+ for (const auto &session : sessions) {
+ if (!session->options().isSet(OptCapture))
+ continue;
+
+ session->stop();
+ }
+
+ return 0;
+}
+
+std::string CamApp::cameraName(const Camera *camera)
+{
+ const ControlList &props = camera->properties();
+ bool addModel = true;
+ std::string name;
+
+ /*
+ * Construct the name from the camera location, model and ID. The model
+ * is only used if the location isn't present or is set to External.
+ */
+ const auto &location = props.get(properties::Location);
+ if (location) {
+ switch (*location) {
+ case properties::CameraLocationFront:
+ addModel = false;
+ name = "Internal front camera ";
+ break;
+ case properties::CameraLocationBack:
+ addModel = false;
+ name = "Internal back camera ";
+ break;
+ case properties::CameraLocationExternal:
+ name = "External camera ";
+ break;
+ }
+ }
+
+ if (addModel) {
+ /*
+		 * If the camera location is not available, use the camera model
+ * to build the camera name.
+ */
+ const auto &model = props.get(properties::Model);
+ if (model)
+ name = "'" + *model + "' ";
+ }
+
+ name += "(" + camera->id() + ")";
+
+ return name;
+}
+
+namespace {
+
+void signalHandler([[maybe_unused]] int signal)
+{
+ std::cout << "Exiting" << std::endl;
+ CamApp::instance()->quit();
+}
+
+} /* namespace */
+
+int main(int argc, char **argv)
+{
+ CamApp app;
+ int ret;
+
+ ret = app.init(argc, argv);
+ if (ret)
+ return ret == -EINTR ? 0 : EXIT_FAILURE;
+
+ struct sigaction sa = {};
+ sa.sa_handler = &signalHandler;
+ sigaction(SIGINT, &sa, nullptr);
+
+ if (app.exec())
+ return EXIT_FAILURE;
+
+ return 0;
+}
diff --git a/src/apps/cam/main.h b/src/apps/cam/main.h
new file mode 100644
index 00000000..64e6a20e
--- /dev/null
+++ b/src/apps/cam/main.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Cam application
+ */
+
+#pragma once
+
+enum {
+ OptCamera = 'c',
+ OptCapture = 'C',
+ OptDisplay = 'D',
+ OptFile = 'F',
+ OptHelp = 'h',
+ OptInfo = 'I',
+ OptList = 'l',
+ OptListProperties = 'p',
+ OptMonitor = 'm',
+ OptOrientation = 'o',
+ OptSDL = 'S',
+ OptStream = 's',
+ OptListControls = 256,
+ OptStrictFormats = 257,
+ OptMetadata = 258,
+ OptCaptureScript = 259,
+};
diff --git a/src/apps/cam/meson.build b/src/apps/cam/meson.build
new file mode 100644
index 00000000..c70ca3cd
--- /dev/null
+++ b/src/apps/cam/meson.build
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: CC0-1.0
+
+if opt_cam.disabled() or not libevent.found()
+ cam_enabled = false
+ subdir_done()
+endif
+
+cam_enabled = true
+
+cam_sources = files([
+ 'camera_session.cpp',
+ 'capture_script.cpp',
+ 'file_sink.cpp',
+ 'frame_sink.cpp',
+ 'main.cpp',
+])
+
+cam_cpp_args = [apps_cpp_args]
+
+libdrm = dependency('libdrm', required : false)
+libjpeg = dependency('libjpeg', required : false)
+libsdl2 = dependency('SDL2', required : false)
+
+if libdrm.found()
+ cam_cpp_args += [ '-DHAVE_KMS' ]
+ cam_sources += files([
+ 'drm.cpp',
+ 'kms_sink.cpp'
+ ])
+endif
+
+if libsdl2.found()
+ cam_cpp_args += ['-DHAVE_SDL']
+ cam_sources += files([
+ 'sdl_sink.cpp',
+ 'sdl_texture.cpp',
+ 'sdl_texture_yuv.cpp',
+ ])
+
+ if libjpeg.found()
+ cam_cpp_args += ['-DHAVE_LIBJPEG']
+ cam_sources += files([
+ 'sdl_texture_mjpg.cpp'
+ ])
+ endif
+endif
+
+cam = executable('cam', cam_sources,
+ link_with : apps_lib,
+ dependencies : [
+ libatomic,
+ libcamera_public,
+ libdrm,
+ libevent,
+ libjpeg,
+ libsdl2,
+ libtiff,
+ libyaml,
+ ],
+ cpp_args : cam_cpp_args,
+ install : true,
+ install_tag : 'bin')
diff --git a/src/apps/cam/sdl_sink.cpp b/src/apps/cam/sdl_sink.cpp
new file mode 100644
index 00000000..8355dd5e
--- /dev/null
+++ b/src/apps/cam/sdl_sink.cpp
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Sink
+ */
+
+#include "sdl_sink.h"
+
+#include <assert.h>
+#include <fcntl.h>
+#include <iomanip>
+#include <iostream>
+#include <signal.h>
+#include <sstream>
+#include <string.h>
+#include <unistd.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+
+#include "../common/event_loop.h"
+#include "../common/image.h"
+
+#ifdef HAVE_LIBJPEG
+#include "sdl_texture_mjpg.h"
+#endif
+#include "sdl_texture_yuv.h"
+
+using namespace libcamera;
+
+using namespace std::chrono_literals;
+
+SDLSink::SDLSink()
+ : window_(nullptr), renderer_(nullptr), rect_({}),
+ init_(false)
+{
+}
+
+SDLSink::~SDLSink()
+{
+ stop();
+}
+
+int SDLSink::configure(const libcamera::CameraConfiguration &config)
+{
+ int ret = FrameSink::configure(config);
+ if (ret < 0)
+ return ret;
+
+ if (config.size() > 1) {
+		std::cerr
+			<< "The SDL sink supports a single stream only, displaying the first camera stream"
+			<< std::endl;
+	} else if (config.empty()) {
+		std::cerr << "At least one camera stream is required"
+			  << std::endl;
+ return -EINVAL;
+ }
+
+ const libcamera::StreamConfiguration &cfg = config.at(0);
+ rect_.w = cfg.size.width;
+ rect_.h = cfg.size.height;
+
+ switch (cfg.pixelFormat) {
+#ifdef HAVE_LIBJPEG
+ case libcamera::formats::MJPEG:
+ texture_ = std::make_unique<SDLTextureMJPG>(rect_);
+ break;
+#endif
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+ case libcamera::formats::NV12:
+ texture_ = std::make_unique<SDLTextureNV12>(rect_, cfg.stride);
+ break;
+#endif
+ case libcamera::formats::YUYV:
+ texture_ = std::make_unique<SDLTextureYUYV>(rect_, cfg.stride);
+ break;
+ default:
+ std::cerr << "Unsupported pixel format "
+ << cfg.pixelFormat.toString() << std::endl;
+ return -EINVAL;
+	}
+
+ return 0;
+}
+
+int SDLSink::start()
+{
+ int ret = SDL_Init(SDL_INIT_VIDEO);
+ if (ret) {
+ std::cerr << "Failed to initialize SDL: " << SDL_GetError()
+ << std::endl;
+ return ret;
+ }
+
+ init_ = true;
+ window_ = SDL_CreateWindow("", SDL_WINDOWPOS_UNDEFINED,
+ SDL_WINDOWPOS_UNDEFINED, rect_.w,
+ rect_.h,
+ SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
+ if (!window_) {
+ std::cerr << "Failed to create SDL window: " << SDL_GetError()
+ << std::endl;
+ return -EINVAL;
+ }
+
+ renderer_ = SDL_CreateRenderer(window_, -1, 0);
+ if (!renderer_) {
+ std::cerr << "Failed to create SDL renderer: " << SDL_GetError()
+ << std::endl;
+ return -EINVAL;
+ }
+
+	/*
+	 * Set the logical size for scaling purposes. This is not critical,
+	 * so don't return in case of error.
+	 */
+ ret = SDL_RenderSetLogicalSize(renderer_, rect_.w, rect_.h);
+ if (ret)
+ std::cerr << "Failed to set SDL render logical size: "
+ << SDL_GetError() << std::endl;
+
+ ret = texture_->create(renderer_);
+	if (ret)
+		return ret;
+
+ /* \todo Make the event cancellable to support stop/start cycles. */
+ EventLoop::instance()->addTimerEvent(
+ 10ms, std::bind(&SDLSink::processSDLEvents, this));
+
+ return 0;
+}
+
+int SDLSink::stop()
+{
+ texture_.reset();
+
+ if (renderer_) {
+ SDL_DestroyRenderer(renderer_);
+ renderer_ = nullptr;
+ }
+
+ if (window_) {
+ SDL_DestroyWindow(window_);
+ window_ = nullptr;
+ }
+
+ if (init_) {
+ SDL_Quit();
+ init_ = false;
+ }
+
+ return FrameSink::stop();
+}
+
+void SDLSink::mapBuffer(FrameBuffer *buffer)
+{
+ std::unique_ptr<Image> image =
+ Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly);
+ assert(image != nullptr);
+
+ mappedBuffers_[buffer] = std::move(image);
+}
+
+bool SDLSink::processRequest(Request *request)
+{
+ for (auto [stream, buffer] : request->buffers()) {
+ renderBuffer(buffer);
+ break; /* to be expanded to launch SDL window per buffer */
+ }
+
+ return true;
+}
+
+/*
+ * Process SDL events, required to handle window resizing and the quit button.
+ */
+void SDLSink::processSDLEvents()
+{
+ for (SDL_Event e; SDL_PollEvent(&e);) {
+ if (e.type == SDL_QUIT) {
+			/* Quit when the window close button is clicked. */
+ EventLoop::instance()->exit(0);
+ }
+ }
+}
+
+void SDLSink::renderBuffer(FrameBuffer *buffer)
+{
+ Image *image = mappedBuffers_[buffer].get();
+
+ std::vector<Span<const uint8_t>> planes;
+ unsigned int i = 0;
+
+ planes.reserve(buffer->metadata().planes().size());
+
+ for (const FrameMetadata::Plane &meta : buffer->metadata().planes()) {
+ Span<uint8_t> data = image->data(i);
+ if (meta.bytesused > data.size())
+ std::cerr << "payload size " << meta.bytesused
+ << " larger than plane size " << data.size()
+ << std::endl;
+
+ planes.push_back(data);
+ i++;
+ }
+
+ texture_->update(planes);
+
+ SDL_RenderClear(renderer_);
+ SDL_RenderCopy(renderer_, texture_->get(), nullptr, nullptr);
+ SDL_RenderPresent(renderer_);
+}
diff --git a/src/apps/cam/sdl_sink.h b/src/apps/cam/sdl_sink.h
new file mode 100644
index 00000000..18ec7fbe
--- /dev/null
+++ b/src/apps/cam/sdl_sink.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Sink
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+
+#include <libcamera/stream.h>
+
+#include <SDL2/SDL.h>
+
+#include "frame_sink.h"
+
+class Image;
+class SDLTexture;
+
+class SDLSink : public FrameSink
+{
+public:
+ SDLSink();
+ ~SDLSink();
+
+ int configure(const libcamera::CameraConfiguration &config) override;
+ int start() override;
+ int stop() override;
+ void mapBuffer(libcamera::FrameBuffer *buffer) override;
+
+ bool processRequest(libcamera::Request *request) override;
+
+private:
+ void renderBuffer(libcamera::FrameBuffer *buffer);
+ void processSDLEvents();
+
+ std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>>
+ mappedBuffers_;
+
+ std::unique_ptr<SDLTexture> texture_;
+
+ SDL_Window *window_;
+ SDL_Renderer *renderer_;
+ SDL_Rect rect_;
+ bool init_;
+};
diff --git a/src/apps/cam/sdl_texture.cpp b/src/apps/cam/sdl_texture.cpp
new file mode 100644
index 00000000..e52c4a3a
--- /dev/null
+++ b/src/apps/cam/sdl_texture.cpp
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture
+ */
+
+#include "sdl_texture.h"
+
+#include <iostream>
+
+SDLTexture::SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat,
+ const int stride)
+ : ptr_(nullptr), rect_(rect), pixelFormat_(pixelFormat), stride_(stride)
+{
+}
+
+SDLTexture::~SDLTexture()
+{
+ if (ptr_)
+ SDL_DestroyTexture(ptr_);
+}
+
+int SDLTexture::create(SDL_Renderer *renderer)
+{
+ ptr_ = SDL_CreateTexture(renderer, pixelFormat_,
+ SDL_TEXTUREACCESS_STREAMING, rect_.w,
+ rect_.h);
+ if (!ptr_) {
+ std::cerr << "Failed to create SDL texture: " << SDL_GetError()
+ << std::endl;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
diff --git a/src/apps/cam/sdl_texture.h b/src/apps/cam/sdl_texture.h
new file mode 100644
index 00000000..990f83b6
--- /dev/null
+++ b/src/apps/cam/sdl_texture.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture
+ */
+
+#pragma once
+
+#include <vector>
+
+#include <SDL2/SDL.h>
+
+#include "../common/image.h"
+
+class SDLTexture
+{
+public:
+ SDLTexture(const SDL_Rect &rect, uint32_t pixelFormat, const int stride);
+ virtual ~SDLTexture();
+ int create(SDL_Renderer *renderer);
+ virtual void update(const std::vector<libcamera::Span<const uint8_t>> &data) = 0;
+ SDL_Texture *get() const { return ptr_; }
+
+protected:
+ SDL_Texture *ptr_;
+ const SDL_Rect rect_;
+ const uint32_t pixelFormat_;
+ const int stride_;
+};
diff --git a/src/apps/cam/sdl_texture_mjpg.cpp b/src/apps/cam/sdl_texture_mjpg.cpp
new file mode 100644
index 00000000..cace18fc
--- /dev/null
+++ b/src/apps/cam/sdl_texture_mjpg.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture MJPG
+ */
+
+#include "sdl_texture_mjpg.h"
+
+#include <iostream>
+#include <setjmp.h>
+#include <stdio.h>
+
+#include <jpeglib.h>
+
+using namespace libcamera;
+
+struct JpegErrorManager : public jpeg_error_mgr {
+ JpegErrorManager()
+ {
+ jpeg_std_error(this);
+ error_exit = errorExit;
+ output_message = outputMessage;
+ }
+
+ static void errorExit(j_common_ptr cinfo)
+ {
+ JpegErrorManager *self =
+ static_cast<JpegErrorManager *>(cinfo->err);
+ longjmp(self->escape_, 1);
+ }
+
+ static void outputMessage([[maybe_unused]] j_common_ptr cinfo)
+ {
+ }
+
+ jmp_buf escape_;
+};
+
+SDLTextureMJPG::SDLTextureMJPG(const SDL_Rect &rect)
+ : SDLTexture(rect, SDL_PIXELFORMAT_RGB24, rect.w * 3),
+ rgb_(std::make_unique<unsigned char[]>(stride_ * rect.h))
+{
+}
+
+int SDLTextureMJPG::decompress(Span<const uint8_t> data)
+{
+ struct jpeg_decompress_struct cinfo;
+
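+	/*
+	 * libjpeg reports fatal errors through the error_exit callback, which
+	 * must not return. Use setjmp()/longjmp() to resume execution here.
+	 */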
+ JpegErrorManager errorManager;
+ if (setjmp(errorManager.escape_)) {
+ /* libjpeg found an error */
+ jpeg_destroy_decompress(&cinfo);
+ std::cerr << "JPEG decompression error" << std::endl;
+ return -EINVAL;
+ }
+
+ cinfo.err = &errorManager;
+ jpeg_create_decompress(&cinfo);
+
+ jpeg_mem_src(&cinfo, data.data(), data.size());
+
+ jpeg_read_header(&cinfo, TRUE);
+
+ jpeg_start_decompress(&cinfo);
+
+ for (int i = 0; cinfo.output_scanline < cinfo.output_height; ++i) {
+ JSAMPROW rowptr = rgb_.get() + i * stride_;
+ jpeg_read_scanlines(&cinfo, &rowptr, 1);
+ }
+
+ jpeg_finish_decompress(&cinfo);
+
+ jpeg_destroy_decompress(&cinfo);
+
+ return 0;
+}
+
+void SDLTextureMJPG::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ decompress(data[0]);
+ SDL_UpdateTexture(ptr_, nullptr, rgb_.get(), stride_);
+}
diff --git a/src/apps/cam/sdl_texture_mjpg.h b/src/apps/cam/sdl_texture_mjpg.h
new file mode 100644
index 00000000..37bed5f0
--- /dev/null
+++ b/src/apps/cam/sdl_texture_mjpg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL Texture MJPG
+ */
+
+#pragma once
+
+#include "sdl_texture.h"
+
+class SDLTextureMJPG : public SDLTexture
+{
+public:
+ SDLTextureMJPG(const SDL_Rect &rect);
+
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+
+private:
+ int decompress(libcamera::Span<const uint8_t> data);
+
+ std::unique_ptr<unsigned char[]> rgb_;
+};
diff --git a/src/apps/cam/sdl_texture_yuv.cpp b/src/apps/cam/sdl_texture_yuv.cpp
new file mode 100644
index 00000000..480d7a37
--- /dev/null
+++ b/src/apps/cam/sdl_texture_yuv.cpp
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL YUV Textures
+ */
+
+#include "sdl_texture_yuv.h"
+
+using namespace libcamera;
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+SDLTextureNV12::SDLTextureNV12(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_NV12, stride)
+{
+}
+
+void SDLTextureNV12::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateNVTexture(ptr_, &rect_, data[0].data(), stride_,
+ data[1].data(), stride_);
+}
+#endif
+
+SDLTextureYUYV::SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride)
+ : SDLTexture(rect, SDL_PIXELFORMAT_YUY2, stride)
+{
+}
+
+void SDLTextureYUYV::update(const std::vector<libcamera::Span<const uint8_t>> &data)
+{
+ SDL_UpdateTexture(ptr_, &rect_, data[0].data(), stride_);
+}
diff --git a/src/apps/cam/sdl_texture_yuv.h b/src/apps/cam/sdl_texture_yuv.h
new file mode 100644
index 00000000..29c756e7
--- /dev/null
+++ b/src/apps/cam/sdl_texture_yuv.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Ideas on Board Oy
+ *
+ * SDL YUV Textures
+ */
+
+#pragma once
+
+#include "sdl_texture.h"
+
+#if SDL_VERSION_ATLEAST(2, 0, 16)
+class SDLTextureNV12 : public SDLTexture
+{
+public:
+ SDLTextureNV12(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
+#endif
+
+class SDLTextureYUYV : public SDLTexture
+{
+public:
+ SDLTextureYUYV(const SDL_Rect &rect, unsigned int stride);
+ void update(const std::vector<libcamera::Span<const uint8_t>> &data) override;
+};
diff --git a/src/apps/common/dng_writer.cpp b/src/apps/common/dng_writer.cpp
new file mode 100644
index 00000000..ac461951
--- /dev/null
+++ b/src/apps/common/dng_writer.cpp
@@ -0,0 +1,809 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * DNG writer
+ */
+
+#include "dng_writer.h"
+
+#include <algorithm>
+#include <endian.h>
+#include <iostream>
+#include <map>
+#include <vector>
+
+#include <tiffio.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
+
+using namespace libcamera;
+
+enum CFAPatternColour : uint8_t {
+ CFAPatternRed = 0,
+ CFAPatternGreen = 1,
+ CFAPatternBlue = 2,
+};
+
+struct FormatInfo {
+ uint8_t bitsPerSample;
+ CFAPatternColour pattern[4];
+ void (*packScanline)(void *output, const void *input,
+ unsigned int width);
+ void (*thumbScanline)(const FormatInfo &info, void *output,
+ const void *input, unsigned int width,
+ unsigned int stride);
+};
+
+struct Matrix3d {
+ Matrix3d()
+ {
+ }
+
+ Matrix3d(float m0, float m1, float m2,
+ float m3, float m4, float m5,
+ float m6, float m7, float m8)
+ {
+ m[0] = m0, m[1] = m1, m[2] = m2;
+ m[3] = m3, m[4] = m4, m[5] = m5;
+ m[6] = m6, m[7] = m7, m[8] = m8;
+ }
+
+ Matrix3d(const Span<const float> &span)
+ : Matrix3d(span[0], span[1], span[2],
+ span[3], span[4], span[5],
+ span[6], span[7], span[8])
+ {
+ }
+
+ static Matrix3d diag(float diag0, float diag1, float diag2)
+ {
+ return Matrix3d(diag0, 0, 0, 0, diag1, 0, 0, 0, diag2);
+ }
+
+ static Matrix3d identity()
+ {
+ return Matrix3d(1, 0, 0, 0, 1, 0, 0, 0, 1);
+ }
+
+ Matrix3d transpose() const
+ {
+ return { m[0], m[3], m[6], m[1], m[4], m[7], m[2], m[5], m[8] };
+ }
+
+ Matrix3d cofactors() const
+ {
+ return { m[4] * m[8] - m[5] * m[7],
+ -(m[3] * m[8] - m[5] * m[6]),
+ m[3] * m[7] - m[4] * m[6],
+ -(m[1] * m[8] - m[2] * m[7]),
+ m[0] * m[8] - m[2] * m[6],
+ -(m[0] * m[7] - m[1] * m[6]),
+ m[1] * m[5] - m[2] * m[4],
+ -(m[0] * m[5] - m[2] * m[3]),
+ m[0] * m[4] - m[1] * m[3] };
+ }
+
+ Matrix3d adjugate() const
+ {
+ return cofactors().transpose();
+ }
+
+ float determinant() const
+ {
+ return m[0] * (m[4] * m[8] - m[5] * m[7]) -
+ m[1] * (m[3] * m[8] - m[5] * m[6]) +
+ m[2] * (m[3] * m[7] - m[4] * m[6]);
+ }
+
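+	/* Invert through the classical adjoint: A^-1 = adj(A) / det(A). */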
+ Matrix3d inverse() const
+ {
+ return adjugate() * (1.0 / determinant());
+ }
+
+ Matrix3d operator*(const Matrix3d &other) const
+ {
+ Matrix3d result;
+ for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned int j = 0; j < 3; j++) {
+ result.m[i * 3 + j] =
+ m[i * 3 + 0] * other.m[0 + j] +
+ m[i * 3 + 1] * other.m[3 + j] +
+ m[i * 3 + 2] * other.m[6 + j];
+ }
+ }
+ return result;
+ }
+
+ Matrix3d operator*(float f) const
+ {
+ Matrix3d result;
+ for (unsigned int i = 0; i < 9; i++)
+ result.m[i] = m[i] * f;
+ return result;
+ }
+
+ float m[9];
+};
+
+namespace {
+
+void packScanlineRaw8(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ std::copy(in, in + width, out);
+}
+
+void packScanlineRaw10(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
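+	/*
+	 * Pack the 10-bit samples, stored two bytes per pixel in little
+	 * endian, into a contiguous MSB-first bit stream: four pixels (8
+	 * input bytes) yield 5 output bytes.
+	 */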
+ for (unsigned int i = 0; i < width; i += 4) {
+ *out++ = in[1] << 6 | in[0] >> 2;
+ *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
+ *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
+ *out++ = in[4] << 2 | (in[7] & 0x03) << 0;
+ *out++ = in[6];
+ in += 8;
+ }
+}
+
+void packScanlineRaw12(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
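+	/*
+	 * Pack two little-endian 12-bit samples (4 input bytes) into 3
+	 * output bytes, MSB first.
+	 */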
+ for (unsigned int i = 0; i < width; i += 2) {
+ *out++ = in[1] << 4 | in[0] >> 4;
+ *out++ = in[0] << 4 | (in[3] & 0x0f);
+ *out++ = in[2];
+ in += 4;
+ }
+}
+
+void packScanlineRaw16(void *output, const void *input, unsigned int width)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ uint16_t *out = static_cast<uint16_t *>(output);
+
+ std::copy(in, in + width, out);
+}
+
+/* Thumbnail function for raw data with each pixel aligned to 16 bits. */
+void thumbScanlineRaw(const FormatInfo &info, void *output, const void *input,
+ unsigned int width, unsigned int stride)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ const uint16_t *in2 = static_cast<const uint16_t *>(input) + stride / 2;
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* Shift down to 8. */
+ unsigned int shift = info.bitsPerSample - 8;
+
+ /* Simple averaging that produces greyscale RGB values. */
+ for (unsigned int x = 0; x < width; x++) {
+ uint16_t value = (le16toh(in[0]) + le16toh(in[1]) +
+ le16toh(in2[0]) + le16toh(in2[1])) >> 2;
+ value = value >> shift;
+ *out++ = value;
+ *out++ = value;
+ *out++ = value;
+ in += 16;
+ in2 += 16;
+ }
+}
+
+void packScanlineRaw10_CSI2P(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* \todo Can this be made more efficient? */
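+	/*
+	 * In the CSI-2 packed layout every fifth byte carries the two low
+	 * order bits of the four preceding pixels. Repack them into a
+	 * contiguous MSB-first bit stream.
+	 */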
+ for (unsigned int x = 0; x < width; x += 4) {
+ *out++ = in[0];
+ *out++ = (in[4] & 0x03) << 6 | in[1] >> 2;
+ *out++ = (in[1] & 0x03) << 6 | (in[4] & 0x0c) << 2 | in[2] >> 4;
+ *out++ = (in[2] & 0x0f) << 4 | (in[4] & 0x30) >> 2 | in[3] >> 6;
+ *out++ = (in[3] & 0x3f) << 2 | (in[4] & 0xc0) >> 6;
+ in += 5;
+ }
+}
+
+void packScanlineRaw12_CSI2P(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* \todo Can this be made more efficient? */
+ for (unsigned int i = 0; i < width; i += 2) {
+ *out++ = in[0];
+ *out++ = (in[2] & 0x0f) << 4 | in[1] >> 4;
+ *out++ = (in[1] & 0x0f) << 4 | in[2] >> 4;
+ in += 3;
+ }
+}
+
+void thumbScanlineRaw_CSI2P(const FormatInfo &info, void *output,
+ const void *input, unsigned int width,
+ unsigned int stride)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* Number of bytes corresponding to 16 pixels. */
+ unsigned int skip = info.bitsPerSample * 16 / 8;
+
+ for (unsigned int x = 0; x < width; x++) {
+ uint8_t value = (in[0] + in[1] + in[stride] + in[stride + 1]) >> 2;
+ *out++ = value;
+ *out++ = value;
+ *out++ = value;
+ in += skip;
+ }
+}
+
+void packScanlineIPU3(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint16_t *out = static_cast<uint16_t *>(output);
+
+ /*
+ * Upscale the 10-bit format to 16-bit as it's not trivial to pack it
+ * as 10-bit without gaps.
+ *
+ * \todo Improve packing to keep the 10-bit sample size.
+ */
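+	/*
+	 * The IPU3 packed format stores 25 10-bit samples in every 32-byte
+	 * block, with 6 padding bits at the end of each block.
+	 */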
+ unsigned int x = 0;
+ while (true) {
+ for (unsigned int i = 0; i < 6; i++) {
+ *out++ = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6;
+ if (++x >= width)
+ return;
+
+ *out++ = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4;
+ if (++x >= width)
+ return;
+
+ *out++ = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2;
+ if (++x >= width)
+ return;
+
+ *out++ = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0;
+ if (++x >= width)
+ return;
+
+ in += 5;
+ }
+
+ *out++ = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6;
+ if (++x >= width)
+ return;
+
+ in += 2;
+ }
+}
+
+void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output,
+ const void *input, unsigned int width,
+ unsigned int stride)
+{
+ uint8_t *out = static_cast<uint8_t *>(output);
+
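+	/*
+	 * Sample and average a 2x2 pixel block every 16 pixels, addressing
+	 * the samples within the 32-byte blocks of 25 pixels used by the
+	 * IPU3 packing.
+	 */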
+ for (unsigned int x = 0; x < width; x++) {
+ unsigned int pixel = x * 16;
+ unsigned int block = pixel / 25;
+ unsigned int pixelInBlock = pixel - block * 25;
+
+ /*
+		 * If the pixel is the last in the block, cheat a little and
+		 * move one pixel backward to avoid reading across two blocks
+ * and having to deal with the padding bits.
+ */
+ if (pixelInBlock == 24)
+ pixelInBlock--;
+
+ const uint8_t *in = static_cast<const uint8_t *>(input)
+ + block * 32 + (pixelInBlock / 4) * 5;
+
+ uint16_t val1, val2, val3, val4;
+ switch (pixelInBlock % 4) {
+ default:
+ case 0:
+ val1 = (in[1] & 0x03) << 14 | (in[0] & 0xff) << 6;
+ val2 = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4;
+ val3 = (in[stride + 1] & 0x03) << 14 | (in[stride + 0] & 0xff) << 6;
+ val4 = (in[stride + 2] & 0x0f) << 12 | (in[stride + 1] & 0xfc) << 4;
+ break;
+ case 1:
+ val1 = (in[2] & 0x0f) << 12 | (in[1] & 0xfc) << 4;
+ val2 = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2;
+ val3 = (in[stride + 2] & 0x0f) << 12 | (in[stride + 1] & 0xfc) << 4;
+ val4 = (in[stride + 3] & 0x3f) << 10 | (in[stride + 2] & 0xf0) << 2;
+ break;
+ case 2:
+ val1 = (in[3] & 0x3f) << 10 | (in[2] & 0xf0) << 2;
+ val2 = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0;
+ val3 = (in[stride + 3] & 0x3f) << 10 | (in[stride + 2] & 0xf0) << 2;
+ val4 = (in[stride + 4] & 0xff) << 8 | (in[stride + 3] & 0xc0) << 0;
+ break;
+ case 3:
+ val1 = (in[4] & 0xff) << 8 | (in[3] & 0xc0) << 0;
+ val2 = (in[6] & 0x03) << 14 | (in[5] & 0xff) << 6;
+ val3 = (in[stride + 4] & 0xff) << 8 | (in[stride + 3] & 0xc0) << 0;
+ val4 = (in[stride + 6] & 0x03) << 14 | (in[stride + 5] & 0xff) << 6;
+ break;
+ }
+
+ uint8_t value = (val1 + val2 + val3 + val4) >> 10;
+ *out++ = value;
+ *out++ = value;
+ *out++ = value;
+ }
+}
+
+const std::map<PixelFormat, FormatInfo> formatInfo = {
+ { formats::SBGGR8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGBRG8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGRBG8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SRGGB8, {
+ .bitsPerSample = 8,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SBGGR10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR10_CSI2P, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGBRG10_CSI2P, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGRBG10_CSI2P, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SRGGB10_CSI2P, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SBGGR12_CSI2P, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGBRG12_CSI2P, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SGRBG12_CSI2P, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SRGGB12_CSI2P, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SBGGR10_IPU3, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineIPU3,
+ .thumbScanline = thumbScanlineIPU3,
+ } },
+ { formats::SGBRG10_IPU3, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineIPU3,
+ .thumbScanline = thumbScanlineIPU3,
+ } },
+ { formats::SGRBG10_IPU3, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineIPU3,
+ .thumbScanline = thumbScanlineIPU3,
+ } },
+ { formats::SRGGB10_IPU3, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineIPU3,
+ .thumbScanline = thumbScanlineIPU3,
+ } },
+};
+
+} /* namespace */
+
+int DNGWriter::write(const char *filename, const Camera *camera,
+ const StreamConfiguration &config,
+ const ControlList &metadata,
+ [[maybe_unused]] const FrameBuffer *buffer,
+ const void *data)
+{
+ const ControlList &cameraProperties = camera->properties();
+
+ const auto it = formatInfo.find(config.pixelFormat);
+ if (it == formatInfo.cend()) {
+ std::cerr << "Unsupported pixel format" << std::endl;
+ return -EINVAL;
+ }
+ const FormatInfo *info = &it->second;
+
+ TIFF *tif = TIFFOpen(filename, "w");
+ if (!tif) {
+ std::cerr << "Failed to open tiff file" << std::endl;
+ return -EINVAL;
+ }
+
+ /*
+	 * Scanline buffer, which has to be large enough to store either a RAW
+	 * scanline or a thumbnail scanline. The latter will always be much
+	 * smaller than the former, as we downscale by 16 in both directions.
+ */
+ std::vector<uint8_t> scanline((config.size.width * info->bitsPerSample + 7) / 8);
+
+ toff_t rawIFDOffset = 0;
+ toff_t exifIFDOffset = 0;
+
+ /*
+ * Start with a thumbnail in IFD 0 for compatibility with TIFF baseline
+ * readers, as required by the TIFF/EP specification. Tags that apply to
+ * the whole file are stored here.
+ */
+ const uint8_t version[] = { 1, 2, 0, 0 };
+
+ TIFFSetField(tif, TIFFTAG_DNGVERSION, version);
+ TIFFSetField(tif, TIFFTAG_DNGBACKWARDVERSION, version);
+ TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
+ TIFFSetField(tif, TIFFTAG_MAKE, "libcamera");
+
+ const auto &model = cameraProperties.get(properties::Model);
+ if (model) {
+ TIFFSetField(tif, TIFFTAG_MODEL, model->c_str());
+ /* \todo set TIFFTAG_UNIQUECAMERAMODEL. */
+ }
+
+ TIFFSetField(tif, TIFFTAG_SOFTWARE, "qcam");
+ TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
+
+ /*
+ * Thumbnail-specific tags. The thumbnail is stored as an RGB image
+ * with 1/16 of the raw image resolution. Greyscale would save space,
+ * but doesn't seem well supported by RawTherapee.
+ */
+ TIFFSetField(tif, TIFFTAG_SUBFILETYPE, FILETYPE_REDUCEDIMAGE);
+ TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width / 16);
+ TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height / 16);
+ TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
+ TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
+ TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
+ TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 3);
+ TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
+ TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);
+
+ /*
+ * Fill in some reasonable colour information in the DNG. We supply
+ * the "neutral" colour values which determine the white balance, and the
+ * "ColorMatrix1" which converts XYZ to (un-white-balanced) camera RGB.
+	 * Note that this is not a "proper" colour calibration for the DNG;
+	 * nonetheless, many tools should be able to render the colours more
+	 * accurately with it.
+ */
+ float neutral[3] = { 1, 1, 1 };
+ Matrix3d wbGain = Matrix3d::identity();
+ /* From http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html */
+ const Matrix3d rgb2xyz(0.4124564, 0.3575761, 0.1804375,
+ 0.2126729, 0.7151522, 0.0721750,
+ 0.0193339, 0.1191920, 0.9503041);
+ Matrix3d ccm = Matrix3d::identity();
+ /*
+ * Pick a reasonable number eps to protect against singularities. It
+ * should be comfortably larger than the point at which we run into
+ * numerical trouble, yet smaller than any plausible gain that we might
+ * apply to a colour, either explicitly or as part of the colour matrix.
+ */
+ const double eps = 1e-2;
+
+ const auto &colourGains = metadata.get(controls::ColourGains);
+ if (colourGains) {
+ if ((*colourGains)[0] > eps && (*colourGains)[1] > eps) {
+ wbGain = Matrix3d::diag((*colourGains)[0], 1, (*colourGains)[1]);
+ neutral[0] = 1.0 / (*colourGains)[0]; /* red */
+ neutral[2] = 1.0 / (*colourGains)[1]; /* blue */
+ }
+ }
+
+ const auto &ccmControl = metadata.get(controls::ColourCorrectionMatrix);
+ if (ccmControl) {
+ Matrix3d ccmSupplied(*ccmControl);
+ if (ccmSupplied.determinant() > eps)
+ ccm = ccmSupplied;
+ }
+
+ /*
+ * rgb2xyz is known to be invertible, and we've ensured above that both
+ * the ccm and wbGain matrices are non-singular, so the product of all
+ * three is guaranteed to be invertible too.
+ */
+ Matrix3d colorMatrix1 = (rgb2xyz * ccm * wbGain).inverse();
+
+ TIFFSetField(tif, TIFFTAG_COLORMATRIX1, 9, colorMatrix1.m);
+ TIFFSetField(tif, TIFFTAG_ASSHOTNEUTRAL, 3, neutral);
+
+ /*
+ * Reserve space for the SubIFD and ExifIFD tags, pointing to the IFD
+ * for the raw image and EXIF data respectively. The real offsets will
+ * be set later.
+ */
+ TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset);
+ TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset);
+
+ /* Write the thumbnail. */
+ const uint8_t *row = static_cast<const uint8_t *>(data);
+ for (unsigned int y = 0; y < config.size.height / 16; y++) {
+ info->thumbScanline(*info, scanline.data(), row,
+ config.size.width / 16, config.stride);
+
+ if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
+ std::cerr << "Failed to write thumbnail scanline"
+ << std::endl;
+ TIFFClose(tif);
+ return -EINVAL;
+ }
+
+ row += config.stride * 16;
+ }
+
+ TIFFWriteDirectory(tif);
+
+ /*
+	 * Workaround for a bug introduced in libtiff version 4.5.1 with no fix
+	 * released yet. In the affected versions the CFA* tags are missing from
+	 * the field info.
+ * Introduced by: https://gitlab.com/libtiff/libtiff/-/commit/738e04099b13192bb1f654e74e9b5829313f3161
+ * Fixed by: https://gitlab.com/libtiff/libtiff/-/commit/49856998c3d82e65444b47bb4fb11b7830a0c2be
+ */
+ if (!TIFFFindField(tif, TIFFTAG_CFAREPEATPATTERNDIM, TIFF_ANY)) {
+ static const TIFFFieldInfo infos[] = {
+ { TIFFTAG_CFAREPEATPATTERNDIM, 2, 2, TIFF_SHORT, FIELD_CUSTOM,
+ 1, 0, const_cast<char *>("CFARepeatPatternDim") },
+ { TIFFTAG_CFAPATTERN, -1, -1, TIFF_BYTE, FIELD_CUSTOM,
+ 1, 1, const_cast<char *>("CFAPattern") },
+ };
+ TIFFMergeFieldInfo(tif, infos, 2);
+ }
+
+ /* Create a new IFD for the RAW image. */
+ const uint16_t cfaRepeatPatternDim[] = { 2, 2 };
+ const uint8_t cfaPlaneColor[] = {
+ CFAPatternRed,
+ CFAPatternGreen,
+ CFAPatternBlue
+ };
+
+ TIFFSetField(tif, TIFFTAG_SUBFILETYPE, 0);
+ TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width);
+ TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height);
+ TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, info->bitsPerSample);
+ TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
+ TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_CFA);
+ TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1);
+ TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
+ TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);
+ TIFFSetField(tif, TIFFTAG_CFAREPEATPATTERNDIM, cfaRepeatPatternDim);
+ if (TIFFLIB_VERSION < 20201219)
+ TIFFSetField(tif, TIFFTAG_CFAPATTERN, info->pattern);
+ else
+ TIFFSetField(tif, TIFFTAG_CFAPATTERN, 4, info->pattern);
+ TIFFSetField(tif, TIFFTAG_CFAPLANECOLOR, 3, cfaPlaneColor);
+ TIFFSetField(tif, TIFFTAG_CFALAYOUT, 1);
+
+ const uint16_t blackLevelRepeatDim[] = { 2, 2 };
+ float blackLevel[] = { 0.0f, 0.0f, 0.0f, 0.0f };
+ uint32_t whiteLevel = (1 << info->bitsPerSample) - 1;
+
+ const auto &blackLevels = metadata.get(controls::SensorBlackLevels);
+ if (blackLevels) {
+ Span<const int32_t, 4> levels = *blackLevels;
+
+ /*
+ * The black levels control is specified in R, Gr, Gb, B order.
+ * Map it to the TIFF tag that is specified in CFA pattern
+ * order.
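+		 *
+		 * For example, with an RGGB pattern the tag stores the levels
+		 * in R, Gr, Gb, B order, while a BGGR pattern stores them in
+		 * B, Gb, Gr, R order.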
+ */
+ unsigned int green = (info->pattern[0] == CFAPatternRed ||
+ info->pattern[1] == CFAPatternRed)
+ ? 0 : 1;
+
+ for (unsigned int i = 0; i < 4; ++i) {
+ unsigned int level;
+
+ switch (info->pattern[i]) {
+ case CFAPatternRed:
+ level = levels[0];
+ break;
+ case CFAPatternGreen:
+ level = levels[green + 1];
+ green = (green + 1) % 2;
+ break;
+ case CFAPatternBlue:
+ default:
+ level = levels[3];
+ break;
+ }
+
+ /* Map the 16-bit value to the bits per sample range. */
+ blackLevel[i] = level >> (16 - info->bitsPerSample);
+ }
+ }
+
+ TIFFSetField(tif, TIFFTAG_BLACKLEVELREPEATDIM, &blackLevelRepeatDim);
+ TIFFSetField(tif, TIFFTAG_BLACKLEVEL, 4, &blackLevel);
+ TIFFSetField(tif, TIFFTAG_WHITELEVEL, 1, &whiteLevel);
+
+ /* Write RAW content. */
+ row = static_cast<const uint8_t *>(data);
+ for (unsigned int y = 0; y < config.size.height; y++) {
+ info->packScanline(scanline.data(), row, config.size.width);
+
+ if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
+ std::cerr << "Failed to write RAW scanline"
+ << std::endl;
+ TIFFClose(tif);
+ return -EINVAL;
+ }
+
+ row += config.stride;
+ }
+
+ /* Checkpoint the IFD to retrieve its offset, and write it out. */
+ TIFFCheckpointDirectory(tif);
+ rawIFDOffset = TIFFCurrentDirOffset(tif);
+ TIFFWriteDirectory(tif);
+
+ /* Create a new IFD for the EXIF data and fill it. */
+ TIFFCreateEXIFDirectory(tif);
+
+ /* Store creation time. */
+ time_t rawtime;
+ struct tm *timeinfo;
+ char strTime[20];
+
+ time(&rawtime);
+ timeinfo = localtime(&rawtime);
+ strftime(strTime, 20, "%Y:%m:%d %H:%M:%S", timeinfo);
+
+ /*
+ * \todo Handle timezone information by setting OffsetTimeOriginal and
+ * OffsetTimeDigitized once libtiff catches up to the specification and
+ * has EXIFTAG_ defines to handle them.
+ */
+ TIFFSetField(tif, EXIFTAG_DATETIMEORIGINAL, strTime);
+ TIFFSetField(tif, EXIFTAG_DATETIMEDIGITIZED, strTime);
+
+ const auto &analogGain = metadata.get(controls::AnalogueGain);
+ if (analogGain) {
+ uint16_t iso = std::min(std::max(*analogGain * 100, 0.0f), 65535.0f);
+ TIFFSetField(tif, EXIFTAG_ISOSPEEDRATINGS, 1, &iso);
+ }
+
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ TIFFSetField(tif, EXIFTAG_EXPOSURETIME, *exposureTime / 1e6);
+
+ TIFFWriteCustomDirectory(tif, &exifIFDOffset);
+
+ /* Update the IFD offsets and close the file. */
+ TIFFSetDirectory(tif, 0);
+ TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset);
+ TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset);
+ TIFFWriteDirectory(tif);
+
+ TIFFClose(tif);
+
+ return 0;
+}
diff --git a/src/apps/common/dng_writer.h b/src/apps/common/dng_writer.h
new file mode 100644
index 00000000..aaa8a852
--- /dev/null
+++ b/src/apps/common/dng_writer.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * DNG writer
+ */
+
+#pragma once
+
+#ifdef HAVE_TIFF
+
+#include <libcamera/camera.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+class DNGWriter
+{
+public:
+ static int write(const char *filename, const libcamera::Camera *camera,
+ const libcamera::StreamConfiguration &config,
+ const libcamera::ControlList &metadata,
+ const libcamera::FrameBuffer *buffer, const void *data);
+};
+
+#endif /* HAVE_TIFF */
diff --git a/src/apps/common/event_loop.cpp b/src/apps/common/event_loop.cpp
new file mode 100644
index 00000000..f7f9afa0
--- /dev/null
+++ b/src/apps/common/event_loop.cpp
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Event loop
+ */
+
+#include "event_loop.h"
+
+#include <assert.h>
+#include <event2/event.h>
+#include <event2/thread.h>
+#include <iostream>
+
+EventLoop *EventLoop::instance_ = nullptr;
+
+EventLoop::EventLoop()
+{
+ assert(!instance_);
+
+ evthread_use_pthreads();
+ base_ = event_base_new();
+ instance_ = this;
+}
+
+EventLoop::~EventLoop()
+{
+ instance_ = nullptr;
+
+ events_.clear();
+ event_base_free(base_);
+ libevent_global_shutdown();
+}
+
+EventLoop *EventLoop::instance()
+{
+ return instance_;
+}
+
+int EventLoop::exec()
+{
+ exitCode_ = -1;
+ event_base_loop(base_, EVLOOP_NO_EXIT_ON_EMPTY);
+ return exitCode_;
+}
+
+void EventLoop::exit(int code)
+{
+ exitCode_ = code;
+ event_base_loopbreak(base_);
+}
+
+void EventLoop::callLater(const std::function<void()> &func)
+{
+ {
+ std::unique_lock<std::mutex> locker(lock_);
+ calls_.push_back(func);
+ }
+
+ event_base_once(base_, -1, EV_TIMEOUT, dispatchCallback, this, nullptr);
+}
+
+void EventLoop::addFdEvent(int fd, EventType type,
+ const std::function<void()> &callback)
+{
+ std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+ short events = (type & Read ? EV_READ : 0)
+ | (type & Write ? EV_WRITE : 0)
+ | EV_PERSIST;
+
+ event->event_ = event_new(base_, fd, events, &EventLoop::Event::dispatch,
+ event.get());
+ if (!event->event_) {
+ std::cerr << "Failed to create event for fd " << fd << std::endl;
+ return;
+ }
+
+ int ret = event_add(event->event_, nullptr);
+ if (ret < 0) {
+ std::cerr << "Failed to add event for fd " << fd << std::endl;
+ return;
+ }
+
+ events_.push_back(std::move(event));
+}
+
+void EventLoop::addTimerEvent(const std::chrono::microseconds period,
+ const std::function<void()> &callback)
+{
+ std::unique_ptr<Event> event = std::make_unique<Event>(callback);
+ event->event_ = event_new(base_, -1, EV_PERSIST, &EventLoop::Event::dispatch,
+ event.get());
+ if (!event->event_) {
+ std::cerr << "Failed to create timer event" << std::endl;
+ return;
+ }
+
+ struct timeval tv;
+ tv.tv_sec = period.count() / 1000000ULL;
+ tv.tv_usec = period.count() % 1000000ULL;
+
+ int ret = event_add(event->event_, &tv);
+ if (ret < 0) {
+ std::cerr << "Failed to add timer event" << std::endl;
+ return;
+ }
+
+ events_.push_back(std::move(event));
+}
+
+void EventLoop::dispatchCallback([[maybe_unused]] evutil_socket_t fd,
+ [[maybe_unused]] short flags, void *param)
+{
+ EventLoop *loop = static_cast<EventLoop *>(param);
+ loop->dispatchCall();
+}
+
+void EventLoop::dispatchCall()
+{
+ std::function<void()> call;
+
+ {
+ std::unique_lock<std::mutex> locker(lock_);
+ if (calls_.empty())
+ return;
+
+ call = calls_.front();
+ calls_.pop_front();
+ }
+
+ call();
+}
+
+EventLoop::Event::Event(const std::function<void()> &callback)
+ : callback_(callback), event_(nullptr)
+{
+}
+
+EventLoop::Event::~Event()
+{
+ event_del(event_);
+ event_free(event_);
+}
+
+void EventLoop::Event::dispatch([[maybe_unused]] int fd,
+ [[maybe_unused]] short events, void *arg)
+{
+ Event *event = static_cast<Event *>(arg);
+ event->callback_();
+}
diff --git a/src/apps/common/event_loop.h b/src/apps/common/event_loop.h
new file mode 100644
index 00000000..ef129b9a
--- /dev/null
+++ b/src/apps/common/event_loop.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Event loop
+ */
+
+#pragma once
+
+#include <chrono>
+#include <functional>
+#include <list>
+#include <memory>
+#include <mutex>
+
+#include <event2/util.h>
+
+struct event_base;
+
+class EventLoop
+{
+public:
+ enum EventType {
+ Read = 1,
+ Write = 2,
+ };
+
+ EventLoop();
+ ~EventLoop();
+
+ static EventLoop *instance();
+
+ int exec();
+ void exit(int code = 0);
+
+ void callLater(const std::function<void()> &func);
+
+ void addFdEvent(int fd, EventType type,
+ const std::function<void()> &handler);
+
+ using duration = std::chrono::steady_clock::duration;
+ void addTimerEvent(const std::chrono::microseconds period,
+ const std::function<void()> &handler);
+
+private:
+ struct Event {
+ Event(const std::function<void()> &callback);
+ ~Event();
+
+ static void dispatch(int fd, short events, void *arg);
+
+ std::function<void()> callback_;
+ struct event *event_;
+ };
+
+ static EventLoop *instance_;
+
+ struct event_base *base_;
+ int exitCode_;
+
+ std::list<std::function<void()>> calls_;
+ std::list<std::unique_ptr<Event>> events_;
+ std::mutex lock_;
+
+ static void dispatchCallback(evutil_socket_t fd, short flags,
+ void *param);
+ void dispatchCall();
+};
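+
+/*
+ * Typical usage (an illustrative sketch; the fd and the callbacks are
+ * application-defined):
+ *
+ *   EventLoop loop;
+ *   loop.addTimerEvent(std::chrono::seconds(1),
+ *                      [] { std::cout << "tick" << std::endl; });
+ *   loop.addFdEvent(fd, EventLoop::Read, [fd] { readInput(fd); });
+ *   int exitCode = loop.exec();
+ */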
diff --git a/src/apps/common/image.cpp b/src/apps/common/image.cpp
new file mode 100644
index 00000000..a2a0f58f
--- /dev/null
+++ b/src/apps/common/image.cpp
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Multi-planar image with access to pixel data
+ */
+
+#include "image.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <iostream>
+#include <map>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+using namespace libcamera;
+
+std::unique_ptr<Image> Image::fromFrameBuffer(const FrameBuffer *buffer, MapMode mode)
+{
+ std::unique_ptr<Image> image{ new Image() };
+
+ assert(!buffer->planes().empty());
+
+ int mmapFlags = 0;
+
+ if (mode & MapMode::ReadOnly)
+ mmapFlags |= PROT_READ;
+
+ if (mode & MapMode::WriteOnly)
+ mmapFlags |= PROT_WRITE;
+
+ struct MappedBufferInfo {
+ uint8_t *address = nullptr;
+ size_t mapLength = 0;
+ size_t dmabufLength = 0;
+ };
+ std::map<int, MappedBufferInfo> mappedBuffers;
+
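+	/*
+	 * First pass: validate each plane against the size of its dmabuf and
+	 * compute, for each file descriptor, the length that must be mapped
+	 * to cover all the planes backed by that dmabuf.
+	 */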
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ if (mappedBuffers.find(fd) == mappedBuffers.end()) {
+ const size_t length = lseek(fd, 0, SEEK_END);
+ mappedBuffers[fd] = MappedBufferInfo{ nullptr, 0, length };
+ }
+
+ const size_t length = mappedBuffers[fd].dmabufLength;
+
+ if (plane.offset > length ||
+ plane.offset + plane.length > length) {
+ std::cerr << "plane is out of buffer: buffer length="
+ << length << ", plane offset=" << plane.offset
+ << ", plane length=" << plane.length
+ << std::endl;
+ return nullptr;
+ }
+ size_t &mapLength = mappedBuffers[fd].mapLength;
+ mapLength = std::max(mapLength,
+ static_cast<size_t>(plane.offset + plane.length));
+ }
+
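+	/* Second pass: map each dmabuf once and record a view of every plane. */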
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ auto &info = mappedBuffers[fd];
+ if (!info.address) {
+ void *address = mmap(nullptr, info.mapLength, mmapFlags,
+ MAP_SHARED, fd, 0);
+ if (address == MAP_FAILED) {
+ int error = -errno;
+ std::cerr << "Failed to mmap plane: "
+ << strerror(-error) << std::endl;
+ return nullptr;
+ }
+
+ info.address = static_cast<uint8_t *>(address);
+ image->maps_.emplace_back(info.address, info.mapLength);
+ }
+
+ image->planes_.emplace_back(info.address + plane.offset, plane.length);
+ }
+
+ return image;
+}
+
+Image::Image() = default;
+
+Image::~Image()
+{
+ for (Span<uint8_t> &map : maps_)
+ munmap(map.data(), map.size());
+}
+
+unsigned int Image::numPlanes() const
+{
+ return planes_.size();
+}
+
+Span<uint8_t> Image::data(unsigned int plane)
+{
+	assert(plane < planes_.size());
+ return planes_[plane];
+}
+
+Span<const uint8_t> Image::data(unsigned int plane) const
+{
+	assert(plane < planes_.size());
+ return planes_[plane];
+}
diff --git a/src/apps/common/image.h b/src/apps/common/image.h
new file mode 100644
index 00000000..e47e446b
--- /dev/null
+++ b/src/apps/common/image.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Multi-planar image with access to pixel data
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/flags.h>
+#include <libcamera/base/span.h>
+
+#include <libcamera/framebuffer.h>
+
+class Image
+{
+public:
+ enum class MapMode {
+ ReadOnly = 1 << 0,
+ WriteOnly = 1 << 1,
+ ReadWrite = ReadOnly | WriteOnly,
+ };
+
+ static std::unique_ptr<Image> fromFrameBuffer(const libcamera::FrameBuffer *buffer,
+ MapMode mode);
+
+ ~Image();
+
+ unsigned int numPlanes() const;
+
+ libcamera::Span<uint8_t> data(unsigned int plane);
+ libcamera::Span<const uint8_t> data(unsigned int plane) const;
+
+private:
+ LIBCAMERA_DISABLE_COPY(Image)
+
+ Image();
+
+ std::vector<libcamera::Span<uint8_t>> maps_;
+ std::vector<libcamera::Span<uint8_t>> planes_;
+};
+
+namespace libcamera {
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(Image::MapMode)
+}
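+
+/*
+ * Typical usage (an illustrative sketch; `buffer` points to a completed
+ * libcamera::FrameBuffer):
+ *
+ *   std::unique_ptr<Image> image =
+ *           Image::fromFrameBuffer(buffer, Image::MapMode::ReadOnly);
+ *   if (image)
+ *           libcamera::Span<uint8_t> plane0 = image->data(0);
+ */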
diff --git a/src/apps/common/meson.build b/src/apps/common/meson.build
new file mode 100644
index 00000000..5b683390
--- /dev/null
+++ b/src/apps/common/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: CC0-1.0
+
+apps_sources = files([
+ 'image.cpp',
+ 'options.cpp',
+ 'ppm_writer.cpp',
+ 'stream_options.cpp',
+])
+
+apps_cpp_args = []
+
+if libevent.found()
+ apps_sources += files([
+ 'event_loop.cpp',
+ ])
+endif
+
+if libtiff.found()
+ apps_cpp_args += ['-DHAVE_TIFF']
+ apps_sources += files([
+ 'dng_writer.cpp',
+ ])
+endif
+
+apps_lib = static_library('apps', apps_sources,
+ cpp_args : apps_cpp_args,
+ dependencies : [libcamera_public])
diff --git a/src/apps/common/options.cpp b/src/apps/common/options.cpp
new file mode 100644
index 00000000..ece268d0
--- /dev/null
+++ b/src/apps/common/options.cpp
@@ -0,0 +1,1143 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Options parsing
+ */
+
+#include <assert.h>
+#include <getopt.h>
+#include <iomanip>
+#include <iostream>
+#include <string.h>
+#include <vector>
+
+#include "options.h"
+
+/**
+ * \enum OptionArgument
+ * \brief Indicate if an option takes an argument
+ *
+ * \var OptionArgument::ArgumentNone
+ * \brief The option doesn't accept any argument
+ *
+ * \var OptionArgument::ArgumentRequired
+ * \brief The option requires an argument
+ *
+ * \var OptionArgument::ArgumentOptional
+ * \brief The option accepts an optional argument
+ */
+
+/**
+ * \enum OptionType
+ * \brief The type of argument for an option
+ *
+ * \var OptionType::OptionNone
+ * \brief No argument type, used for options that take no argument
+ *
+ * \var OptionType::OptionInteger
+ * \brief Integer argument type, with an optional base prefix (`0` for base 8,
+ * `0x` for base 16, none for base 10)
+ *
+ * \var OptionType::OptionString
+ * \brief String argument
+ *
+ * \var OptionType::OptionKeyValue
+ * \brief key=value list argument
+ */
+
+/* -----------------------------------------------------------------------------
+ * Option
+ */
+
+/**
+ * \struct Option
+ * \brief Store metadata about an option
+ *
+ * \var Option::opt
+ * \brief The option identifier
+ *
+ * \var Option::type
+ * \brief The type of the option argument
+ *
+ * \var Option::name
+ * \brief The option name
+ *
+ * \var Option::argument
+ * \brief Whether the option accepts an optional argument, a mandatory
+ * argument, or no argument at all
+ *
+ * \var Option::argumentName
+ * \brief The argument name used in the help text
+ *
+ * \var Option::help
+ * \brief The help text (may be a multi-line string)
+ *
+ * \var Option::keyValueParser
+ * \brief For options of type OptionType::OptionKeyValue, the key-value parser
+ * to parse the argument
+ *
+ * \var Option::isArray
+ * \brief Whether the option can appear once or multiple times
+ *
+ * \var Option::parent
+ * \brief The parent option
+ *
+ * \var Option::children
+ * \brief List of child options, storing all options whose parent is this option
+ *
+ * \fn Option::hasShortOption()
+ * \brief Tell if the option has a short option specifier (e.g. `-f`)
+ * \return True if the option has a short option specifier, false otherwise
+ *
+ * \fn Option::hasLongOption()
+ * \brief Tell if the option has a long option specifier (e.g. `--foo`)
+ * \return True if the option has a long option specifier, false otherwise
+ */
+struct Option {
+ int opt;
+ OptionType type;
+ const char *name;
+ OptionArgument argument;
+ const char *argumentName;
+ const char *help;
+ KeyValueParser *keyValueParser;
+ bool isArray;
+ Option *parent;
+ std::list<Option> children;
+
+ bool hasShortOption() const { return isalnum(opt); }
+ bool hasLongOption() const { return name != nullptr; }
+ const char *typeName() const;
+ std::string optionName() const;
+};
+
+/**
+ * \brief Retrieve a string describing the option type
+ * \return A string describing the option type
+ */
+const char *Option::typeName() const
+{
+ switch (type) {
+ case OptionNone:
+ return "none";
+
+ case OptionInteger:
+ return "integer";
+
+ case OptionString:
+ return "string";
+
+ case OptionKeyValue:
+ return "key=value";
+ }
+
+ return "unknown";
+}
+
+/**
+ * \brief Retrieve a string describing the option name, with leading dashes
+ * \return A string describing the option name, as a long option identifier
+ * (double dash) if the option has a name, or a short option identifier (single
+ * dash) otherwise
+ */
+std::string Option::optionName() const
+{
+ if (name)
+ return "--" + std::string(name);
+ else
+ return "-" + std::string(1, opt);
+}
+
+/* -----------------------------------------------------------------------------
+ * OptionsBase<T>
+ */
+
+/**
+ * \class template<typename T> OptionsBase
+ * \brief Container to store the values of parsed options
+ * \tparam T The type through which options are identified
+ *
+ * The OptionsBase class is generated by a parser (either OptionsParser or
+ * KeyValueParser) when parsing options. It stores values for all the options
+ * found, and exposes accessor functions to retrieve them. The options are
+ * accessed through an identifier to type \a T, which is an int referencing an
+ * Option::opt for OptionsParser, or a std::string referencing an Option::name
+ * for KeyValueParser.
+ */
+
+/**
+ * \fn OptionsBase::OptionsBase()
+ * \brief Construct an OptionsBase instance
+ *
+ * The constructed instance is initially invalid, and will be populated by the
+ * options parser.
+ */
+
+/**
+ * \brief Tell if the stored options list is empty
+ * \return True if the container is empty, false otherwise
+ */
+template<typename T>
+bool OptionsBase<T>::empty() const
+{
+ return values_.empty();
+}
+
+/**
+ * \brief Tell if the options parsing completed successfully
+ * \return True if the container is returned after successfully parsing
+ * options, false if it is returned after an error was detected during parsing
+ */
+template<typename T>
+bool OptionsBase<T>::valid() const
+{
+ return valid_;
+}
+
+/**
+ * \brief Tell if the option \a opt is specified
+ * \param[in] opt The option to search for
+ * \return True if the \a opt option is set, false otherwise
+ */
+template<typename T>
+bool OptionsBase<T>::isSet(const T &opt) const
+{
+ return values_.find(opt) != values_.end();
+}
+
+/**
+ * \brief Retrieve the value of option \a opt
+ * \param[in] opt The option to retrieve
+ * \return The value of option \a opt if found, an empty OptionValue otherwise
+ */
+template<typename T>
+const OptionValue &OptionsBase<T>::operator[](const T &opt) const
+{
+ static const OptionValue empty;
+
+ auto it = values_.find(opt);
+ if (it != values_.end())
+ return it->second;
+ return empty;
+}
+
+/**
+ * \brief Mark the container as invalid
+ *
+ * This function can be used in a key-value parser's override of the
+ * KeyValueParser::parse() function to mark the returned options as invalid if
+ * a validation error occurs.
+ */
+template<typename T>
+void OptionsBase<T>::invalidate()
+{
+ valid_ = false;
+}
+
+template<typename T>
+bool OptionsBase<T>::parseValue(const T &opt, const Option &option,
+ const char *arg)
+{
+ OptionValue value;
+
+ switch (option.type) {
+ case OptionNone:
+ break;
+
+ case OptionInteger:
+ unsigned int integer;
+
+ if (arg) {
+ char *endptr;
+ integer = strtoul(arg, &endptr, 0);
+ if (*endptr != '\0')
+ return false;
+ } else {
+ integer = 0;
+ }
+
+ value = OptionValue(integer);
+ break;
+
+ case OptionString:
+ value = OptionValue(arg ? arg : "");
+ break;
+
+ case OptionKeyValue:
+ KeyValueParser *kvParser = option.keyValueParser;
+ KeyValueParser::Options keyValues = kvParser->parse(arg);
+ if (!keyValues.valid())
+ return false;
+
+ value = OptionValue(keyValues);
+ break;
+ }
+
+ if (option.isArray)
+ values_[opt].addValue(value);
+ else
+ values_[opt] = value;
+
+ return true;
+}
+
+template class OptionsBase<int>;
+template class OptionsBase<std::string>;
+
+/* -----------------------------------------------------------------------------
+ * KeyValueParser
+ */
+
+/**
+ * \class KeyValueParser
+ * \brief A specialized parser for list of key-value pairs
+ *
+ * The KeyValueParser is an options parser for comma-separated lists of
+ * `key=value` pairs. The supported keys are added to the parser with
+ * addOption(). A given key can only appear once in the parsed list.
+ *
+ * Instances of this class can be passed to the OptionsParser::addOption()
+ * function to create options that take key-value pairs as an option argument.
+ * Specialized versions of the key-value parser can be created by inheriting
+ * from this class, to pre-build the options list in the constructor, and to add
+ * custom validation by overriding the parse() function.
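+ *
+ * A minimal usage sketch (the option name and values are illustrative
+ * only):
+ *
+ * \code{.cpp}
+ * KeyValueParser parser;
+ * parser.addOption("width", OptionInteger, "Width in pixels",
+ *                  ArgumentRequired);
+ *
+ * KeyValueParser::Options opts = parser.parse("width=640");
+ * int width = 0;
+ * if (opts.valid() && opts.isSet("width"))
+ *         width = opts["width"];
+ * \endcode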
+ */
+
+/**
+ * \class KeyValueParser::Options
+ * \brief An option list generated by the key-value parser
+ *
+ * This is a specialization of OptionsBase with the option reference type set to
+ * std::string.
+ */
+
+KeyValueParser::KeyValueParser() = default;
+KeyValueParser::~KeyValueParser() = default;
+
+/**
+ * \brief Add a supported option to the parser
+ * \param[in] name The option name, corresponding to the key name in the
+ * key=value pair. The name shall be unique.
+ * \param[in] type The type of the value in the key=value pair
+ * \param[in] help The help text
+ * \param[in] argument Whether the value is optional, mandatory or not allowed.
+ * Shall be ArgumentNone if \a type is OptionNone.
+ *
+ * \sa OptionsParser
+ *
+ * \return True if the option was added successfully, false if an error
+ * occurred.
+ */
+bool KeyValueParser::addOption(const char *name, OptionType type,
+ const char *help, OptionArgument argument)
+{
+ if (!name)
+ return false;
+ if (!help || help[0] == '\0')
+ return false;
+ if (argument != ArgumentNone && type == OptionNone)
+ return false;
+
+ /* Reject duplicate options. */
+ if (optionsMap_.find(name) != optionsMap_.end())
+ return false;
+
+ optionsMap_[name] = Option({ 0, type, name, argument, nullptr,
+ help, nullptr, false, nullptr, {} });
+ return true;
+}
+
+/**
+ * \brief Parse a string containing a list of key-value pairs
+ * \param[in] arguments The key-value pairs string to parse
+ *
+ * If a parsing error occurs, the parsing stops and the function returns an
+ * invalid container. The container is populated with the options successfully
+ * parsed so far.
+ *
+ * \return A valid container with the list of parsed options on success, or an
+ * invalid container otherwise
+ */
+KeyValueParser::Options KeyValueParser::parse(const char *arguments)
+{
+ Options options;
+
+ for (const char *pair = arguments; *arguments != '\0'; pair = arguments) {
+ const char *comma = strchrnul(arguments, ',');
+ size_t len = comma - pair;
+
+ /* Skip over the comma. */
+ arguments = *comma == ',' ? comma + 1 : comma;
+
+ /* Skip to the next pair if the pair is empty. */
+ if (!len)
+ continue;
+
+ std::string key;
+ std::string value;
+
+ const char *separator = static_cast<const char *>(memchr(pair, '=', len));
+ if (!separator) {
+ key = std::string(pair, len);
+ value = "";
+ } else {
+ key = std::string(pair, separator - pair);
+ value = std::string(separator + 1, comma - separator - 1);
+ }
+
+ /* The key is mandatory, the value might be optional. */
+ if (key.empty())
+ continue;
+
+ if (optionsMap_.find(key) == optionsMap_.end()) {
+ std::cerr << "Invalid option " << key << std::endl;
+ return options;
+ }
+
+ OptionArgument arg = optionsMap_[key].argument;
+ if (value.empty() && arg == ArgumentRequired) {
+ std::cerr << "Option " << key << " requires an argument"
+ << std::endl;
+ return options;
+ } else if (!value.empty() && arg == ArgumentNone) {
+ std::cerr << "Option " << key << " takes no argument"
+ << std::endl;
+ return options;
+ }
+
+ const Option &option = optionsMap_[key];
+ if (!options.parseValue(key, option, value.c_str())) {
+ std::cerr << "Failed to parse '" << value << "' as "
+ << option.typeName() << " for option " << key
+ << std::endl;
+ return options;
+ }
+ }
+
+ options.valid_ = true;
+ return options;
+}
+
+unsigned int KeyValueParser::maxOptionLength() const
+{
+ unsigned int maxLength = 0;
+
+ for (auto const &iter : optionsMap_) {
+ const Option &option = iter.second;
+ unsigned int length = 10 + strlen(option.name);
+ if (option.argument != ArgumentNone)
+ length += 1 + strlen(option.typeName());
+ if (option.argument == ArgumentOptional)
+ length += 2;
+
+ if (length > maxLength)
+ maxLength = length;
+ }
+
+ return maxLength;
+}
+
+void KeyValueParser::usage(int indent)
+{
+ for (auto const &iter : optionsMap_) {
+ const Option &option = iter.second;
+ std::string argument = std::string(" ") + option.name;
+
+ if (option.argument != ArgumentNone) {
+ if (option.argument == ArgumentOptional)
+ argument += "[=";
+ else
+ argument += "=";
+ argument += option.typeName();
+ if (option.argument == ArgumentOptional)
+ argument += "]";
+ }
+
+ std::cerr << std::setw(indent) << argument;
+
+ for (const char *help = option.help, *end = help; end;) {
+ end = strchr(help, '\n');
+ if (end) {
+ std::cerr << std::string(help, end - help + 1);
+ std::cerr << std::setw(indent) << " ";
+ help = end + 1;
+ } else {
+ std::cerr << help << std::endl;
+ }
+ }
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * OptionValue
+ */
+
+/**
+ * \class OptionValue
+ * \brief Container to store the value of an option
+ *
+ * The OptionValue class is a variant-type container to store the value of an
+ * option. It supports empty values, integers, strings, key-value lists, as well
+ * as arrays of those types. For array values, all array elements shall have the
+ * same type.
+ *
+ * OptionValue instances are organized in a tree-based structure that matches
+ * the parent-child relationship of the options added to the parser. Children
+ * are retrieved with the children() function, and are stored as an
+ * OptionsBase<int>.
+ */
+
+/**
+ * \enum OptionValue::ValueType
+ * \brief The option value type
+ *
+ * \var OptionValue::ValueType::ValueNone
+ * \brief Empty value
+ *
+ * \var OptionValue::ValueType::ValueInteger
+ * \brief Integer value (int)
+ *
+ * \var OptionValue::ValueType::ValueString
+ * \brief String value (std::string)
+ *
+ * \var OptionValue::ValueType::ValueKeyValue
+ * \brief Key-value list value (KeyValueParser::Options)
+ *
+ * \var OptionValue::ValueType::ValueArray
+ * \brief Array value
+ */
+
+/**
+ * \brief Construct an empty OptionValue instance
+ *
+ * The value type is set to ValueType::ValueNone.
+ */
+OptionValue::OptionValue()
+ : type_(ValueNone), integer_(0)
+{
+}
+
+/**
+ * \brief Construct an integer OptionValue instance
+ * \param[in] value The integer value
+ *
+ * The value type is set to ValueType::ValueInteger.
+ */
+OptionValue::OptionValue(int value)
+ : type_(ValueInteger), integer_(value)
+{
+}
+
+/**
+ * \brief Construct a string OptionValue instance
+ * \param[in] value The string value
+ *
+ * The value type is set to ValueType::ValueString.
+ */
+OptionValue::OptionValue(const char *value)
+ : type_(ValueString), integer_(0), string_(value)
+{
+}
+
+/**
+ * \brief Construct a string OptionValue instance
+ * \param[in] value The string value
+ *
+ * The value type is set to ValueType::ValueString.
+ */
+OptionValue::OptionValue(const std::string &value)
+ : type_(ValueString), integer_(0), string_(value)
+{
+}
+
+/**
+ * \brief Construct a key-value OptionValue instance
+ * \param[in] value The key-value list
+ *
+ * The value type is set to ValueType::ValueKeyValue.
+ */
+OptionValue::OptionValue(const KeyValueParser::Options &value)
+ : type_(ValueKeyValue), integer_(0), keyValues_(value)
+{
+}
+
+/**
+ * \brief Add an entry to an array value
+ * \param[in] value The entry value
+ *
+ * This function can only be called if the OptionValue type is
+ * ValueType::ValueNone or ValueType::ValueArray. Upon return, the type will be
+ * set to ValueType::ValueArray.
+ */
+void OptionValue::addValue(const OptionValue &value)
+{
+ assert(type_ == ValueNone || type_ == ValueArray);
+
+ type_ = ValueArray;
+ array_.push_back(value);
+}
+
+/**
+ * \fn OptionValue::type()
+ * \brief Retrieve the value type
+ * \return The value type
+ */
+
+/**
+ * \fn OptionValue::empty()
+ * \brief Check if the value is empty
+ * \return True if the value is empty (type set to ValueType::ValueNone), or
+ * false otherwise
+ */
+
+/**
+ * \brief Cast the value to an int
+ * \return The option value as an int, or 0 if the value type isn't
+ * ValueType::ValueInteger
+ */
+OptionValue::operator int() const
+{
+ return toInteger();
+}
+
+/**
+ * \brief Cast the value to a std::string
+ * \return The option value as an std::string, or an empty string if the value
+ * type isn't ValueType::ValueString
+ */
+OptionValue::operator std::string() const
+{
+ return toString();
+}
+
+/**
+ * \brief Retrieve the value as an int
+ * \return The option value as an int, or 0 if the value type isn't
+ * ValueType::ValueInteger
+ */
+int OptionValue::toInteger() const
+{
+ if (type_ != ValueInteger)
+ return 0;
+
+ return integer_;
+}
+
+/**
+ * \brief Retrieve the value as a std::string
+ * \return The option value as a std::string, or an empty string if the value
+ * type isn't ValueType::ValueString
+ */
+std::string OptionValue::toString() const
+{
+ if (type_ != ValueString)
+ return std::string();
+
+ return string_;
+}
+
+/**
+ * \brief Retrieve the value as a key-value list
+ *
+ * The behaviour is undefined if the value type isn't ValueType::ValueKeyValue.
+ *
+ * \return The option value as a KeyValueParser::Options
+ */
+const KeyValueParser::Options &OptionValue::toKeyValues() const
+{
+ assert(type_ == ValueKeyValue);
+ return keyValues_;
+}
+
+/**
+ * \brief Retrieve the value as an array
+ *
+ * The behaviour is undefined if the value type isn't ValueType::ValueArray.
+ *
+ * \return The option value as a std::vector of OptionValue
+ */
+const std::vector<OptionValue> &OptionValue::toArray() const
+{
+ assert(type_ == ValueArray);
+ return array_;
+}
+
+/**
+ * \brief Retrieve the list of child values
+ * \return The list of child values
+ */
+const OptionsParser::Options &OptionValue::children() const
+{
+ return children_;
+}
+
+/* -----------------------------------------------------------------------------
+ * OptionsParser
+ */
+
+/**
+ * \class OptionsParser
+ * \brief A command line options parser
+ *
+ * The OptionsParser class is an easy-to-use options parser for POSIX-style
+ * command line options. It supports short (e.g. `-f`) and long (e.g. `--foo`)
+ * options, optional and mandatory arguments, automatic parsing of arguments
+ * for integer types and comma-separated lists of key=value pairs, and
+ * multi-value arguments. It handles help text generation automatically.
+ *
+ * An OptionsParser instance is initialized by adding supported options with
+ * addOption(). Options are specified by an identifier and a name. If the
+ * identifier is an alphanumeric character, it will be used by the parser as a
+ * short option identifier (e.g. `-f`). The name, if specified, will be used as
+ * a long option identifier (e.g. `--foo`). It should not include the double
+ * dashes. The name is optional if the option identifier is an alphanumeric
+ * character and mandatory otherwise.
+ *
+ * An option has a mandatory help text, which is used to print the full options
+ * list with the usage() function. The help text may be a multi-line string.
+ * Correct indentation of the help text is handled automatically.
+ *
+ * Options accept arguments when created with OptionArgument::ArgumentRequired
+ * or OptionArgument::ArgumentOptional. If the argument is required, it can be
+ * specified as a positional argument after the option (e.g. `-f bar`,
+ * `--foo bar`), collated with the short option (e.g. `-fbar`) or separated from
+ * the long option by an equal sign (e.g. `--foo=bar`). When the argument is
+ * optional, it must be collated with the short option or separated from the
+ * long option by an equal sign.
+ *
+ * If an option has a required or optional argument, an argument name must be
+ * set when adding the option. The argument name is used in the help text as a
+ * placeholder for an argument value. For instance, a `--write` option that
+ * takes a file name as an argument could set the argument name to `filename`,
+ * and the help text would display `--write filename`. This is only used to
+ * clarify the help text and has no effect on option parsing.
+ *
+ * The option type tells the parser how to process the argument. Arguments for
+ * string options (OptionType::OptionString) are stored as-is without any
+ * processing. Arguments for integer options (OptionType::OptionInteger) are
+ * converted to an integer value, using an optional base prefix (`0` for base 8,
+ * `0x` for base 16, none for base 10). Arguments for key-value options are
+ * parsed by a KeyValueParser given to addOption().
+ *
+ * By default, a given option can appear once only in the parsed command line.
+ * If the option is created as an array option, the parser will accept multiple
+ * instances of the option. The order in which identical options are specified
+ * is preserved in the values of an array option.
+ *
+ * After preparing the parser, it can be used any number of times to parse
+ * command line options with the parse() function. The function returns an
+ * Options instance that stores the values for the parsed options. The
+ * Options::isSet() function can be used to test if an option has been found,
+ * and is the only way to access options that take no argument (specified by
+ * OptionType::OptionNone and OptionArgument::ArgumentNone). For options that
+ * accept an argument, the option value can be accessed by Options::operator[]()
+ * using the option identifier as the key. The order in which different options
+ * are specified on the command line isn't preserved.
+ *
+ * Options can be created with parent-child relationships to organize them as a
+ * tree instead of a flat list. When parsing a command line, the child options
+ * are considered related to the parent option that precedes them. This is
+ * useful when the parent is an array option. The Options values list generated
+ * by the parser then turns into a tree, with each parent value storing the
+ * values of child options that follow that instance of the parent option.
+ * For instance, with a `capture` option specified as a child of a `camera`
+ * array option, parsing the command line
+ *
+ * `--camera 1 --capture=10 --camera 2 --capture=20`
+ *
+ * will return an Options instance containing a single OptionValue instance of
+ * array type, for the `camera` option. The OptionValue will contain two
+ * entries, with the first entry containing the integer value 1 and the second
+ * entry the integer value 2. Each of those entries will in turn store an
+ * Options instance that contains the respective children. The first entry will
+ * store in its children a `capture` option of value 10, and the second entry a
+ * `capture` option of value 20.
+ *
+ * The command line
+ *
+ * `--capture=10 --camera 1`
+ *
+ * would result in a parsing error, as the `capture` option has no preceding
+ * `camera` option on the command line.
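+ *
+ * As a sketch, assuming hypothetical option identifiers OptCamera and
+ * OptCapture, the parser for the example above could be built as follows:
+ *
+ * \code{.cpp}
+ * OptionsParser parser;
+ * parser.addOption(OptCamera, OptionInteger, "Camera index", "camera",
+ *                  ArgumentRequired, "index", true);
+ * parser.addOption(OptCapture, OptionInteger, "Number of frames to capture",
+ *                  "capture", ArgumentRequired, "count", false, OptCamera);
+ *
+ * OptionsParser::Options options = parser.parse(argc, argv);
+ * if (!options.valid())
+ *         return EXIT_FAILURE;
+ * \endcode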
+ */
+
+/**
+ * \class OptionsParser::Options
+ * \brief An option list generated by the options parser
+ *
+ * This is a specialization of OptionsBase with the option reference type set to
+ * int.
+ */
+
+OptionsParser::OptionsParser() = default;
+OptionsParser::~OptionsParser() = default;
+
+/**
+ * \brief Add an option to the parser
+ * \param[in] opt The option identifier
+ * \param[in] type The type of the option argument
+ * \param[in] help The help text (may be a multi-line string)
+ * \param[in] name The option name
+ * \param[in] argument Whether the option accepts an optional argument, a
+ * mandatory argument, or no argument at all
+ * \param[in] argumentName The argument name used in the help text
+ * \param[in] array Whether the option can appear once or multiple times
+ * \param[in] parent The identifier of the parent option (optional)
+ *
+ * \return True if the option was added successfully, false if an error
+ * occurred.
+ */
+bool OptionsParser::addOption(int opt, OptionType type, const char *help,
+ const char *name, OptionArgument argument,
+ const char *argumentName, bool array, int parent)
+{
+ /*
+ * Options must have at least a short or long name, and a text message.
+ * If an argument is accepted, it must be described by argumentName.
+ */
+ if (!isalnum(opt) && !name)
+ return false;
+ if (!help || help[0] == '\0')
+ return false;
+ if (argument != ArgumentNone && !argumentName)
+ return false;
+
+ /* Reject duplicate options. */
+ if (optionsMap_.find(opt) != optionsMap_.end())
+ return false;
+
+ /*
+ * If a parent is specified, create the option as a child of its parent.
+ * Otherwise, create it in the parser's options list.
+ */
+ Option *option;
+
+ if (parent) {
+ auto iter = optionsMap_.find(parent);
+ if (iter == optionsMap_.end())
+ return false;
+
+ Option *parentOpt = iter->second;
+ parentOpt->children.push_back({
+ opt, type, name, argument, argumentName, help, nullptr,
+ array, parentOpt, {}
+ });
+ option = &parentOpt->children.back();
+ } else {
+ options_.push_back({ opt, type, name, argument, argumentName,
+ help, nullptr, array, nullptr, {} });
+ option = &options_.back();
+ }
+
+ optionsMap_[opt] = option;
+
+ return true;
+}
+
+/**
+ * \brief Add a key-value pair option to the parser
+ * \param[in] opt The option identifier
+ * \param[in] parser The KeyValueParser for the option value
+ * \param[in] help The help text (may be a multi-line string)
+ * \param[in] name The option name
+ * \param[in] array Whether the option can appear once or multiple times
+ *
+ * \sa Option
+ *
+ * \return True if the option was added successfully, false if an error
+ * occurred.
+ */
+bool OptionsParser::addOption(int opt, KeyValueParser *parser, const char *help,
+ const char *name, bool array, int parent)
+{
+ if (!addOption(opt, OptionKeyValue, help, name, ArgumentRequired,
+ "key=value[,key=value,...]", array, parent))
+ return false;
+
+ optionsMap_[opt]->keyValueParser = parser;
+ return true;
+}
+
+/**
+ * \brief Parse command line arguments
+ * \param[in] argc The number of arguments in the \a argv array
+ * \param[in] argv The array of arguments
+ *
+ * If a parsing error occurs, the parsing stops, the function prints an error
+ * message that identifies the invalid argument, prints usage information with
+ * usage(), and returns an invalid container. The container is populated with
+ * the options successfully parsed so far.
+ *
+ * \return A valid container with the list of parsed options on success, or an
+ * invalid container otherwise
+ */
+OptionsParser::Options OptionsParser::parse(int argc, char **argv)
+{
+ OptionsParser::Options options;
+
+ /*
+ * Allocate short and long options arrays large enough to contain all
+ * options.
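+	 * Each short option needs at most three characters (the option
+	 * character plus "::" for an optional argument), hence the size of
+	 * optionsMap_.size() * 3, plus two more for the leading ':' and the
+	 * terminating '\0'.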
+ */
+ std::vector<char> shortOptions(optionsMap_.size() * 3 + 2);
+ std::vector<struct option> longOptions(optionsMap_.size() + 1);
+ unsigned int ids = 0;
+ unsigned int idl = 0;
+
+ shortOptions[ids++] = ':';
+
+ for (const auto [opt, option] : optionsMap_) {
+ if (option->hasShortOption()) {
+ shortOptions[ids++] = opt;
+ if (option->argument != ArgumentNone)
+ shortOptions[ids++] = ':';
+ if (option->argument == ArgumentOptional)
+ shortOptions[ids++] = ':';
+ }
+
+ if (option->hasLongOption()) {
+ longOptions[idl].name = option->name;
+
+ switch (option->argument) {
+ case ArgumentNone:
+ longOptions[idl].has_arg = no_argument;
+ break;
+ case ArgumentRequired:
+ longOptions[idl].has_arg = required_argument;
+ break;
+ case ArgumentOptional:
+ longOptions[idl].has_arg = optional_argument;
+ break;
+ }
+
+ longOptions[idl].flag = 0;
+ longOptions[idl].val = option->opt;
+ idl++;
+ }
+ }
+
+ shortOptions[ids] = '\0';
+ memset(&longOptions[idl], 0, sizeof(longOptions[idl]));
+
+ opterr = 0;
+
+ while (true) {
+ int c = getopt_long(argc, argv, shortOptions.data(),
+ longOptions.data(), nullptr);
+
+ if (c == -1)
+ break;
+
+ if (c == '?' || c == ':') {
+ if (c == '?')
+ std::cerr << "Invalid option ";
+ else
+ std::cerr << "Missing argument for option ";
+ std::cerr << argv[optind - 1] << std::endl;
+
+ usage();
+ return options;
+ }
+
+ const Option &option = *optionsMap_[c];
+ if (!parseValue(option, optarg, &options)) {
+ usage();
+ return options;
+ }
+ }
+
+ if (optind < argc) {
+ std::cerr << "Invalid non-option argument '" << argv[optind]
+ << "'" << std::endl;
+ usage();
+ return options;
+ }
+
+ options.valid_ = true;
+ return options;
+}
+
+/**
+ * \brief Print usage text to std::cerr
+ *
+ * The usage text lists all the supported options with their arguments. It is
+ * generated automatically from the options added to the parser. Callers of this
+ * function may print additional usage information for the application before
+ * the list of options.
+ */
+void OptionsParser::usage()
+{
+ unsigned int indent = 0;
+
+ for (const auto &opt : optionsMap_) {
+ const Option *option = opt.second;
+ unsigned int length = 14;
+ if (option->hasLongOption())
+ length += 2 + strlen(option->name);
+ if (option->argument != ArgumentNone)
+ length += 1 + strlen(option->argumentName);
+ if (option->argument == ArgumentOptional)
+ length += 2;
+ if (option->isArray)
+ length += 4;
+
+ if (length > indent)
+ indent = length;
+
+ if (option->keyValueParser) {
+ length = option->keyValueParser->maxOptionLength();
+ if (length > indent)
+ indent = length;
+ }
+ }
+
+ indent = (indent + 7) / 8 * 8;
+
+ std::cerr << "Options:" << std::endl;
+
+ std::ios_base::fmtflags f(std::cerr.flags());
+ std::cerr << std::left;
+
+ usageOptions(options_, indent);
+
+ std::cerr.flags(f);
+}
+
+void OptionsParser::usageOptions(const std::list<Option> &options,
+ unsigned int indent)
+{
+ std::vector<const Option *> parentOptions;
+
+ for (const Option &option : options) {
+ std::string argument;
+ if (option.hasShortOption())
+ argument = std::string(" -")
+ + static_cast<char>(option.opt);
+ else
+ argument = " ";
+
+ if (option.hasLongOption()) {
+ if (option.hasShortOption())
+ argument += ", ";
+ else
+ argument += " ";
+ argument += std::string("--") + option.name;
+ }
+
+ if (option.argument != ArgumentNone) {
+ if (option.argument == ArgumentOptional)
+ argument += "[=";
+ else
+ argument += " ";
+ argument += option.argumentName;
+ if (option.argument == ArgumentOptional)
+ argument += "]";
+ }
+
+ if (option.isArray)
+ argument += " ...";
+
+ std::cerr << std::setw(indent) << argument;
+
+ for (const char *help = option.help, *end = help; end; ) {
+ end = strchr(help, '\n');
+ if (end) {
+ std::cerr << std::string(help, end - help + 1);
+ std::cerr << std::setw(indent) << " ";
+ help = end + 1;
+ } else {
+ std::cerr << help << std::endl;
+ }
+ }
+
+ if (option.keyValueParser)
+ option.keyValueParser->usage(indent);
+
+ if (!option.children.empty())
+ parentOptions.push_back(&option);
+ }
+
+ if (parentOptions.empty())
+ return;
+
+ for (const Option *option : parentOptions) {
+ std::cerr << std::endl << "Options valid in the context of "
+ << option->optionName() << ":" << std::endl;
+ usageOptions(option->children, indent);
+ }
+}
+
+std::tuple<OptionsParser::Options *, const Option *>
+OptionsParser::childOption(const Option *parent, Options *options)
+{
+ /*
+ * The parent argument points to the parent of the leaf node Option,
+ * and the options argument to the root node of the Options tree. Use
+ * recursive calls to traverse the Option tree up to the root node while
+ * traversing the Options tree down to the leaf node:
+ */
+
+ /*
+ * - If we have no parent, we've reached the root node of the Option
+ * tree, the options argument is what we need.
+ */
+ if (!parent)
+ return { options, nullptr };
+
+ /*
+ * - If the parent has a parent, use recursion to move one level up the
+ * Option tree. This returns the Options corresponding to parent, or
+ * nullptr if a suitable Options child isn't found.
+ */
+ if (parent->parent) {
+ const Option *error;
+ std::tie(options, error) = childOption(parent->parent, options);
+
+ /* Propagate the error all the way back up the call stack. */
+		if (error)
+ return { options, error };
+ }
+
+ /*
+	 * - The parent has no parent, we're now one level down from the root.
+ * Return the Options child corresponding to the parent. The child may
+ * not exist if options are specified in an incorrect order.
+ */
+ if (!options->isSet(parent->opt))
+ return { nullptr, parent };
+
+ /*
+ * If the child value is of array type, children are not stored in the
+ * value .children() list, but in the .children() of the value's array
+ * elements. Use the last array element in that case, as a child option
+ * relates to the last instance of its parent option.
+ */
+ const OptionValue *value = &(*options)[parent->opt];
+ if (value->type() == OptionValue::ValueArray)
+ value = &value->toArray().back();
+
+ return { const_cast<Options *>(&value->children()), nullptr };
+}
+
+bool OptionsParser::parseValue(const Option &option, const char *arg,
+ Options *options)
+{
+ const Option *error;
+
+ std::tie(options, error) = childOption(option.parent, options);
+ if (error) {
+ std::cerr << "Option " << option.optionName() << " requires a "
+ << error->optionName() << " context" << std::endl;
+ return false;
+ }
+
+ if (!options->parseValue(option.opt, option, arg)) {
+ std::cerr << "Can't parse " << option.typeName()
+ << " argument for option " << option.optionName()
+ << std::endl;
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/apps/common/options.h b/src/apps/common/options.h
new file mode 100644
index 00000000..9771aa7a
--- /dev/null
+++ b/src/apps/common/options.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * cam - Options parsing
+ */
+
+#pragma once
+
+#include <ctype.h>
+#include <list>
+#include <map>
+#include <tuple>
+#include <vector>
+
+class KeyValueParser;
+class OptionValue;
+struct Option;
+
+enum OptionArgument {
+ ArgumentNone,
+ ArgumentRequired,
+ ArgumentOptional,
+};
+
+enum OptionType {
+ OptionNone,
+ OptionInteger,
+ OptionString,
+ OptionKeyValue,
+};
+
+template<typename T>
+class OptionsBase
+{
+public:
+ OptionsBase() : valid_(false) {}
+
+ bool empty() const;
+ bool valid() const;
+ bool isSet(const T &opt) const;
+ const OptionValue &operator[](const T &opt) const;
+
+ void invalidate();
+
+private:
+ friend class KeyValueParser;
+ friend class OptionsParser;
+
+ bool parseValue(const T &opt, const Option &option, const char *value);
+
+ std::map<T, OptionValue> values_;
+ bool valid_;
+};
+
+class KeyValueParser
+{
+public:
+ class Options : public OptionsBase<std::string>
+ {
+ };
+
+ KeyValueParser();
+ virtual ~KeyValueParser();
+
+ bool addOption(const char *name, OptionType type, const char *help,
+ OptionArgument argument = ArgumentNone);
+
+ virtual Options parse(const char *arguments);
+
+private:
+ KeyValueParser(const KeyValueParser &) = delete;
+ KeyValueParser &operator=(const KeyValueParser &) = delete;
+
+ friend class OptionsParser;
+ unsigned int maxOptionLength() const;
+ void usage(int indent);
+
+ std::map<std::string, Option> optionsMap_;
+};
+
+class OptionsParser
+{
+public:
+ class Options : public OptionsBase<int>
+ {
+ };
+
+ OptionsParser();
+ ~OptionsParser();
+
+ bool addOption(int opt, OptionType type, const char *help,
+ const char *name = nullptr,
+ OptionArgument argument = ArgumentNone,
+ const char *argumentName = nullptr, bool array = false,
+ int parent = 0);
+ bool addOption(int opt, KeyValueParser *parser, const char *help,
+ const char *name = nullptr, bool array = false,
+ int parent = 0);
+
+ Options parse(int argc, char *argv[]);
+ void usage();
+
+private:
+ OptionsParser(const OptionsParser &) = delete;
+ OptionsParser &operator=(const OptionsParser &) = delete;
+
+ void usageOptions(const std::list<Option> &options, unsigned int indent);
+
+ std::tuple<OptionsParser::Options *, const Option *>
+ childOption(const Option *parent, Options *options);
+ bool parseValue(const Option &option, const char *arg, Options *options);
+
+ std::list<Option> options_;
+ std::map<unsigned int, Option *> optionsMap_;
+};
+
+class OptionValue
+{
+public:
+ enum ValueType {
+ ValueNone,
+ ValueInteger,
+ ValueString,
+ ValueKeyValue,
+ ValueArray,
+ };
+
+ OptionValue();
+ OptionValue(int value);
+ OptionValue(const char *value);
+ OptionValue(const std::string &value);
+ OptionValue(const KeyValueParser::Options &value);
+
+ void addValue(const OptionValue &value);
+
+ ValueType type() const { return type_; }
+ bool empty() const { return type_ == ValueType::ValueNone; }
+
+ operator int() const;
+ operator std::string() const;
+
+ int toInteger() const;
+ std::string toString() const;
+ const KeyValueParser::Options &toKeyValues() const;
+ const std::vector<OptionValue> &toArray() const;
+
+ const OptionsParser::Options &children() const;
+
+private:
+ ValueType type_;
+ int integer_;
+ std::string string_;
+ KeyValueParser::Options keyValues_;
+ std::vector<OptionValue> array_;
+ OptionsParser::Options children_;
+};
diff --git a/src/apps/common/ppm_writer.cpp b/src/apps/common/ppm_writer.cpp
new file mode 100644
index 00000000..d6c8641d
--- /dev/null
+++ b/src/apps/common/ppm_writer.cpp
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * PPM writer
+ */
+
+#include "ppm_writer.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+using namespace libcamera;
+
+int PPMWriter::write(const char *filename,
+ const StreamConfiguration &config,
+ const Span<uint8_t> &data)
+{
+ if (config.pixelFormat != formats::BGR888) {
+ std::cerr << "Only BGR888 output pixel format is supported ("
+ << config.pixelFormat << " requested)" << std::endl;
+ return -EINVAL;
+ }
+
+ std::ofstream output(filename, std::ios::binary);
+ if (!output) {
+ std::cerr << "Failed to open ppm file: " << filename << std::endl;
+ return -EINVAL;
+ }
+
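+	/*
+	 * A binary PPM file starts with a text header: the "P6" magic, the
+	 * image size and the maximum sample value (e.g. "P6\n640 480\n255\n"),
+	 * followed by rows of packed RGB triplets.
+	 */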
+ output << "P6" << std::endl
+ << config.size.width << " " << config.size.height << std::endl
+ << "255" << std::endl;
+ if (!output) {
+ std::cerr << "Failed to write the file header" << std::endl;
+ return -EINVAL;
+ }
+
+ const unsigned int rowLength = config.size.width * 3;
+ const char *row = reinterpret_cast<const char *>(data.data());
+ for (unsigned int y = 0; y < config.size.height; y++, row += config.stride) {
+ output.write(row, rowLength);
+ if (!output) {
+ std::cerr << "Failed to write image data at row " << y << std::endl;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/apps/common/ppm_writer.h b/src/apps/common/ppm_writer.h
new file mode 100644
index 00000000..8c8d2e15
--- /dev/null
+++ b/src/apps/common/ppm_writer.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat, Inc.
+ *
+ * PPM writer
+ */
+
+#pragma once
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/stream.h>
+
+class PPMWriter
+{
+public:
+ static int write(const char *filename,
+ const libcamera::StreamConfiguration &config,
+ const libcamera::Span<uint8_t> &data);
+};
diff --git a/src/apps/common/stream_options.cpp b/src/apps/common/stream_options.cpp
new file mode 100644
index 00000000..99239e07
--- /dev/null
+++ b/src/apps/common/stream_options.cpp
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to parse options for streams
+ */
+#include "stream_options.h"
+
+#include <iostream>
+
+#include <libcamera/color_space.h>
+
+using namespace libcamera;
+
+StreamKeyValueParser::StreamKeyValueParser()
+{
+ addOption("role", OptionString,
+ "Role for the stream (viewfinder, video, still, raw)",
+ ArgumentRequired);
+ addOption("width", OptionInteger, "Width in pixels",
+ ArgumentRequired);
+ addOption("height", OptionInteger, "Height in pixels",
+ ArgumentRequired);
+ addOption("pixelformat", OptionString, "Pixel format name",
+ ArgumentRequired);
+ addOption("colorspace", OptionString, "Color space",
+ ArgumentRequired);
+}
+
+KeyValueParser::Options StreamKeyValueParser::parse(const char *arguments)
+{
+ KeyValueParser::Options options = KeyValueParser::parse(arguments);
+
+ if (options.valid() && options.isSet("role") && !parseRole(options)) {
+ std::cerr << "Unknown stream role "
+ << options["role"].toString() << std::endl;
+ options.invalidate();
+ }
+
+ return options;
+}
+
+std::vector<StreamRole> StreamKeyValueParser::roles(const OptionValue &values)
+{
+	/* If there are no configuration values to examine, default to viewfinder. */
+ if (values.empty())
+ return { StreamRole::Viewfinder };
+
+ const std::vector<OptionValue> &streamParameters = values.toArray();
+
+ std::vector<StreamRole> roles;
+ for (auto const &value : streamParameters) {
+		/* If a role is invalid, default it to viewfinder. */
+ roles.push_back(parseRole(value.toKeyValues()).value_or(StreamRole::Viewfinder));
+ }
+
+ return roles;
+}
+
+int StreamKeyValueParser::updateConfiguration(CameraConfiguration *config,
+ const OptionValue &values)
+{
+ if (!config) {
+ std::cerr << "No configuration provided" << std::endl;
+ return -EINVAL;
+ }
+
+	/* If there are no configuration values there is nothing to do. */
+ if (values.empty())
+ return 0;
+
+ const std::vector<OptionValue> &streamParameters = values.toArray();
+
+ if (config->size() != streamParameters.size()) {
+ std::cerr
+ << "Number of streams in configuration "
+ << config->size()
+ << " does not match number of streams parsed "
+ << streamParameters.size()
+ << std::endl;
+ return -EINVAL;
+ }
+
+ unsigned int i = 0;
+ for (auto const &value : streamParameters) {
+ KeyValueParser::Options opts = value.toKeyValues();
+ StreamConfiguration &cfg = config->at(i++);
+
+ if (opts.isSet("width") && opts.isSet("height")) {
+ cfg.size.width = opts["width"];
+ cfg.size.height = opts["height"];
+ }
+
+ if (opts.isSet("pixelformat"))
+ cfg.pixelFormat = PixelFormat::fromString(opts["pixelformat"].toString());
+
+ if (opts.isSet("colorspace"))
+ cfg.colorSpace = ColorSpace::fromString(opts["colorspace"].toString());
+ }
+
+ return 0;
+}
+
+std::optional<libcamera::StreamRole> StreamKeyValueParser::parseRole(const KeyValueParser::Options &options)
+{
+ if (!options.isSet("role"))
+ return {};
+
+ std::string name = options["role"].toString();
+
+ if (name == "viewfinder")
+ return StreamRole::Viewfinder;
+ else if (name == "video")
+ return StreamRole::VideoRecording;
+ else if (name == "still")
+ return StreamRole::StillCapture;
+ else if (name == "raw")
+ return StreamRole::Raw;
+
+ return {};
+}
diff --git a/src/apps/common/stream_options.h b/src/apps/common/stream_options.h
new file mode 100644
index 00000000..a93f104c
--- /dev/null
+++ b/src/apps/common/stream_options.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to parse options for streams
+ */
+
+#pragma once
+
+#include <optional>
+
+#include <libcamera/camera.h>
+
+#include "options.h"
+
+class StreamKeyValueParser : public KeyValueParser
+{
+public:
+ StreamKeyValueParser();
+
+ KeyValueParser::Options parse(const char *arguments) override;
+
+ static std::vector<libcamera::StreamRole> roles(const OptionValue &values);
+ static int updateConfiguration(libcamera::CameraConfiguration *config,
+ const OptionValue &values);
+
+private:
+ static std::optional<libcamera::StreamRole> parseRole(const KeyValueParser::Options &options);
+};
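
For context, a minimal sketch of how an application can consume these helpers. `streams` stands for the OptionValue collected for a hypothetical --stream option; the helper name is illustrative and not part of this patch:

#include <errno.h>
#include <memory>
#include <vector>

#include <libcamera/camera.h>

#include "stream_options.h"

/* Hypothetical usage sketch, not part of this patch. */
static int applyStreamOptions(libcamera::Camera *camera,
			      const OptionValue &streams)
{
	/* Derive the stream roles, then generate a matching configuration. */
	std::vector<libcamera::StreamRole> roles =
		StreamKeyValueParser::roles(streams);

	std::unique_ptr<libcamera::CameraConfiguration> config =
		camera->generateConfiguration(roles);
	if (!config)
		return -EINVAL;

	/* Apply the requested size, pixel format and color space. */
	if (StreamKeyValueParser::updateConfiguration(config.get(), streams))
		return -EINVAL;

	return camera->configure(config.get());
}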
diff --git a/src/apps/ipa-verify/main.cpp b/src/apps/ipa-verify/main.cpp
new file mode 100644
index 00000000..0903cd85
--- /dev/null
+++ b/src/apps/ipa-verify/main.cpp
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * Verify signature on an IPA module
+ */
+
+#include <iostream>
+#include <libgen.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/ipa_module.h"
+
+using namespace libcamera;
+
+namespace {
+
+bool isSignatureValid(IPAModule *ipa)
+{
+ File file{ ipa->path() };
+ if (!file.open(File::OpenModeFlag::ReadOnly))
+ return false;
+
+ Span<uint8_t> data = file.map();
+ if (data.empty())
+ return false;
+
+ return IPAManager::pubKey().verify(data, ipa->signature());
+}
+
+void usage(char *argv0)
+{
+ std::cout << "Usage: " << basename(argv0) << " ipa_name.so" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Verify the signature of an IPA module. The signature file ipa_name.so.sign is" << std::endl;
+ std::cout << "expected to be in the same directory as the IPA module." << std::endl;
+}
+
+} /* namespace */
+
+int main(int argc, char **argv)
+{
+ if (argc != 2) {
+ usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ IPAModule module{ argv[1] };
+ if (!module.isValid()) {
+ std::cout << "Invalid IPA module " << argv[1] << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ if (!isSignatureValid(&module)) {
+ std::cout << "IPA module signature is invalid" << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ std::cout << "IPA module signature is valid" << std::endl;
+ return 0;
+}
diff --git a/src/apps/ipa-verify/meson.build b/src/apps/ipa-verify/meson.build
new file mode 100644
index 00000000..7fdda3b9
--- /dev/null
+++ b/src/apps/ipa-verify/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+if not ipa_sign_module
+ subdir_done()
+endif
+
+ipa_verify_sources = files([
+ 'main.cpp',
+])
+
+ipa_verify = executable('ipa_verify', ipa_verify_sources,
+ dependencies : [
+ libcamera_private,
+ ],
+ install : false)
diff --git a/src/apps/lc-compliance/environment.cpp b/src/apps/lc-compliance/environment.cpp
new file mode 100644
index 00000000..987264f1
--- /dev/null
+++ b/src/apps/lc-compliance/environment.cpp
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Collabora Ltd.
+ *
+ * Common environment for tests
+ */
+
+#include "environment.h"
+
+using namespace libcamera;
+
+Environment *Environment::get()
+{
+ static Environment instance;
+ return &instance;
+}
+
+void Environment::setup(CameraManager *cm, std::string cameraId)
+{
+ cm_ = cm;
+ cameraId_ = cameraId;
+}
diff --git a/src/apps/lc-compliance/environment.h b/src/apps/lc-compliance/environment.h
new file mode 100644
index 00000000..543e5372
--- /dev/null
+++ b/src/apps/lc-compliance/environment.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021, Collabora Ltd.
+ *
+ * Common environment for tests
+ */
+
+#pragma once
+
+#include <libcamera/libcamera.h>
+
+class Environment
+{
+public:
+ static Environment *get();
+
+ void setup(libcamera::CameraManager *cm, std::string cameraId);
+
+ const std::string &cameraId() const { return cameraId_; }
+ libcamera::CameraManager *cm() const { return cm_; }
+
+private:
+ Environment() = default;
+
+ std::string cameraId_;
+	libcamera::CameraManager *cm_ = nullptr;
+};
diff --git a/src/apps/lc-compliance/helpers/capture.cpp b/src/apps/lc-compliance/helpers/capture.cpp
new file mode 100644
index 00000000..90c1530b
--- /dev/null
+++ b/src/apps/lc-compliance/helpers/capture.cpp
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020-2021, Google Inc.
+ *
+ * Simple capture helper
+ */
+
+#include "capture.h"
+
+#include <gtest/gtest.h>
+
+using namespace libcamera;
+
+Capture::Capture(std::shared_ptr<Camera> camera)
+ : loop_(nullptr), camera_(camera),
+ allocator_(std::make_unique<FrameBufferAllocator>(camera))
+{
+}
+
+Capture::~Capture()
+{
+ stop();
+}
+
+void Capture::configure(StreamRole role)
+{
+ config_ = camera_->generateConfiguration({ role });
+
+ if (!config_) {
+ std::cout << "Role not supported by camera" << std::endl;
+ GTEST_SKIP();
+ }
+
+ if (config_->validate() != CameraConfiguration::Valid) {
+ config_.reset();
+ FAIL() << "Configuration not valid";
+ }
+
+ if (camera_->configure(config_.get())) {
+ config_.reset();
+ FAIL() << "Failed to configure camera";
+ }
+}
+
+void Capture::start()
+{
+ Stream *stream = config_->at(0).stream();
+ int count = allocator_->allocate(stream);
+
+ ASSERT_GE(count, 0) << "Failed to allocate buffers";
+	EXPECT_EQ(count, config_->at(0).bufferCount) << "Allocated fewer buffers than expected";
+
+ camera_->requestCompleted.connect(this, &Capture::requestComplete);
+
+ ASSERT_EQ(camera_->start(), 0) << "Failed to start camera";
+}
+
+void Capture::stop()
+{
+ if (!config_ || !allocator_->allocated())
+ return;
+
+ camera_->stop();
+
+ camera_->requestCompleted.disconnect(this);
+
+ Stream *stream = config_->at(0).stream();
+ requests_.clear();
+ allocator_->free(stream);
+}
+
+/* CaptureBalanced */
+
+CaptureBalanced::CaptureBalanced(std::shared_ptr<Camera> camera)
+ : Capture(camera)
+{
+}
+
+void CaptureBalanced::capture(unsigned int numRequests)
+{
+ start();
+
+ Stream *stream = config_->at(0).stream();
+ const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator_->buffers(stream);
+
+	/* No point in testing fewer requests than the camera depth. */
+ if (buffers.size() > numRequests) {
+ std::cout << "Camera needs " + std::to_string(buffers.size())
+ + " requests, can't test only "
+ + std::to_string(numRequests) << std::endl;
+ GTEST_SKIP();
+ }
+
+ queueCount_ = 0;
+ captureCount_ = 0;
+ captureLimit_ = numRequests;
+
+ /* Queue the recommended number of requests. */
+ for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
+ std::unique_ptr<Request> request = camera_->createRequest();
+ ASSERT_TRUE(request) << "Can't create request";
+
+ ASSERT_EQ(request->addBuffer(stream, buffer.get()), 0) << "Can't set buffer for request";
+
+ ASSERT_EQ(queueRequest(request.get()), 0) << "Failed to queue request";
+
+ requests_.push_back(std::move(request));
+ }
+
+ /* Run capture session. */
+ loop_ = new EventLoop();
+ loop_->exec();
+ stop();
+ delete loop_;
+
+ ASSERT_EQ(captureCount_, captureLimit_);
+}
+
+int CaptureBalanced::queueRequest(Request *request)
+{
+ queueCount_++;
+ if (queueCount_ > captureLimit_)
+ return 0;
+
+ return camera_->queueRequest(request);
+}
+
+void CaptureBalanced::requestComplete(Request *request)
+{
+ EXPECT_EQ(request->status(), Request::Status::RequestComplete)
+ << "Request didn't complete successfully";
+
+ captureCount_++;
+ if (captureCount_ >= captureLimit_) {
+ loop_->exit(0);
+ return;
+ }
+
+ request->reuse(Request::ReuseBuffers);
+ if (queueRequest(request))
+ loop_->exit(-EINVAL);
+}
+
+/* CaptureUnbalanced */
+
+CaptureUnbalanced::CaptureUnbalanced(std::shared_ptr<Camera> camera)
+ : Capture(camera)
+{
+}
+
+void CaptureUnbalanced::capture(unsigned int numRequests)
+{
+ start();
+
+ Stream *stream = config_->at(0).stream();
+ const std::vector<std::unique_ptr<FrameBuffer>> &buffers = allocator_->buffers(stream);
+
+ captureCount_ = 0;
+ captureLimit_ = numRequests;
+
+ /* Queue the recommended number of requests. */
+ for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
+ std::unique_ptr<Request> request = camera_->createRequest();
+ ASSERT_TRUE(request) << "Can't create request";
+
+ ASSERT_EQ(request->addBuffer(stream, buffer.get()), 0) << "Can't set buffer for request";
+
+ ASSERT_EQ(camera_->queueRequest(request.get()), 0) << "Failed to queue request";
+
+ requests_.push_back(std::move(request));
+ }
+
+ /* Run capture session. */
+ loop_ = new EventLoop();
+ int status = loop_->exec();
+ stop();
+ delete loop_;
+
+ ASSERT_EQ(status, 0);
+}
+
+void CaptureUnbalanced::requestComplete(Request *request)
+{
+ captureCount_++;
+ if (captureCount_ >= captureLimit_) {
+ loop_->exit(0);
+ return;
+ }
+
+ EXPECT_EQ(request->status(), Request::Status::RequestComplete)
+ << "Request didn't complete successfully";
+
+ request->reuse(Request::ReuseBuffers);
+ if (camera_->queueRequest(request))
+ loop_->exit(-EINVAL);
+}
diff --git a/src/apps/lc-compliance/helpers/capture.h b/src/apps/lc-compliance/helpers/capture.h
new file mode 100644
index 00000000..19b6927c
--- /dev/null
+++ b/src/apps/lc-compliance/helpers/capture.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020-2021, Google Inc.
+ *
+ * Simple capture helper
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/libcamera.h>
+
+#include "../common/event_loop.h"
+
+class Capture
+{
+public:
+ void configure(libcamera::StreamRole role);
+
+protected:
+ Capture(std::shared_ptr<libcamera::Camera> camera);
+ virtual ~Capture();
+
+ void start();
+ void stop();
+
+ virtual void requestComplete(libcamera::Request *request) = 0;
+
+ EventLoop *loop_;
+
+ std::shared_ptr<libcamera::Camera> camera_;
+ std::unique_ptr<libcamera::FrameBufferAllocator> allocator_;
+ std::unique_ptr<libcamera::CameraConfiguration> config_;
+ std::vector<std::unique_ptr<libcamera::Request>> requests_;
+};
+
+class CaptureBalanced : public Capture
+{
+public:
+ CaptureBalanced(std::shared_ptr<libcamera::Camera> camera);
+
+ void capture(unsigned int numRequests);
+
+private:
+ int queueRequest(libcamera::Request *request);
+ void requestComplete(libcamera::Request *request) override;
+
+ unsigned int queueCount_;
+ unsigned int captureCount_;
+ unsigned int captureLimit_;
+};
+
+class CaptureUnbalanced : public Capture
+{
+public:
+ CaptureUnbalanced(std::shared_ptr<libcamera::Camera> camera);
+
+ void capture(unsigned int numRequests);
+
+private:
+ void requestComplete(libcamera::Request *request) override;
+
+ unsigned int captureCount_;
+ unsigned int captureLimit_;
+};
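
A note on how the two helpers differ: CaptureBalanced stops requeueing once the requested number of requests has been queued, so every request completes before stop(); CaptureUnbalanced keeps requeueing and stops while requests are still in flight, exercising request cancellation. A hypothetical test body (names assumed, not part of this patch):

/* `camera` is an already-acquired std::shared_ptr<libcamera::Camera>. */
CaptureBalanced balanced(camera);
balanced.configure(libcamera::StreamRole::VideoRecording);
balanced.capture(21);	/* completes exactly 21 requests, then stops */

CaptureUnbalanced unbalanced(camera);
unbalanced.configure(libcamera::StreamRole::VideoRecording);
unbalanced.capture(21);	/* stop() runs with requests still queued */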
diff --git a/src/apps/lc-compliance/main.cpp b/src/apps/lc-compliance/main.cpp
new file mode 100644
index 00000000..3f1d2a61
--- /dev/null
+++ b/src/apps/lc-compliance/main.cpp
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ * Copyright (C) 2021, Collabora Ltd.
+ *
+ * lc-compliance - The libcamera compliance tool
+ */
+
+#include <iomanip>
+#include <iostream>
+#include <new>
+#include <string.h>
+
+#include <gtest/gtest.h>
+
+#include <libcamera/libcamera.h>
+
+#include "../common/options.h"
+
+#include "environment.h"
+
+using namespace libcamera;
+
+enum {
+ OptCamera = 'c',
+ OptList = 'l',
+ OptFilter = 'f',
+ OptHelp = 'h',
+};
+
+/*
+ * Make asserts act like exceptions, otherwise they only fail (or skip) the
+ * current function. From gtest documentation:
+ * https://google.github.io/googletest/advanced.html#asserting-on-subroutines-with-an-exception
+ */
+class ThrowListener : public testing::EmptyTestEventListener
+{
+ void OnTestPartResult(const testing::TestPartResult &result) override
+ {
+ if (result.type() == testing::TestPartResult::kFatalFailure ||
+ result.type() == testing::TestPartResult::kSkip)
+ throw testing::AssertionException(result);
+ }
+};
+
+static void listCameras(CameraManager *cm)
+{
+ for (const std::shared_ptr<Camera> &cam : cm->cameras())
+		std::cout << "- " << cam->id() << std::endl;
+}
+
+static int initCamera(CameraManager *cm, OptionsParser::Options options)
+{
+ std::shared_ptr<Camera> camera;
+
+ int ret = cm->start();
+ if (ret) {
+ std::cout << "Failed to start camera manager: "
+ << strerror(-ret) << std::endl;
+ return ret;
+ }
+
+ if (!options.isSet(OptCamera)) {
+ std::cout << "No camera specified, available cameras:" << std::endl;
+ listCameras(cm);
+ return -ENODEV;
+ }
+
+ const std::string &cameraId = options[OptCamera];
+ camera = cm->get(cameraId);
+ if (!camera) {
+ std::cout << "Camera " << cameraId << " not found, available cameras:" << std::endl;
+ listCameras(cm);
+ return -ENODEV;
+ }
+
+ Environment::get()->setup(cm, cameraId);
+
+ std::cout << "Using camera " << cameraId << std::endl;
+
+ return 0;
+}
+
+static int initGtestParameters(char *arg0, OptionsParser::Options options)
+{
+ const std::map<std::string, std::string> gtestFlags = { { "list", "--gtest_list_tests" },
+ { "filter", "--gtest_filter" } };
+
+ int argc = 0;
+ std::string filterParam;
+
+	/*
+	 * +2 to make room for the 0th argument (the program name, required
+	 * but carrying no gtest flag) and the terminating null.
+	 */
+	char **argv = new (std::nothrow) char *[gtestFlags.size() + 2];
+ if (!argv)
+ return -ENOMEM;
+
+ argv[0] = arg0;
+ argc++;
+
+ if (options.isSet(OptList)) {
+ argv[argc] = const_cast<char *>(gtestFlags.at("list").c_str());
+ argc++;
+ }
+
+ if (options.isSet(OptFilter)) {
+ /*
+ * The filter flag needs to be passed as a single parameter, in
+ * the format --gtest_filter=filterStr
+ */
+ filterParam = gtestFlags.at("filter") + "=" +
+ static_cast<const std::string &>(options[OptFilter]);
+
+ argv[argc] = const_cast<char *>(filterParam.c_str());
+ argc++;
+ }
+
+ argv[argc] = nullptr;
+
+ ::testing::InitGoogleTest(&argc, argv);
+
+ delete[] argv;
+
+ return 0;
+}
+
+static int initGtest(char *arg0, OptionsParser::Options options)
+{
+ int ret = initGtestParameters(arg0, options);
+ if (ret)
+ return ret;
+
+ testing::UnitTest::GetInstance()->listeners().Append(new ThrowListener);
+
+ return 0;
+}
+
+static int parseOptions(int argc, char **argv, OptionsParser::Options *options)
+{
+ OptionsParser parser;
+ parser.addOption(OptCamera, OptionString,
+ "Specify which camera to operate on, by id", "camera",
+ ArgumentRequired, "camera");
+ parser.addOption(OptList, OptionNone, "List all tests and exit", "list");
+ parser.addOption(OptFilter, OptionString,
+ "Specify which tests to run", "filter",
+ ArgumentRequired, "filter");
+ parser.addOption(OptHelp, OptionNone, "Display this help message",
+ "help");
+
+ *options = parser.parse(argc, argv);
+ if (!options->valid())
+ return -EINVAL;
+
+ if (options->isSet(OptHelp)) {
+ parser.usage();
+ std::cerr << "Further options from Googletest can be passed as environment variables"
+ << std::endl;
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ OptionsParser::Options options;
+ int ret = parseOptions(argc, argv, &options);
+ if (ret == -EINTR)
+ return EXIT_SUCCESS;
+ if (ret < 0)
+ return EXIT_FAILURE;
+
+ std::unique_ptr<CameraManager> cm = std::make_unique<CameraManager>();
+
+ /* No need to initialize the camera if we'll just list tests */
+ if (!options.isSet(OptList)) {
+ ret = initCamera(cm.get(), options);
+ if (ret)
+ return ret;
+ }
+
+ ret = initGtest(argv[0], options);
+ if (ret)
+ return ret;
+
+ ret = RUN_ALL_TESTS();
+
+ if (!options.isSet(OptList))
+ cm->stop();
+
+ return ret;
+}
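
To make the argv marshalling above concrete, this is the vector that initGtestParameters() effectively hands to InitGoogleTest() for an invocation such as `lc-compliance -c <id> -l -f 'SingleStream*'` (illustrative only):

const char *argv[] = {
	"lc-compliance",		/* argv[0], required but carries no flag */
	"--gtest_list_tests",		/* from -l */
	"--gtest_filter=SingleStream*",	/* from -f, a single parameter */
	nullptr,
};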
diff --git a/src/apps/lc-compliance/meson.build b/src/apps/lc-compliance/meson.build
new file mode 100644
index 00000000..b1f605f3
--- /dev/null
+++ b/src/apps/lc-compliance/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libgtest = dependency('gtest', version : '>=1.10.0',
+ required : get_option('lc-compliance'),
+ fallback : ['gtest', 'gtest_dep'])
+
+if opt_lc_compliance.disabled() or not libevent.found() or not libgtest.found()
+ lc_compliance_enabled = false
+ subdir_done()
+endif
+
+lc_compliance_enabled = true
+
+lc_compliance_sources = files([
+ 'environment.cpp',
+ 'helpers/capture.cpp',
+ 'main.cpp',
+ 'tests/capture_test.cpp',
+])
+
+lc_compliance_includes = [
+    include_directories('.'),
+    include_directories('helpers/'),
+]
+
+lc_compliance = executable('lc-compliance', lc_compliance_sources,
+ cpp_args : [ '-fexceptions' ],
+ link_with : apps_lib,
+ dependencies : [
+ libatomic,
+ libcamera_public,
+ libevent,
+ libgtest,
+ ],
+ include_directories : lc_compliance_includes,
+ install : true,
+ install_tag : 'bin-devel')
diff --git a/src/apps/lc-compliance/tests/capture_test.cpp b/src/apps/lc-compliance/tests/capture_test.cpp
new file mode 100644
index 00000000..ad3a1da2
--- /dev/null
+++ b/src/apps/lc-compliance/tests/capture_test.cpp
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ * Copyright (C) 2021, Collabora Ltd.
+ *
+ * Test camera capture
+ */
+
+#include "capture.h"
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "environment.h"
+
+using namespace libcamera;
+
+const std::vector<int> NUMREQUESTS = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
+const std::vector<StreamRole> ROLES = {
+ StreamRole::Raw,
+ StreamRole::StillCapture,
+ StreamRole::VideoRecording,
+ StreamRole::Viewfinder
+};
+
+class SingleStream : public testing::TestWithParam<std::tuple<StreamRole, int>>
+{
+public:
+ static std::string nameParameters(const testing::TestParamInfo<SingleStream::ParamType> &info);
+
+protected:
+ void SetUp() override;
+ void TearDown() override;
+
+ std::shared_ptr<Camera> camera_;
+};
+
+/*
+ * We use gtest's SetUp() and TearDown() instead of constructor and destructor
+ * because gtest assertions cannot be used in them.
+ */
+void SingleStream::SetUp()
+{
+ Environment *env = Environment::get();
+
+ camera_ = env->cm()->get(env->cameraId());
+
+ ASSERT_EQ(camera_->acquire(), 0);
+}
+
+void SingleStream::TearDown()
+{
+ if (!camera_)
+ return;
+
+ camera_->release();
+ camera_.reset();
+}
+
+std::string SingleStream::nameParameters(const testing::TestParamInfo<SingleStream::ParamType> &info)
+{
+ std::map<StreamRole, std::string> rolesMap = {
+ { StreamRole::Raw, "Raw" },
+ { StreamRole::StillCapture, "StillCapture" },
+ { StreamRole::VideoRecording, "VideoRecording" },
+ { StreamRole::Viewfinder, "Viewfinder" }
+ };
+
+ std::string roleName = rolesMap[std::get<0>(info.param)];
+ std::string numRequestsName = std::to_string(std::get<1>(info.param));
+
+ return roleName + "_" + numRequestsName;
+}
+
+/*
+ * Test single capture cycles
+ *
+ * Makes sure the camera completes the exact number of requests queued. Example
+ * failure is a camera that completes fewer requests than were queued.
+ */
+TEST_P(SingleStream, Capture)
+{
+ auto [role, numRequests] = GetParam();
+
+ CaptureBalanced capture(camera_);
+
+ capture.configure(role);
+
+ capture.capture(numRequests);
+}
+
+/*
+ * Test multiple start/stop cycles
+ *
+ * Makes sure the camera supports multiple start/stop cycles. Example failure is
+ * a camera that does not clean up correctly in its error path, a flaw that
+ * single-capture applications never expose.
+ */
+TEST_P(SingleStream, CaptureStartStop)
+{
+ auto [role, numRequests] = GetParam();
+ unsigned int numRepeats = 3;
+
+ CaptureBalanced capture(camera_);
+
+ capture.configure(role);
+
+ for (unsigned int starts = 0; starts < numRepeats; starts++)
+ capture.capture(numRequests);
+}
+
+/*
+ * Test unbalanced stop
+ *
+ * Makes sure the camera supports a stop with requests queued. Example failure
+ * is a camera that does not handle cancellation of buffers coming back from the
+ * video device while stopping.
+ */
+TEST_P(SingleStream, UnbalancedStop)
+{
+ auto [role, numRequests] = GetParam();
+
+ CaptureUnbalanced capture(camera_);
+
+ capture.configure(role);
+
+ capture.capture(numRequests);
+}
+
+INSTANTIATE_TEST_SUITE_P(CaptureTests,
+ SingleStream,
+ testing::Combine(testing::ValuesIn(ROLES),
+ testing::ValuesIn(NUMREQUESTS)),
+ SingleStream::nameParameters);
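
For reference, testing::Combine() instantiates each TEST_P above once per (role, request count) pair: 4 roles x 10 counts = 40 cases per test, 120 in total. Assuming standard gtest naming with nameParameters(), the generated names look like:

CaptureTests/SingleStream.Capture/Raw_1
CaptureTests/SingleStream.CaptureStartStop/VideoRecording_13
CaptureTests/SingleStream.UnbalancedStop/Viewfinder_89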
diff --git a/src/apps/meson.build b/src/apps/meson.build
new file mode 100644
index 00000000..af632b9a
--- /dev/null
+++ b/src/apps/meson.build
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: CC0-1.0
+
+opt_cam = get_option('cam')
+opt_lc_compliance = get_option('lc-compliance')
+
+# libevent is needed by cam and lc-compliance. As they are both feature options,
+# they can't be combined with simple boolean logic.
+libevent = dependency('libevent_pthreads', required : opt_cam)
+if not libevent.found()
+ libevent = dependency('libevent_pthreads', required : opt_lc_compliance)
+endif
+
+libtiff = dependency('libtiff-4', required : false)
+
+subdir('common')
+
+subdir('lc-compliance')
+
+subdir('cam')
+subdir('qcam')
+
+subdir('ipa-verify')
diff --git a/src/qcam/assets/feathericons/activity.svg b/src/apps/qcam/assets/feathericons/activity.svg
index 669a57a7..669a57a7 100644
--- a/src/qcam/assets/feathericons/activity.svg
+++ b/src/apps/qcam/assets/feathericons/activity.svg
diff --git a/src/qcam/assets/feathericons/airplay.svg b/src/apps/qcam/assets/feathericons/airplay.svg
index 7ce73022..7ce73022 100644
--- a/src/qcam/assets/feathericons/airplay.svg
+++ b/src/apps/qcam/assets/feathericons/airplay.svg
diff --git a/src/qcam/assets/feathericons/alert-circle.svg b/src/apps/qcam/assets/feathericons/alert-circle.svg
index 8d02b7d1..8d02b7d1 100644
--- a/src/qcam/assets/feathericons/alert-circle.svg
+++ b/src/apps/qcam/assets/feathericons/alert-circle.svg
diff --git a/src/qcam/assets/feathericons/alert-octagon.svg b/src/apps/qcam/assets/feathericons/alert-octagon.svg
index de9b03f2..de9b03f2 100644
--- a/src/qcam/assets/feathericons/alert-octagon.svg
+++ b/src/apps/qcam/assets/feathericons/alert-octagon.svg
diff --git a/src/qcam/assets/feathericons/alert-triangle.svg b/src/apps/qcam/assets/feathericons/alert-triangle.svg
index 6dcb0963..6dcb0963 100644
--- a/src/qcam/assets/feathericons/alert-triangle.svg
+++ b/src/apps/qcam/assets/feathericons/alert-triangle.svg
diff --git a/src/qcam/assets/feathericons/align-center.svg b/src/apps/qcam/assets/feathericons/align-center.svg
index 5b8842ea..5b8842ea 100644
--- a/src/qcam/assets/feathericons/align-center.svg
+++ b/src/apps/qcam/assets/feathericons/align-center.svg
diff --git a/src/qcam/assets/feathericons/align-justify.svg b/src/apps/qcam/assets/feathericons/align-justify.svg
index 0539876f..0539876f 100644
--- a/src/qcam/assets/feathericons/align-justify.svg
+++ b/src/apps/qcam/assets/feathericons/align-justify.svg
diff --git a/src/qcam/assets/feathericons/align-left.svg b/src/apps/qcam/assets/feathericons/align-left.svg
index 9ac852a5..9ac852a5 100644
--- a/src/qcam/assets/feathericons/align-left.svg
+++ b/src/apps/qcam/assets/feathericons/align-left.svg
diff --git a/src/qcam/assets/feathericons/align-right.svg b/src/apps/qcam/assets/feathericons/align-right.svg
index ef139ffa..ef139ffa 100644
--- a/src/qcam/assets/feathericons/align-right.svg
+++ b/src/apps/qcam/assets/feathericons/align-right.svg
diff --git a/src/qcam/assets/feathericons/anchor.svg b/src/apps/qcam/assets/feathericons/anchor.svg
index e01627a3..e01627a3 100644
--- a/src/qcam/assets/feathericons/anchor.svg
+++ b/src/apps/qcam/assets/feathericons/anchor.svg
diff --git a/src/qcam/assets/feathericons/aperture.svg b/src/apps/qcam/assets/feathericons/aperture.svg
index 9936e868..9936e868 100644
--- a/src/qcam/assets/feathericons/aperture.svg
+++ b/src/apps/qcam/assets/feathericons/aperture.svg
diff --git a/src/qcam/assets/feathericons/archive.svg b/src/apps/qcam/assets/feathericons/archive.svg
index 428882c8..428882c8 100644
--- a/src/qcam/assets/feathericons/archive.svg
+++ b/src/apps/qcam/assets/feathericons/archive.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-circle.svg b/src/apps/qcam/assets/feathericons/arrow-down-circle.svg
index 3238091b..3238091b 100644
--- a/src/qcam/assets/feathericons/arrow-down-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-left.svg b/src/apps/qcam/assets/feathericons/arrow-down-left.svg
index 72483584..72483584 100644
--- a/src/qcam/assets/feathericons/arrow-down-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-down-right.svg b/src/apps/qcam/assets/feathericons/arrow-down-right.svg
index 81d9822b..81d9822b 100644
--- a/src/qcam/assets/feathericons/arrow-down-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-down.svg b/src/apps/qcam/assets/feathericons/arrow-down.svg
index 4f84f627..4f84f627 100644
--- a/src/qcam/assets/feathericons/arrow-down.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-down.svg
diff --git a/src/qcam/assets/feathericons/arrow-left-circle.svg b/src/apps/qcam/assets/feathericons/arrow-left-circle.svg
index 3b19ff8a..3b19ff8a 100644
--- a/src/qcam/assets/feathericons/arrow-left-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-left-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-left.svg b/src/apps/qcam/assets/feathericons/arrow-left.svg
index a5058fc7..a5058fc7 100644
--- a/src/qcam/assets/feathericons/arrow-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-right-circle.svg b/src/apps/qcam/assets/feathericons/arrow-right-circle.svg
index ff01dd58..ff01dd58 100644
--- a/src/qcam/assets/feathericons/arrow-right-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-right-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-right.svg b/src/apps/qcam/assets/feathericons/arrow-right.svg
index 939b57c5..939b57c5 100644
--- a/src/qcam/assets/feathericons/arrow-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-circle.svg b/src/apps/qcam/assets/feathericons/arrow-up-circle.svg
index 044a75d3..044a75d3 100644
--- a/src/qcam/assets/feathericons/arrow-up-circle.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-circle.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-left.svg b/src/apps/qcam/assets/feathericons/arrow-up-left.svg
index cea55e87..cea55e87 100644
--- a/src/qcam/assets/feathericons/arrow-up-left.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-left.svg
diff --git a/src/qcam/assets/feathericons/arrow-up-right.svg b/src/apps/qcam/assets/feathericons/arrow-up-right.svg
index 95678e00..95678e00 100644
--- a/src/qcam/assets/feathericons/arrow-up-right.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up-right.svg
diff --git a/src/qcam/assets/feathericons/arrow-up.svg b/src/apps/qcam/assets/feathericons/arrow-up.svg
index 16b13aba..16b13aba 100644
--- a/src/qcam/assets/feathericons/arrow-up.svg
+++ b/src/apps/qcam/assets/feathericons/arrow-up.svg
diff --git a/src/qcam/assets/feathericons/at-sign.svg b/src/apps/qcam/assets/feathericons/at-sign.svg
index 5a5e5d0d..5a5e5d0d 100644
--- a/src/qcam/assets/feathericons/at-sign.svg
+++ b/src/apps/qcam/assets/feathericons/at-sign.svg
diff --git a/src/qcam/assets/feathericons/award.svg b/src/apps/qcam/assets/feathericons/award.svg
index be70d5a1..be70d5a1 100644
--- a/src/qcam/assets/feathericons/award.svg
+++ b/src/apps/qcam/assets/feathericons/award.svg
diff --git a/src/qcam/assets/feathericons/bar-chart-2.svg b/src/apps/qcam/assets/feathericons/bar-chart-2.svg
index 864167a6..864167a6 100644
--- a/src/qcam/assets/feathericons/bar-chart-2.svg
+++ b/src/apps/qcam/assets/feathericons/bar-chart-2.svg
diff --git a/src/qcam/assets/feathericons/bar-chart.svg b/src/apps/qcam/assets/feathericons/bar-chart.svg
index 074d7c1a..074d7c1a 100644
--- a/src/qcam/assets/feathericons/bar-chart.svg
+++ b/src/apps/qcam/assets/feathericons/bar-chart.svg
diff --git a/src/qcam/assets/feathericons/battery-charging.svg b/src/apps/qcam/assets/feathericons/battery-charging.svg
index 644cb59c..644cb59c 100644
--- a/src/qcam/assets/feathericons/battery-charging.svg
+++ b/src/apps/qcam/assets/feathericons/battery-charging.svg
diff --git a/src/qcam/assets/feathericons/battery.svg b/src/apps/qcam/assets/feathericons/battery.svg
index 7fe87710..7fe87710 100644
--- a/src/qcam/assets/feathericons/battery.svg
+++ b/src/apps/qcam/assets/feathericons/battery.svg
diff --git a/src/qcam/assets/feathericons/bell-off.svg b/src/apps/qcam/assets/feathericons/bell-off.svg
index 4b07c848..4b07c848 100644
--- a/src/qcam/assets/feathericons/bell-off.svg
+++ b/src/apps/qcam/assets/feathericons/bell-off.svg
diff --git a/src/qcam/assets/feathericons/bell.svg b/src/apps/qcam/assets/feathericons/bell.svg
index bba561c1..bba561c1 100644
--- a/src/qcam/assets/feathericons/bell.svg
+++ b/src/apps/qcam/assets/feathericons/bell.svg
diff --git a/src/qcam/assets/feathericons/bluetooth.svg b/src/apps/qcam/assets/feathericons/bluetooth.svg
index cebed7b1..cebed7b1 100644
--- a/src/qcam/assets/feathericons/bluetooth.svg
+++ b/src/apps/qcam/assets/feathericons/bluetooth.svg
diff --git a/src/qcam/assets/feathericons/bold.svg b/src/apps/qcam/assets/feathericons/bold.svg
index d1a4efd3..d1a4efd3 100644
--- a/src/qcam/assets/feathericons/bold.svg
+++ b/src/apps/qcam/assets/feathericons/bold.svg
diff --git a/src/qcam/assets/feathericons/book-open.svg b/src/apps/qcam/assets/feathericons/book-open.svg
index 5e0ca0ab..5e0ca0ab 100644
--- a/src/qcam/assets/feathericons/book-open.svg
+++ b/src/apps/qcam/assets/feathericons/book-open.svg
diff --git a/src/qcam/assets/feathericons/book.svg b/src/apps/qcam/assets/feathericons/book.svg
index 12ffcbc4..12ffcbc4 100644
--- a/src/qcam/assets/feathericons/book.svg
+++ b/src/apps/qcam/assets/feathericons/book.svg
diff --git a/src/qcam/assets/feathericons/bookmark.svg b/src/apps/qcam/assets/feathericons/bookmark.svg
index 2239cc58..2239cc58 100644
--- a/src/qcam/assets/feathericons/bookmark.svg
+++ b/src/apps/qcam/assets/feathericons/bookmark.svg
diff --git a/src/qcam/assets/feathericons/box.svg b/src/apps/qcam/assets/feathericons/box.svg
index d89be30f..d89be30f 100644
--- a/src/qcam/assets/feathericons/box.svg
+++ b/src/apps/qcam/assets/feathericons/box.svg
diff --git a/src/qcam/assets/feathericons/briefcase.svg b/src/apps/qcam/assets/feathericons/briefcase.svg
index e3af0506..e3af0506 100644
--- a/src/qcam/assets/feathericons/briefcase.svg
+++ b/src/apps/qcam/assets/feathericons/briefcase.svg
diff --git a/src/qcam/assets/feathericons/calendar.svg b/src/apps/qcam/assets/feathericons/calendar.svg
index 6c7fd870..6c7fd870 100644
--- a/src/qcam/assets/feathericons/calendar.svg
+++ b/src/apps/qcam/assets/feathericons/calendar.svg
diff --git a/src/qcam/assets/feathericons/camera-off.svg b/src/apps/qcam/assets/feathericons/camera-off.svg
index daa3e25f..daa3e25f 100644
--- a/src/qcam/assets/feathericons/camera-off.svg
+++ b/src/apps/qcam/assets/feathericons/camera-off.svg
diff --git a/src/qcam/assets/feathericons/camera.svg b/src/apps/qcam/assets/feathericons/camera.svg
index 0e7f0603..0e7f0603 100644
--- a/src/qcam/assets/feathericons/camera.svg
+++ b/src/apps/qcam/assets/feathericons/camera.svg
diff --git a/src/qcam/assets/feathericons/cast.svg b/src/apps/qcam/assets/feathericons/cast.svg
index 63c954d9..63c954d9 100644
--- a/src/qcam/assets/feathericons/cast.svg
+++ b/src/apps/qcam/assets/feathericons/cast.svg
diff --git a/src/qcam/assets/feathericons/check-circle.svg b/src/apps/qcam/assets/feathericons/check-circle.svg
index f2f4fd1a..f2f4fd1a 100644
--- a/src/qcam/assets/feathericons/check-circle.svg
+++ b/src/apps/qcam/assets/feathericons/check-circle.svg
diff --git a/src/qcam/assets/feathericons/check-square.svg b/src/apps/qcam/assets/feathericons/check-square.svg
index 72ab7a80..72ab7a80 100644
--- a/src/qcam/assets/feathericons/check-square.svg
+++ b/src/apps/qcam/assets/feathericons/check-square.svg
diff --git a/src/qcam/assets/feathericons/check.svg b/src/apps/qcam/assets/feathericons/check.svg
index 1c209899..1c209899 100644
--- a/src/qcam/assets/feathericons/check.svg
+++ b/src/apps/qcam/assets/feathericons/check.svg
diff --git a/src/qcam/assets/feathericons/chevron-down.svg b/src/apps/qcam/assets/feathericons/chevron-down.svg
index 278c6a31..278c6a31 100644
--- a/src/qcam/assets/feathericons/chevron-down.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-down.svg
diff --git a/src/qcam/assets/feathericons/chevron-left.svg b/src/apps/qcam/assets/feathericons/chevron-left.svg
index 747d46d9..747d46d9 100644
--- a/src/qcam/assets/feathericons/chevron-left.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-left.svg
diff --git a/src/qcam/assets/feathericons/chevron-right.svg b/src/apps/qcam/assets/feathericons/chevron-right.svg
index 258de414..258de414 100644
--- a/src/qcam/assets/feathericons/chevron-right.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-right.svg
diff --git a/src/qcam/assets/feathericons/chevron-up.svg b/src/apps/qcam/assets/feathericons/chevron-up.svg
index 4eb5ecc3..4eb5ecc3 100644
--- a/src/qcam/assets/feathericons/chevron-up.svg
+++ b/src/apps/qcam/assets/feathericons/chevron-up.svg
diff --git a/src/qcam/assets/feathericons/chevrons-down.svg b/src/apps/qcam/assets/feathericons/chevrons-down.svg
index e67ef2fb..e67ef2fb 100644
--- a/src/qcam/assets/feathericons/chevrons-down.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-down.svg
diff --git a/src/qcam/assets/feathericons/chevrons-left.svg b/src/apps/qcam/assets/feathericons/chevrons-left.svg
index c32e3983..c32e3983 100644
--- a/src/qcam/assets/feathericons/chevrons-left.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-left.svg
diff --git a/src/qcam/assets/feathericons/chevrons-right.svg b/src/apps/qcam/assets/feathericons/chevrons-right.svg
index f5068145..f5068145 100644
--- a/src/qcam/assets/feathericons/chevrons-right.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-right.svg
diff --git a/src/qcam/assets/feathericons/chevrons-up.svg b/src/apps/qcam/assets/feathericons/chevrons-up.svg
index 0eaf5183..0eaf5183 100644
--- a/src/qcam/assets/feathericons/chevrons-up.svg
+++ b/src/apps/qcam/assets/feathericons/chevrons-up.svg
diff --git a/src/qcam/assets/feathericons/chrome.svg b/src/apps/qcam/assets/feathericons/chrome.svg
index 9189815e..9189815e 100644
--- a/src/qcam/assets/feathericons/chrome.svg
+++ b/src/apps/qcam/assets/feathericons/chrome.svg
diff --git a/src/qcam/assets/feathericons/circle.svg b/src/apps/qcam/assets/feathericons/circle.svg
index b0090882..b0090882 100644
--- a/src/qcam/assets/feathericons/circle.svg
+++ b/src/apps/qcam/assets/feathericons/circle.svg
diff --git a/src/qcam/assets/feathericons/clipboard.svg b/src/apps/qcam/assets/feathericons/clipboard.svg
index ccee454d..ccee454d 100644
--- a/src/qcam/assets/feathericons/clipboard.svg
+++ b/src/apps/qcam/assets/feathericons/clipboard.svg
diff --git a/src/qcam/assets/feathericons/clock.svg b/src/apps/qcam/assets/feathericons/clock.svg
index ea3f5e50..ea3f5e50 100644
--- a/src/qcam/assets/feathericons/clock.svg
+++ b/src/apps/qcam/assets/feathericons/clock.svg
diff --git a/src/qcam/assets/feathericons/cloud-drizzle.svg b/src/apps/qcam/assets/feathericons/cloud-drizzle.svg
index 13af6bb5..13af6bb5 100644
--- a/src/qcam/assets/feathericons/cloud-drizzle.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-drizzle.svg
diff --git a/src/qcam/assets/feathericons/cloud-lightning.svg b/src/apps/qcam/assets/feathericons/cloud-lightning.svg
index 32d154cc..32d154cc 100644
--- a/src/qcam/assets/feathericons/cloud-lightning.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-lightning.svg
diff --git a/src/qcam/assets/feathericons/cloud-off.svg b/src/apps/qcam/assets/feathericons/cloud-off.svg
index 1e1e7d60..1e1e7d60 100644
--- a/src/qcam/assets/feathericons/cloud-off.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-off.svg
diff --git a/src/qcam/assets/feathericons/cloud-rain.svg b/src/apps/qcam/assets/feathericons/cloud-rain.svg
index 3e0b85b0..3e0b85b0 100644
--- a/src/qcam/assets/feathericons/cloud-rain.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-rain.svg
diff --git a/src/qcam/assets/feathericons/cloud-snow.svg b/src/apps/qcam/assets/feathericons/cloud-snow.svg
index e4eb8207..e4eb8207 100644
--- a/src/qcam/assets/feathericons/cloud-snow.svg
+++ b/src/apps/qcam/assets/feathericons/cloud-snow.svg
diff --git a/src/qcam/assets/feathericons/cloud.svg b/src/apps/qcam/assets/feathericons/cloud.svg
index 0ee0c632..0ee0c632 100644
--- a/src/qcam/assets/feathericons/cloud.svg
+++ b/src/apps/qcam/assets/feathericons/cloud.svg
diff --git a/src/qcam/assets/feathericons/code.svg b/src/apps/qcam/assets/feathericons/code.svg
index c4954b55..c4954b55 100644
--- a/src/qcam/assets/feathericons/code.svg
+++ b/src/apps/qcam/assets/feathericons/code.svg
diff --git a/src/qcam/assets/feathericons/codepen.svg b/src/apps/qcam/assets/feathericons/codepen.svg
index ab2a815a..ab2a815a 100644
--- a/src/qcam/assets/feathericons/codepen.svg
+++ b/src/apps/qcam/assets/feathericons/codepen.svg
diff --git a/src/qcam/assets/feathericons/codesandbox.svg b/src/apps/qcam/assets/feathericons/codesandbox.svg
index 49848f52..49848f52 100644
--- a/src/qcam/assets/feathericons/codesandbox.svg
+++ b/src/apps/qcam/assets/feathericons/codesandbox.svg
diff --git a/src/qcam/assets/feathericons/coffee.svg b/src/apps/qcam/assets/feathericons/coffee.svg
index 32905e52..32905e52 100644
--- a/src/qcam/assets/feathericons/coffee.svg
+++ b/src/apps/qcam/assets/feathericons/coffee.svg
diff --git a/src/qcam/assets/feathericons/columns.svg b/src/apps/qcam/assets/feathericons/columns.svg
index d264b557..d264b557 100644
--- a/src/qcam/assets/feathericons/columns.svg
+++ b/src/apps/qcam/assets/feathericons/columns.svg
diff --git a/src/qcam/assets/feathericons/command.svg b/src/apps/qcam/assets/feathericons/command.svg
index 93f554c3..93f554c3 100644
--- a/src/qcam/assets/feathericons/command.svg
+++ b/src/apps/qcam/assets/feathericons/command.svg
diff --git a/src/qcam/assets/feathericons/compass.svg b/src/apps/qcam/assets/feathericons/compass.svg
index 32962608..32962608 100644
--- a/src/qcam/assets/feathericons/compass.svg
+++ b/src/apps/qcam/assets/feathericons/compass.svg
diff --git a/src/qcam/assets/feathericons/copy.svg b/src/apps/qcam/assets/feathericons/copy.svg
index 4e0b09f1..4e0b09f1 100644
--- a/src/qcam/assets/feathericons/copy.svg
+++ b/src/apps/qcam/assets/feathericons/copy.svg
diff --git a/src/qcam/assets/feathericons/corner-down-left.svg b/src/apps/qcam/assets/feathericons/corner-down-left.svg
index 9fffb3e9..9fffb3e9 100644
--- a/src/qcam/assets/feathericons/corner-down-left.svg
+++ b/src/apps/qcam/assets/feathericons/corner-down-left.svg
diff --git a/src/qcam/assets/feathericons/corner-down-right.svg b/src/apps/qcam/assets/feathericons/corner-down-right.svg
index b27d408d..b27d408d 100644
--- a/src/qcam/assets/feathericons/corner-down-right.svg
+++ b/src/apps/qcam/assets/feathericons/corner-down-right.svg
diff --git a/src/qcam/assets/feathericons/corner-left-down.svg b/src/apps/qcam/assets/feathericons/corner-left-down.svg
index 24b8375c..24b8375c 100644
--- a/src/qcam/assets/feathericons/corner-left-down.svg
+++ b/src/apps/qcam/assets/feathericons/corner-left-down.svg
diff --git a/src/qcam/assets/feathericons/corner-left-up.svg b/src/apps/qcam/assets/feathericons/corner-left-up.svg
index e54527cd..e54527cd 100644
--- a/src/qcam/assets/feathericons/corner-left-up.svg
+++ b/src/apps/qcam/assets/feathericons/corner-left-up.svg
diff --git a/src/qcam/assets/feathericons/corner-right-down.svg b/src/apps/qcam/assets/feathericons/corner-right-down.svg
index a49e6d6c..a49e6d6c 100644
--- a/src/qcam/assets/feathericons/corner-right-down.svg
+++ b/src/apps/qcam/assets/feathericons/corner-right-down.svg
diff --git a/src/qcam/assets/feathericons/corner-right-up.svg b/src/apps/qcam/assets/feathericons/corner-right-up.svg
index a5c5dce5..a5c5dce5 100644
--- a/src/qcam/assets/feathericons/corner-right-up.svg
+++ b/src/apps/qcam/assets/feathericons/corner-right-up.svg
diff --git a/src/qcam/assets/feathericons/corner-up-left.svg b/src/apps/qcam/assets/feathericons/corner-up-left.svg
index 0a1ffd61..0a1ffd61 100644
--- a/src/qcam/assets/feathericons/corner-up-left.svg
+++ b/src/apps/qcam/assets/feathericons/corner-up-left.svg
diff --git a/src/qcam/assets/feathericons/corner-up-right.svg b/src/apps/qcam/assets/feathericons/corner-up-right.svg
index 0b8f961b..0b8f961b 100644
--- a/src/qcam/assets/feathericons/corner-up-right.svg
+++ b/src/apps/qcam/assets/feathericons/corner-up-right.svg
diff --git a/src/qcam/assets/feathericons/cpu.svg b/src/apps/qcam/assets/feathericons/cpu.svg
index 2ed16ef7..2ed16ef7 100644
--- a/src/qcam/assets/feathericons/cpu.svg
+++ b/src/apps/qcam/assets/feathericons/cpu.svg
diff --git a/src/qcam/assets/feathericons/credit-card.svg b/src/apps/qcam/assets/feathericons/credit-card.svg
index 1b7fd029..1b7fd029 100644
--- a/src/qcam/assets/feathericons/credit-card.svg
+++ b/src/apps/qcam/assets/feathericons/credit-card.svg
diff --git a/src/qcam/assets/feathericons/crop.svg b/src/apps/qcam/assets/feathericons/crop.svg
index ffbfd045..ffbfd045 100644
--- a/src/qcam/assets/feathericons/crop.svg
+++ b/src/apps/qcam/assets/feathericons/crop.svg
diff --git a/src/qcam/assets/feathericons/crosshair.svg b/src/apps/qcam/assets/feathericons/crosshair.svg
index ba394015..ba394015 100644
--- a/src/qcam/assets/feathericons/crosshair.svg
+++ b/src/apps/qcam/assets/feathericons/crosshair.svg
diff --git a/src/qcam/assets/feathericons/database.svg b/src/apps/qcam/assets/feathericons/database.svg
index c296fbcf..c296fbcf 100644
--- a/src/qcam/assets/feathericons/database.svg
+++ b/src/apps/qcam/assets/feathericons/database.svg
diff --git a/src/qcam/assets/feathericons/delete.svg b/src/apps/qcam/assets/feathericons/delete.svg
index 8c6074b9..8c6074b9 100644
--- a/src/qcam/assets/feathericons/delete.svg
+++ b/src/apps/qcam/assets/feathericons/delete.svg
diff --git a/src/qcam/assets/feathericons/disc.svg b/src/apps/qcam/assets/feathericons/disc.svg
index 2595b444..2595b444 100644
--- a/src/qcam/assets/feathericons/disc.svg
+++ b/src/apps/qcam/assets/feathericons/disc.svg
diff --git a/src/qcam/assets/feathericons/dollar-sign.svg b/src/apps/qcam/assets/feathericons/dollar-sign.svg
index 1a124d26..1a124d26 100644
--- a/src/qcam/assets/feathericons/dollar-sign.svg
+++ b/src/apps/qcam/assets/feathericons/dollar-sign.svg
diff --git a/src/qcam/assets/feathericons/download-cloud.svg b/src/apps/qcam/assets/feathericons/download-cloud.svg
index f3126fc3..f3126fc3 100644
--- a/src/qcam/assets/feathericons/download-cloud.svg
+++ b/src/apps/qcam/assets/feathericons/download-cloud.svg
diff --git a/src/qcam/assets/feathericons/download.svg b/src/apps/qcam/assets/feathericons/download.svg
index 76767a92..76767a92 100644
--- a/src/qcam/assets/feathericons/download.svg
+++ b/src/apps/qcam/assets/feathericons/download.svg
diff --git a/src/qcam/assets/feathericons/droplet.svg b/src/apps/qcam/assets/feathericons/droplet.svg
index ca093014..ca093014 100644
--- a/src/qcam/assets/feathericons/droplet.svg
+++ b/src/apps/qcam/assets/feathericons/droplet.svg
diff --git a/src/qcam/assets/feathericons/edit-2.svg b/src/apps/qcam/assets/feathericons/edit-2.svg
index 06830c9d..06830c9d 100644
--- a/src/qcam/assets/feathericons/edit-2.svg
+++ b/src/apps/qcam/assets/feathericons/edit-2.svg
diff --git a/src/qcam/assets/feathericons/edit-3.svg b/src/apps/qcam/assets/feathericons/edit-3.svg
index d728efcc..d728efcc 100644
--- a/src/qcam/assets/feathericons/edit-3.svg
+++ b/src/apps/qcam/assets/feathericons/edit-3.svg
diff --git a/src/qcam/assets/feathericons/edit.svg b/src/apps/qcam/assets/feathericons/edit.svg
index ec7b4ca2..ec7b4ca2 100644
--- a/src/qcam/assets/feathericons/edit.svg
+++ b/src/apps/qcam/assets/feathericons/edit.svg
diff --git a/src/qcam/assets/feathericons/external-link.svg b/src/apps/qcam/assets/feathericons/external-link.svg
index 6236df3e..6236df3e 100644
--- a/src/qcam/assets/feathericons/external-link.svg
+++ b/src/apps/qcam/assets/feathericons/external-link.svg
diff --git a/src/qcam/assets/feathericons/eye-off.svg b/src/apps/qcam/assets/feathericons/eye-off.svg
index 77c54cb4..77c54cb4 100644
--- a/src/qcam/assets/feathericons/eye-off.svg
+++ b/src/apps/qcam/assets/feathericons/eye-off.svg
diff --git a/src/qcam/assets/feathericons/eye.svg b/src/apps/qcam/assets/feathericons/eye.svg
index 9cde2437..9cde2437 100644
--- a/src/qcam/assets/feathericons/eye.svg
+++ b/src/apps/qcam/assets/feathericons/eye.svg
diff --git a/src/qcam/assets/feathericons/facebook.svg b/src/apps/qcam/assets/feathericons/facebook.svg
index 2570f56a..2570f56a 100644
--- a/src/qcam/assets/feathericons/facebook.svg
+++ b/src/apps/qcam/assets/feathericons/facebook.svg
diff --git a/src/qcam/assets/feathericons/fast-forward.svg b/src/apps/qcam/assets/feathericons/fast-forward.svg
index fa39877a..fa39877a 100644
--- a/src/qcam/assets/feathericons/fast-forward.svg
+++ b/src/apps/qcam/assets/feathericons/fast-forward.svg
diff --git a/src/qcam/assets/feathericons/feather.svg b/src/apps/qcam/assets/feathericons/feather.svg
index ac3b868d..ac3b868d 100644
--- a/src/qcam/assets/feathericons/feather.svg
+++ b/src/apps/qcam/assets/feathericons/feather.svg
diff --git a/src/apps/qcam/assets/feathericons/feathericons.qrc b/src/apps/qcam/assets/feathericons/feathericons.qrc
new file mode 100644
index 00000000..c5302040
--- /dev/null
+++ b/src/apps/qcam/assets/feathericons/feathericons.qrc
@@ -0,0 +1,11 @@
+<!-- SPDX-License-Identifier: GPL-2.0-or-later -->
+<!DOCTYPE RCC><RCC version="1.0">
+<qresource>
+ <file>aperture.svg</file>
+ <file>camera-off.svg</file>
+ <file>play-circle.svg</file>
+ <file>save.svg</file>
+ <file>stop-circle.svg</file>
+ <file>x-circle.svg</file>
+</qresource>
+</RCC>
diff --git a/src/qcam/assets/feathericons/figma.svg b/src/apps/qcam/assets/feathericons/figma.svg
index 66fd2178..66fd2178 100644
--- a/src/qcam/assets/feathericons/figma.svg
+++ b/src/apps/qcam/assets/feathericons/figma.svg
diff --git a/src/qcam/assets/feathericons/file-minus.svg b/src/apps/qcam/assets/feathericons/file-minus.svg
index 345756ef..345756ef 100644
--- a/src/qcam/assets/feathericons/file-minus.svg
+++ b/src/apps/qcam/assets/feathericons/file-minus.svg
diff --git a/src/qcam/assets/feathericons/file-plus.svg b/src/apps/qcam/assets/feathericons/file-plus.svg
index eed12004..eed12004 100644
--- a/src/qcam/assets/feathericons/file-plus.svg
+++ b/src/apps/qcam/assets/feathericons/file-plus.svg
diff --git a/src/qcam/assets/feathericons/file-text.svg b/src/apps/qcam/assets/feathericons/file-text.svg
index 4197ddd4..4197ddd4 100644
--- a/src/qcam/assets/feathericons/file-text.svg
+++ b/src/apps/qcam/assets/feathericons/file-text.svg
diff --git a/src/qcam/assets/feathericons/file.svg b/src/apps/qcam/assets/feathericons/file.svg
index 378519ab..378519ab 100644
--- a/src/qcam/assets/feathericons/file.svg
+++ b/src/apps/qcam/assets/feathericons/file.svg
diff --git a/src/qcam/assets/feathericons/film.svg b/src/apps/qcam/assets/feathericons/film.svg
index ac46360d..ac46360d 100644
--- a/src/qcam/assets/feathericons/film.svg
+++ b/src/apps/qcam/assets/feathericons/film.svg
diff --git a/src/qcam/assets/feathericons/filter.svg b/src/apps/qcam/assets/feathericons/filter.svg
index 38a47e04..38a47e04 100644
--- a/src/qcam/assets/feathericons/filter.svg
+++ b/src/apps/qcam/assets/feathericons/filter.svg
diff --git a/src/qcam/assets/feathericons/flag.svg b/src/apps/qcam/assets/feathericons/flag.svg
index 037737cb..037737cb 100644
--- a/src/qcam/assets/feathericons/flag.svg
+++ b/src/apps/qcam/assets/feathericons/flag.svg
diff --git a/src/qcam/assets/feathericons/folder-minus.svg b/src/apps/qcam/assets/feathericons/folder-minus.svg
index d5b7af65..d5b7af65 100644
--- a/src/qcam/assets/feathericons/folder-minus.svg
+++ b/src/apps/qcam/assets/feathericons/folder-minus.svg
diff --git a/src/qcam/assets/feathericons/folder-plus.svg b/src/apps/qcam/assets/feathericons/folder-plus.svg
index 898f2fc9..898f2fc9 100644
--- a/src/qcam/assets/feathericons/folder-plus.svg
+++ b/src/apps/qcam/assets/feathericons/folder-plus.svg
diff --git a/src/qcam/assets/feathericons/folder.svg b/src/apps/qcam/assets/feathericons/folder.svg
index 134458b9..134458b9 100644
--- a/src/qcam/assets/feathericons/folder.svg
+++ b/src/apps/qcam/assets/feathericons/folder.svg
diff --git a/src/qcam/assets/feathericons/framer.svg b/src/apps/qcam/assets/feathericons/framer.svg
index 3e663478..3e663478 100644
--- a/src/qcam/assets/feathericons/framer.svg
+++ b/src/apps/qcam/assets/feathericons/framer.svg
diff --git a/src/qcam/assets/feathericons/frown.svg b/src/apps/qcam/assets/feathericons/frown.svg
index f3122547..f3122547 100644
--- a/src/qcam/assets/feathericons/frown.svg
+++ b/src/apps/qcam/assets/feathericons/frown.svg
diff --git a/src/qcam/assets/feathericons/gift.svg b/src/apps/qcam/assets/feathericons/gift.svg
index d2c14bd6..d2c14bd6 100644
--- a/src/qcam/assets/feathericons/gift.svg
+++ b/src/apps/qcam/assets/feathericons/gift.svg
diff --git a/src/qcam/assets/feathericons/git-branch.svg b/src/apps/qcam/assets/feathericons/git-branch.svg
index 44003726..44003726 100644
--- a/src/qcam/assets/feathericons/git-branch.svg
+++ b/src/apps/qcam/assets/feathericons/git-branch.svg
diff --git a/src/qcam/assets/feathericons/git-commit.svg b/src/apps/qcam/assets/feathericons/git-commit.svg
index e959d725..e959d725 100644
--- a/src/qcam/assets/feathericons/git-commit.svg
+++ b/src/apps/qcam/assets/feathericons/git-commit.svg
diff --git a/src/qcam/assets/feathericons/git-merge.svg b/src/apps/qcam/assets/feathericons/git-merge.svg
index c65fffdd..c65fffdd 100644
--- a/src/qcam/assets/feathericons/git-merge.svg
+++ b/src/apps/qcam/assets/feathericons/git-merge.svg
diff --git a/src/qcam/assets/feathericons/git-pull-request.svg b/src/apps/qcam/assets/feathericons/git-pull-request.svg
index fc80bdfd..fc80bdfd 100644
--- a/src/qcam/assets/feathericons/git-pull-request.svg
+++ b/src/apps/qcam/assets/feathericons/git-pull-request.svg
diff --git a/src/qcam/assets/feathericons/github.svg b/src/apps/qcam/assets/feathericons/github.svg
index ff0af481..ff0af481 100644
--- a/src/qcam/assets/feathericons/github.svg
+++ b/src/apps/qcam/assets/feathericons/github.svg
diff --git a/src/qcam/assets/feathericons/gitlab.svg b/src/apps/qcam/assets/feathericons/gitlab.svg
index 85d54a1e..85d54a1e 100644
--- a/src/qcam/assets/feathericons/gitlab.svg
+++ b/src/apps/qcam/assets/feathericons/gitlab.svg
diff --git a/src/qcam/assets/feathericons/globe.svg b/src/apps/qcam/assets/feathericons/globe.svg
index 0a0586d3..0a0586d3 100644
--- a/src/qcam/assets/feathericons/globe.svg
+++ b/src/apps/qcam/assets/feathericons/globe.svg
diff --git a/src/qcam/assets/feathericons/grid.svg b/src/apps/qcam/assets/feathericons/grid.svg
index 8ef2e9d8..8ef2e9d8 100644
--- a/src/qcam/assets/feathericons/grid.svg
+++ b/src/apps/qcam/assets/feathericons/grid.svg
diff --git a/src/qcam/assets/feathericons/hard-drive.svg b/src/apps/qcam/assets/feathericons/hard-drive.svg
index 8e90fa1b..8e90fa1b 100644
--- a/src/qcam/assets/feathericons/hard-drive.svg
+++ b/src/apps/qcam/assets/feathericons/hard-drive.svg
diff --git a/src/qcam/assets/feathericons/hash.svg b/src/apps/qcam/assets/feathericons/hash.svg
index c9c8d41f..c9c8d41f 100644
--- a/src/qcam/assets/feathericons/hash.svg
+++ b/src/apps/qcam/assets/feathericons/hash.svg
diff --git a/src/qcam/assets/feathericons/headphones.svg b/src/apps/qcam/assets/feathericons/headphones.svg
index fd8915b4..fd8915b4 100644
--- a/src/qcam/assets/feathericons/headphones.svg
+++ b/src/apps/qcam/assets/feathericons/headphones.svg
diff --git a/src/qcam/assets/feathericons/heart.svg b/src/apps/qcam/assets/feathericons/heart.svg
index a083b7e2..a083b7e2 100644
--- a/src/qcam/assets/feathericons/heart.svg
+++ b/src/apps/qcam/assets/feathericons/heart.svg
diff --git a/src/qcam/assets/feathericons/help-circle.svg b/src/apps/qcam/assets/feathericons/help-circle.svg
index 51fddd80..51fddd80 100644
--- a/src/qcam/assets/feathericons/help-circle.svg
+++ b/src/apps/qcam/assets/feathericons/help-circle.svg
diff --git a/src/qcam/assets/feathericons/hexagon.svg b/src/apps/qcam/assets/feathericons/hexagon.svg
index eae7f255..eae7f255 100644
--- a/src/qcam/assets/feathericons/hexagon.svg
+++ b/src/apps/qcam/assets/feathericons/hexagon.svg
diff --git a/src/qcam/assets/feathericons/home.svg b/src/apps/qcam/assets/feathericons/home.svg
index 7bb31b23..7bb31b23 100644
--- a/src/qcam/assets/feathericons/home.svg
+++ b/src/apps/qcam/assets/feathericons/home.svg
diff --git a/src/qcam/assets/feathericons/image.svg b/src/apps/qcam/assets/feathericons/image.svg
index a7d84b98..a7d84b98 100644
--- a/src/qcam/assets/feathericons/image.svg
+++ b/src/apps/qcam/assets/feathericons/image.svg
diff --git a/src/qcam/assets/feathericons/inbox.svg b/src/apps/qcam/assets/feathericons/inbox.svg
index 03a13b4e..03a13b4e 100644
--- a/src/qcam/assets/feathericons/inbox.svg
+++ b/src/apps/qcam/assets/feathericons/inbox.svg
diff --git a/src/qcam/assets/feathericons/info.svg b/src/apps/qcam/assets/feathericons/info.svg
index a09fa5f1..a09fa5f1 100644
--- a/src/qcam/assets/feathericons/info.svg
+++ b/src/apps/qcam/assets/feathericons/info.svg
diff --git a/src/qcam/assets/feathericons/instagram.svg b/src/apps/qcam/assets/feathericons/instagram.svg
index 9fdb8e35..9fdb8e35 100644
--- a/src/qcam/assets/feathericons/instagram.svg
+++ b/src/apps/qcam/assets/feathericons/instagram.svg
diff --git a/src/qcam/assets/feathericons/italic.svg b/src/apps/qcam/assets/feathericons/italic.svg
index a123d371..a123d371 100644
--- a/src/qcam/assets/feathericons/italic.svg
+++ b/src/apps/qcam/assets/feathericons/italic.svg
diff --git a/src/qcam/assets/feathericons/key.svg b/src/apps/qcam/assets/feathericons/key.svg
index e778e74e..e778e74e 100644
--- a/src/qcam/assets/feathericons/key.svg
+++ b/src/apps/qcam/assets/feathericons/key.svg
diff --git a/src/qcam/assets/feathericons/layers.svg b/src/apps/qcam/assets/feathericons/layers.svg
index ea788c22..ea788c22 100644
--- a/src/qcam/assets/feathericons/layers.svg
+++ b/src/apps/qcam/assets/feathericons/layers.svg
diff --git a/src/qcam/assets/feathericons/layout.svg b/src/apps/qcam/assets/feathericons/layout.svg
index 28743d92..28743d92 100644
--- a/src/qcam/assets/feathericons/layout.svg
+++ b/src/apps/qcam/assets/feathericons/layout.svg
diff --git a/src/qcam/assets/feathericons/life-buoy.svg b/src/apps/qcam/assets/feathericons/life-buoy.svg
index 54c2bd7d..54c2bd7d 100644
--- a/src/qcam/assets/feathericons/life-buoy.svg
+++ b/src/apps/qcam/assets/feathericons/life-buoy.svg
diff --git a/src/qcam/assets/feathericons/link-2.svg b/src/apps/qcam/assets/feathericons/link-2.svg
index 8cc7f6dd..8cc7f6dd 100644
--- a/src/qcam/assets/feathericons/link-2.svg
+++ b/src/apps/qcam/assets/feathericons/link-2.svg
diff --git a/src/qcam/assets/feathericons/link.svg b/src/apps/qcam/assets/feathericons/link.svg
index c89dd41c..c89dd41c 100644
--- a/src/qcam/assets/feathericons/link.svg
+++ b/src/apps/qcam/assets/feathericons/link.svg
diff --git a/src/qcam/assets/feathericons/linkedin.svg b/src/apps/qcam/assets/feathericons/linkedin.svg
index 39531094..39531094 100644
--- a/src/qcam/assets/feathericons/linkedin.svg
+++ b/src/apps/qcam/assets/feathericons/linkedin.svg
diff --git a/src/qcam/assets/feathericons/list.svg b/src/apps/qcam/assets/feathericons/list.svg
index 5ce38eaa..5ce38eaa 100644
--- a/src/qcam/assets/feathericons/list.svg
+++ b/src/apps/qcam/assets/feathericons/list.svg
diff --git a/src/qcam/assets/feathericons/loader.svg b/src/apps/qcam/assets/feathericons/loader.svg
index e1a70c12..e1a70c12 100644
--- a/src/qcam/assets/feathericons/loader.svg
+++ b/src/apps/qcam/assets/feathericons/loader.svg
diff --git a/src/qcam/assets/feathericons/lock.svg b/src/apps/qcam/assets/feathericons/lock.svg
index de09d9db..de09d9db 100644
--- a/src/qcam/assets/feathericons/lock.svg
+++ b/src/apps/qcam/assets/feathericons/lock.svg
diff --git a/src/qcam/assets/feathericons/log-in.svg b/src/apps/qcam/assets/feathericons/log-in.svg
index ba0da59a..ba0da59a 100644
--- a/src/qcam/assets/feathericons/log-in.svg
+++ b/src/apps/qcam/assets/feathericons/log-in.svg
diff --git a/src/qcam/assets/feathericons/log-out.svg b/src/apps/qcam/assets/feathericons/log-out.svg
index c9002c90..c9002c90 100644
--- a/src/qcam/assets/feathericons/log-out.svg
+++ b/src/apps/qcam/assets/feathericons/log-out.svg
diff --git a/src/qcam/assets/feathericons/mail.svg b/src/apps/qcam/assets/feathericons/mail.svg
index 2af169e8..2af169e8 100644
--- a/src/qcam/assets/feathericons/mail.svg
+++ b/src/apps/qcam/assets/feathericons/mail.svg
diff --git a/src/qcam/assets/feathericons/map-pin.svg b/src/apps/qcam/assets/feathericons/map-pin.svg
index d5548e92..d5548e92 100644
--- a/src/qcam/assets/feathericons/map-pin.svg
+++ b/src/apps/qcam/assets/feathericons/map-pin.svg
diff --git a/src/qcam/assets/feathericons/map.svg b/src/apps/qcam/assets/feathericons/map.svg
index ecebd7bf..ecebd7bf 100644
--- a/src/qcam/assets/feathericons/map.svg
+++ b/src/apps/qcam/assets/feathericons/map.svg
diff --git a/src/qcam/assets/feathericons/maximize-2.svg b/src/apps/qcam/assets/feathericons/maximize-2.svg
index e41fc0b7..e41fc0b7 100644
--- a/src/qcam/assets/feathericons/maximize-2.svg
+++ b/src/apps/qcam/assets/feathericons/maximize-2.svg
diff --git a/src/qcam/assets/feathericons/maximize.svg b/src/apps/qcam/assets/feathericons/maximize.svg
index fc305189..fc305189 100644
--- a/src/qcam/assets/feathericons/maximize.svg
+++ b/src/apps/qcam/assets/feathericons/maximize.svg
diff --git a/src/qcam/assets/feathericons/meh.svg b/src/apps/qcam/assets/feathericons/meh.svg
index 6f57fff2..6f57fff2 100644
--- a/src/qcam/assets/feathericons/meh.svg
+++ b/src/apps/qcam/assets/feathericons/meh.svg
diff --git a/src/qcam/assets/feathericons/menu.svg b/src/apps/qcam/assets/feathericons/menu.svg
index e8a84a95..e8a84a95 100644
--- a/src/qcam/assets/feathericons/menu.svg
+++ b/src/apps/qcam/assets/feathericons/menu.svg
diff --git a/src/qcam/assets/feathericons/message-circle.svg b/src/apps/qcam/assets/feathericons/message-circle.svg
index 4b21b32b..4b21b32b 100644
--- a/src/qcam/assets/feathericons/message-circle.svg
+++ b/src/apps/qcam/assets/feathericons/message-circle.svg
diff --git a/src/qcam/assets/feathericons/message-square.svg b/src/apps/qcam/assets/feathericons/message-square.svg
index 6a2e4e59..6a2e4e59 100644
--- a/src/qcam/assets/feathericons/message-square.svg
+++ b/src/apps/qcam/assets/feathericons/message-square.svg
diff --git a/src/qcam/assets/feathericons/mic-off.svg b/src/apps/qcam/assets/feathericons/mic-off.svg
index 0786219c..0786219c 100644
--- a/src/qcam/assets/feathericons/mic-off.svg
+++ b/src/apps/qcam/assets/feathericons/mic-off.svg
diff --git a/src/qcam/assets/feathericons/mic.svg b/src/apps/qcam/assets/feathericons/mic.svg
index dc5f780c..dc5f780c 100644
--- a/src/qcam/assets/feathericons/mic.svg
+++ b/src/apps/qcam/assets/feathericons/mic.svg
diff --git a/src/qcam/assets/feathericons/minimize-2.svg b/src/apps/qcam/assets/feathericons/minimize-2.svg
index a720fa6c..a720fa6c 100644
--- a/src/qcam/assets/feathericons/minimize-2.svg
+++ b/src/apps/qcam/assets/feathericons/minimize-2.svg
diff --git a/src/qcam/assets/feathericons/minimize.svg b/src/apps/qcam/assets/feathericons/minimize.svg
index 46d61196..46d61196 100644
--- a/src/qcam/assets/feathericons/minimize.svg
+++ b/src/apps/qcam/assets/feathericons/minimize.svg
diff --git a/src/qcam/assets/feathericons/minus-circle.svg b/src/apps/qcam/assets/feathericons/minus-circle.svg
index 80c0de1e..80c0de1e 100644
--- a/src/qcam/assets/feathericons/minus-circle.svg
+++ b/src/apps/qcam/assets/feathericons/minus-circle.svg
diff --git a/src/qcam/assets/feathericons/minus-square.svg b/src/apps/qcam/assets/feathericons/minus-square.svg
index 4862832a..4862832a 100644
--- a/src/qcam/assets/feathericons/minus-square.svg
+++ b/src/apps/qcam/assets/feathericons/minus-square.svg
diff --git a/src/qcam/assets/feathericons/minus.svg b/src/apps/qcam/assets/feathericons/minus.svg
index 93cc7340..93cc7340 100644
--- a/src/qcam/assets/feathericons/minus.svg
+++ b/src/apps/qcam/assets/feathericons/minus.svg
diff --git a/src/qcam/assets/feathericons/monitor.svg b/src/apps/qcam/assets/feathericons/monitor.svg
index 6c3556db..6c3556db 100644
--- a/src/qcam/assets/feathericons/monitor.svg
+++ b/src/apps/qcam/assets/feathericons/monitor.svg
diff --git a/src/qcam/assets/feathericons/moon.svg b/src/apps/qcam/assets/feathericons/moon.svg
index dbf7c6cf..dbf7c6cf 100644
--- a/src/qcam/assets/feathericons/moon.svg
+++ b/src/apps/qcam/assets/feathericons/moon.svg
diff --git a/src/qcam/assets/feathericons/more-horizontal.svg b/src/apps/qcam/assets/feathericons/more-horizontal.svg
index dc6a8556..dc6a8556 100644
--- a/src/qcam/assets/feathericons/more-horizontal.svg
+++ b/src/apps/qcam/assets/feathericons/more-horizontal.svg
diff --git a/src/qcam/assets/feathericons/more-vertical.svg b/src/apps/qcam/assets/feathericons/more-vertical.svg
index cba6958f..cba6958f 100644
--- a/src/qcam/assets/feathericons/more-vertical.svg
+++ b/src/apps/qcam/assets/feathericons/more-vertical.svg
diff --git a/src/qcam/assets/feathericons/mouse-pointer.svg b/src/apps/qcam/assets/feathericons/mouse-pointer.svg
index f5af5591..f5af5591 100644
--- a/src/qcam/assets/feathericons/mouse-pointer.svg
+++ b/src/apps/qcam/assets/feathericons/mouse-pointer.svg
diff --git a/src/qcam/assets/feathericons/move.svg b/src/apps/qcam/assets/feathericons/move.svg
index 4e251b56..4e251b56 100644
--- a/src/qcam/assets/feathericons/move.svg
+++ b/src/apps/qcam/assets/feathericons/move.svg
diff --git a/src/qcam/assets/feathericons/music.svg b/src/apps/qcam/assets/feathericons/music.svg
index 7bee2f7e..7bee2f7e 100644
--- a/src/qcam/assets/feathericons/music.svg
+++ b/src/apps/qcam/assets/feathericons/music.svg
diff --git a/src/qcam/assets/feathericons/navigation-2.svg b/src/apps/qcam/assets/feathericons/navigation-2.svg
index ae31db96..ae31db96 100644
--- a/src/qcam/assets/feathericons/navigation-2.svg
+++ b/src/apps/qcam/assets/feathericons/navigation-2.svg
diff --git a/src/qcam/assets/feathericons/navigation.svg b/src/apps/qcam/assets/feathericons/navigation.svg
index f600a414..f600a414 100644
--- a/src/qcam/assets/feathericons/navigation.svg
+++ b/src/apps/qcam/assets/feathericons/navigation.svg
diff --git a/src/qcam/assets/feathericons/octagon.svg b/src/apps/qcam/assets/feathericons/octagon.svg
index 124c5483..124c5483 100644
--- a/src/qcam/assets/feathericons/octagon.svg
+++ b/src/apps/qcam/assets/feathericons/octagon.svg
diff --git a/src/qcam/assets/feathericons/package.svg b/src/apps/qcam/assets/feathericons/package.svg
index f1e09eec..f1e09eec 100644
--- a/src/qcam/assets/feathericons/package.svg
+++ b/src/apps/qcam/assets/feathericons/package.svg
diff --git a/src/qcam/assets/feathericons/paperclip.svg b/src/apps/qcam/assets/feathericons/paperclip.svg
index b1f69b7a..b1f69b7a 100644
--- a/src/qcam/assets/feathericons/paperclip.svg
+++ b/src/apps/qcam/assets/feathericons/paperclip.svg
diff --git a/src/qcam/assets/feathericons/pause-circle.svg b/src/apps/qcam/assets/feathericons/pause-circle.svg
index f6b1a8df..f6b1a8df 100644
--- a/src/qcam/assets/feathericons/pause-circle.svg
+++ b/src/apps/qcam/assets/feathericons/pause-circle.svg
diff --git a/src/qcam/assets/feathericons/pause.svg b/src/apps/qcam/assets/feathericons/pause.svg
index 4e78038d..4e78038d 100644
--- a/src/qcam/assets/feathericons/pause.svg
+++ b/src/apps/qcam/assets/feathericons/pause.svg
diff --git a/src/qcam/assets/feathericons/pen-tool.svg b/src/apps/qcam/assets/feathericons/pen-tool.svg
index 0d26fa1e..0d26fa1e 100644
--- a/src/qcam/assets/feathericons/pen-tool.svg
+++ b/src/apps/qcam/assets/feathericons/pen-tool.svg
diff --git a/src/qcam/assets/feathericons/percent.svg b/src/apps/qcam/assets/feathericons/percent.svg
index 2cb9719d..2cb9719d 100644
--- a/src/qcam/assets/feathericons/percent.svg
+++ b/src/apps/qcam/assets/feathericons/percent.svg
diff --git a/src/qcam/assets/feathericons/phone-call.svg b/src/apps/qcam/assets/feathericons/phone-call.svg
index 8b866602..8b866602 100644
--- a/src/qcam/assets/feathericons/phone-call.svg
+++ b/src/apps/qcam/assets/feathericons/phone-call.svg
diff --git a/src/qcam/assets/feathericons/phone-forwarded.svg b/src/apps/qcam/assets/feathericons/phone-forwarded.svg
index aa21befc..aa21befc 100644
--- a/src/qcam/assets/feathericons/phone-forwarded.svg
+++ b/src/apps/qcam/assets/feathericons/phone-forwarded.svg
diff --git a/src/qcam/assets/feathericons/phone-incoming.svg b/src/apps/qcam/assets/feathericons/phone-incoming.svg
index b2d523a8..b2d523a8 100644
--- a/src/qcam/assets/feathericons/phone-incoming.svg
+++ b/src/apps/qcam/assets/feathericons/phone-incoming.svg
diff --git a/src/qcam/assets/feathericons/phone-missed.svg b/src/apps/qcam/assets/feathericons/phone-missed.svg
index 4950f09f..4950f09f 100644
--- a/src/qcam/assets/feathericons/phone-missed.svg
+++ b/src/apps/qcam/assets/feathericons/phone-missed.svg
diff --git a/src/qcam/assets/feathericons/phone-off.svg b/src/apps/qcam/assets/feathericons/phone-off.svg
index 4d00fb3d..4d00fb3d 100644
--- a/src/qcam/assets/feathericons/phone-off.svg
+++ b/src/apps/qcam/assets/feathericons/phone-off.svg
diff --git a/src/qcam/assets/feathericons/phone-outgoing.svg b/src/apps/qcam/assets/feathericons/phone-outgoing.svg
index fea27a37..fea27a37 100644
--- a/src/qcam/assets/feathericons/phone-outgoing.svg
+++ b/src/apps/qcam/assets/feathericons/phone-outgoing.svg
diff --git a/src/qcam/assets/feathericons/phone.svg b/src/apps/qcam/assets/feathericons/phone.svg
index 2a35154a..2a35154a 100644
--- a/src/qcam/assets/feathericons/phone.svg
+++ b/src/apps/qcam/assets/feathericons/phone.svg
diff --git a/src/qcam/assets/feathericons/pie-chart.svg b/src/apps/qcam/assets/feathericons/pie-chart.svg
index b5bbe67c..b5bbe67c 100644
--- a/src/qcam/assets/feathericons/pie-chart.svg
+++ b/src/apps/qcam/assets/feathericons/pie-chart.svg
diff --git a/src/qcam/assets/feathericons/play-circle.svg b/src/apps/qcam/assets/feathericons/play-circle.svg
index 8766dc7b..8766dc7b 100644
--- a/src/qcam/assets/feathericons/play-circle.svg
+++ b/src/apps/qcam/assets/feathericons/play-circle.svg
diff --git a/src/qcam/assets/feathericons/play.svg b/src/apps/qcam/assets/feathericons/play.svg
index fd76e30d..fd76e30d 100644
--- a/src/qcam/assets/feathericons/play.svg
+++ b/src/apps/qcam/assets/feathericons/play.svg
diff --git a/src/qcam/assets/feathericons/plus-circle.svg b/src/apps/qcam/assets/feathericons/plus-circle.svg
index 4291ff05..4291ff05 100644
--- a/src/qcam/assets/feathericons/plus-circle.svg
+++ b/src/apps/qcam/assets/feathericons/plus-circle.svg
diff --git a/src/qcam/assets/feathericons/plus-square.svg b/src/apps/qcam/assets/feathericons/plus-square.svg
index c380e24b..c380e24b 100644
--- a/src/qcam/assets/feathericons/plus-square.svg
+++ b/src/apps/qcam/assets/feathericons/plus-square.svg
diff --git a/src/qcam/assets/feathericons/plus.svg b/src/apps/qcam/assets/feathericons/plus.svg
index 703c5b7b..703c5b7b 100644
--- a/src/qcam/assets/feathericons/plus.svg
+++ b/src/apps/qcam/assets/feathericons/plus.svg
diff --git a/src/qcam/assets/feathericons/pocket.svg b/src/apps/qcam/assets/feathericons/pocket.svg
index a3b25619..a3b25619 100644
--- a/src/qcam/assets/feathericons/pocket.svg
+++ b/src/apps/qcam/assets/feathericons/pocket.svg
diff --git a/src/qcam/assets/feathericons/power.svg b/src/apps/qcam/assets/feathericons/power.svg
index 598308fc..598308fc 100644
--- a/src/qcam/assets/feathericons/power.svg
+++ b/src/apps/qcam/assets/feathericons/power.svg
diff --git a/src/qcam/assets/feathericons/printer.svg b/src/apps/qcam/assets/feathericons/printer.svg
index 8a9a7ace..8a9a7ace 100644
--- a/src/qcam/assets/feathericons/printer.svg
+++ b/src/apps/qcam/assets/feathericons/printer.svg
diff --git a/src/qcam/assets/feathericons/radio.svg b/src/apps/qcam/assets/feathericons/radio.svg
index 5abfcd13..5abfcd13 100644
--- a/src/qcam/assets/feathericons/radio.svg
+++ b/src/apps/qcam/assets/feathericons/radio.svg
diff --git a/src/qcam/assets/feathericons/refresh-ccw.svg b/src/apps/qcam/assets/feathericons/refresh-ccw.svg
index 10cff0ec..10cff0ec 100644
--- a/src/qcam/assets/feathericons/refresh-ccw.svg
+++ b/src/apps/qcam/assets/feathericons/refresh-ccw.svg
diff --git a/src/qcam/assets/feathericons/refresh-cw.svg b/src/apps/qcam/assets/feathericons/refresh-cw.svg
index 06c358dd..06c358dd 100644
--- a/src/qcam/assets/feathericons/refresh-cw.svg
+++ b/src/apps/qcam/assets/feathericons/refresh-cw.svg
diff --git a/src/qcam/assets/feathericons/repeat.svg b/src/apps/qcam/assets/feathericons/repeat.svg
index c7657b08..c7657b08 100644
--- a/src/qcam/assets/feathericons/repeat.svg
+++ b/src/apps/qcam/assets/feathericons/repeat.svg
diff --git a/src/qcam/assets/feathericons/rewind.svg b/src/apps/qcam/assets/feathericons/rewind.svg
index 7b0fa3d5..7b0fa3d5 100644
--- a/src/qcam/assets/feathericons/rewind.svg
+++ b/src/apps/qcam/assets/feathericons/rewind.svg
diff --git a/src/qcam/assets/feathericons/rotate-ccw.svg b/src/apps/qcam/assets/feathericons/rotate-ccw.svg
index ade5dc42..ade5dc42 100644
--- a/src/qcam/assets/feathericons/rotate-ccw.svg
+++ b/src/apps/qcam/assets/feathericons/rotate-ccw.svg
diff --git a/src/qcam/assets/feathericons/rotate-cw.svg b/src/apps/qcam/assets/feathericons/rotate-cw.svg
index 83dca351..83dca351 100644
--- a/src/qcam/assets/feathericons/rotate-cw.svg
+++ b/src/apps/qcam/assets/feathericons/rotate-cw.svg
diff --git a/src/qcam/assets/feathericons/rss.svg b/src/apps/qcam/assets/feathericons/rss.svg
index c9a13684..c9a13684 100644
--- a/src/qcam/assets/feathericons/rss.svg
+++ b/src/apps/qcam/assets/feathericons/rss.svg
diff --git a/src/qcam/assets/feathericons/save.svg b/src/apps/qcam/assets/feathericons/save.svg
index 46c72990..46c72990 100644
--- a/src/qcam/assets/feathericons/save.svg
+++ b/src/apps/qcam/assets/feathericons/save.svg
diff --git a/src/qcam/assets/feathericons/scissors.svg b/src/apps/qcam/assets/feathericons/scissors.svg
index fd0647ff..fd0647ff 100644
--- a/src/qcam/assets/feathericons/scissors.svg
+++ b/src/apps/qcam/assets/feathericons/scissors.svg
diff --git a/src/qcam/assets/feathericons/search.svg b/src/apps/qcam/assets/feathericons/search.svg
index 8710306d..8710306d 100644
--- a/src/qcam/assets/feathericons/search.svg
+++ b/src/apps/qcam/assets/feathericons/search.svg
diff --git a/src/qcam/assets/feathericons/send.svg b/src/apps/qcam/assets/feathericons/send.svg
index 42ef2a24..42ef2a24 100644
--- a/src/qcam/assets/feathericons/send.svg
+++ b/src/apps/qcam/assets/feathericons/send.svg
diff --git a/src/qcam/assets/feathericons/server.svg b/src/apps/qcam/assets/feathericons/server.svg
index 54ce094a..54ce094a 100644
--- a/src/qcam/assets/feathericons/server.svg
+++ b/src/apps/qcam/assets/feathericons/server.svg
diff --git a/src/qcam/assets/feathericons/settings.svg b/src/apps/qcam/assets/feathericons/settings.svg
index 19c27265..19c27265 100644
--- a/src/qcam/assets/feathericons/settings.svg
+++ b/src/apps/qcam/assets/feathericons/settings.svg
diff --git a/src/qcam/assets/feathericons/share-2.svg b/src/apps/qcam/assets/feathericons/share-2.svg
index 09b1c7bc..09b1c7bc 100644
--- a/src/qcam/assets/feathericons/share-2.svg
+++ b/src/apps/qcam/assets/feathericons/share-2.svg
diff --git a/src/qcam/assets/feathericons/share.svg b/src/apps/qcam/assets/feathericons/share.svg
index df38c14d..df38c14d 100644
--- a/src/qcam/assets/feathericons/share.svg
+++ b/src/apps/qcam/assets/feathericons/share.svg
diff --git a/src/qcam/assets/feathericons/shield-off.svg b/src/apps/qcam/assets/feathericons/shield-off.svg
index 18692ddd..18692ddd 100644
--- a/src/qcam/assets/feathericons/shield-off.svg
+++ b/src/apps/qcam/assets/feathericons/shield-off.svg
diff --git a/src/qcam/assets/feathericons/shield.svg b/src/apps/qcam/assets/feathericons/shield.svg
index c7c48413..c7c48413 100644
--- a/src/qcam/assets/feathericons/shield.svg
+++ b/src/apps/qcam/assets/feathericons/shield.svg
diff --git a/src/qcam/assets/feathericons/shopping-bag.svg b/src/apps/qcam/assets/feathericons/shopping-bag.svg
index eaa39e81..eaa39e81 100644
--- a/src/qcam/assets/feathericons/shopping-bag.svg
+++ b/src/apps/qcam/assets/feathericons/shopping-bag.svg
diff --git a/src/qcam/assets/feathericons/shopping-cart.svg b/src/apps/qcam/assets/feathericons/shopping-cart.svg
index 17a40bf4..17a40bf4 100644
--- a/src/qcam/assets/feathericons/shopping-cart.svg
+++ b/src/apps/qcam/assets/feathericons/shopping-cart.svg
diff --git a/src/qcam/assets/feathericons/shuffle.svg b/src/apps/qcam/assets/feathericons/shuffle.svg
index 8cfb5db5..8cfb5db5 100644
--- a/src/qcam/assets/feathericons/shuffle.svg
+++ b/src/apps/qcam/assets/feathericons/shuffle.svg
diff --git a/src/qcam/assets/feathericons/sidebar.svg b/src/apps/qcam/assets/feathericons/sidebar.svg
index 8ba817e6..8ba817e6 100644
--- a/src/qcam/assets/feathericons/sidebar.svg
+++ b/src/apps/qcam/assets/feathericons/sidebar.svg
diff --git a/src/qcam/assets/feathericons/skip-back.svg b/src/apps/qcam/assets/feathericons/skip-back.svg
index 88d024e2..88d024e2 100644
--- a/src/qcam/assets/feathericons/skip-back.svg
+++ b/src/apps/qcam/assets/feathericons/skip-back.svg
diff --git a/src/qcam/assets/feathericons/skip-forward.svg b/src/apps/qcam/assets/feathericons/skip-forward.svg
index f3fdac3a..f3fdac3a 100644
--- a/src/qcam/assets/feathericons/skip-forward.svg
+++ b/src/apps/qcam/assets/feathericons/skip-forward.svg
diff --git a/src/qcam/assets/feathericons/slack.svg b/src/apps/qcam/assets/feathericons/slack.svg
index 5d973466..5d973466 100644
--- a/src/qcam/assets/feathericons/slack.svg
+++ b/src/apps/qcam/assets/feathericons/slack.svg
diff --git a/src/qcam/assets/feathericons/slash.svg b/src/apps/qcam/assets/feathericons/slash.svg
index f4131b85..f4131b85 100644
--- a/src/qcam/assets/feathericons/slash.svg
+++ b/src/apps/qcam/assets/feathericons/slash.svg
diff --git a/src/qcam/assets/feathericons/sliders.svg b/src/apps/qcam/assets/feathericons/sliders.svg
index 19c93852..19c93852 100644
--- a/src/qcam/assets/feathericons/sliders.svg
+++ b/src/apps/qcam/assets/feathericons/sliders.svg
diff --git a/src/qcam/assets/feathericons/smartphone.svg b/src/apps/qcam/assets/feathericons/smartphone.svg
index 0171a95a..0171a95a 100644
--- a/src/qcam/assets/feathericons/smartphone.svg
+++ b/src/apps/qcam/assets/feathericons/smartphone.svg
diff --git a/src/qcam/assets/feathericons/smile.svg b/src/apps/qcam/assets/feathericons/smile.svg
index 24dc8a26..24dc8a26 100644
--- a/src/qcam/assets/feathericons/smile.svg
+++ b/src/apps/qcam/assets/feathericons/smile.svg
diff --git a/src/qcam/assets/feathericons/speaker.svg b/src/apps/qcam/assets/feathericons/speaker.svg
index 75d5ff9c..75d5ff9c 100644
--- a/src/qcam/assets/feathericons/speaker.svg
+++ b/src/apps/qcam/assets/feathericons/speaker.svg
diff --git a/src/qcam/assets/feathericons/square.svg b/src/apps/qcam/assets/feathericons/square.svg
index 6eabc77d..6eabc77d 100644
--- a/src/qcam/assets/feathericons/square.svg
+++ b/src/apps/qcam/assets/feathericons/square.svg
diff --git a/src/qcam/assets/feathericons/star.svg b/src/apps/qcam/assets/feathericons/star.svg
index bcdc31aa..bcdc31aa 100644
--- a/src/qcam/assets/feathericons/star.svg
+++ b/src/apps/qcam/assets/feathericons/star.svg
diff --git a/src/qcam/assets/feathericons/stop-circle.svg b/src/apps/qcam/assets/feathericons/stop-circle.svg
index c10d9d47..c10d9d47 100644
--- a/src/qcam/assets/feathericons/stop-circle.svg
+++ b/src/apps/qcam/assets/feathericons/stop-circle.svg
diff --git a/src/qcam/assets/feathericons/sun.svg b/src/apps/qcam/assets/feathericons/sun.svg
index 7f51b94d..7f51b94d 100644
--- a/src/qcam/assets/feathericons/sun.svg
+++ b/src/apps/qcam/assets/feathericons/sun.svg
diff --git a/src/qcam/assets/feathericons/sunrise.svg b/src/apps/qcam/assets/feathericons/sunrise.svg
index eff4b1e4..eff4b1e4 100644
--- a/src/qcam/assets/feathericons/sunrise.svg
+++ b/src/apps/qcam/assets/feathericons/sunrise.svg
diff --git a/src/qcam/assets/feathericons/sunset.svg b/src/apps/qcam/assets/feathericons/sunset.svg
index a5a22215..a5a22215 100644
--- a/src/qcam/assets/feathericons/sunset.svg
+++ b/src/apps/qcam/assets/feathericons/sunset.svg
diff --git a/src/qcam/assets/feathericons/tablet.svg b/src/apps/qcam/assets/feathericons/tablet.svg
index 9c80b40a..9c80b40a 100644
--- a/src/qcam/assets/feathericons/tablet.svg
+++ b/src/apps/qcam/assets/feathericons/tablet.svg
diff --git a/src/qcam/assets/feathericons/tag.svg b/src/apps/qcam/assets/feathericons/tag.svg
index 7219b15f..7219b15f 100644
--- a/src/qcam/assets/feathericons/tag.svg
+++ b/src/apps/qcam/assets/feathericons/tag.svg
diff --git a/src/qcam/assets/feathericons/target.svg b/src/apps/qcam/assets/feathericons/target.svg
index be84b17c..be84b17c 100644
--- a/src/qcam/assets/feathericons/target.svg
+++ b/src/apps/qcam/assets/feathericons/target.svg
diff --git a/src/qcam/assets/feathericons/terminal.svg b/src/apps/qcam/assets/feathericons/terminal.svg
index af459c04..af459c04 100644
--- a/src/qcam/assets/feathericons/terminal.svg
+++ b/src/apps/qcam/assets/feathericons/terminal.svg
diff --git a/src/qcam/assets/feathericons/thermometer.svg b/src/apps/qcam/assets/feathericons/thermometer.svg
index 33142ccc..33142ccc 100644
--- a/src/qcam/assets/feathericons/thermometer.svg
+++ b/src/apps/qcam/assets/feathericons/thermometer.svg
diff --git a/src/qcam/assets/feathericons/thumbs-down.svg b/src/apps/qcam/assets/feathericons/thumbs-down.svg
index 3e7bcd6d..3e7bcd6d 100644
--- a/src/qcam/assets/feathericons/thumbs-down.svg
+++ b/src/apps/qcam/assets/feathericons/thumbs-down.svg
diff --git a/src/qcam/assets/feathericons/thumbs-up.svg b/src/apps/qcam/assets/feathericons/thumbs-up.svg
index 226c44d8..226c44d8 100644
--- a/src/qcam/assets/feathericons/thumbs-up.svg
+++ b/src/apps/qcam/assets/feathericons/thumbs-up.svg
diff --git a/src/qcam/assets/feathericons/toggle-left.svg b/src/apps/qcam/assets/feathericons/toggle-left.svg
index 240be290..240be290 100644
--- a/src/qcam/assets/feathericons/toggle-left.svg
+++ b/src/apps/qcam/assets/feathericons/toggle-left.svg
diff --git a/src/qcam/assets/feathericons/toggle-right.svg b/src/apps/qcam/assets/feathericons/toggle-right.svg
index fc6e81c1..fc6e81c1 100644
--- a/src/qcam/assets/feathericons/toggle-right.svg
+++ b/src/apps/qcam/assets/feathericons/toggle-right.svg
diff --git a/src/qcam/assets/feathericons/tool.svg b/src/apps/qcam/assets/feathericons/tool.svg
index f3cbf3d9..f3cbf3d9 100644
--- a/src/qcam/assets/feathericons/tool.svg
+++ b/src/apps/qcam/assets/feathericons/tool.svg
diff --git a/src/qcam/assets/feathericons/trash-2.svg b/src/apps/qcam/assets/feathericons/trash-2.svg
index f24d55bf..f24d55bf 100644
--- a/src/qcam/assets/feathericons/trash-2.svg
+++ b/src/apps/qcam/assets/feathericons/trash-2.svg
diff --git a/src/qcam/assets/feathericons/trash.svg b/src/apps/qcam/assets/feathericons/trash.svg
index 55650bd4..55650bd4 100644
--- a/src/qcam/assets/feathericons/trash.svg
+++ b/src/apps/qcam/assets/feathericons/trash.svg
diff --git a/src/qcam/assets/feathericons/trello.svg b/src/apps/qcam/assets/feathericons/trello.svg
index b2f599b6..b2f599b6 100644
--- a/src/qcam/assets/feathericons/trello.svg
+++ b/src/apps/qcam/assets/feathericons/trello.svg
diff --git a/src/qcam/assets/feathericons/trending-down.svg b/src/apps/qcam/assets/feathericons/trending-down.svg
index a9d4cfa5..a9d4cfa5 100644
--- a/src/qcam/assets/feathericons/trending-down.svg
+++ b/src/apps/qcam/assets/feathericons/trending-down.svg
diff --git a/src/qcam/assets/feathericons/trending-up.svg b/src/apps/qcam/assets/feathericons/trending-up.svg
index 52026a4d..52026a4d 100644
--- a/src/qcam/assets/feathericons/trending-up.svg
+++ b/src/apps/qcam/assets/feathericons/trending-up.svg
diff --git a/src/qcam/assets/feathericons/triangle.svg b/src/apps/qcam/assets/feathericons/triangle.svg
index 274b6528..274b6528 100644
--- a/src/qcam/assets/feathericons/triangle.svg
+++ b/src/apps/qcam/assets/feathericons/triangle.svg
diff --git a/src/qcam/assets/feathericons/truck.svg b/src/apps/qcam/assets/feathericons/truck.svg
index 33898373..33898373 100644
--- a/src/qcam/assets/feathericons/truck.svg
+++ b/src/apps/qcam/assets/feathericons/truck.svg
diff --git a/src/qcam/assets/feathericons/tv.svg b/src/apps/qcam/assets/feathericons/tv.svg
index 955bbfff..955bbfff 100644
--- a/src/qcam/assets/feathericons/tv.svg
+++ b/src/apps/qcam/assets/feathericons/tv.svg
diff --git a/src/qcam/assets/feathericons/twitch.svg b/src/apps/qcam/assets/feathericons/twitch.svg
index 17062495..17062495 100644
--- a/src/qcam/assets/feathericons/twitch.svg
+++ b/src/apps/qcam/assets/feathericons/twitch.svg
diff --git a/src/qcam/assets/feathericons/twitter.svg b/src/apps/qcam/assets/feathericons/twitter.svg
index f8886eca..f8886eca 100644
--- a/src/qcam/assets/feathericons/twitter.svg
+++ b/src/apps/qcam/assets/feathericons/twitter.svg
diff --git a/src/qcam/assets/feathericons/type.svg b/src/apps/qcam/assets/feathericons/type.svg
index c6b2de33..c6b2de33 100644
--- a/src/qcam/assets/feathericons/type.svg
+++ b/src/apps/qcam/assets/feathericons/type.svg
diff --git a/src/qcam/assets/feathericons/umbrella.svg b/src/apps/qcam/assets/feathericons/umbrella.svg
index dc77c0cb..dc77c0cb 100644
--- a/src/qcam/assets/feathericons/umbrella.svg
+++ b/src/apps/qcam/assets/feathericons/umbrella.svg
diff --git a/src/qcam/assets/feathericons/underline.svg b/src/apps/qcam/assets/feathericons/underline.svg
index 044945d4..044945d4 100644
--- a/src/qcam/assets/feathericons/underline.svg
+++ b/src/apps/qcam/assets/feathericons/underline.svg
diff --git a/src/qcam/assets/feathericons/unlock.svg b/src/apps/qcam/assets/feathericons/unlock.svg
index 01dc3597..01dc3597 100644
--- a/src/qcam/assets/feathericons/unlock.svg
+++ b/src/apps/qcam/assets/feathericons/unlock.svg
diff --git a/src/qcam/assets/feathericons/upload-cloud.svg b/src/apps/qcam/assets/feathericons/upload-cloud.svg
index a1db297c..a1db297c 100644
--- a/src/qcam/assets/feathericons/upload-cloud.svg
+++ b/src/apps/qcam/assets/feathericons/upload-cloud.svg
diff --git a/src/qcam/assets/feathericons/upload.svg b/src/apps/qcam/assets/feathericons/upload.svg
index 91eaff75..91eaff75 100644
--- a/src/qcam/assets/feathericons/upload.svg
+++ b/src/apps/qcam/assets/feathericons/upload.svg
diff --git a/src/qcam/assets/feathericons/user-check.svg b/src/apps/qcam/assets/feathericons/user-check.svg
index 42f91b29..42f91b29 100644
--- a/src/qcam/assets/feathericons/user-check.svg
+++ b/src/apps/qcam/assets/feathericons/user-check.svg
diff --git a/src/qcam/assets/feathericons/user-minus.svg b/src/apps/qcam/assets/feathericons/user-minus.svg
index 44b75f5a..44b75f5a 100644
--- a/src/qcam/assets/feathericons/user-minus.svg
+++ b/src/apps/qcam/assets/feathericons/user-minus.svg
diff --git a/src/qcam/assets/feathericons/user-plus.svg b/src/apps/qcam/assets/feathericons/user-plus.svg
index 21460f6e..21460f6e 100644
--- a/src/qcam/assets/feathericons/user-plus.svg
+++ b/src/apps/qcam/assets/feathericons/user-plus.svg
diff --git a/src/qcam/assets/feathericons/user-x.svg b/src/apps/qcam/assets/feathericons/user-x.svg
index 0c41a481..0c41a481 100644
--- a/src/qcam/assets/feathericons/user-x.svg
+++ b/src/apps/qcam/assets/feathericons/user-x.svg
diff --git a/src/qcam/assets/feathericons/user.svg b/src/apps/qcam/assets/feathericons/user.svg
index 7bb5f291..7bb5f291 100644
--- a/src/qcam/assets/feathericons/user.svg
+++ b/src/apps/qcam/assets/feathericons/user.svg
diff --git a/src/qcam/assets/feathericons/users.svg b/src/apps/qcam/assets/feathericons/users.svg
index aacf6b08..aacf6b08 100644
--- a/src/qcam/assets/feathericons/users.svg
+++ b/src/apps/qcam/assets/feathericons/users.svg
diff --git a/src/qcam/assets/feathericons/video-off.svg b/src/apps/qcam/assets/feathericons/video-off.svg
index 08ec6973..08ec6973 100644
--- a/src/qcam/assets/feathericons/video-off.svg
+++ b/src/apps/qcam/assets/feathericons/video-off.svg
diff --git a/src/qcam/assets/feathericons/video.svg b/src/apps/qcam/assets/feathericons/video.svg
index 8ff156aa..8ff156aa 100644
--- a/src/qcam/assets/feathericons/video.svg
+++ b/src/apps/qcam/assets/feathericons/video.svg
diff --git a/src/qcam/assets/feathericons/voicemail.svg b/src/apps/qcam/assets/feathericons/voicemail.svg
index 5d78a8e7..5d78a8e7 100644
--- a/src/qcam/assets/feathericons/voicemail.svg
+++ b/src/apps/qcam/assets/feathericons/voicemail.svg
diff --git a/src/qcam/assets/feathericons/volume-1.svg b/src/apps/qcam/assets/feathericons/volume-1.svg
index 150e875f..150e875f 100644
--- a/src/qcam/assets/feathericons/volume-1.svg
+++ b/src/apps/qcam/assets/feathericons/volume-1.svg
diff --git a/src/qcam/assets/feathericons/volume-2.svg b/src/apps/qcam/assets/feathericons/volume-2.svg
index 03d521c7..03d521c7 100644
--- a/src/qcam/assets/feathericons/volume-2.svg
+++ b/src/apps/qcam/assets/feathericons/volume-2.svg
diff --git a/src/qcam/assets/feathericons/volume-x.svg b/src/apps/qcam/assets/feathericons/volume-x.svg
index be442406..be442406 100644
--- a/src/qcam/assets/feathericons/volume-x.svg
+++ b/src/apps/qcam/assets/feathericons/volume-x.svg
diff --git a/src/qcam/assets/feathericons/volume.svg b/src/apps/qcam/assets/feathericons/volume.svg
index 53bfe15e..53bfe15e 100644
--- a/src/qcam/assets/feathericons/volume.svg
+++ b/src/apps/qcam/assets/feathericons/volume.svg
diff --git a/src/qcam/assets/feathericons/watch.svg b/src/apps/qcam/assets/feathericons/watch.svg
index a1099da3..a1099da3 100644
--- a/src/qcam/assets/feathericons/watch.svg
+++ b/src/apps/qcam/assets/feathericons/watch.svg
diff --git a/src/qcam/assets/feathericons/wifi-off.svg b/src/apps/qcam/assets/feathericons/wifi-off.svg
index 35eae43b..35eae43b 100644
--- a/src/qcam/assets/feathericons/wifi-off.svg
+++ b/src/apps/qcam/assets/feathericons/wifi-off.svg
diff --git a/src/qcam/assets/feathericons/wifi.svg b/src/apps/qcam/assets/feathericons/wifi.svg
index 748c285e..748c285e 100644
--- a/src/qcam/assets/feathericons/wifi.svg
+++ b/src/apps/qcam/assets/feathericons/wifi.svg
diff --git a/src/qcam/assets/feathericons/wind.svg b/src/apps/qcam/assets/feathericons/wind.svg
index 82b36468..82b36468 100644
--- a/src/qcam/assets/feathericons/wind.svg
+++ b/src/apps/qcam/assets/feathericons/wind.svg
diff --git a/src/qcam/assets/feathericons/x-circle.svg b/src/apps/qcam/assets/feathericons/x-circle.svg
index 94aad5e5..94aad5e5 100644
--- a/src/qcam/assets/feathericons/x-circle.svg
+++ b/src/apps/qcam/assets/feathericons/x-circle.svg
diff --git a/src/qcam/assets/feathericons/x-octagon.svg b/src/apps/qcam/assets/feathericons/x-octagon.svg
index 85431985..85431985 100644
--- a/src/qcam/assets/feathericons/x-octagon.svg
+++ b/src/apps/qcam/assets/feathericons/x-octagon.svg
diff --git a/src/qcam/assets/feathericons/x-square.svg b/src/apps/qcam/assets/feathericons/x-square.svg
index 7677c387..7677c387 100644
--- a/src/qcam/assets/feathericons/x-square.svg
+++ b/src/apps/qcam/assets/feathericons/x-square.svg
diff --git a/src/qcam/assets/feathericons/x.svg b/src/apps/qcam/assets/feathericons/x.svg
index 7d5875ca..7d5875ca 100644
--- a/src/qcam/assets/feathericons/x.svg
+++ b/src/apps/qcam/assets/feathericons/x.svg
diff --git a/src/qcam/assets/feathericons/youtube.svg b/src/apps/qcam/assets/feathericons/youtube.svg
index c4824385..c4824385 100644
--- a/src/qcam/assets/feathericons/youtube.svg
+++ b/src/apps/qcam/assets/feathericons/youtube.svg
diff --git a/src/qcam/assets/feathericons/zap-off.svg b/src/apps/qcam/assets/feathericons/zap-off.svg
index c636f8bb..c636f8bb 100644
--- a/src/qcam/assets/feathericons/zap-off.svg
+++ b/src/apps/qcam/assets/feathericons/zap-off.svg
diff --git a/src/qcam/assets/feathericons/zap.svg b/src/apps/qcam/assets/feathericons/zap.svg
index 8fdafa93..8fdafa93 100644
--- a/src/qcam/assets/feathericons/zap.svg
+++ b/src/apps/qcam/assets/feathericons/zap.svg
diff --git a/src/qcam/assets/feathericons/zoom-in.svg b/src/apps/qcam/assets/feathericons/zoom-in.svg
index da4572d2..da4572d2 100644
--- a/src/qcam/assets/feathericons/zoom-in.svg
+++ b/src/apps/qcam/assets/feathericons/zoom-in.svg
diff --git a/src/qcam/assets/feathericons/zoom-out.svg b/src/apps/qcam/assets/feathericons/zoom-out.svg
index fd678d72..fd678d72 100644
--- a/src/qcam/assets/feathericons/zoom-out.svg
+++ b/src/apps/qcam/assets/feathericons/zoom-out.svg
diff --git a/src/apps/qcam/assets/shader/RGB.frag b/src/apps/qcam/assets/shader/RGB.frag
new file mode 100644
index 00000000..4c374ac9
--- /dev/null
+++ b/src/apps/qcam/assets/shader/RGB.frag
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ *
+ * RGB.frag - Fragment shader code for RGB formats
+ */
+
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+varying vec2 textureOut;
+uniform sampler2D tex_y;
+
+void main(void)
+{
+ vec3 rgb;
+
+ rgb = texture2D(tex_y, textureOut).RGB_PATTERN;
+
+ gl_FragColor = vec4(rgb, 1.0);
+}
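
The RGB_PATTERN swizzle above is not valid GLSL on its own; it is a placeholder that the application presumably replaces by prepending a #define when it compiles the shader. A minimal sketch under that assumption (helper and parameter names hypothetical):

    // Sketch: specializing RGB.frag before compilation (hypothetical helper).
    #include <string>

    std::string specializeRgbShader(const std::string &source, bool swapRedBlue)
    {
        // The define picks the swizzle that ".RGB_PATTERN" expands to.
        const char *define = swapRedBlue ? "#define RGB_PATTERN bgr\n"
                                         : "#define RGB_PATTERN rgb\n";
        return define + source;
    }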
diff --git a/src/apps/qcam/assets/shader/YUV_2_planes.frag b/src/apps/qcam/assets/shader/YUV_2_planes.frag
new file mode 100644
index 00000000..1d5d1206
--- /dev/null
+++ b/src/apps/qcam/assets/shader/YUV_2_planes.frag
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Linaro
+ *
+ * YUV_2_planes.frag - Fragment shader code for NV12, NV16 and NV24 formats
+ */
+
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+varying vec2 textureOut;
+uniform sampler2D tex_y;
+uniform sampler2D tex_u;
+
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
+void main(void)
+{
+ vec3 yuv;
+
+ yuv.x = texture2D(tex_y, textureOut).r;
+#if defined(YUV_PATTERN_UV)
+ yuv.y = texture2D(tex_u, textureOut).r;
+ yuv.z = texture2D(tex_u, textureOut).a;
+#elif defined(YUV_PATTERN_VU)
+ yuv.y = texture2D(tex_u, textureOut).a;
+ yuv.z = texture2D(tex_u, textureOut).r;
+#else
+#error Invalid pattern
+#endif
+
+ vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
+
+ gl_FragColor = vec4(rgb, 1.0);
+}
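
YUV2RGB_MATRIX and YUV2RGB_Y_OFFSET are likewise placeholders the application fills in for the negotiated colour space. As an illustration only, limited-range BT.601 coefficients could be injected as follows (the injection mechanism and variable name are assumptions; the numeric values are the standard coefficients):

    // Sketch: defines a host could prepend for BT.601 limited-range video.
    // GLSL mat3 is column-major: the columns multiply Y, U and V in turn.
    static const char *bt601Defines =
        "#define YUV2RGB_MATRIX "
        "1.164, 1.164, 1.164, "   /* column 0: Y coefficients */
        "0.0, -0.392, 2.017, "    /* column 1: U coefficients */
        "1.596, -0.813, 0.0\n"    /* column 2: V coefficients */
        "#define YUV2RGB_Y_OFFSET 16.0\n";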
diff --git a/src/apps/qcam/assets/shader/YUV_3_planes.frag b/src/apps/qcam/assets/shader/YUV_3_planes.frag
new file mode 100644
index 00000000..8f788e90
--- /dev/null
+++ b/src/apps/qcam/assets/shader/YUV_3_planes.frag
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Linaro
+ *
+ * YUV_3_planes.frag - Fragment shader code for YUV420 format
+ */
+
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+varying vec2 textureOut;
+uniform sampler2D tex_y;
+uniform sampler2D tex_u;
+uniform sampler2D tex_v;
+
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
+void main(void)
+{
+ vec3 yuv;
+
+ yuv.x = texture2D(tex_y, textureOut).r;
+ yuv.y = texture2D(tex_u, textureOut).r;
+ yuv.z = texture2D(tex_v, textureOut).r;
+
+ vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
+
+ gl_FragColor = vec4(rgb, 1.0);
+}
diff --git a/src/apps/qcam/assets/shader/YUV_packed.frag b/src/apps/qcam/assets/shader/YUV_packed.frag
new file mode 100644
index 00000000..b9ef9d41
--- /dev/null
+++ b/src/apps/qcam/assets/shader/YUV_packed.frag
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * YUV_packed.frag - Fragment shader code for YUYV packed formats
+ */
+
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+varying vec2 textureOut;
+
+uniform sampler2D tex_y;
+uniform vec2 tex_step;
+
+const mat3 yuv2rgb_matrix = mat3(
+ YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+ YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
+void main(void)
+{
+ /*
+ * The sampler won't interpolate the texture correctly along the X axis,
+ * as each RGBA pixel effectively stores two pixels. We thus need to
+ * interpolate manually.
+ *
+ * In integer texture coordinates, the Y values are laid out in the
+ * texture memory as follows:
+ *
+ * ...| Y U Y V | Y U Y V | Y U Y V |...
+ * ...| R G B A | R G B A | R G B A |...
+ * ^ ^ ^ ^ ^ ^
+ * | | | | | |
+ * n-1 n-0.5 n n+0.5 n+1 n+1.5
+ *
+ * For a texture location x in the interval [n, n+1[, sample the left
+ * and right pixels at n and n+1, and interpolate them with
+ *
+ * left.r * (1 - a) + left.b * a if fract(x) < 0.5
+ * left.b * (1 - a) + right.r * a if fract(x) >= 0.5
+ *
+ * with a = fract(x * 2) which can also be written
+ *
+ * a = fract(x) * 2 if fract(x) < 0.5
+ * a = fract(x) * 2 - 1 if fract(x) >= 0.5
+ */
+ vec2 pos = textureOut;
+ float f_x = fract(pos.x / tex_step.x);
+
+ vec4 left = texture2D(tex_y, vec2(pos.x - f_x * tex_step.x, pos.y));
+ vec4 right = texture2D(tex_y, vec2(pos.x + (1.0 - f_x) * tex_step.x, pos.y));
+
+#if defined(YUV_PATTERN_UYVY)
+ float y_left = mix(left.g, left.a, f_x * 2.0);
+ float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0);
+ vec2 uv = mix(left.rb, right.rb, f_x);
+#elif defined(YUV_PATTERN_VYUY)
+ float y_left = mix(left.g, left.a, f_x * 2.0);
+ float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0);
+ vec2 uv = mix(left.br, right.br, f_x);
+#elif defined(YUV_PATTERN_YUYV)
+ float y_left = mix(left.r, left.b, f_x * 2.0);
+ float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0);
+ vec2 uv = mix(left.ga, right.ga, f_x);
+#elif defined(YUV_PATTERN_YVYU)
+ float y_left = mix(left.r, left.b, f_x * 2.0);
+ float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0);
+ vec2 uv = mix(left.ag, right.ag, f_x);
+#else
+#error Invalid pattern
+#endif
+
+ float y = mix(y_left, y_right, step(0.5, f_x));
+
+ vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
+
+ gl_FragColor = vec4(rgb, 1.0);
+}
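
The interpolation rules spelled out in the long comment are easy to verify on the CPU. A self-contained check of the YUYV case (names hypothetical; mixf mirrors the GLSL mix() definition):

    // Sketch: CPU check of the packed-YUYV luma interpolation described above.
    #include <cassert>
    #include <cmath>

    static float mixf(float a, float b, float t)
    {
        return a * (1.0f - t) + b * t; // same definition as GLSL mix()
    }

    int main()
    {
        // Left texel holds Y0 in .r and Y1 in .b; the next texel's Y0 is right.r.
        float leftR = 0.2f, leftB = 0.4f, rightR = 0.6f;

        // fract(x) = 0.25 < 0.5: blend Y0..Y1 with a = fract(x) * 2 = 0.5.
        assert(std::fabs(mixf(leftR, leftB, 0.25f * 2.0f) - 0.3f) < 1e-6f);

        // fract(x) = 0.75 >= 0.5: blend Y1..next Y0 with a = fract(x) * 2 - 1.
        assert(std::fabs(mixf(leftB, rightR, 0.75f * 2.0f - 1.0f) - 0.5f) < 1e-6f);

        return 0;
    }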
diff --git a/src/apps/qcam/assets/shader/bayer_1x_packed.frag b/src/apps/qcam/assets/shader/bayer_1x_packed.frag
new file mode 100644
index 00000000..f53f5575
--- /dev/null
+++ b/src/apps/qcam/assets/shader/bayer_1x_packed.frag
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Based on the code from http://jgt.akpeters.com/papers/McGuire08/
+ *
+ * Efficient, High-Quality Bayer Demosaic Filtering on GPUs
+ *
+ * Morgan McGuire
+ *
+ * This paper appears in issue Volume 13, Number 4.
+ * ---------------------------------------------------------
+ * Copyright (c) 2008, Morgan McGuire. All rights reserved.
+ *
+ *
+ * Modified by Linaro Ltd for 10/12-bit packed vs 8-bit raw Bayer format,
+ * and for simpler demosaic algorithm.
+ * Copyright (C) 2020, Linaro
+ *
+ * bayer_1x_packed.frag - Fragment shader code for raw Bayer 10-bit and 12-bit
+ * packed formats
+ */
+
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+/*
+ * These constants are used to select the bytes containing the HS part of
+ * the pixel value:
+ * BPP - bytes per pixel,
+ * THRESHOLD_L = fract(BPP) * 0.5 + 0.02
+ * THRESHOLD_H = 1.0 - fract(BPP) * 1.5 + 0.02
+ * Let X be the x coordinate in the texture, measured in bytes (so that
+ * the range is from 0 to (stride_-1)), aligned on the nearest pixel.
+ * E.g. for RAW10P:
+ * -------------+-------------------+-------------------+--
+ * pixel No | 0 1 2 3 | 4 5 6 7 | ...
+ * -------------+-------------------+-------------------+--
+ * byte offset | 0 1 2 3 4 | 5 6 7 8 9 | ...
+ * -------------+-------------------+-------------------+--
+ * X | 0.0 1.25 2.5 3.75 | 5.0 6.25 7.5 8.75 | ...
+ * -------------+-------------------+-------------------+--
+ * If fract(X) < THRESHOLD_L then the previous byte contains the LS
+ * bits of the pixel values and needs to be skipped.
+ * If fract(X) > THRESHOLD_H then the next byte contains the LS bits
+ * of the pixel values and needs to be skipped.
+ */
+#if defined(RAW10P)
+#define BPP 1.25
+#define THRESHOLD_L 0.14
+#define THRESHOLD_H 0.64
+#elif defined(RAW12P)
+#define BPP 1.5
+#define THRESHOLD_L 0.27
+#define THRESHOLD_H 0.27
+#else
+#error Invalid raw format
+#endif
+
+
+varying vec2 textureOut;
+
+/* the texture size in pixels */
+uniform vec2 tex_size;
+uniform vec2 tex_step;
+uniform vec2 tex_bayer_first_red;
+
+uniform sampler2D tex_y;
+
+void main(void)
+{
+ vec3 rgb;
+
+ /*
+ * center_bytes holds the coordinates of the MS byte of the pixel
+ * being sampled, in the [0, stride-1] x [0, height-1] range.
+ * center_pixel holds the coordinates of the pixel being sampled,
+ * in the [0, width-1] x [0, height-1] range.
+ */
+ vec2 center_bytes;
+ vec2 center_pixel;
+
+ /*
+ * x- and y-positions of the adjacent pixels on the [0, 1] range.
+ */
+ vec2 xcoords;
+ vec2 ycoords;
+
+ /*
+ * The coordinates passed to the shader in textureOut may point
+ * to a place in between the pixels if the texture format doesn't
+ * match the image format. In particular, MIPI packed raw Bayer
+ * formats don't have a matching texture format.
+ * In this case align the coordinates to the left nearest pixel
+ * by hand.
+ */
+ center_pixel = floor(textureOut * tex_size);
+ center_bytes.y = center_pixel.y;
+
+ /*
+ * Add a small number (a few mantissa's LSBs) to avoid float
+ * representation issues. Maybe paranoic.
+ */
+ center_bytes.x = BPP * center_pixel.x + 0.02;
+
+ float fract_x = fract(center_bytes.x);
+
+ /*
+ * The below floor() call ensures that center_bytes.x points
+ * at one of the bytes representing the 8 higher bits of
+ * the pixel value, not at the byte containing the LS bits
+ * of the group of the pixels.
+ */
+ center_bytes.x = floor(center_bytes.x);
+ center_bytes *= tex_step;
+
+ xcoords = center_bytes.x + vec2(-tex_step.x, tex_step.x);
+ ycoords = center_bytes.y + vec2(-tex_step.y, tex_step.y);
+
+ /*
+ * If xcoords[0] points at the byte containing the LS bits
+ * of the previous group of the pixels, move xcoords[0] one
+ * byte back.
+ */
+ xcoords[0] += (fract_x < THRESHOLD_L) ? -tex_step.x : 0.0;
+
+ /*
+ * If xcoords[1] points at the byte containing the LS bits
+ * of the current group of the pixels, move xcoords[1] one
+ * byte forward.
+ */
+ xcoords[1] += (fract_x > THRESHOLD_H) ? tex_step.x : 0.0;
+
+ vec2 alternate = mod(center_pixel.xy + tex_bayer_first_red, 2.0);
+ bool even_col = alternate.x < 1.0;
+ bool even_row = alternate.y < 1.0;
+
+ /*
+ * We need to sample the central pixel and the ones with offset
+ * of -1 to +1 pixel in both X and Y directions. Let's name these
+ * pixels as below, where C is the central pixel:
+ *
+ * +----+----+----+----+
+ * | \ x| | | |
+ * |y \ | -1 | 0 | +1 |
+ * +----+----+----+----+
+ * | +1 | D2 | A1 | D3 |
+ * +----+----+----+----+
+ * | 0 | B0 | C | B1 |
+ * +----+----+----+----+
+ * | -1 | D0 | A0 | D1 |
+ * +----+----+----+----+
+ *
+ * In the below equations (0,-1).r means "r component of the texel
+ * shifted by -tex_step.y from the center_bytes one" etc.
+ *
+ * In the even row / even column (EE) case the colour values are:
+ * R = C = (0,0).r,
+ * G = (A0 + A1 + B0 + B1) / 4.0 =
+ * ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0,
+ * B = (D0 + D1 + D2 + D3) / 4.0 =
+ * ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0
+ *
+ * For even row / odd column (EO):
+ * R = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0,
+ * G = C = (0,0).r,
+ * B = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0
+ *
+ * For odd row / even column (OE):
+ * R = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0,
+ * G = C = (0,0).r,
+ * B = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0
+ *
+ * For odd row / odd column (OO):
+ * R = (D0 + D1 + D2 + D3) / 4.0 =
+ * ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0,
+ * G = (A0 + A1 + B0 + B1) / 4.0 =
+ * ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0,
+ * B = C = (0,0).r
+ */
+
+ /*
+ * Fetch the values and precalculate the terms:
+ * patterns.x = (A0 + A1) / 2.0
+ * patterns.y = (B0 + B1) / 2.0
+ * patterns.z = (A0 + A1 + B0 + B1) / 4.0
+ * patterns.w = (D0 + D1 + D2 + D3) / 4.0
+ */
+ #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r
+
+ float C = texture2D(tex_y, center_bytes).r;
+ vec4 patterns = vec4(
+ fetch(center_bytes.x, ycoords[0]), /* A0: (0,-1) */
+ fetch(xcoords[0], center_bytes.y), /* B0: (-1,0) */
+ fetch(xcoords[0], ycoords[0]), /* D0: (-1,-1) */
+ fetch(xcoords[1], ycoords[0])); /* D1: (1,-1) */
+ vec4 temp = vec4(
+ fetch(center_bytes.x, ycoords[1]), /* A1: (0,1) */
+ fetch(xcoords[1], center_bytes.y), /* B1: (1,0) */
+ fetch(xcoords[1], ycoords[1]), /* D3: (1,1) */
+ fetch(xcoords[0], ycoords[1])); /* D2: (-1,1) */
+ patterns = (patterns + temp) * 0.5;
+ /* .x = (A0 + A1) / 2.0, .y = (B0 + B1) / 2.0 */
+ /* .z = (D0 + D3) / 2.0, .w = (D1 + D2) / 2.0 */
+ patterns.w = (patterns.z + patterns.w) * 0.5;
+ patterns.z = (patterns.x + patterns.y) * 0.5;
+
+ rgb = even_col ?
+ (even_row ?
+ vec3(C, patterns.zw) :
+ vec3(patterns.x, C, patterns.y)) :
+ (even_row ?
+ vec3(patterns.y, C, patterns.x) :
+ vec3(patterns.wz, C));
+
+ gl_FragColor = vec4(rgb, 1.0);
+}
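
The two threshold macros follow mechanically from the formulas in the header comment; evaluating them for both supported formats reproduces the defined values:

    // Sketch: deriving THRESHOLD_L/THRESHOLD_H from the comment's formulas.
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double bpps[] = { 1.25 /* RAW10P */, 1.5 /* RAW12P */ };

        for (double bpp : bpps) {
            double f = bpp - std::floor(bpp);   // fract(BPP)
            double low = f * 0.5 + 0.02;        // THRESHOLD_L
            double high = 1.0 - f * 1.5 + 0.02; // THRESHOLD_H
            std::printf("BPP %.2f: L = %.3f, H = %.3f\n", bpp, low, high);
        }

        // Prints 0.145/0.645 for RAW10P and 0.270/0.270 for RAW12P, matching
        // the rounded 0.14/0.64 and 0.27 values defined in the shader.
        return 0;
    }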
diff --git a/src/apps/qcam/assets/shader/bayer_8.frag b/src/apps/qcam/assets/shader/bayer_8.frag
new file mode 100644
index 00000000..7e35ca88
--- /dev/null
+++ b/src/apps/qcam/assets/shader/bayer_8.frag
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+From http://jgt.akpeters.com/papers/McGuire08/
+
+Efficient, High-Quality Bayer Demosaic Filtering on GPUs
+
+Morgan McGuire
+
+This paper appears in issue Volume 13, Number 4.
+---------------------------------------------------------
+Copyright (c) 2008, Morgan McGuire. All rights reserved.
+
+Modified by Linaro Ltd to integrate it into libcamera.
+Copyright (C) 2021, Linaro
+*/
+
+//Pixel Shader
+#ifdef GL_ES
+precision mediump float;
+#endif
+
+/** Monochrome RGBA or GL_LUMINANCE Bayer encoded texture.*/
+uniform sampler2D tex_y;
+varying vec4 center;
+varying vec4 yCoord;
+varying vec4 xCoord;
+
+void main(void) {
+ #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r
+
+ float C = texture2D(tex_y, center.xy).r; // ( 0, 0)
+ const vec4 kC = vec4( 4.0, 6.0, 5.0, 5.0) / 8.0;
+
+ // Determine which of four types of pixels we are on.
+ vec2 alternate = mod(floor(center.zw), 2.0);
+
+ vec4 Dvec = vec4(
+ fetch(xCoord[1], yCoord[1]), // (-1,-1)
+ fetch(xCoord[1], yCoord[2]), // (-1, 1)
+ fetch(xCoord[2], yCoord[1]), // ( 1,-1)
+ fetch(xCoord[2], yCoord[2])); // ( 1, 1)
+
+ vec4 PATTERN = (kC.xyz * C).xyzz;
+
+ // Can also be a dot product with (1,1,1,1) on hardware where that is
+ // specially optimized.
+ // Equivalent to: D = Dvec[0] + Dvec[1] + Dvec[2] + Dvec[3];
+ Dvec.xy += Dvec.zw;
+ Dvec.x += Dvec.y;
+
+ vec4 value = vec4(
+ fetch(center.x, yCoord[0]), // ( 0,-2)
+ fetch(center.x, yCoord[1]), // ( 0,-1)
+ fetch(xCoord[0], center.y), // (-2, 0)
+ fetch(xCoord[1], center.y)); // (-1, 0)
+
+ vec4 temp = vec4(
+ fetch(center.x, yCoord[3]), // ( 0, 2)
+ fetch(center.x, yCoord[2]), // ( 0, 1)
+ fetch(xCoord[3], center.y), // ( 2, 0)
+ fetch(xCoord[2], center.y)); // ( 1, 0)
+
+ // Even the simplest compilers should be able to constant-fold these to
+ // avoid the division.
+ // Note that on scalar processors these constants force computation of some
+ // identical products twice.
+ const vec4 kA = vec4(-1.0, -1.5, 0.5, -1.0) / 8.0;
+ const vec4 kB = vec4( 2.0, 0.0, 0.0, 4.0) / 8.0;
+ const vec4 kD = vec4( 0.0, 2.0, -1.0, -1.0) / 8.0;
+
+ // Conserve constant registers and take advantage of free swizzle on load
+ #define kE (kA.xywz)
+ #define kF (kB.xywz)
+
+ value += temp;
+
+ // There are five filter patterns (identity, cross, checker,
+ // theta, phi). Precompute the terms from all of them and then
+ // use swizzles to assign to color channels.
+ //
+ // Channel Matches
+ // x cross (e.g., EE G)
+ // y checker (e.g., EE B)
+ // z theta (e.g., EO R)
+ // w phi (e.g., EO R)
+ #define A (value[0])
+ #define B (value[1])
+ #define D (Dvec.x)
+ #define E (value[2])
+ #define F (value[3])
+
+ // Avoid zero elements. On a scalar processor this saves two MADDs
+ // and it has no effect on a vector processor.
+ PATTERN.yzw += (kD.yz * D).xyy;
+
+ PATTERN += (kA.xyz * A).xyzx + (kE.xyw * E).xyxz;
+ PATTERN.xw += kB.xw * B;
+ PATTERN.xz += kF.xz * F;
+
+ gl_FragColor.rgb = (alternate.y == 0.0) ?
+ ((alternate.x == 0.0) ?
+ vec3(C, PATTERN.xy) :
+ vec3(PATTERN.z, C, PATTERN.w)) :
+ ((alternate.x == 0.0) ?
+ vec3(PATTERN.w, C, PATTERN.z) :
+ vec3(PATTERN.yx, C));
+}
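
A useful sanity property of the McGuire filter weights: each pattern's weights sum to one, so a uniform image passes through unchanged. For the G-at-red estimate, the centre weight 4/8 combines with 2/8 for the four axial neighbours at distance one and -1/8 for the four at distance two:

    // Sketch: the G-at-red kernel (4, four 2s, four -1s, all /8) preserves
    // a constant image of value v, i.e. its weights sum to one.
    #include <cassert>

    int main()
    {
        double v = 0.5;
        double g = (4.0 * v + 4.0 * (2.0 * v) + 4.0 * (-1.0 * v)) / 8.0;
        assert(g == v); // exact: (2 + 4 - 2) / 8 = 0.5
        return 0;
    }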
diff --git a/src/apps/qcam/assets/shader/bayer_8.vert b/src/apps/qcam/assets/shader/bayer_8.vert
new file mode 100644
index 00000000..fb5109ee
--- /dev/null
+++ b/src/apps/qcam/assets/shader/bayer_8.vert
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+From http://jgt.akpeters.com/papers/McGuire08/
+
+Efficient, High-Quality Bayer Demosaic Filtering on GPUs
+
+Morgan McGuire
+
+This paper appears in issue Volume 13, Number 4.
+---------------------------------------------------------
+Copyright (c) 2008, Morgan McGuire. All rights reserved.
+
+Modified by Linaro Ltd to integrate it into libcamera.
+Copyright (C) 2021, Linaro
+*/
+
+//Vertex Shader
+
+attribute vec4 vertexIn;
+attribute vec2 textureIn;
+
+uniform mat4 proj_matrix;
+
+uniform vec2 tex_size; /* The texture size in pixels */
+uniform vec2 tex_step;
+
+/** Pixel position of the first red pixel in the
+ *  Bayer pattern: [{0,1}, {0,1}]. */
+uniform vec2 tex_bayer_first_red;
+
+/** .xy = Pixel being sampled in the fragment shader on the range [0, 1]
+ .zw = ...on the range [0, sourceSize], offset by firstRed */
+varying vec4 center;
+
+/** center.x + (-2/w, -1/w, 1/w, 2/w); the x-positions
+ *  of the adjacent pixels. */
+varying vec4 xCoord;
+
+/** center.y + (-2/h, -1/h, 1/h, 2/h); the y-positions
+ *  of the adjacent pixels. */
+varying vec4 yCoord;
+
+void main(void) {
+ center.xy = textureIn;
+ center.zw = textureIn * tex_size + tex_bayer_first_red;
+
+ xCoord = center.x + vec4(-2.0 * tex_step.x,
+ -tex_step.x, tex_step.x, 2.0 * tex_step.x);
+ yCoord = center.y + vec4(-2.0 * tex_step.y,
+ -tex_step.y, tex_step.y, 2.0 * tex_step.y);
+
+ gl_Position = proj_matrix * vertexIn;
+}
diff --git a/src/apps/qcam/assets/shader/identity.vert b/src/apps/qcam/assets/shader/identity.vert
new file mode 100644
index 00000000..907e8741
--- /dev/null
+++ b/src/apps/qcam/assets/shader/identity.vert
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Linaro
+ *
+ * identity.vert - Identity vertex shader for pixel format conversion
+ */
+
+attribute vec4 vertexIn;
+attribute vec2 textureIn;
+varying vec2 textureOut;
+
+uniform mat4 proj_matrix;
+uniform float stride_factor;
+
+void main(void)
+{
+ gl_Position = proj_matrix * vertexIn;
+ textureOut = vec2(textureIn.x * stride_factor, textureIn.y);
+}
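
stride_factor compensates for textures allocated at the image stride rather than the visible width: scaling the x coordinate keeps sampling within the visible part of each row. One plausible way for the host to derive it (hypothetical helper; the actual computation is an assumption):

    // Sketch: stride factor as the visible fraction of each texture row.
    float strideFactor(unsigned int width, unsigned int stride,
                       unsigned int bytesPerPixel)
    {
        // E.g. 1280 px at 1 byte/px with a 1536-byte stride gives ~0.833.
        return static_cast<float>(width * bytesPerPixel) /
               static_cast<float>(stride);
    }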
diff --git a/src/apps/qcam/assets/shader/shaders.qrc b/src/apps/qcam/assets/shader/shaders.qrc
new file mode 100644
index 00000000..96c709f9
--- /dev/null
+++ b/src/apps/qcam/assets/shader/shaders.qrc
@@ -0,0 +1,13 @@
+<!-- SPDX-License-Identifier: LGPL-2.1-or-later -->
+<!DOCTYPE RCC><RCC version="1.0">
+<qresource>
+ <file>RGB.frag</file>
+ <file>YUV_2_planes.frag</file>
+ <file>YUV_3_planes.frag</file>
+ <file>YUV_packed.frag</file>
+ <file>bayer_1x_packed.frag</file>
+ <file>bayer_8.frag</file>
+ <file>bayer_8.vert</file>
+ <file>identity.vert</file>
+</qresource>
+</RCC>
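
With no prefix set on the qresource element, each listed shader becomes addressable at runtime as ":/<file name>". A minimal loading sketch using the stock Qt resource API (helper name hypothetical):

    // Sketch: reading one of the bundled shader sources from Qt resources.
    #include <QByteArray>
    #include <QFile>
    #include <QString>

    QByteArray loadShaderSource(const QString &name)
    {
        QFile file(QStringLiteral(":/") + name); // e.g. ":/YUV_2_planes.frag"
        if (!file.open(QIODevice::ReadOnly))
            return {};
        return file.readAll();
    }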
diff --git a/src/apps/qcam/cam_select_dialog.cpp b/src/apps/qcam/cam_select_dialog.cpp
new file mode 100644
index 00000000..6b6d0713
--- /dev/null
+++ b/src/apps/qcam/cam_select_dialog.cpp
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
+ *
+ * qcam - Camera Selection dialog
+ */
+
+#include "cam_select_dialog.h"
+
+#include <memory>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+
+#include <QComboBox>
+#include <QDialogButtonBox>
+#include <QFormLayout>
+#include <QGuiApplication>
+#include <QLabel>
+#include <QScreen>
+#include <QString>
+
+CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManager,
+ QWidget *parent)
+ : QDialog(parent), cm_(cameraManager)
+{
+ /* Use a QFormLayout for the dialog. */
+ QFormLayout *layout = new QFormLayout(this);
+
+ /* Set up the camera ID combo box. */
+ cameraIdComboBox_ = new QComboBox;
+ for (const auto &cam : cm_->cameras())
+ cameraIdComboBox_->addItem(QString::fromStdString(cam->id()));
+
+ /* Set camera information labels. */
+ cameraLocation_ = new QLabel;
+ cameraModel_ = new QLabel;
+
+ updateCameraInfo(cameraIdComboBox_->currentText());
+ connect(cameraIdComboBox_, &QComboBox::currentTextChanged,
+ this, &CameraSelectorDialog::updateCameraInfo);
+
+ /* Set up the QDialogButtonBox. */
+ QDialogButtonBox *buttonBox =
+ new QDialogButtonBox(QDialogButtonBox::Ok |
+ QDialogButtonBox::Cancel);
+
+ connect(buttonBox, &QDialogButtonBox::accepted,
+ this, &QDialog::accept);
+ connect(buttonBox, &QDialogButtonBox::rejected,
+ this, &QDialog::reject);
+
+ /* Set the layout. */
+ layout->addRow("Camera:", cameraIdComboBox_);
+ layout->addRow("Location:", cameraLocation_);
+ layout->addRow("Model:", cameraModel_);
+ layout->addWidget(buttonBox);
+
+ /*
+ * Decrease the dialog's minimum width so that it fits on narrow
+ * screens, leaving a 20 pixel margin.
+ */
+ QRect screenGeometry = qGuiApp->primaryScreen()->availableGeometry();
+ if (screenGeometry.width() < minimumWidth())
+ setMinimumWidth(screenGeometry.width() - 20);
+}
+
+CameraSelectorDialog::~CameraSelectorDialog() = default;
+
+std::string CameraSelectorDialog::getCameraId()
+{
+ return cameraIdComboBox_->currentText().toStdString();
+}
+
+/* Hotplug / Unplug Support. */
+void CameraSelectorDialog::addCamera(QString cameraId)
+{
+ cameraIdComboBox_->addItem(cameraId);
+}
+
+void CameraSelectorDialog::removeCamera(QString cameraId)
+{
+ int cameraIndex = cameraIdComboBox_->findText(cameraId);
+ cameraIdComboBox_->removeItem(cameraIndex);
+}
+
+/* Camera Information */
+void CameraSelectorDialog::updateCameraInfo(QString cameraId)
+{
+ const std::shared_ptr<libcamera::Camera> &camera =
+ cm_->get(cameraId.toStdString());
+
+ if (!camera)
+ return;
+
+ const libcamera::ControlList &properties = camera->properties();
+
+ const auto &location = properties.get(libcamera::properties::Location);
+ if (location) {
+ switch (*location) {
+ case libcamera::properties::CameraLocationFront:
+ cameraLocation_->setText("Internal front camera");
+ break;
+ case libcamera::properties::CameraLocationBack:
+ cameraLocation_->setText("Internal back camera");
+ break;
+ case libcamera::properties::CameraLocationExternal:
+ cameraLocation_->setText("External camera");
+ break;
+ default:
+ cameraLocation_->setText("Unknown");
+ }
+ } else {
+ cameraLocation_->setText("Unknown");
+ }
+
+ const auto &model = properties.get(libcamera::properties::Model)
+ .value_or("Unknown");
+
+ cameraModel_->setText(QString::fromStdString(model));
+}
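
For context, a call site would typically run the dialog modally and read the selection back; a hypothetical sketch using only the methods declared in this patch plus QDialog::exec():

    // Sketch: running the selector modally (caller name hypothetical).
    #include <string>

    #include "cam_select_dialog.h"

    std::string chooseCamera(libcamera::CameraManager *cm, QWidget *parent)
    {
        CameraSelectorDialog dialog(cm, parent);

        if (dialog.exec() != QDialog::Accepted)
            return {};

        return dialog.getCameraId();
    }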
diff --git a/src/apps/qcam/cam_select_dialog.h b/src/apps/qcam/cam_select_dialog.h
new file mode 100644
index 00000000..4bec9ea9
--- /dev/null
+++ b/src/apps/qcam/cam_select_dialog.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Utkarsh Tiwari <utkarsh02t@gmail.com>
+ *
+ * qcam - Camera Selection dialog
+ */
+
+#pragma once
+
+#include <string>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
+
+#include <QDialog>
+#include <QString>
+
+class QComboBox;
+class QLabel;
+
+class CameraSelectorDialog : public QDialog
+{
+ Q_OBJECT
+public:
+ CameraSelectorDialog(libcamera::CameraManager *cameraManager,
+ QWidget *parent);
+ ~CameraSelectorDialog();
+
+ std::string getCameraId();
+
+ /* Hotplug / Unplug Support. */
+ void addCamera(QString cameraId);
+ void removeCamera(QString cameraId);
+
+ /* Camera Information */
+ void updateCameraInfo(QString cameraId);
+
+private:
+ libcamera::CameraManager *cm_;
+
+ /* UI elements. */
+ QComboBox *cameraIdComboBox_;
+ QLabel *cameraLocation_;
+ QLabel *cameraModel_;
+};
diff --git a/src/apps/qcam/format_converter.cpp b/src/apps/qcam/format_converter.cpp
new file mode 100644
index 00000000..32123493
--- /dev/null
+++ b/src/apps/qcam/format_converter.cpp
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Convert buffer to RGB
+ */
+
+#include "format_converter.h"
+
+#include <errno.h>
+#include <utility>
+
+#include <QImage>
+
+#include <libcamera/formats.h>
+
+#include "../common/image.h"
+
+#define RGBSHIFT 8
+#ifndef MAX
+#define MAX(a,b) ((a)>(b)?(a):(b))
+#endif
+#ifndef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+#endif
+#ifndef CLAMP
+#define CLAMP(a,low,high) MAX((low),MIN((high),(a)))
+#endif
+#ifndef CLIP
+#define CLIP(x) CLAMP(x,0,255)
+#endif
+
+int FormatConverter::configure(const libcamera::PixelFormat &format,
+ const QSize &size, unsigned int stride)
+{
+ switch (format) {
+ case libcamera::formats::NV12:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ nvSwap_ = false;
+ break;
+ case libcamera::formats::NV21:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ nvSwap_ = true;
+ break;
+ case libcamera::formats::NV16:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 1;
+ nvSwap_ = false;
+ break;
+ case libcamera::formats::NV61:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 1;
+ nvSwap_ = true;
+ break;
+ case libcamera::formats::NV24:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 1;
+ vertSubSample_ = 1;
+ nvSwap_ = false;
+ break;
+ case libcamera::formats::NV42:
+ formatFamily_ = YUVSemiPlanar;
+ horzSubSample_ = 1;
+ vertSubSample_ = 1;
+ nvSwap_ = true;
+ break;
+
+ case libcamera::formats::R8:
+ formatFamily_ = RGB;
+ r_pos_ = 0;
+ g_pos_ = 0;
+ b_pos_ = 0;
+ bpp_ = 1;
+ break;
+ case libcamera::formats::RGB888:
+ formatFamily_ = RGB;
+ r_pos_ = 2;
+ g_pos_ = 1;
+ b_pos_ = 0;
+ bpp_ = 3;
+ break;
+ case libcamera::formats::BGR888:
+ formatFamily_ = RGB;
+ r_pos_ = 0;
+ g_pos_ = 1;
+ b_pos_ = 2;
+ bpp_ = 3;
+ break;
+ case libcamera::formats::ARGB8888:
+ case libcamera::formats::XRGB8888:
+ formatFamily_ = RGB;
+ r_pos_ = 2;
+ g_pos_ = 1;
+ b_pos_ = 0;
+ bpp_ = 4;
+ break;
+ case libcamera::formats::RGBA8888:
+ case libcamera::formats::RGBX8888:
+ formatFamily_ = RGB;
+ r_pos_ = 3;
+ g_pos_ = 2;
+ b_pos_ = 1;
+ bpp_ = 4;
+ break;
+ case libcamera::formats::ABGR8888:
+ case libcamera::formats::XBGR8888:
+ formatFamily_ = RGB;
+ r_pos_ = 0;
+ g_pos_ = 1;
+ b_pos_ = 2;
+ bpp_ = 4;
+ break;
+ case libcamera::formats::BGRA8888:
+ case libcamera::formats::BGRX8888:
+ formatFamily_ = RGB;
+ r_pos_ = 1;
+ g_pos_ = 2;
+ b_pos_ = 3;
+ bpp_ = 4;
+ break;
+
+ case libcamera::formats::VYUY:
+ formatFamily_ = YUVPacked;
+ y_pos_ = 1;
+ cb_pos_ = 2;
+ break;
+ case libcamera::formats::YVYU:
+ formatFamily_ = YUVPacked;
+ y_pos_ = 0;
+ cb_pos_ = 3;
+ break;
+ case libcamera::formats::UYVY:
+ formatFamily_ = YUVPacked;
+ y_pos_ = 1;
+ cb_pos_ = 0;
+ break;
+ case libcamera::formats::YUYV:
+ formatFamily_ = YUVPacked;
+ y_pos_ = 0;
+ cb_pos_ = 1;
+ break;
+
+ case libcamera::formats::YUV420:
+ formatFamily_ = YUVPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ nvSwap_ = false;
+ break;
+ case libcamera::formats::YVU420:
+ formatFamily_ = YUVPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ nvSwap_ = true;
+ break;
+ case libcamera::formats::YUV422:
+ formatFamily_ = YUVPlanar;
+ horzSubSample_ = 2;
+ vertSubSample_ = 1;
+ nvSwap_ = false;
+ break;
+
+ case libcamera::formats::MJPEG:
+ formatFamily_ = MJPEG;
+ break;
+
+ default:
+ return -EINVAL;
+	}
+
+ format_ = format;
+ width_ = size.width();
+ height_ = size.height();
+ stride_ = stride;
+
+ return 0;
+}
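+
+/*
+ * Typical usage, as a minimal illustrative sketch (the real caller is
+ * ViewFinderQt; `cfg`, `mappedImage` and `bytesUsed` are assumed names,
+ * not part of this patch):
+ *
+ *   FormatConverter converter;
+ *   if (converter.configure(cfg.pixelFormat,
+ *                           QSize(cfg.size.width, cfg.size.height),
+ *                           cfg.stride) < 0)
+ *           return; // unsupported pixel format
+ *
+ *   QImage frame(cfg.size.width, cfg.size.height, QImage::Format_RGB32);
+ *   converter.convert(mappedImage, bytesUsed, &frame);
+ */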
+
+void FormatConverter::convert(const Image *src, size_t size, QImage *dst)
+{
+ switch (formatFamily_) {
+ case MJPEG:
+ dst->loadFromData(src->data(0).data(), size, "JPEG");
+ break;
+ case RGB:
+ convertRGB(src, dst->bits());
+ break;
+ case YUVPacked:
+ convertYUVPacked(src, dst->bits());
+ break;
+ case YUVSemiPlanar:
+ convertYUVSemiPlanar(src, dst->bits());
+ break;
+ case YUVPlanar:
+ convertYUVPlanar(src, dst->bits());
+ break;
+	}
+}
+
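+/*
+ * Fixed-point BT.601 limited-range conversion, scaled by 2^RGBSHIFT (256):
+ * 1.164 * 256 ≈ 298, 1.596 * 256 ≈ 409, 0.391 * 256 ≈ 100,
+ * 0.813 * 256 ≈ 208 and 2.018 * 256 ≈ 516. The '+ 128' term rounds to
+ * nearest before the right shift.
+ */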
+static void yuv_to_rgb(int y, int u, int v, int *r, int *g, int *b)
+{
+ int c = y - 16;
+ int d = u - 128;
+ int e = v - 128;
+ *r = CLIP(( 298 * c + 409 * e + 128) >> RGBSHIFT);
+ *g = CLIP(( 298 * c - 100 * d - 208 * e + 128) >> RGBSHIFT);
+ *b = CLIP(( 298 * c + 516 * d + 128) >> RGBSHIFT);
+}
+
+void FormatConverter::convertRGB(const Image *srcImage, unsigned char *dst)
+{
+ const unsigned char *src = srcImage->data(0).data();
+ unsigned int x, y;
+ int r, g, b;
+
+ for (y = 0; y < height_; y++) {
+ for (x = 0; x < width_; x++) {
+ r = src[bpp_ * x + r_pos_];
+ g = src[bpp_ * x + g_pos_];
+ b = src[bpp_ * x + b_pos_];
+
+ dst[4 * x + 0] = b;
+ dst[4 * x + 1] = g;
+ dst[4 * x + 2] = r;
+ dst[4 * x + 3] = 0xff;
+ }
+
+ src += stride_;
+ dst += width_ * 4;
+ }
+}
+
+void FormatConverter::convertYUVPacked(const Image *srcImage, unsigned char *dst)
+{
+ const unsigned char *src = srcImage->data(0).data();
+ unsigned int src_x, src_y, dst_x, dst_y;
+ unsigned int src_stride;
+ unsigned int dst_stride;
+ unsigned int cr_pos;
+ int r, g, b, y, cr, cb;
+
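+	/* CB and CR are always two bytes apart within the 4-byte macropixel. */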
+ cr_pos = (cb_pos_ + 2) % 4;
+ src_stride = stride_;
+ dst_stride = width_ * 4;
+
+ for (src_y = 0, dst_y = 0; dst_y < height_; src_y++, dst_y++) {
+ for (src_x = 0, dst_x = 0; dst_x < width_; ) {
+ cb = src[src_y * src_stride + src_x * 4 + cb_pos_];
+ cr = src[src_y * src_stride + src_x * 4 + cr_pos];
+
+ y = src[src_y * src_stride + src_x * 4 + y_pos_];
+ yuv_to_rgb(y, cb, cr, &r, &g, &b);
+ dst[dst_y * dst_stride + 4 * dst_x + 0] = b;
+ dst[dst_y * dst_stride + 4 * dst_x + 1] = g;
+ dst[dst_y * dst_stride + 4 * dst_x + 2] = r;
+ dst[dst_y * dst_stride + 4 * dst_x + 3] = 0xff;
+ dst_x++;
+
+ y = src[src_y * src_stride + src_x * 4 + y_pos_ + 2];
+ yuv_to_rgb(y, cb, cr, &r, &g, &b);
+ dst[dst_y * dst_stride + 4 * dst_x + 0] = b;
+ dst[dst_y * dst_stride + 4 * dst_x + 1] = g;
+ dst[dst_y * dst_stride + 4 * dst_x + 2] = r;
+ dst[dst_y * dst_stride + 4 * dst_x + 3] = 0xff;
+ dst_x++;
+
+ src_x++;
+ }
+ }
+}
+
+void FormatConverter::convertYUVPlanar(const Image *srcImage, unsigned char *dst)
+{
+ unsigned int c_stride = stride_ / horzSubSample_;
+ unsigned int c_inc = horzSubSample_ == 1 ? 1 : 0;
+ const unsigned char *src_y = srcImage->data(0).data();
+ const unsigned char *src_cb = srcImage->data(1).data();
+ const unsigned char *src_cr = srcImage->data(2).data();
+ int r, g, b;
+
+ if (nvSwap_)
+ std::swap(src_cb, src_cr);
+
+ for (unsigned int y = 0; y < height_; y++) {
+ const unsigned char *line_y = src_y + y * stride_;
+ const unsigned char *line_cb = src_cb + (y / vertSubSample_) *
+ c_stride;
+ const unsigned char *line_cr = src_cr + (y / vertSubSample_) *
+ c_stride;
+
+ for (unsigned int x = 0; x < width_; x += 2) {
+ yuv_to_rgb(*line_y, *line_cb, *line_cr, &r, &g, &b);
+ dst[0] = b;
+ dst[1] = g;
+ dst[2] = r;
+ dst[3] = 0xff;
+ line_y++;
+ line_cb += c_inc;
+ line_cr += c_inc;
+ dst += 4;
+
+ yuv_to_rgb(*line_y, *line_cb, *line_cr, &r, &g, &b);
+ dst[0] = b;
+ dst[1] = g;
+ dst[2] = r;
+ dst[3] = 0xff;
+ line_y++;
+ line_cb += 1;
+ line_cr += 1;
+ dst += 4;
+ }
+ }
+}
+
+void FormatConverter::convertYUVSemiPlanar(const Image *srcImage, unsigned char *dst)
+{
+ unsigned int c_stride = stride_ * (2 / horzSubSample_);
+ unsigned int c_inc = horzSubSample_ == 1 ? 2 : 0;
+ unsigned int cb_pos = nvSwap_ ? 1 : 0;
+ unsigned int cr_pos = nvSwap_ ? 0 : 1;
+ const unsigned char *src = srcImage->data(0).data();
+ const unsigned char *src_c = srcImage->data(1).data();
+ int r, g, b;
+
+ for (unsigned int y = 0; y < height_; y++) {
+ const unsigned char *src_y = src + y * stride_;
+ const unsigned char *src_cb = src_c + (y / vertSubSample_) *
+ c_stride + cb_pos;
+ const unsigned char *src_cr = src_c + (y / vertSubSample_) *
+ c_stride + cr_pos;
+
+ for (unsigned int x = 0; x < width_; x += 2) {
+ yuv_to_rgb(*src_y, *src_cb, *src_cr, &r, &g, &b);
+ dst[0] = b;
+ dst[1] = g;
+ dst[2] = r;
+ dst[3] = 0xff;
+ src_y++;
+ src_cb += c_inc;
+ src_cr += c_inc;
+ dst += 4;
+
+ yuv_to_rgb(*src_y, *src_cb, *src_cr, &r, &g, &b);
+ dst[0] = b;
+ dst[1] = g;
+ dst[2] = r;
+ dst[3] = 0xff;
+ src_y++;
+ src_cb += 2;
+ src_cr += 2;
+ dst += 4;
+ }
+ }
+}
diff --git a/src/apps/qcam/format_converter.h b/src/apps/qcam/format_converter.h
new file mode 100644
index 00000000..819a87a5
--- /dev/null
+++ b/src/apps/qcam/format_converter.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Convert buffer to RGB
+ */
+
+#pragma once
+
+#include <stddef.h>
+
+#include <QSize>
+
+#include <libcamera/pixel_format.h>
+
+class Image;
+class QImage;
+
+class FormatConverter
+{
+public:
+ int configure(const libcamera::PixelFormat &format, const QSize &size,
+ unsigned int stride);
+
+ void convert(const Image *src, size_t size, QImage *dst);
+
+private:
+ enum FormatFamily {
+ MJPEG,
+ RGB,
+ YUVPacked,
+ YUVPlanar,
+ YUVSemiPlanar,
+ };
+
+ void convertRGB(const Image *src, unsigned char *dst);
+ void convertYUVPacked(const Image *src, unsigned char *dst);
+ void convertYUVPlanar(const Image *src, unsigned char *dst);
+ void convertYUVSemiPlanar(const Image *src, unsigned char *dst);
+
+ libcamera::PixelFormat format_;
+ unsigned int width_;
+ unsigned int height_;
+ unsigned int stride_;
+
+ enum FormatFamily formatFamily_;
+
+ /* NV parameters */
+ unsigned int horzSubSample_;
+ unsigned int vertSubSample_;
+ bool nvSwap_;
+
+ /* RGB parameters */
+ unsigned int bpp_;
+ unsigned int r_pos_;
+ unsigned int g_pos_;
+ unsigned int b_pos_;
+
+ /* YUV parameters */
+ unsigned int y_pos_;
+ unsigned int cb_pos_;
+};
diff --git a/src/apps/qcam/main.cpp b/src/apps/qcam/main.cpp
new file mode 100644
index 00000000..d0bde141
--- /dev/null
+++ b/src/apps/qcam/main.cpp
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - The libcamera GUI test application
+ */
+
+#include <signal.h>
+#include <string.h>
+
+#include <QApplication>
+#include <QtDebug>
+
+#include <libcamera/camera_manager.h>
+
+#include "../common/options.h"
+#include "../common/stream_options.h"
+
+#include "main_window.h"
+#include "message_handler.h"
+
+using namespace libcamera;
+
+namespace {
+
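+/*
+ * Note: Qt functions are not async-signal-safe, so calling qApp->quit()
+ * from a POSIX signal handler is technically unsafe. Robust applications
+ * forward the signal through a socketpair and a QSocketNotifier; it is
+ * kept simple here for a test application.
+ */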
+void signalHandler([[maybe_unused]] int signal)
+{
+ qInfo() << "Exiting";
+ qApp->quit();
+}
+
+OptionsParser::Options parseOptions(int argc, char *argv[])
+{
+ StreamKeyValueParser streamKeyValue;
+
+ OptionsParser parser;
+ parser.addOption(OptCamera, OptionString,
+ "Specify which camera to operate on", "camera",
+ ArgumentRequired, "camera");
+ parser.addOption(OptHelp, OptionNone, "Display this help message",
+ "help");
+ parser.addOption(OptRenderer, OptionString,
+ "Choose the renderer type {qt,gles} (default: qt)",
+ "renderer", ArgumentRequired, "renderer");
+ parser.addOption(OptStream, &streamKeyValue,
+ "Set configuration of a camera stream", "stream", true);
+ parser.addOption(OptVerbose, OptionNone,
+ "Print verbose log messages", "verbose");
+
+ OptionsParser::Options options = parser.parse(argc, argv);
+ if (options.isSet(OptHelp))
+ parser.usage();
+
+ return options;
+}
+
+} /* namespace */
+
+int main(int argc, char **argv)
+{
+ QApplication app(argc, argv);
+ int ret;
+
+ OptionsParser::Options options = parseOptions(argc, argv);
+ if (!options.valid())
+ return EXIT_FAILURE;
+ if (options.isSet(OptHelp))
+ return 0;
+
+ MessageHandler msgHandler(options.isSet(OptVerbose));
+
+ struct sigaction sa = {};
+ sa.sa_handler = &signalHandler;
+ sigaction(SIGINT, &sa, nullptr);
+
+	CameraManager *cm = new CameraManager();
+
+ ret = cm->start();
+ if (ret) {
+ qInfo() << "Failed to start camera manager:"
+ << strerror(-ret);
+ return EXIT_FAILURE;
+ }
+
+ MainWindow *mainWindow = new MainWindow(cm, options);
+ mainWindow->show();
+ ret = app.exec();
+ delete mainWindow;
+
+ cm->stop();
+ delete cm;
+
+ return ret;
+}
diff --git a/src/apps/qcam/main_window.cpp b/src/apps/qcam/main_window.cpp
new file mode 100644
index 00000000..3880a846
--- /dev/null
+++ b/src/apps/qcam/main_window.cpp
@@ -0,0 +1,776 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - Main application window
+ */
+
+#include "main_window.h"
+
+#include <assert.h>
+#include <iomanip>
+#include <string>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/version.h>
+
+#include <QCoreApplication>
+#include <QFileDialog>
+#include <QImage>
+#include <QImageWriter>
+#include <QMutexLocker>
+#include <QStandardPaths>
+#include <QStringList>
+#include <QTimer>
+#include <QToolBar>
+#include <QToolButton>
+#include <QtDebug>
+
+#include "../common/dng_writer.h"
+#include "../common/image.h"
+
+#include "cam_select_dialog.h"
+#ifndef QT_NO_OPENGL
+#include "viewfinder_gl.h"
+#endif
+#include "viewfinder_qt.h"
+
+using namespace libcamera;
+
+/**
+ * \brief Custom QEvent to signal capture completion
+ */
+class CaptureEvent : public QEvent
+{
+public:
+ CaptureEvent()
+ : QEvent(type())
+ {
+ }
+
+ static Type type()
+ {
+ static int type = QEvent::registerEventType();
+ return static_cast<Type>(type);
+ }
+};
+
+/**
+ * \brief Custom QEvent to signal hotplug or unplug
+ */
+class HotplugEvent : public QEvent
+{
+public:
+ enum PlugEvent {
+ HotPlug,
+ HotUnplug
+ };
+
+ HotplugEvent(std::shared_ptr<Camera> camera, PlugEvent event)
+ : QEvent(type()), camera_(std::move(camera)), plugEvent_(event)
+ {
+ }
+
+ static Type type()
+ {
+ static int type = QEvent::registerEventType();
+ return static_cast<Type>(type);
+ }
+
+ PlugEvent hotplugEvent() const { return plugEvent_; }
+ Camera *camera() const { return camera_.get(); }
+
+private:
+ std::shared_ptr<Camera> camera_;
+ PlugEvent plugEvent_;
+};
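+
+/*
+ * Both event classes marshal work across threads: libcamera emits its
+ * signals in the camera manager's thread, while QCoreApplication::postEvent()
+ * is thread-safe and delivers the event in the GUI thread.
+ */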
+
+MainWindow::MainWindow(CameraManager *cm, const OptionsParser::Options &options)
+ : saveRaw_(nullptr), options_(options), cm_(cm), allocator_(nullptr),
+ isCapturing_(false), captureRaw_(false)
+{
+ int ret;
+
+ /*
+ * Initialize the UI: Create the toolbar, set the window title and
+ * create the viewfinder widget.
+ */
+ createToolbars();
+
+ title_ = "QCam " + QString::fromStdString(CameraManager::version());
+ setWindowTitle(title_);
+ connect(&titleTimer_, SIGNAL(timeout()), this, SLOT(updateTitle()));
+
+ /* Renderer type Qt or GLES, select Qt by default. */
+ std::string renderType = "qt";
+ if (options_.isSet(OptRenderer))
+ renderType = options_[OptRenderer].toString();
+
+ if (renderType == "qt") {
+ ViewFinderQt *viewfinder = new ViewFinderQt(this);
+ connect(viewfinder, &ViewFinderQt::renderComplete,
+ this, &MainWindow::renderComplete);
+ viewfinder_ = viewfinder;
+ setCentralWidget(viewfinder);
+#ifndef QT_NO_OPENGL
+ } else if (renderType == "gles") {
+ ViewFinderGL *viewfinder = new ViewFinderGL(this);
+ connect(viewfinder, &ViewFinderGL::renderComplete,
+ this, &MainWindow::renderComplete);
+ viewfinder_ = viewfinder;
+ setCentralWidget(viewfinder);
+#endif
+ } else {
+ qWarning() << "Invalid render type"
+ << QString::fromStdString(renderType);
+ quit();
+ return;
+ }
+
+ adjustSize();
+
+ /* Hotplug/unplug support */
+ cm_->cameraAdded.connect(this, &MainWindow::addCamera);
+ cm_->cameraRemoved.connect(this, &MainWindow::removeCamera);
+
+ cameraSelectorDialog_ = new CameraSelectorDialog(cm_, this);
+
+ /* Open the camera and start capture. */
+ ret = openCamera();
+ if (ret < 0) {
+ quit();
+ return;
+ }
+
+ startStopAction_->setChecked(true);
+}
+
+MainWindow::~MainWindow()
+{
+ if (camera_) {
+ stopCapture();
+ camera_->release();
+ camera_.reset();
+ }
+}
+
+bool MainWindow::event(QEvent *e)
+{
+ if (e->type() == CaptureEvent::type()) {
+ processCapture();
+ return true;
+ } else if (e->type() == HotplugEvent::type()) {
+ processHotplug(static_cast<HotplugEvent *>(e));
+ return true;
+ }
+
+ return QMainWindow::event(e);
+}
+
+int MainWindow::createToolbars()
+{
+ QAction *action;
+
+ toolbar_ = addToolBar("Main");
+
+ /* Disable right click context menu. */
+ toolbar_->setContextMenuPolicy(Qt::PreventContextMenu);
+
+ /* Quit action. */
+ action = toolbar_->addAction(QIcon::fromTheme("application-exit",
+ QIcon(":x-circle.svg")),
+ "Quit");
+ action->setShortcut(QKeySequence::Quit);
+ connect(action, &QAction::triggered, this, &MainWindow::quit);
+
+ /* Camera selector. */
+ cameraSelectButton_ = new QPushButton;
+ connect(cameraSelectButton_, &QPushButton::clicked,
+ this, &MainWindow::switchCamera);
+
+ toolbar_->addWidget(cameraSelectButton_);
+
+ toolbar_->addSeparator();
+
+ /* Start/Stop action. */
+ iconPlay_ = QIcon::fromTheme("media-playback-start",
+ QIcon(":play-circle.svg"));
+ iconStop_ = QIcon::fromTheme("media-playback-stop",
+ QIcon(":stop-circle.svg"));
+
+ action = toolbar_->addAction(iconPlay_, "Start Capture");
+ action->setCheckable(true);
+ action->setShortcut(Qt::Key_Space);
+ connect(action, &QAction::toggled, this, &MainWindow::toggleCapture);
+ startStopAction_ = action;
+
+ /* Save As... action. */
+ action = toolbar_->addAction(QIcon::fromTheme("document-save-as",
+ QIcon(":save.svg")),
+ "Save As...");
+ action->setShortcut(QKeySequence::SaveAs);
+ connect(action, &QAction::triggered, this, &MainWindow::saveImageAs);
+
+#ifdef HAVE_TIFF
+ /* Save Raw action. */
+ action = toolbar_->addAction(QIcon::fromTheme("camera-photo",
+ QIcon(":aperture.svg")),
+ "Save Raw");
+ action->setEnabled(false);
+ connect(action, &QAction::triggered, this, &MainWindow::captureRaw);
+ saveRaw_ = action;
+#endif
+
+ return 0;
+}
+
+void MainWindow::quit()
+{
+ QTimer::singleShot(0, QCoreApplication::instance(),
+ &QCoreApplication::quit);
+}
+
+void MainWindow::updateTitle()
+{
+ /* Calculate the average frame rate over the last period. */
+ unsigned int duration = frameRateInterval_.elapsed();
+ unsigned int frames = framesCaptured_ - previousFrames_;
+ double fps = frames * 1000.0 / duration;
+
+ /* Restart counters. */
+ frameRateInterval_.start();
+ previousFrames_ = framesCaptured_;
+
+ setWindowTitle(title_ + " : " + QString::number(fps, 'f', 2) + " fps");
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Selection
+ */
+
+void MainWindow::switchCamera()
+{
+ /* Get and acquire the new camera. */
+ std::shared_ptr<Camera> cam = chooseCamera();
+
+ if (!cam)
+ return;
+
+ if (camera_ && cam == camera_)
+ return;
+
+ if (cam->acquire()) {
+ qInfo() << "Failed to acquire camera" << cam->id().c_str();
+ return;
+ }
+
+ qInfo() << "Switching to camera" << cam->id().c_str();
+
+ /*
+ * Stop the capture session, release the current camera, replace it with
+ * the new camera and start a new capture session.
+ */
+ startStopAction_->setChecked(false);
+
+ if (camera_)
+ camera_->release();
+
+ camera_ = cam;
+
+ startStopAction_->setChecked(true);
+
+	/* Display the current camera id in the toolbar. */
+ cameraSelectButton_->setText(QString::fromStdString(cam->id()));
+}
+
+std::shared_ptr<Camera> MainWindow::chooseCamera()
+{
+ if (cameraSelectorDialog_->exec() != QDialog::Accepted)
+ return {};
+
+ std::string id = cameraSelectorDialog_->getCameraId();
+ return cm_->get(id);
+}
+
+int MainWindow::openCamera()
+{
+ /*
+ * If a camera is specified on the command line, get it. Otherwise, if
+	 * only one camera is available, pick it automatically; otherwise, display
+ * the selector dialog box.
+ */
+ if (options_.isSet(OptCamera)) {
+ std::string cameraName = static_cast<std::string>(options_[OptCamera]);
+ camera_ = cm_->get(cameraName);
+ if (!camera_)
+ qInfo() << "Camera" << cameraName.c_str() << "not found";
+ } else {
+ std::vector<std::shared_ptr<Camera>> cameras = cm_->cameras();
+ camera_ = (cameras.size() == 1) ? cameras[0] : chooseCamera();
+ if (!camera_)
+ qInfo() << "No camera detected";
+ }
+
+ if (!camera_)
+ return -ENODEV;
+
+ /* Acquire the camera. */
+ if (camera_->acquire()) {
+ qInfo() << "Failed to acquire camera";
+ camera_.reset();
+ return -EBUSY;
+ }
+
+ /* Set the camera switch button with the currently selected Camera id. */
+ cameraSelectButton_->setText(QString::fromStdString(camera_->id()));
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Capture Start & Stop
+ */
+
+void MainWindow::toggleCapture(bool start)
+{
+ if (start) {
+ startCapture();
+ startStopAction_->setIcon(iconStop_);
+ startStopAction_->setText("Stop Capture");
+ } else {
+ stopCapture();
+ startStopAction_->setIcon(iconPlay_);
+ startStopAction_->setText("Start Capture");
+ }
+}
+
+/**
+ * \brief Start capture with the current camera
+ *
+ * This function shall not be called directly, use toggleCapture() instead.
+ */
+int MainWindow::startCapture()
+{
+ std::vector<StreamRole> roles = StreamKeyValueParser::roles(options_[OptStream]);
+ int ret;
+
+ /* Verify roles are supported. */
+ switch (roles.size()) {
+ case 1:
+ if (roles[0] != StreamRole::Viewfinder) {
+ qWarning() << "Only viewfinder supported for single stream";
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ if (roles[0] != StreamRole::Viewfinder ||
+ roles[1] != StreamRole::Raw) {
+ qWarning() << "Only viewfinder + raw supported for dual streams";
+ return -EINVAL;
+ }
+ break;
+ default:
+ qWarning() << "Unsupported stream configuration";
+ return -EINVAL;
+ }
+
+ /* Configure the camera. */
+ config_ = camera_->generateConfiguration(roles);
+ if (!config_) {
+ qWarning() << "Failed to generate configuration from roles";
+ return -EINVAL;
+ }
+
+ StreamConfiguration &vfConfig = config_->at(0);
+
+ /* Use a format supported by the viewfinder if available. */
+ std::vector<PixelFormat> formats = vfConfig.formats().pixelformats();
+ for (const PixelFormat &format : viewfinder_->nativeFormats()) {
+ auto match = std::find_if(formats.begin(), formats.end(),
+ [&](const PixelFormat &f) {
+ return f == format;
+ });
+ if (match != formats.end()) {
+ vfConfig.pixelFormat = format;
+ break;
+ }
+ }
+
+ /* Allow user to override configuration. */
+ if (StreamKeyValueParser::updateConfiguration(config_.get(),
+ options_[OptStream])) {
+ qWarning() << "Failed to update configuration";
+ return -EINVAL;
+ }
+
+ CameraConfiguration::Status validation = config_->validate();
+ if (validation == CameraConfiguration::Invalid) {
+ qWarning() << "Failed to create valid camera configuration";
+ return -EINVAL;
+ }
+
+ if (validation == CameraConfiguration::Adjusted)
+ qInfo() << "Stream configuration adjusted to "
+ << vfConfig.toString().c_str();
+
+ ret = camera_->configure(config_.get());
+ if (ret < 0) {
+ qInfo() << "Failed to configure camera";
+ return ret;
+ }
+
+ /* Store stream allocation. */
+ vfStream_ = config_->at(0).stream();
+ if (config_->size() == 2)
+ rawStream_ = config_->at(1).stream();
+ else
+ rawStream_ = nullptr;
+
+ /*
+ * Configure the viewfinder. If no color space is reported, default to
+ * sYCC.
+ */
+ ret = viewfinder_->setFormat(vfConfig.pixelFormat,
+ QSize(vfConfig.size.width, vfConfig.size.height),
+ vfConfig.colorSpace.value_or(ColorSpace::Sycc),
+ vfConfig.stride);
+ if (ret < 0) {
+ qInfo() << "Failed to set viewfinder format";
+ return ret;
+ }
+
+ adjustSize();
+
+ /* Configure the raw capture button. */
+ if (saveRaw_)
+ saveRaw_->setEnabled(config_->size() == 2);
+
+ /* Allocate and map buffers. */
+ allocator_ = new FrameBufferAllocator(camera_);
+ for (StreamConfiguration &config : *config_) {
+ Stream *stream = config.stream();
+
+ ret = allocator_->allocate(stream);
+ if (ret < 0) {
+ qWarning() << "Failed to allocate capture buffers";
+ goto error;
+ }
+
+ for (const std::unique_ptr<FrameBuffer> &buffer : allocator_->buffers(stream)) {
+ /* Map memory buffers and cache the mappings. */
+ std::unique_ptr<Image> image =
+ Image::fromFrameBuffer(buffer.get(), Image::MapMode::ReadOnly);
+ assert(image != nullptr);
+ mappedBuffers_[buffer.get()] = std::move(image);
+
+ /* Store buffers on the free list. */
+ freeBuffers_[stream].enqueue(buffer.get());
+ }
+ }
+
+ /* Create requests and fill them with buffers from the viewfinder. */
+ while (!freeBuffers_[vfStream_].isEmpty()) {
+ FrameBuffer *buffer = freeBuffers_[vfStream_].dequeue();
+
+ std::unique_ptr<Request> request = camera_->createRequest();
+ if (!request) {
+ qWarning() << "Can't create request";
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = request->addBuffer(vfStream_, buffer);
+ if (ret < 0) {
+ qWarning() << "Can't set buffer for request";
+ goto error;
+ }
+
+ requests_.push_back(std::move(request));
+ }
+
+ /* Start the title timer and the camera. */
+ titleTimer_.start(2000);
+ frameRateInterval_.start();
+ previousFrames_ = 0;
+ framesCaptured_ = 0;
+ lastBufferTime_ = 0;
+
+ ret = camera_->start();
+ if (ret) {
+ qInfo() << "Failed to start capture";
+ goto error;
+ }
+
+ camera_->requestCompleted.connect(this, &MainWindow::requestComplete);
+
+ /* Queue all requests. */
+ for (std::unique_ptr<Request> &request : requests_) {
+ ret = queueRequest(request.get());
+ if (ret < 0) {
+ qWarning() << "Can't queue request";
+ goto error_disconnect;
+ }
+ }
+
+ isCapturing_ = true;
+
+ return 0;
+
+error_disconnect:
+ camera_->requestCompleted.disconnect(this);
+ camera_->stop();
+
+error:
+ requests_.clear();
+
+ mappedBuffers_.clear();
+
+ freeBuffers_.clear();
+
+ delete allocator_;
+ allocator_ = nullptr;
+
+ return ret;
+}
+
+/**
+ * \brief Stop ongoing capture
+ *
+ * This function may be called directly when tearing down the MainWindow. Use
+ * toggleCapture() instead in all other cases.
+ */
+void MainWindow::stopCapture()
+{
+ if (!isCapturing_)
+ return;
+
+ viewfinder_->stop();
+ if (saveRaw_)
+ saveRaw_->setEnabled(false);
+ captureRaw_ = false;
+
+ int ret = camera_->stop();
+ if (ret)
+ qInfo() << "Failed to stop capture";
+
+ camera_->requestCompleted.disconnect(this);
+
+ mappedBuffers_.clear();
+
+ requests_.clear();
+ freeQueue_.clear();
+
+ delete allocator_;
+
+ isCapturing_ = false;
+
+ config_.reset();
+
+ /*
+ * A CaptureEvent may have been posted before we stopped the camera,
+ * but not processed yet. Clear the queue of done buffers to avoid
+ * racing with the event handler.
+ */
+ freeBuffers_.clear();
+ doneQueue_.clear();
+
+ titleTimer_.stop();
+ setWindowTitle(title_);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera hotplugging support
+ */
+
+void MainWindow::processHotplug(HotplugEvent *e)
+{
+ Camera *camera = e->camera();
+ QString cameraId = QString::fromStdString(camera->id());
+ HotplugEvent::PlugEvent event = e->hotplugEvent();
+
+ if (event == HotplugEvent::HotPlug) {
+ cameraSelectorDialog_->addCamera(cameraId);
+ } else if (event == HotplugEvent::HotUnplug) {
+ /* Check if the currently-streaming camera is removed. */
+ if (camera == camera_.get()) {
+ toggleCapture(false);
+ camera_->release();
+ camera_.reset();
+ }
+
+ cameraSelectorDialog_->removeCamera(cameraId);
+ }
+}
+
+void MainWindow::addCamera(std::shared_ptr<Camera> camera)
+{
+ qInfo() << "Adding new camera:" << camera->id().c_str();
+ QCoreApplication::postEvent(this,
+ new HotplugEvent(std::move(camera),
+ HotplugEvent::HotPlug));
+}
+
+void MainWindow::removeCamera(std::shared_ptr<Camera> camera)
+{
+ qInfo() << "Removing camera:" << camera->id().c_str();
+ QCoreApplication::postEvent(this,
+ new HotplugEvent(std::move(camera),
+ HotplugEvent::HotUnplug));
+}
+
+/* -----------------------------------------------------------------------------
+ * Image Save
+ */
+
+void MainWindow::saveImageAs()
+{
+ QImage image = viewfinder_->getCurrentImage();
+ QString defaultPath = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation);
+
+ QString filename = QFileDialog::getSaveFileName(this, "Save Image", defaultPath,
+ "Image Files (*.png *.jpg *.jpeg)");
+ if (filename.isEmpty())
+ return;
+
+ QImageWriter writer(filename);
+ writer.setQuality(95);
+ writer.write(image);
+}
+
+void MainWindow::captureRaw()
+{
+ captureRaw_ = true;
+}
+
+void MainWindow::processRaw(FrameBuffer *buffer,
+ [[maybe_unused]] const ControlList &metadata)
+{
+#ifdef HAVE_TIFF
+ QString defaultPath = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation);
+ QString filename = QFileDialog::getSaveFileName(this, "Save DNG", defaultPath,
+ "DNG Files (*.dng)");
+
+ if (!filename.isEmpty()) {
+ uint8_t *memory = mappedBuffers_[buffer]->data(0).data();
+ DNGWriter::write(filename.toStdString().c_str(), camera_.get(),
+ rawStream_->configuration(), metadata, buffer,
+ memory);
+ }
+#endif
+
+ {
+ QMutexLocker locker(&mutex_);
+ freeBuffers_[rawStream_].enqueue(buffer);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Request Completion Handling
+ */
+
+void MainWindow::requestComplete(Request *request)
+{
+ if (request->status() == Request::RequestCancelled)
+ return;
+
+ /*
+ * We're running in the libcamera thread context, expensive operations
+ * are not allowed. Add the buffer to the done queue and post a
+ * CaptureEvent for the application thread to handle.
+ */
+ {
+ QMutexLocker locker(&mutex_);
+ doneQueue_.enqueue(request);
+ }
+
+ QCoreApplication::postEvent(this, new CaptureEvent);
+}
+
+void MainWindow::processCapture()
+{
+ /*
+ * Retrieve the next buffer from the done queue. The queue may be empty
+ * if stopCapture() has been called while a CaptureEvent was posted but
+ * not processed yet. Return immediately in that case.
+ */
+ Request *request;
+ {
+ QMutexLocker locker(&mutex_);
+ if (doneQueue_.isEmpty())
+ return;
+
+ request = doneQueue_.dequeue();
+ }
+
+ /* Process buffers. */
+ if (request->buffers().count(vfStream_))
+ processViewfinder(request->buffers().at(vfStream_));
+
+ if (request->buffers().count(rawStream_))
+ processRaw(request->buffers().at(rawStream_), request->metadata());
+
+ request->reuse();
+ QMutexLocker locker(&mutex_);
+ freeQueue_.enqueue(request);
+}
+
+void MainWindow::processViewfinder(FrameBuffer *buffer)
+{
+ framesCaptured_++;
+
+ const FrameMetadata &metadata = buffer->metadata();
+
+ double fps = metadata.timestamp - lastBufferTime_;
+ fps = lastBufferTime_ && fps ? 1000000000.0 / fps : 0.0;
+ lastBufferTime_ = metadata.timestamp;
+
+ QStringList bytesused;
+ for (const FrameMetadata::Plane &plane : metadata.planes())
+ bytesused << QString::number(plane.bytesused);
+
+ qDebug().noquote()
+ << QString("seq: %1").arg(metadata.sequence, 6, 10, QLatin1Char('0'))
+ << "bytesused: {" << bytesused.join(", ")
+ << "} timestamp:" << metadata.timestamp
+ << "fps:" << Qt::fixed << qSetRealNumberPrecision(2) << fps;
+
+ /* Render the frame on the viewfinder. */
+ viewfinder_->render(buffer, mappedBuffers_[buffer].get());
+}
+
+void MainWindow::renderComplete(FrameBuffer *buffer)
+{
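+	/*
+	 * The viewfinder is done with this buffer: recycle it by attaching
+	 * it to a free request and queuing that request again.
+	 */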
+ Request *request;
+ {
+ QMutexLocker locker(&mutex_);
+ if (freeQueue_.isEmpty())
+ return;
+
+ request = freeQueue_.dequeue();
+ }
+
+ request->addBuffer(vfStream_, buffer);
+
+ if (captureRaw_) {
+ FrameBuffer *rawBuffer = nullptr;
+
+ {
+ QMutexLocker locker(&mutex_);
+ if (!freeBuffers_[rawStream_].isEmpty())
+ rawBuffer = freeBuffers_[rawStream_].dequeue();
+ }
+
+ if (rawBuffer) {
+ request->addBuffer(rawStream_, rawBuffer);
+ captureRaw_ = false;
+ } else {
+ qWarning() << "No free buffer available for RAW capture";
+ }
+ }
+ queueRequest(request);
+}
+
+int MainWindow::queueRequest(Request *request)
+{
+ return camera_->queueRequest(request);
+}
diff --git a/src/apps/qcam/main_window.h b/src/apps/qcam/main_window.h
new file mode 100644
index 00000000..81fcf915
--- /dev/null
+++ b/src/apps/qcam/main_window.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - Main application window
+ */
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/camera.h>
+#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/framebuffer_allocator.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include <QElapsedTimer>
+#include <QIcon>
+#include <QMainWindow>
+#include <QMutex>
+#include <QObject>
+#include <QPushButton>
+#include <QQueue>
+#include <QTimer>
+
+#include "../common/stream_options.h"
+
+#include "viewfinder.h"
+
+class QAction;
+
+class CameraSelectorDialog;
+class Image;
+class HotplugEvent;
+
+enum {
+ OptCamera = 'c',
+ OptHelp = 'h',
+ OptRenderer = 'r',
+ OptStream = 's',
+ OptVerbose = 'v',
+};
+
+class MainWindow : public QMainWindow
+{
+ Q_OBJECT
+
+public:
+ MainWindow(libcamera::CameraManager *cm,
+ const OptionsParser::Options &options);
+ ~MainWindow();
+
+ bool event(QEvent *e) override;
+
+private Q_SLOTS:
+ void quit();
+ void updateTitle();
+
+ void switchCamera();
+ void toggleCapture(bool start);
+
+ void saveImageAs();
+ void captureRaw();
+ void processRaw(libcamera::FrameBuffer *buffer,
+ const libcamera::ControlList &metadata);
+
+ void renderComplete(libcamera::FrameBuffer *buffer);
+
+private:
+ int createToolbars();
+
+ std::shared_ptr<libcamera::Camera> chooseCamera();
+ int openCamera();
+
+ int startCapture();
+ void stopCapture();
+
+ void addCamera(std::shared_ptr<libcamera::Camera> camera);
+ void removeCamera(std::shared_ptr<libcamera::Camera> camera);
+
+ int queueRequest(libcamera::Request *request);
+ void requestComplete(libcamera::Request *request);
+ void processCapture();
+ void processHotplug(HotplugEvent *e);
+ void processViewfinder(libcamera::FrameBuffer *buffer);
+
+ /* UI elements */
+ QToolBar *toolbar_;
+ QAction *startStopAction_;
+ QPushButton *cameraSelectButton_;
+ QAction *saveRaw_;
+ ViewFinder *viewfinder_;
+
+ QIcon iconPlay_;
+ QIcon iconStop_;
+
+ QString title_;
+ QTimer titleTimer_;
+
+ CameraSelectorDialog *cameraSelectorDialog_;
+
+ /* Options */
+ const OptionsParser::Options &options_;
+
+ /* Camera manager, camera, configuration and buffers */
+ libcamera::CameraManager *cm_;
+ std::shared_ptr<libcamera::Camera> camera_;
+ libcamera::FrameBufferAllocator *allocator_;
+
+ std::unique_ptr<libcamera::CameraConfiguration> config_;
+ std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>> mappedBuffers_;
+
+ /* Capture state, buffers queue and statistics */
+ bool isCapturing_;
+ bool captureRaw_;
+ libcamera::Stream *vfStream_;
+ libcamera::Stream *rawStream_;
+ std::map<const libcamera::Stream *, QQueue<libcamera::FrameBuffer *>> freeBuffers_;
+ QQueue<libcamera::Request *> doneQueue_;
+ QQueue<libcamera::Request *> freeQueue_;
+ QMutex mutex_; /* Protects freeBuffers_, doneQueue_, and freeQueue_ */
+
+ uint64_t lastBufferTime_;
+ QElapsedTimer frameRateInterval_;
+ uint32_t previousFrames_;
+ uint32_t framesCaptured_;
+
+ std::vector<std::unique_ptr<libcamera::Request>> requests_;
+};
diff --git a/src/apps/qcam/meson.build b/src/apps/qcam/meson.build
new file mode 100644
index 00000000..f7c14064
--- /dev/null
+++ b/src/apps/qcam/meson.build
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: CC0-1.0
+
+qt6 = import('qt6')
+qt6_dep = dependency('qt6',
+ method : 'pkg-config',
+ modules : ['Core', 'Gui', 'OpenGL', 'OpenGLWidgets', 'Widgets'],
+ required : get_option('qcam'),
+ version : '>=6.2')
+
+if not qt6_dep.found()
+ qcam_enabled = false
+ subdir_done()
+endif
+
+qcam_enabled = true
+
+qcam_sources = files([
+ 'cam_select_dialog.cpp',
+ 'format_converter.cpp',
+ 'main.cpp',
+ 'main_window.cpp',
+ 'message_handler.cpp',
+ 'viewfinder_gl.cpp',
+ 'viewfinder_qt.cpp',
+])
+
+qcam_moc_headers = files([
+ 'cam_select_dialog.h',
+ 'main_window.h',
+ 'viewfinder_gl.h',
+ 'viewfinder_qt.h',
+])
+
+qcam_resources = files([
+ 'assets/feathericons/feathericons.qrc',
+ 'assets/shader/shaders.qrc',
+])
+
+qt6_cpp_args = [
+ apps_cpp_args,
+ '-DQT_NO_KEYWORDS',
+ '-Wno-extra-semi',
+]
+
+resources = qt6.preprocess(moc_headers : qcam_moc_headers,
+ qresources : qcam_resources,
+ dependencies : qt6_dep)
+
+qcam = executable('qcam', qcam_sources, resources,
+ install : true,
+ install_tag : 'bin',
+ link_with : apps_lib,
+ dependencies : [
+ libatomic,
+ libcamera_public,
+ libtiff,
+ qt6_dep,
+ ],
+ cpp_args : qt6_cpp_args)
diff --git a/src/apps/qcam/message_handler.cpp b/src/apps/qcam/message_handler.cpp
new file mode 100644
index 00000000..c89714a9
--- /dev/null
+++ b/src/apps/qcam/message_handler.cpp
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * qcam - Log message handling
+ */
+
+#include "message_handler.h"
+
+QtMessageHandler MessageHandler::handler_ = nullptr;
+bool MessageHandler::verbose_ = false;
+
+MessageHandler::MessageHandler(bool verbose)
+{
+ verbose_ = verbose;
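+	/* qInstallMessageHandler() returns the previous handler, kept for chaining. */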
+ handler_ = qInstallMessageHandler(&MessageHandler::handleMessage);
+}
+
+void MessageHandler::handleMessage(QtMsgType type,
+ const QMessageLogContext &context,
+ const QString &msg)
+{
+ if (type == QtDebugMsg && !verbose_)
+ return;
+
+ handler_(type, context, msg);
+}
diff --git a/src/apps/qcam/message_handler.h b/src/apps/qcam/message_handler.h
new file mode 100644
index 00000000..92ef74d1
--- /dev/null
+++ b/src/apps/qcam/message_handler.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * Log message handling
+ */
+
+#pragma once
+
+#include <QtGlobal>
+
+class MessageHandler
+{
+public:
+ MessageHandler(bool verbose);
+
+private:
+ static void handleMessage(QtMsgType type,
+ const QMessageLogContext &context,
+ const QString &msg);
+
+ static QtMessageHandler handler_;
+ static bool verbose_;
+};
diff --git a/src/apps/qcam/viewfinder.h b/src/apps/qcam/viewfinder.h
new file mode 100644
index 00000000..914f88ec
--- /dev/null
+++ b/src/apps/qcam/viewfinder.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - Viewfinder base class
+ */
+
+#pragma once
+
+#include <QImage>
+#include <QList>
+#include <QSize>
+
+#include <libcamera/color_space.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+
+class Image;
+
+class ViewFinder
+{
+public:
+ virtual ~ViewFinder() = default;
+
+ virtual const QList<libcamera::PixelFormat> &nativeFormats() const = 0;
+
+ virtual int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
+ unsigned int stride) = 0;
+ virtual void render(libcamera::FrameBuffer *buffer, Image *image) = 0;
+ virtual void stop() = 0;
+
+ virtual QImage getCurrentImage() = 0;
+};
diff --git a/src/apps/qcam/viewfinder_gl.cpp b/src/apps/qcam/viewfinder_gl.cpp
new file mode 100644
index 00000000..f31956ff
--- /dev/null
+++ b/src/apps/qcam/viewfinder_gl.cpp
@@ -0,0 +1,847 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Linaro
+ *
+ * OpenGL Viewfinder for rendering by OpenGL shader
+ */
+
+#include "viewfinder_gl.h"
+
+#include <array>
+
+#include <QByteArray>
+#include <QFile>
+#include <QImage>
+#include <QMatrix4x4>
+#include <QStringList>
+
+#include <libcamera/formats.h>
+
+#include "../common/image.h"
+
+static const QList<libcamera::PixelFormat> supportedFormats{
+ /* YUV - packed (single plane) */
+ libcamera::formats::UYVY,
+ libcamera::formats::VYUY,
+ libcamera::formats::YUYV,
+ libcamera::formats::YVYU,
+ /* YUV - semi planar (two planes) */
+ libcamera::formats::NV12,
+ libcamera::formats::NV21,
+ libcamera::formats::NV16,
+ libcamera::formats::NV61,
+ libcamera::formats::NV24,
+ libcamera::formats::NV42,
+ /* YUV - fully planar (three planes) */
+ libcamera::formats::YUV420,
+ libcamera::formats::YVU420,
+ /* RGB */
+ libcamera::formats::ABGR8888,
+ libcamera::formats::ARGB8888,
+ libcamera::formats::BGRA8888,
+ libcamera::formats::RGBA8888,
+ libcamera::formats::BGR888,
+ libcamera::formats::RGB888,
+ /* Raw Bayer 8-bit */
+ libcamera::formats::SBGGR8,
+ libcamera::formats::SGBRG8,
+ libcamera::formats::SGRBG8,
+ libcamera::formats::SRGGB8,
+ /* Raw Bayer 10-bit packed */
+ libcamera::formats::SBGGR10_CSI2P,
+ libcamera::formats::SGBRG10_CSI2P,
+ libcamera::formats::SGRBG10_CSI2P,
+ libcamera::formats::SRGGB10_CSI2P,
+ /* Raw Bayer 12-bit packed */
+ libcamera::formats::SBGGR12_CSI2P,
+ libcamera::formats::SGBRG12_CSI2P,
+ libcamera::formats::SGRBG12_CSI2P,
+ libcamera::formats::SRGGB12_CSI2P,
+};
+
+ViewFinderGL::ViewFinderGL(QWidget *parent)
+ : QOpenGLWidget(parent), buffer_(nullptr),
+ colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
+ vertexBuffer_(QOpenGLBuffer::VertexBuffer)
+{
+}
+
+ViewFinderGL::~ViewFinderGL()
+{
+ removeShader();
+}
+
+const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
+{
+ return supportedFormats;
+}
+
+int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
+ unsigned int stride)
+{
+ if (format != format_ || colorSpace != colorSpace_) {
+ /*
+		 * If the fragment shader already exists, remove it and create a new
+ * one for the new format.
+ */
+ if (shaderProgram_.isLinked()) {
+ shaderProgram_.release();
+ shaderProgram_.removeShader(fragmentShader_.get());
+ fragmentShader_.reset();
+ }
+
+ if (!selectFormat(format))
+ return -1;
+
+ selectColorSpace(colorSpace);
+
+ format_ = format;
+ colorSpace_ = colorSpace;
+ }
+
+ size_ = size;
+ stride_ = stride;
+
+ updateGeometry();
+ return 0;
+}
+
+void ViewFinderGL::stop()
+{
+ if (buffer_) {
+ renderComplete(buffer_);
+ buffer_ = nullptr;
+ image_ = nullptr;
+ }
+}
+
+QImage ViewFinderGL::getCurrentImage()
+{
+ QMutexLocker locker(&mutex_);
+
+ return grabFramebuffer();
+}
+
+void ViewFinderGL::render(libcamera::FrameBuffer *buffer, Image *image)
+{
+ if (buffer_)
+ renderComplete(buffer_);
+
+ image_ = image;
+ update();
+ buffer_ = buffer;
+}
+
+bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
+{
+ bool ret = true;
+
+ /* Set min/mag filters to GL_LINEAR by default. */
+ textureMinMagFilters_ = GL_LINEAR;
+
+ /* Use identity.vert as the default vertex shader. */
+ vertexShaderFile_ = ":identity.vert";
+
+ fragmentShaderDefines_.clear();
+
+ switch (format) {
+ case libcamera::formats::NV12:
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_UV");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::NV21:
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_VU");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::NV16:
+ horzSubSample_ = 2;
+ vertSubSample_ = 1;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_UV");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::NV61:
+ horzSubSample_ = 2;
+ vertSubSample_ = 1;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_VU");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::NV24:
+ horzSubSample_ = 1;
+ vertSubSample_ = 1;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_UV");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::NV42:
+ horzSubSample_ = 1;
+ vertSubSample_ = 1;
+ fragmentShaderDefines_.append("#define YUV_PATTERN_VU");
+ fragmentShaderFile_ = ":YUV_2_planes.frag";
+ break;
+ case libcamera::formats::YUV420:
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ fragmentShaderFile_ = ":YUV_3_planes.frag";
+ break;
+ case libcamera::formats::YVU420:
+ horzSubSample_ = 2;
+ vertSubSample_ = 2;
+ fragmentShaderFile_ = ":YUV_3_planes.frag";
+ break;
+ case libcamera::formats::UYVY:
+ fragmentShaderDefines_.append("#define YUV_PATTERN_UYVY");
+ fragmentShaderFile_ = ":YUV_packed.frag";
+ break;
+ case libcamera::formats::VYUY:
+ fragmentShaderDefines_.append("#define YUV_PATTERN_VYUY");
+ fragmentShaderFile_ = ":YUV_packed.frag";
+ break;
+ case libcamera::formats::YUYV:
+ fragmentShaderDefines_.append("#define YUV_PATTERN_YUYV");
+ fragmentShaderFile_ = ":YUV_packed.frag";
+ break;
+ case libcamera::formats::YVYU:
+ fragmentShaderDefines_.append("#define YUV_PATTERN_YVYU");
+ fragmentShaderFile_ = ":YUV_packed.frag";
+ break;
+ case libcamera::formats::ABGR8888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN rgb");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
+ case libcamera::formats::ARGB8888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN bgr");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
+ case libcamera::formats::BGRA8888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN gba");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
+ case libcamera::formats::RGBA8888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN abg");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
+ case libcamera::formats::BGR888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN rgb");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
+ case libcamera::formats::RGB888:
+ fragmentShaderDefines_.append("#define RGB_PATTERN bgr");
+ fragmentShaderFile_ = ":RGB.frag";
+ break;
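+
+	/*
+	 * For the raw Bayer formats below, firstRed_ stores the (x, y)
+	 * position of the red sample within the 2x2 CFA tile, which the
+	 * bayer shaders need to decode the pattern.
+	 */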
+ case libcamera::formats::SBGGR8:
+ firstRed_.setX(1.0);
+ firstRed_.setY(1.0);
+ vertexShaderFile_ = ":bayer_8.vert";
+ fragmentShaderFile_ = ":bayer_8.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGBRG8:
+ firstRed_.setX(0.0);
+ firstRed_.setY(1.0);
+ vertexShaderFile_ = ":bayer_8.vert";
+ fragmentShaderFile_ = ":bayer_8.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGRBG8:
+ firstRed_.setX(1.0);
+ firstRed_.setY(0.0);
+ vertexShaderFile_ = ":bayer_8.vert";
+ fragmentShaderFile_ = ":bayer_8.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SRGGB8:
+ firstRed_.setX(0.0);
+ firstRed_.setY(0.0);
+ vertexShaderFile_ = ":bayer_8.vert";
+ fragmentShaderFile_ = ":bayer_8.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SBGGR10_CSI2P:
+ firstRed_.setX(1.0);
+ firstRed_.setY(1.0);
+ fragmentShaderDefines_.append("#define RAW10P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGBRG10_CSI2P:
+ firstRed_.setX(0.0);
+ firstRed_.setY(1.0);
+ fragmentShaderDefines_.append("#define RAW10P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGRBG10_CSI2P:
+ firstRed_.setX(1.0);
+ firstRed_.setY(0.0);
+ fragmentShaderDefines_.append("#define RAW10P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SRGGB10_CSI2P:
+ firstRed_.setX(0.0);
+ firstRed_.setY(0.0);
+ fragmentShaderDefines_.append("#define RAW10P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SBGGR12_CSI2P:
+ firstRed_.setX(1.0);
+ firstRed_.setY(1.0);
+ fragmentShaderDefines_.append("#define RAW12P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGBRG12_CSI2P:
+ firstRed_.setX(0.0);
+ firstRed_.setY(1.0);
+ fragmentShaderDefines_.append("#define RAW12P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SGRBG12_CSI2P:
+ firstRed_.setX(1.0);
+ firstRed_.setY(0.0);
+ fragmentShaderDefines_.append("#define RAW12P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ case libcamera::formats::SRGGB12_CSI2P:
+ firstRed_.setX(0.0);
+ firstRed_.setY(0.0);
+ fragmentShaderDefines_.append("#define RAW12P");
+ fragmentShaderFile_ = ":bayer_1x_packed.frag";
+ textureMinMagFilters_ = GL_NEAREST;
+ break;
+ default:
+ ret = false;
+ qWarning() << "[ViewFinderGL]:"
+ << "format not supported.";
+ break;
+	}
+
+ return ret;
+}
+
+void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
+{
+ std::array<double, 9> yuv2rgb;
+
+ /* OpenGL stores arrays in column-major order. */
+ switch (colorSpace.ycbcrEncoding) {
+ case libcamera::ColorSpace::YcbcrEncoding::None:
+ default:
+ yuv2rgb = {
+ 1.0000, 0.0000, 0.0000,
+ 0.0000, 1.0000, 0.0000,
+ 0.0000, 0.0000, 1.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec601:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.3441, 1.7720,
+ 1.4020, -0.7141, 0.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec709:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.1873, 1.8856,
+ 1.5748, -0.4681, 0.0000,
+ };
+ break;
+
+ case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
+ yuv2rgb = {
+ 1.0000, 1.0000, 1.0000,
+ 0.0000, -0.1646, 1.8814,
+ 1.4746, -0.5714, 0.0000,
+ };
+ break;
+ }
+
+ double offset;
+
+ switch (colorSpace.range) {
+ case libcamera::ColorSpace::Range::Full:
+ default:
+ offset = 0.0;
+ break;
+
+ case libcamera::ColorSpace::Range::Limited:
+ offset = 16.0;
+
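+		/*
+		 * Limited range maps Y to [16, 235] and chroma to [16, 240]
+		 * (8-bit), hence the rescaling: e.g. for Rec.601 the V-to-R
+		 * term becomes 1.4020 * 255 / 224 ≈ 1.596, matching the
+		 * integer coefficients used in format_converter.cpp.
+		 */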
+ for (unsigned int i = 0; i < 3; ++i)
+ yuv2rgb[i] *= 255.0 / 219.0;
+		for (unsigned int i = 3; i < 9; ++i)
+ yuv2rgb[i] *= 255.0 / 224.0;
+ break;
+ }
+
+ QStringList matrix;
+
+ for (double coeff : yuv2rgb)
+ matrix.append(QString::number(coeff, 'f'));
+
+ fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
+ fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
+ .arg(offset, 0, 'f', 1));
+}
+
+bool ViewFinderGL::createVertexShader()
+{
+ /* Create Vertex Shader */
+ vertexShader_ = std::make_unique<QOpenGLShader>(QOpenGLShader::Vertex, this);
+
+ /* Compile the vertex shader */
+ if (!vertexShader_->compileSourceFile(vertexShaderFile_)) {
+ qWarning() << "[ViewFinderGL]:" << vertexShader_->log();
+ return false;
+ }
+
+ shaderProgram_.addShader(vertexShader_.get());
+ return true;
+}
+
+bool ViewFinderGL::createFragmentShader()
+{
+ int attributeVertex;
+ int attributeTexture;
+
+ /*
+ * Create the fragment shader, compile it, and add it to the shader
+ * program. The #define macros stored in fragmentShaderDefines_, if
+ * any, are prepended to the source code.
+ */
+ fragmentShader_ = std::make_unique<QOpenGLShader>(QOpenGLShader::Fragment, this);
+
+ QFile file(fragmentShaderFile_);
+ if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
+ qWarning() << "Shader" << fragmentShaderFile_ << "not found";
+ return false;
+ }
+
+ QString defines = fragmentShaderDefines_.join('\n') + "\n";
+ QByteArray src = file.readAll();
+ src.prepend(defines.toUtf8());
+
+ if (!fragmentShader_->compileSourceCode(src)) {
+ qWarning() << "[ViewFinderGL]:" << fragmentShader_->log();
+ return false;
+ }
+
+ shaderProgram_.addShader(fragmentShader_.get());
+
+ /* Link shader pipeline */
+ if (!shaderProgram_.link()) {
+ qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
+ close();
+ }
+
+ attributeVertex = shaderProgram_.attributeLocation("vertexIn");
+ attributeTexture = shaderProgram_.attributeLocation("textureIn");
+
+ vertexBuffer_.bind();
+
+ shaderProgram_.enableAttributeArray(attributeVertex);
+ shaderProgram_.setAttributeBuffer(attributeVertex,
+ GL_FLOAT,
+ 0,
+ 2,
+ 2 * sizeof(GLfloat));
+
+ shaderProgram_.enableAttributeArray(attributeTexture);
+ shaderProgram_.setAttributeBuffer(attributeTexture,
+ GL_FLOAT,
+ 8 * sizeof(GLfloat),
+ 2,
+ 2 * sizeof(GLfloat));
+
+ vertexBuffer_.release();
+
+ projMatrixUniform_ = shaderProgram_.uniformLocation("proj_matrix");
+ textureUniformY_ = shaderProgram_.uniformLocation("tex_y");
+ textureUniformU_ = shaderProgram_.uniformLocation("tex_u");
+ textureUniformV_ = shaderProgram_.uniformLocation("tex_v");
+ textureUniformStep_ = shaderProgram_.uniformLocation("tex_step");
+ textureUniformSize_ = shaderProgram_.uniformLocation("tex_size");
+ textureUniformStrideFactor_ = shaderProgram_.uniformLocation("stride_factor");
+ textureUniformBayerFirstRed_ = shaderProgram_.uniformLocation("tex_bayer_first_red");
+
+ /* Create the textures. */
+ for (std::unique_ptr<QOpenGLTexture> &texture : textures_) {
+ if (texture)
+ continue;
+
+ texture = std::make_unique<QOpenGLTexture>(QOpenGLTexture::Target2D);
+ texture->create();
+ }
+
+ return true;
+}
+
+void ViewFinderGL::configureTexture(QOpenGLTexture &texture)
+{
+ glBindTexture(GL_TEXTURE_2D, texture.textureId());
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ textureMinMagFilters_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ textureMinMagFilters_);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+}
+
+void ViewFinderGL::removeShader()
+{
+ if (shaderProgram_.isLinked()) {
+ shaderProgram_.release();
+ shaderProgram_.removeAllShaders();
+ }
+}
+
+void ViewFinderGL::initializeGL()
+{
+ initializeOpenGLFunctions();
+ glEnable(GL_TEXTURE_2D);
+ glDisable(GL_DEPTH_TEST);
+
+ const GLfloat coordinates[2][4][2]{
+ {
+ /* Vertex coordinates */
+ { -1.0f, -1.0f },
+ { -1.0f, +1.0f },
+ { +1.0f, +1.0f },
+ { +1.0f, -1.0f },
+ },
+ {
+ /* Texture coordinates */
+ { 0.0f, 1.0f },
+ { 0.0f, 0.0f },
+ { 1.0f, 0.0f },
+ { 1.0f, 1.0f },
+ },
+ };
+
+ vertexBuffer_.create();
+ vertexBuffer_.bind();
+ vertexBuffer_.allocate(coordinates, sizeof(coordinates));
+
+ /* Create Vertex Shader */
+ if (!createVertexShader())
+ qWarning() << "[ViewFinderGL]: create vertex shader failed.";
+}
+
+void ViewFinderGL::doRender()
+{
+ /* Stride of the first plane, in pixels. */
+ unsigned int stridePixels;
+
+ switch (format_) {
+ case libcamera::formats::NV12:
+ case libcamera::formats::NV21:
+ case libcamera::formats::NV16:
+ case libcamera::formats::NV61:
+ case libcamera::formats::NV24:
+ case libcamera::formats::NV42:
+ /* Activate texture Y */
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_,
+ size_.height(),
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ /* Activate texture UV/VU */
+ glActiveTexture(GL_TEXTURE1);
+ configureTexture(*textures_[1]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ stride_ / horzSubSample_,
+ size_.height() / vertSubSample_,
+ 0,
+ GL_LUMINANCE_ALPHA,
+ GL_UNSIGNED_BYTE,
+ image_->data(1).data());
+ shaderProgram_.setUniformValue(textureUniformU_, 1);
+
+ stridePixels = stride_;
+ break;
+
+ case libcamera::formats::YUV420:
+ /* Activate texture Y */
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_,
+ size_.height(),
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ /* Activate texture U */
+ glActiveTexture(GL_TEXTURE1);
+ configureTexture(*textures_[1]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_ / horzSubSample_,
+ size_.height() / vertSubSample_,
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(1).data());
+ shaderProgram_.setUniformValue(textureUniformU_, 1);
+
+ /* Activate texture V */
+ glActiveTexture(GL_TEXTURE2);
+ configureTexture(*textures_[2]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_ / horzSubSample_,
+ size_.height() / vertSubSample_,
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(2).data());
+ shaderProgram_.setUniformValue(textureUniformV_, 2);
+
+ stridePixels = stride_;
+ break;
+
+ case libcamera::formats::YVU420:
+ /* Activate texture Y */
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_,
+ size_.height(),
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ /* Activate texture V */
+ glActiveTexture(GL_TEXTURE2);
+ configureTexture(*textures_[2]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_ / horzSubSample_,
+ size_.height() / vertSubSample_,
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(1).data());
+ shaderProgram_.setUniformValue(textureUniformV_, 2);
+
+ /* Activate texture U */
+ glActiveTexture(GL_TEXTURE1);
+ configureTexture(*textures_[1]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_ / horzSubSample_,
+ size_.height() / vertSubSample_,
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(2).data());
+ shaderProgram_.setUniformValue(textureUniformU_, 1);
+
+ stridePixels = stride_;
+ break;
+
+ case libcamera::formats::UYVY:
+ case libcamera::formats::VYUY:
+ case libcamera::formats::YUYV:
+ case libcamera::formats::YVYU:
+ /*
+		 * Packed YUV formats are stored in an RGBA texture to match the
+		 * OpenGL texel size with the 4-byte repeating pattern in YUV.
+		 * The texture width is thus half of the image width.
+ */
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ stride_ / 4,
+ size_.height(),
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ /*
+ * The shader needs the step between two texture pixels in the
+ * horizontal direction, expressed in texture coordinate units
+ * ([0, 1]). There are exactly width - 1 steps between the
+ * leftmost and rightmost texels.
+ */
+ shaderProgram_.setUniformValue(textureUniformStep_,
+ 1.0f / (size_.width() / 2 - 1),
+ 1.0f /* not used */);
+
+ stridePixels = stride_ / 2;
+ break;
+
+ case libcamera::formats::ABGR8888:
+ case libcamera::formats::ARGB8888:
+ case libcamera::formats::BGRA8888:
+ case libcamera::formats::RGBA8888:
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGBA,
+ stride_ / 4,
+ size_.height(),
+ 0,
+ GL_RGBA,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ stridePixels = stride_ / 4;
+ break;
+
+ case libcamera::formats::BGR888:
+ case libcamera::formats::RGB888:
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_RGB,
+ stride_ / 3,
+ size_.height(),
+ 0,
+ GL_RGB,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+
+ stridePixels = stride_ / 3;
+ break;
+
+ case libcamera::formats::SBGGR8:
+ case libcamera::formats::SGBRG8:
+ case libcamera::formats::SGRBG8:
+ case libcamera::formats::SRGGB8:
+ case libcamera::formats::SBGGR10_CSI2P:
+ case libcamera::formats::SGBRG10_CSI2P:
+ case libcamera::formats::SGRBG10_CSI2P:
+ case libcamera::formats::SRGGB10_CSI2P:
+ case libcamera::formats::SBGGR12_CSI2P:
+ case libcamera::formats::SGBRG12_CSI2P:
+ case libcamera::formats::SGRBG12_CSI2P:
+ case libcamera::formats::SRGGB12_CSI2P:
+ /*
+ * Raw Bayer 8-bit, and packed raw Bayer 10-bit/12-bit formats
+ * are stored in a GL_LUMINANCE texture. The texture width is
+ * equal to the stride.
+ */
+ glActiveTexture(GL_TEXTURE0);
+ configureTexture(*textures_[0]);
+ glTexImage2D(GL_TEXTURE_2D,
+ 0,
+ GL_LUMINANCE,
+ stride_,
+ size_.height(),
+ 0,
+ GL_LUMINANCE,
+ GL_UNSIGNED_BYTE,
+ image_->data(0).data());
+ shaderProgram_.setUniformValue(textureUniformY_, 0);
+ shaderProgram_.setUniformValue(textureUniformBayerFirstRed_,
+ firstRed_);
+ shaderProgram_.setUniformValue(textureUniformSize_,
+ size_.width(), /* in pixels */
+ size_.height());
+ shaderProgram_.setUniformValue(textureUniformStep_,
+ 1.0f / (stride_ - 1),
+ 1.0f / (size_.height() - 1));
+
+ /*
+ * The stride is already taken into account in the shaders, set
+ * the generic stride factor to 1.0.
+ */
+ stridePixels = size_.width();
+ break;
+
+ default:
+ stridePixels = size_.width();
+ break;
+	}
+
+ /*
+ * Compute the stride factor for the vertex shader, to map the
+ * horizontal texture coordinate range [0.0, 1.0] to the active portion
+ * of the image.
+ */
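+	/*
+	 * For example (illustrative values), an image 1920 pixels wide with
+	 * a 2048-pixel stride gives a factor of 1919.0 / 2047.0, so the
+	 * padding at the end of each row is never sampled.
+	 */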
+ shaderProgram_.setUniformValue(textureUniformStrideFactor_,
+ static_cast<float>(size_.width() - 1) /
+ (stridePixels - 1));
+
+ /*
+ * Place the viewfinder in the centre of the widget, preserving the
+ * aspect ratio of the image.
+ */
+ QMatrix4x4 projMatrix;
+ QSizeF scaledSize = size_.scaled(size(), Qt::KeepAspectRatio);
+ projMatrix.scale(scaledSize.width() / size().width(),
+ scaledSize.height() / size().height());
+
+ shaderProgram_.setUniformValue(projMatrixUniform_, projMatrix);
+}
+
+void ViewFinderGL::paintGL()
+{
+ if (!fragmentShader_) {
+ if (!createFragmentShader()) {
+ qWarning() << "[ViewFinderGL]:"
+				   << "failed to create fragment shader";
+ }
+ }
+
+ /* Bind shader pipeline for use. */
+ if (!shaderProgram_.bind()) {
+ qWarning() << "[ViewFinderGL]:" << shaderProgram_.log();
+ close();
+ }
+
+ if (!image_)
+ return;
+
+ glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+
+ doRender();
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
+}
+
+QSize ViewFinderGL::sizeHint() const
+{
+ return size_.isValid() ? size_ : QSize(640, 480);
+}
diff --git a/src/apps/qcam/viewfinder_gl.h b/src/apps/qcam/viewfinder_gl.h
new file mode 100644
index 00000000..23c657bc
--- /dev/null
+++ b/src/apps/qcam/viewfinder_gl.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Linaro
+ *
+ * OpenGL Viewfinder for rendering by OpenGL shader
+ */
+
+#pragma once
+
+#include <array>
+#include <memory>
+
+#include <QImage>
+#include <QMutex>
+#include <QOpenGLBuffer>
+#include <QOpenGLFunctions>
+#include <QOpenGLShader>
+#include <QOpenGLShaderProgram>
+#include <QOpenGLTexture>
+#include <QOpenGLWidget>
+#include <QSize>
+
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+
+#include "viewfinder.h"
+
+class ViewFinderGL : public QOpenGLWidget,
+ public ViewFinder,
+ protected QOpenGLFunctions
+{
+ Q_OBJECT
+
+public:
+ ViewFinderGL(QWidget *parent = nullptr);
+ ~ViewFinderGL();
+
+ const QList<libcamera::PixelFormat> &nativeFormats() const override;
+
+ int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
+ unsigned int stride) override;
+ void render(libcamera::FrameBuffer *buffer, Image *image) override;
+ void stop() override;
+
+ QImage getCurrentImage() override;
+
+Q_SIGNALS:
+ void renderComplete(libcamera::FrameBuffer *buffer);
+
+protected:
+ void initializeGL() override;
+ void paintGL() override;
+ QSize sizeHint() const override;
+
+private:
+ bool selectFormat(const libcamera::PixelFormat &format);
+ void selectColorSpace(const libcamera::ColorSpace &colorSpace);
+
+ void configureTexture(QOpenGLTexture &texture);
+ bool createFragmentShader();
+ bool createVertexShader();
+ void removeShader();
+ void doRender();
+
+ /* Captured image size, format and buffer */
+ libcamera::FrameBuffer *buffer_;
+ libcamera::PixelFormat format_;
+ libcamera::ColorSpace colorSpace_;
+ QSize size_;
+ unsigned int stride_;
+ Image *image_;
+
+ /* Shaders */
+ QOpenGLShaderProgram shaderProgram_;
+ std::unique_ptr<QOpenGLShader> vertexShader_;
+ std::unique_ptr<QOpenGLShader> fragmentShader_;
+ QString vertexShaderFile_;
+ QString fragmentShaderFile_;
+ QStringList fragmentShaderDefines_;
+
+ /* Vertex buffer */
+ QOpenGLBuffer vertexBuffer_;
+
+ /* Textures */
+ std::array<std::unique_ptr<QOpenGLTexture>, 3> textures_;
+
+ /* Common texture parameters */
+ GLuint textureMinMagFilters_;
+ GLuint projMatrixUniform_;
+
+ /* YUV texture parameters */
+ GLuint textureUniformU_;
+ GLuint textureUniformV_;
+ GLuint textureUniformY_;
+ GLuint textureUniformStep_;
+ unsigned int horzSubSample_;
+ unsigned int vertSubSample_;
+
+ /* Raw Bayer texture parameters */
+ GLuint textureUniformSize_;
+ GLuint textureUniformStrideFactor_;
+ GLuint textureUniformBayerFirstRed_;
+ QPointF firstRed_;
+
+ QMutex mutex_; /* Prevent concurrent access to image_ */
+};
diff --git a/src/apps/qcam/viewfinder_qt.cpp b/src/apps/qcam/viewfinder_qt.cpp
new file mode 100644
index 00000000..1a238922
--- /dev/null
+++ b/src/apps/qcam/viewfinder_qt.cpp
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - QPainter-based ViewFinder
+ */
+
+#include "viewfinder_qt.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <utility>
+
+#include <libcamera/formats.h>
+
+#include <QImage>
+#include <QImageWriter>
+#include <QMap>
+#include <QMutexLocker>
+#include <QPainter>
+#include <QResizeEvent>
+#include <QtDebug>
+
+#include "../common/image.h"
+
+#include "format_converter.h"
+
+static const QMap<libcamera::PixelFormat, QImage::Format> nativeFormats
+{
+ { libcamera::formats::ABGR8888, QImage::Format_RGBX8888 },
+ { libcamera::formats::XBGR8888, QImage::Format_RGBX8888 },
+ { libcamera::formats::ARGB8888, QImage::Format_RGB32 },
+ { libcamera::formats::XRGB8888, QImage::Format_RGB32 },
+ { libcamera::formats::RGB888, QImage::Format_BGR888 },
+ { libcamera::formats::BGR888, QImage::Format_RGB888 },
+ { libcamera::formats::RGB565, QImage::Format_RGB16 },
+};
+
+ViewFinderQt::ViewFinderQt(QWidget *parent)
+ : QWidget(parent), place_(rect()), buffer_(nullptr)
+{
+ icon_ = QIcon(":camera-off.svg");
+
+ QPalette pal = palette();
+ pal.setColor(QPalette::Window, Qt::black);
+ setPalette(pal);
+}
+
+ViewFinderQt::~ViewFinderQt()
+{
+}
+
+const QList<libcamera::PixelFormat> &ViewFinderQt::nativeFormats() const
+{
+ static const QList<libcamera::PixelFormat> formats = ::nativeFormats.keys();
+ return formats;
+}
+
+int ViewFinderQt::setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
+ unsigned int stride)
+{
+ image_ = QImage();
+
+ /*
+ * If format conversion is needed, configure the converter and allocate
+ * the destination image.
+ */
+ if (!::nativeFormats.contains(format)) {
+ int ret = converter_.configure(format, size, stride);
+ if (ret < 0)
+ return ret;
+
+ image_ = QImage(size, QImage::Format_RGB32);
+
+ qInfo() << "Using software format conversion from"
+ << format.toString().c_str();
+ } else {
+ qInfo() << "Zero-copy enabled";
+ }
+
+ format_ = format;
+ size_ = size;
+
+ updateGeometry();
+ return 0;
+}
+
+void ViewFinderQt::render(libcamera::FrameBuffer *buffer, Image *image)
+{
+ size_t size = buffer->metadata().planes()[0].bytesused;
+
+ {
+ QMutexLocker locker(&mutex_);
+
+ if (::nativeFormats.contains(format_)) {
+ /*
+ * If the frame format is identical to the display
+ * format, create a QImage that references the frame
+ * and store a reference to the frame buffer. The
+ * previously stored frame buffer, if any, will be
+ * released.
+ *
+ * \todo Get the stride from the buffer instead of
+ * computing it naively
+ */
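+			/*
+			 * E.g. (illustrative) a 640x480 XRGB8888 frame with
+			 * bytesused = 1228800 gives 1228800 / 480 = 2560
+			 * bytes per line, which is only correct when rows
+			 * carry no padding.
+			 */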
+ assert(buffer->planes().size() == 1);
+ image_ = QImage(image->data(0).data(), size_.width(),
+ size_.height(), size / size_.height(),
+ ::nativeFormats[format_]);
+ std::swap(buffer, buffer_);
+ } else {
+ /*
+ * Otherwise, convert the format and release the frame
+ * buffer immediately.
+ */
+ converter_.convert(image, size, &image_);
+ }
+ }
+
+	/*
+	 * Indicate that the widget paints all its pixels, so Qt can skip
+	 * erasing it before each paint event.
+	 */
+ setAttribute(Qt::WA_OpaquePaintEvent, true);
+ update();
+
+ if (buffer)
+ renderComplete(buffer);
+}
+
+void ViewFinderQt::stop()
+{
+ image_ = QImage();
+
+ if (buffer_) {
+ renderComplete(buffer_);
+ buffer_ = nullptr;
+ }
+
+	/*
+	 * The logo has a transparent background, so re-enable erasing the
+	 * widget before painting.
+	 */
+ setAttribute(Qt::WA_OpaquePaintEvent, false);
+ update();
+}
+
+QImage ViewFinderQt::getCurrentImage()
+{
+ QMutexLocker locker(&mutex_);
+
+ return image_.copy();
+}
+
+void ViewFinderQt::paintEvent(QPaintEvent *)
+{
+ QPainter painter(this);
+
+ painter.setBrush(palette().window());
+
+ /* If we have an image, draw it, with black letterbox rectangles. */
+ if (!image_.isNull()) {
+ if (place_.width() < width()) {
+ QRect rect{ 0, 0, (width() - place_.width()) / 2, height() };
+ painter.drawRect(rect);
+ rect.moveLeft(place_.right());
+ painter.drawRect(rect);
+ } else {
+ QRect rect{ 0, 0, width(), (height() - place_.height()) / 2 };
+ painter.drawRect(rect);
+ rect.moveTop(place_.bottom());
+ painter.drawRect(rect);
+ }
+
+ painter.drawImage(place_, image_, image_.rect());
+ return;
+ }
+
+ /*
+ * Otherwise, draw the camera stopped icon. Render it to the pixmap if
+ * the size has changed.
+ */
+ constexpr int margin = 20;
+
+ if (vfSize_ != size() || pixmap_.isNull()) {
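+		/*
+		 * Scale a 1x1 unit size to the largest square that fits in
+		 * the widget minus the margins; the icon pixmap is rendered
+		 * at that size.
+		 */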
+ QSize vfSize = size() - QSize{ 2 * margin, 2 * margin };
+ QSize pixmapSize{ 1, 1 };
+ pixmapSize.scale(vfSize, Qt::KeepAspectRatio);
+ pixmap_ = icon_.pixmap(pixmapSize);
+
+ vfSize_ = size();
+ }
+
+ QPoint point{ margin, margin };
+ if (pixmap_.width() < width() - 2 * margin)
+ point.setX((width() - pixmap_.width()) / 2);
+ else
+ point.setY((height() - pixmap_.height()) / 2);
+
+ painter.drawPixmap(point, pixmap_);
+}
+
+QSize ViewFinderQt::sizeHint() const
+{
+ return size_.isValid() ? size_ : QSize(640, 480);
+}
+
+void ViewFinderQt::resizeEvent(QResizeEvent *event)
+{
+ if (!size_.isValid())
+ return;
+
+ place_.setSize(size_.scaled(event->size(), Qt::KeepAspectRatio));
+ place_.moveCenter(rect().center());
+
+ QWidget::resizeEvent(event);
+}
diff --git a/src/apps/qcam/viewfinder_qt.h b/src/apps/qcam/viewfinder_qt.h
new file mode 100644
index 00000000..50fde88e
--- /dev/null
+++ b/src/apps/qcam/viewfinder_qt.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * qcam - QPainter-based ViewFinder
+ */
+
+#pragma once
+
+#include <QIcon>
+#include <QImage>
+#include <QList>
+#include <QMutex>
+#include <QSize>
+#include <QWidget>
+
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/pixel_format.h>
+
+#include "format_converter.h"
+#include "viewfinder.h"
+
+class ViewFinderQt : public QWidget, public ViewFinder
+{
+ Q_OBJECT
+
+public:
+ ViewFinderQt(QWidget *parent);
+ ~ViewFinderQt();
+
+ const QList<libcamera::PixelFormat> &nativeFormats() const override;
+
+ int setFormat(const libcamera::PixelFormat &format, const QSize &size,
+ const libcamera::ColorSpace &colorSpace,
+ unsigned int stride) override;
+ void render(libcamera::FrameBuffer *buffer, Image *image) override;
+ void stop() override;
+
+ QImage getCurrentImage() override;
+
+Q_SIGNALS:
+ void renderComplete(libcamera::FrameBuffer *buffer);
+
+protected:
+ void paintEvent(QPaintEvent *) override;
+ void resizeEvent(QResizeEvent *) override;
+ QSize sizeHint() const override;
+
+private:
+ FormatConverter converter_;
+
+ libcamera::PixelFormat format_;
+ QSize size_;
+ QRect place_;
+
+ /* Camera stopped icon */
+ QSize vfSize_;
+ QIcon icon_;
+ QPixmap pixmap_;
+
+ /* Buffer and render image */
+ libcamera::FrameBuffer *buffer_;
+ QImage image_;
+ QMutex mutex_; /* Prevent concurrent access to image_ */
+};
diff --git a/src/cam/buffer_writer.cpp b/src/cam/buffer_writer.cpp
deleted file mode 100644
index c5a5eb46..00000000
--- a/src/cam/buffer_writer.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * buffer_writer.cpp - Buffer writer
- */
-
-#include <fcntl.h>
-#include <iomanip>
-#include <iostream>
-#include <sstream>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "buffer_writer.h"
-
-using namespace libcamera;
-
-BufferWriter::BufferWriter(const std::string &pattern)
- : pattern_(pattern)
-{
-}
-
-BufferWriter::~BufferWriter()
-{
- for (auto &iter : mappedBuffers_) {
- void *memory = iter.second.first;
- unsigned int length = iter.second.second;
- munmap(memory, length);
- }
- mappedBuffers_.clear();
-}
-
-void BufferWriter::mapBuffer(FrameBuffer *buffer)
-{
- for (const FrameBuffer::Plane &plane : buffer->planes()) {
- void *memory = mmap(NULL, plane.length, PROT_READ, MAP_SHARED,
- plane.fd.fd(), 0);
-
- mappedBuffers_[plane.fd.fd()] =
- std::make_pair(memory, plane.length);
- }
-}
-
-int BufferWriter::write(FrameBuffer *buffer, const std::string &streamName)
-{
- std::string filename;
- size_t pos;
- int fd, ret = 0;
-
- filename = pattern_;
- pos = filename.find_first_of('#');
- if (pos != std::string::npos) {
- std::stringstream ss;
- ss << streamName << "-" << std::setw(6)
- << std::setfill('0') << buffer->metadata().sequence;
- filename.replace(pos, 1, ss.str());
- }
-
- fd = open(filename.c_str(), O_CREAT | O_WRONLY |
- (pos == std::string::npos ? O_APPEND : O_TRUNC),
- S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
- if (fd == -1)
- return -errno;
-
- for (const FrameBuffer::Plane &plane : buffer->planes()) {
- void *data = mappedBuffers_[plane.fd.fd()].first;
- unsigned int length = plane.length;
-
- ret = ::write(fd, data, length);
- if (ret < 0) {
- ret = -errno;
- std::cerr << "write error: " << strerror(-ret)
- << std::endl;
- break;
- } else if (ret != (int)length) {
- std::cerr << "write error: only " << ret
- << " bytes written instead of "
- << length << std::endl;
- break;
- }
- }
-
- close(fd);
-
- return ret;
-}
diff --git a/src/cam/buffer_writer.h b/src/cam/buffer_writer.h
deleted file mode 100644
index 8c9b2436..00000000
--- a/src/cam/buffer_writer.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * buffer_writer.h - Buffer writer
- */
-#ifndef __LIBCAMERA_BUFFER_WRITER_H__
-#define __LIBCAMERA_BUFFER_WRITER_H__
-
-#include <map>
-#include <string>
-
-#include <libcamera/buffer.h>
-
-class BufferWriter
-{
-public:
- BufferWriter(const std::string &pattern = "frame-#.bin");
- ~BufferWriter();
-
- void mapBuffer(libcamera::FrameBuffer *buffer);
-
- int write(libcamera::FrameBuffer *buffer,
- const std::string &streamName);
-
-private:
- std::string pattern_;
- std::map<int, std::pair<void *, unsigned int>> mappedBuffers_;
-};
-
-#endif /* __LIBCAMERA_BUFFER_WRITER_H__ */
diff --git a/src/cam/capture.cpp b/src/cam/capture.cpp
deleted file mode 100644
index 55fa2dab..00000000
--- a/src/cam/capture.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * capture.cpp - Cam capture
- */
-
-#include <chrono>
-#include <iomanip>
-#include <iostream>
-#include <limits.h>
-#include <sstream>
-
-#include "capture.h"
-#include "main.h"
-
-using namespace libcamera;
-
-Capture::Capture(std::shared_ptr<Camera> camera, CameraConfiguration *config)
- : camera_(camera), config_(config), writer_(nullptr)
-{
-}
-
-int Capture::run(EventLoop *loop, const OptionsParser::Options &options)
-{
- int ret;
-
- if (!camera_) {
- std::cout << "Can't capture without a camera" << std::endl;
- return -ENODEV;
- }
-
- ret = camera_->configure(config_);
- if (ret < 0) {
- std::cout << "Failed to configure camera" << std::endl;
- return ret;
- }
-
- streamName_.clear();
- for (unsigned int index = 0; index < config_->size(); ++index) {
- StreamConfiguration &cfg = config_->at(index);
- streamName_[cfg.stream()] = "stream" + std::to_string(index);
- }
-
- camera_->requestCompleted.connect(this, &Capture::requestComplete);
-
- if (options.isSet(OptFile)) {
- if (!options[OptFile].toString().empty())
- writer_ = new BufferWriter(options[OptFile]);
- else
- writer_ = new BufferWriter();
- }
-
-
- FrameBufferAllocator *allocator = new FrameBufferAllocator(camera_);
-
- ret = capture(loop, allocator);
-
- if (options.isSet(OptFile)) {
- delete writer_;
- writer_ = nullptr;
- }
-
- delete allocator;
-
- return ret;
-}
-
-int Capture::capture(EventLoop *loop, FrameBufferAllocator *allocator)
-{
- int ret;
-
- /* Identify the stream with the least number of buffers. */
- unsigned int nbuffers = UINT_MAX;
- for (StreamConfiguration &cfg : *config_) {
- ret = allocator->allocate(cfg.stream());
- if (ret < 0) {
- std::cerr << "Can't allocate buffers" << std::endl;
- return -ENOMEM;
- }
-
- unsigned int allocated = allocator->buffers(cfg.stream()).size();
- nbuffers = std::min(nbuffers, allocated);
- }
-
- /*
- * TODO: make cam tool smarter to support still capture by for
- * example pushing a button. For now run all streams all the time.
- */
-
- std::vector<Request *> requests;
- for (unsigned int i = 0; i < nbuffers; i++) {
- Request *request = camera_->createRequest();
- if (!request) {
- std::cerr << "Can't create request" << std::endl;
- return -ENOMEM;
- }
-
- for (StreamConfiguration &cfg : *config_) {
- Stream *stream = cfg.stream();
- const std::vector<std::unique_ptr<FrameBuffer>> &buffers =
- allocator->buffers(stream);
- const std::unique_ptr<FrameBuffer> &buffer = buffers[i];
-
- ret = request->addBuffer(stream, buffer.get());
- if (ret < 0) {
- std::cerr << "Can't set buffer for request"
- << std::endl;
- return ret;
- }
-
- if (writer_)
- writer_->mapBuffer(buffer.get());
- }
-
- requests.push_back(request);
- }
-
- ret = camera_->start();
- if (ret) {
- std::cout << "Failed to start capture" << std::endl;
- return ret;
- }
-
- for (Request *request : requests) {
- ret = camera_->queueRequest(request);
- if (ret < 0) {
- std::cerr << "Can't queue request" << std::endl;
- camera_->stop();
- return ret;
- }
- }
-
- std::cout << "Capture until user interrupts by SIGINT" << std::endl;
- ret = loop->exec();
- if (ret)
- std::cout << "Failed to run capture loop" << std::endl;
-
- ret = camera_->stop();
- if (ret)
- std::cout << "Failed to stop capture" << std::endl;
-
- return ret;
-}
-
-void Capture::requestComplete(Request *request)
-{
- if (request->status() == Request::RequestCancelled)
- return;
-
- const std::map<Stream *, FrameBuffer *> &buffers = request->buffers();
-
- std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now();
- double fps = std::chrono::duration_cast<std::chrono::milliseconds>(now - last_).count();
- fps = last_ != std::chrono::steady_clock::time_point() && fps
- ? 1000.0 / fps : 0.0;
- last_ = now;
-
- std::stringstream info;
- info << "fps: " << std::fixed << std::setprecision(2) << fps;
-
- for (auto it = buffers.begin(); it != buffers.end(); ++it) {
- Stream *stream = it->first;
- FrameBuffer *buffer = it->second;
- const std::string &name = streamName_[stream];
-
- const FrameMetadata &metadata = buffer->metadata();
-
- info << " " << name
- << " seq: " << std::setw(6) << std::setfill('0') << metadata.sequence
- << " bytesused: ";
-
- unsigned int nplane = 0;
- for (const FrameMetadata::Plane &plane : metadata.planes) {
- info << plane.bytesused;
- if (++nplane < metadata.planes.size())
- info << "/";
- }
-
- if (writer_)
- writer_->write(buffer, name);
- }
-
- std::cout << info.str() << std::endl;
-
- /*
- * Create a new request and populate it with one buffer for each
- * stream.
- */
- request = camera_->createRequest();
- if (!request) {
- std::cerr << "Can't create request" << std::endl;
- return;
- }
-
- for (auto it = buffers.begin(); it != buffers.end(); ++it) {
- Stream *stream = it->first;
- FrameBuffer *buffer = it->second;
-
- request->addBuffer(stream, buffer);
- }
-
- camera_->queueRequest(request);
-}
diff --git a/src/cam/capture.h b/src/cam/capture.h
deleted file mode 100644
index 9bca5661..00000000
--- a/src/cam/capture.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * capture.h - Cam capture
- */
-#ifndef __CAM_CAPTURE_H__
-#define __CAM_CAPTURE_H__
-
-#include <chrono>
-#include <memory>
-
-#include <libcamera/buffer.h>
-#include <libcamera/camera.h>
-#include <libcamera/framebuffer_allocator.h>
-#include <libcamera/request.h>
-#include <libcamera/stream.h>
-
-#include "buffer_writer.h"
-#include "event_loop.h"
-#include "options.h"
-
-class Capture
-{
-public:
- Capture(std::shared_ptr<libcamera::Camera> camera,
- libcamera::CameraConfiguration *config);
-
- int run(EventLoop *loop, const OptionsParser::Options &options);
-private:
- int capture(EventLoop *loop,
- libcamera::FrameBufferAllocator *allocator);
-
- void requestComplete(libcamera::Request *request);
-
- std::shared_ptr<libcamera::Camera> camera_;
- libcamera::CameraConfiguration *config_;
-
- std::map<libcamera::Stream *, std::string> streamName_;
- BufferWriter *writer_;
- std::chrono::steady_clock::time_point last_;
-};
-
-#endif /* __CAM_CAPTURE_H__ */
diff --git a/src/cam/event_loop.cpp b/src/cam/event_loop.cpp
deleted file mode 100644
index e8ab8617..00000000
--- a/src/cam/event_loop.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_loop.cpp - cam - Event loop
- */
-
-#include <libcamera/event_dispatcher.h>
-
-#include "event_loop.h"
-
-using namespace libcamera;
-
-EventLoop::EventLoop(EventDispatcher *dispatcher)
- : dispatcher_(dispatcher)
-{
-}
-
-EventLoop::~EventLoop()
-{
-}
-
-int EventLoop::exec()
-{
- exitCode_ = -1;
- exit_.store(false, std::memory_order_release);
-
- while (!exit_.load(std::memory_order_acquire))
- dispatcher_->processEvents();
-
- return exitCode_;
-}
-
-void EventLoop::exit(int code)
-{
- exitCode_ = code;
- exit_.store(true, std::memory_order_release);
- dispatcher_->interrupt();
-}
diff --git a/src/cam/event_loop.h b/src/cam/event_loop.h
deleted file mode 100644
index 581c7cba..00000000
--- a/src/cam/event_loop.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_loop.h - cam - Event loop
- */
-#ifndef __CAM_EVENT_LOOP_H__
-#define __CAM_EVENT_LOOP_H__
-
-#include <atomic>
-
-#include <libcamera/event_notifier.h>
-
-namespace libcamera {
-class EventDispatcher;
-}
-
-class EventLoop
-{
-public:
- EventLoop(libcamera::EventDispatcher *dispatcher);
- ~EventLoop();
-
- int exec();
- void exit(int code = 0);
-
-private:
- libcamera::EventDispatcher *dispatcher_;
-
- std::atomic<bool> exit_;
- int exitCode_;
-};
-
-#endif /* __CAM_EVENT_LOOP_H__ */
diff --git a/src/cam/main.cpp b/src/cam/main.cpp
deleted file mode 100644
index 718740f4..00000000
--- a/src/cam/main.cpp
+++ /dev/null
@@ -1,385 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * main.cpp - cam - The libcamera swiss army knife
- */
-
-#include <iomanip>
-#include <iostream>
-#include <signal.h>
-#include <string.h>
-
-#include <libcamera/libcamera.h>
-#include <libcamera/property_ids.h>
-
-#include "capture.h"
-#include "event_loop.h"
-#include "main.h"
-#include "options.h"
-
-using namespace libcamera;
-
-class CamApp
-{
-public:
- CamApp();
- ~CamApp();
-
- static CamApp *instance();
-
- int init(int argc, char **argv);
- void cleanup();
-
- int exec();
- void quit();
-
-private:
- int parseOptions(int argc, char *argv[]);
- int prepareConfig();
- int listProperties();
- int infoConfiguration();
- int run();
-
- static CamApp *app_;
- OptionsParser::Options options_;
- CameraManager *cm_;
- std::shared_ptr<Camera> camera_;
- std::unique_ptr<libcamera::CameraConfiguration> config_;
- EventLoop *loop_;
-};
-
-CamApp *CamApp::app_ = nullptr;
-
-CamApp::CamApp()
- : cm_(nullptr), camera_(nullptr), config_(nullptr), loop_(nullptr)
-{
- CamApp::app_ = this;
-}
-
-CamApp::~CamApp()
-{
- delete cm_;
-}
-
-CamApp *CamApp::instance()
-{
- return CamApp::app_;
-}
-
-int CamApp::init(int argc, char **argv)
-{
- int ret;
-
- ret = parseOptions(argc, argv);
- if (ret < 0)
- return ret;
-
- cm_ = new CameraManager();
-
- ret = cm_->start();
- if (ret) {
- std::cout << "Failed to start camera manager: "
- << strerror(-ret) << std::endl;
- return ret;
- }
-
- if (options_.isSet(OptCamera)) {
- const std::string &cameraName = options_[OptCamera];
- char *endptr;
- unsigned long index = strtoul(cameraName.c_str(), &endptr, 10);
- if (*endptr == '\0' && index > 0 && index <= cm_->cameras().size())
- camera_ = cm_->cameras()[index - 1];
- else
- camera_ = cm_->get(cameraName);
-
- if (!camera_) {
- std::cout << "Camera "
- << std::string(options_[OptCamera])
- << " not found" << std::endl;
- cm_->stop();
- return -ENODEV;
- }
-
- if (camera_->acquire()) {
- std::cout << "Failed to acquire camera" << std::endl;
- camera_.reset();
- cm_->stop();
- return -EINVAL;
- }
-
- std::cout << "Using camera " << camera_->name() << std::endl;
-
- ret = prepareConfig();
- if (ret)
- return ret;
- }
-
- loop_ = new EventLoop(cm_->eventDispatcher());
-
- return 0;
-}
-
-void CamApp::cleanup()
-{
- delete loop_;
- loop_ = nullptr;
-
- if (camera_) {
- camera_->release();
- camera_.reset();
- }
-
- config_.reset();
-
- cm_->stop();
-}
-
-int CamApp::exec()
-{
- int ret;
-
- ret = run();
- cleanup();
-
- return ret;
-}
-
-void CamApp::quit()
-{
- if (loop_)
- loop_->exit();
-}
-
-int CamApp::parseOptions(int argc, char *argv[])
-{
- KeyValueParser streamKeyValue;
- streamKeyValue.addOption("role", OptionString,
- "Role for the stream (viewfinder, video, still, stillraw)",
- ArgumentRequired);
- streamKeyValue.addOption("width", OptionInteger, "Width in pixels",
- ArgumentRequired);
- streamKeyValue.addOption("height", OptionInteger, "Height in pixels",
- ArgumentRequired);
- streamKeyValue.addOption("pixelformat", OptionInteger, "Pixel format",
- ArgumentRequired);
-
- OptionsParser parser;
- parser.addOption(OptCamera, OptionString,
- "Specify which camera to operate on, by name or by index", "camera",
- ArgumentRequired, "camera");
- parser.addOption(OptCapture, OptionNone,
- "Capture until interrupted by user", "capture");
- parser.addOption(OptFile, OptionString,
- "Write captured frames to disk\n"
- "The first '#' character in the file name is expanded to the stream name and frame sequence number.\n"
- "The default file name is 'frame-#.bin'.",
- "file", ArgumentOptional, "filename");
- parser.addOption(OptStream, &streamKeyValue,
- "Set configuration of a camera stream", "stream", true);
- parser.addOption(OptHelp, OptionNone, "Display this help message",
- "help");
- parser.addOption(OptInfo, OptionNone,
- "Display information about stream(s)", "info");
- parser.addOption(OptList, OptionNone, "List all cameras", "list");
- parser.addOption(OptProps, OptionNone, "List cameras properties",
- "list-properties");
-
- options_ = parser.parse(argc, argv);
- if (!options_.valid())
- return -EINVAL;
-
- if (options_.empty() || options_.isSet(OptHelp)) {
- parser.usage();
- return options_.empty() ? -EINVAL : -EINTR;
- }
-
- return 0;
-}
-
-int CamApp::prepareConfig()
-{
- StreamRoles roles;
-
- if (options_.isSet(OptStream)) {
- const std::vector<OptionValue> &streamOptions =
- options_[OptStream].toArray();
-
- /* Use roles and get a default configuration. */
- for (auto const &value : streamOptions) {
- KeyValueParser::Options opt = value.toKeyValues();
-
- std::string role = opt.isSet("role")
- ? opt["role"].toString()
- : "viewfinder";
-
- if (role == "viewfinder") {
- roles.push_back(StreamRole::Viewfinder);
- } else if (role == "video") {
- roles.push_back(StreamRole::VideoRecording);
- } else if (role == "still") {
- roles.push_back(StreamRole::StillCapture);
- } else if (role == "stillraw") {
- roles.push_back(StreamRole::StillCaptureRaw);
- } else {
- std::cerr << "Unknown stream role "
- << role << std::endl;
- return -EINVAL;
- }
- }
- } else {
- /* If no configuration is provided assume a single video stream. */
- roles.push_back(StreamRole::VideoRecording);
- }
-
- config_ = camera_->generateConfiguration(roles);
- if (!config_ || config_->size() != roles.size()) {
- std::cerr << "Failed to get default stream configuration"
- << std::endl;
- return -EINVAL;
- }
-
- /* Apply configuration if explicitly requested. */
- if (options_.isSet(OptStream)) {
- const std::vector<OptionValue> &streamOptions =
- options_[OptStream].toArray();
-
- unsigned int i = 0;
- for (auto const &value : streamOptions) {
- KeyValueParser::Options opt = value.toKeyValues();
- StreamConfiguration &cfg = config_->at(i++);
-
- if (opt.isSet("width"))
- cfg.size.width = opt["width"];
-
- if (opt.isSet("height"))
- cfg.size.height = opt["height"];
-
- /* TODO: Translate 4CC string to ID. */
- if (opt.isSet("pixelformat"))
- cfg.pixelFormat = PixelFormat(opt["pixelformat"]);
- }
- }
-
- switch (config_->validate()) {
- case CameraConfiguration::Valid:
- break;
- case CameraConfiguration::Adjusted:
- std::cout << "Camera configuration adjusted" << std::endl;
- break;
- case CameraConfiguration::Invalid:
- std::cout << "Camera configuration invalid" << std::endl;
- config_.reset();
- return -EINVAL;
- }
-
- return 0;
-}
-
-int CamApp::listProperties()
-{
- if (!camera_) {
- std::cout << "Cannot list properties without a camera"
- << std::endl;
- return -EINVAL;
- }
-
- for (const auto &prop : camera_->properties()) {
- const ControlId *id = properties::properties.at(prop.first);
- const ControlValue &value = prop.second;
-
- std::cout << "Property: " << id->name() << " = "
- << value.toString() << std::endl;
- }
-
- return 0;
-}
-
-int CamApp::infoConfiguration()
-{
- if (!config_) {
- std::cout << "Cannot print stream information without a camera"
- << std::endl;
- return -EINVAL;
- }
-
- unsigned int index = 0;
- for (const StreamConfiguration &cfg : *config_) {
- std::cout << index << ": " << cfg.toString() << std::endl;
-
- const StreamFormats &formats = cfg.formats();
- for (PixelFormat pixelformat : formats.pixelformats()) {
- std::cout << " * Pixelformat: "
- << pixelformat.toString() << " "
- << formats.range(pixelformat).toString()
- << std::endl;
-
- for (const Size &size : formats.sizes(pixelformat))
- std::cout << " - " << size.toString()
- << std::endl;
- }
-
- index++;
- }
-
- return 0;
-}
-
-int CamApp::run()
-{
- int ret;
-
- if (options_.isSet(OptList)) {
- std::cout << "Available cameras:" << std::endl;
-
- unsigned int index = 1;
- for (const std::shared_ptr<Camera> &cam : cm_->cameras()) {
- std::cout << index << ": " << cam->name() << std::endl;
- index++;
- }
- }
-
- if (options_.isSet(OptProps)) {
- ret = listProperties();
- if (ret)
- return ret;
- }
-
- if (options_.isSet(OptInfo)) {
- ret = infoConfiguration();
- if (ret)
- return ret;
- }
-
- if (options_.isSet(OptCapture)) {
- Capture capture(camera_, config_.get());
- return capture.run(loop_, options_);
- }
-
- return 0;
-}
-
-void signalHandler(int signal)
-{
- std::cout << "Exiting" << std::endl;
- CamApp::instance()->quit();
-}
-
-int main(int argc, char **argv)
-{
- CamApp app;
- int ret;
-
- ret = app.init(argc, argv);
- if (ret)
- return ret == -EINTR ? 0 : EXIT_FAILURE;
-
- struct sigaction sa = {};
- sa.sa_handler = &signalHandler;
- sigaction(SIGINT, &sa, nullptr);
-
- if (app.exec())
- return EXIT_FAILURE;
-
- return 0;
-}
diff --git a/src/cam/main.h b/src/cam/main.h
deleted file mode 100644
index afcad435..00000000
--- a/src/cam/main.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * main.h - Cam application
- */
-#ifndef __CAM_MAIN_H__
-#define __CAM_MAIN_H__
-
-enum {
- OptCamera = 'c',
- OptCapture = 'C',
- OptFile = 'F',
- OptHelp = 'h',
- OptInfo = 'I',
- OptList = 'l',
- OptProps = 'p',
- OptStream = 's',
-};
-
-#endif /* __CAM_MAIN_H__ */
diff --git a/src/cam/meson.build b/src/cam/meson.build
deleted file mode 100644
index 2419d648..00000000
--- a/src/cam/meson.build
+++ /dev/null
@@ -1,11 +0,0 @@
-cam_sources = files([
- 'buffer_writer.cpp',
- 'capture.cpp',
- 'event_loop.cpp',
- 'main.cpp',
- 'options.cpp',
-])
-
-cam = executable('cam', cam_sources,
- dependencies : [ libatomic, libcamera_dep ],
- install : true)
diff --git a/src/cam/options.cpp b/src/cam/options.cpp
deleted file mode 100644
index 2c56eacf..00000000
--- a/src/cam/options.cpp
+++ /dev/null
@@ -1,537 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * options.cpp - cam - Options parsing
- */
-
-#include <assert.h>
-#include <getopt.h>
-#include <iomanip>
-#include <iostream>
-#include <string.h>
-
-#include "options.h"
-
-/* -----------------------------------------------------------------------------
- * Option
- */
-
-const char *Option::typeName() const
-{
- switch (type) {
- case OptionNone:
- return "none";
-
- case OptionInteger:
- return "integer";
-
- case OptionString:
- return "string";
-
- case OptionKeyValue:
- return "key=value";
- }
-
- return "unknown";
-}
-
-/* -----------------------------------------------------------------------------
- * OptionBase<T>
- */
-
-template<typename T>
-bool OptionsBase<T>::empty() const
-{
- return values_.empty();
-}
-
-template<typename T>
-bool OptionsBase<T>::valid() const
-{
- return valid_;
-}
-
-template<typename T>
-bool OptionsBase<T>::isSet(const T &opt) const
-{
- return values_.find(opt) != values_.end();
-}
-
-template<typename T>
-const OptionValue &OptionsBase<T>::operator[](const T &opt) const
-{
- return values_.find(opt)->second;
-}
-
-template<typename T>
-bool OptionsBase<T>::parseValue(const T &opt, const Option &option,
- const char *optarg)
-{
- OptionValue value;
-
- switch (option.type) {
- case OptionNone:
- break;
-
- case OptionInteger:
- unsigned int integer;
-
- if (optarg) {
- char *endptr;
- integer = strtoul(optarg, &endptr, 0);
- if (*endptr != '\0')
- return false;
- } else {
- integer = 0;
- }
-
- value = OptionValue(integer);
- break;
-
- case OptionString:
- value = OptionValue(optarg ? optarg : "");
- break;
-
- case OptionKeyValue:
- KeyValueParser *kvParser = option.keyValueParser;
- KeyValueParser::Options keyValues = kvParser->parse(optarg);
- if (!keyValues.valid())
- return false;
-
- value = OptionValue(keyValues);
- break;
- }
-
- if (option.isArray)
- values_[opt].addValue(value);
- else
- values_[opt] = value;
-
- return true;
-}
-
-template class OptionsBase<int>;
-template class OptionsBase<std::string>;
-
-/* -----------------------------------------------------------------------------
- * KeyValueParser
- */
-
-bool KeyValueParser::addOption(const char *name, OptionType type,
- const char *help, OptionArgument argument)
-{
- if (!name)
- return false;
- if (!help || help[0] == '\0')
- return false;
- if (argument != ArgumentNone && type == OptionNone)
- return false;
-
- /* Reject duplicate options. */
- if (optionsMap_.find(name) != optionsMap_.end())
- return false;
-
- optionsMap_[name] = Option({ 0, type, name, argument, nullptr,
- help, nullptr, false });
- return true;
-}
-
-KeyValueParser::Options KeyValueParser::parse(const char *arguments)
-{
- Options options;
-
- for (const char *pair = arguments; *arguments != '\0'; pair = arguments) {
- const char *comma = strchrnul(arguments, ',');
- size_t len = comma - pair;
-
- /* Skip over the comma. */
- arguments = *comma == ',' ? comma + 1 : comma;
-
- /* Skip to the next pair if the pair is empty. */
- if (!len)
- continue;
-
- std::string key;
- std::string value;
-
- const char *separator = static_cast<const char *>(memchr(pair, '=', len));
- if (!separator) {
- key = std::string(pair, len);
- value = "";
- } else {
- key = std::string(pair, separator - pair);
- value = std::string(separator + 1, comma - separator - 1);
- }
-
- /* The key is mandatory, the value might be optional. */
- if (key.empty())
- continue;
-
- if (optionsMap_.find(key) == optionsMap_.end()) {
- std::cerr << "Invalid option " << key << std::endl;
- return options;
- }
-
- OptionArgument arg = optionsMap_[key].argument;
- if (value.empty() && arg == ArgumentRequired) {
- std::cerr << "Option " << key << " requires an argument"
- << std::endl;
- return options;
- } else if (!value.empty() && arg == ArgumentNone) {
- std::cerr << "Option " << key << " takes no argument"
- << std::endl;
- return options;
- }
-
- const Option &option = optionsMap_[key];
- if (!options.parseValue(key, option, value.c_str())) {
- std::cerr << "Failed to parse '" << value << "' as "
- << option.typeName() << " for option " << key
- << std::endl;
- return options;
- }
- }
-
- options.valid_ = true;
- return options;
-}
-
-void KeyValueParser::usage(int indent)
-{
- unsigned int space = 0;
-
- for (auto const &iter : optionsMap_) {
- const Option &option = iter.second;
- unsigned int length = 14;
- if (option.argument != ArgumentNone)
- length += 1 + strlen(option.typeName());
- if (option.argument == ArgumentOptional)
- length += 2;
-
- if (length > space)
- space = length;
- }
-
- space = (space + 7) / 8 * 8;
-
- for (auto const &iter : optionsMap_) {
- const Option &option = iter.second;
- std::string argument = option.name;
-
- if (option.argument != ArgumentNone) {
- if (option.argument == ArgumentOptional)
- argument += "[=";
- else
- argument += "=";
- argument += option.typeName();
- if (option.argument == ArgumentOptional)
- argument += "]";
- }
-
- std::cerr << std::setw(indent) << std::right << " "
- << std::setw(space) << std::left << argument;
-
- for (const char *help = option.help, *end = help; end;) {
- end = strchr(help, '\n');
- if (end) {
- std::cerr << std::string(help, end - help + 1);
- std::cerr << std::setw(indent + space) << " ";
- help = end + 1;
- } else {
- std::cerr << help << std::endl;
- }
- }
- }
-}
-
-/* -----------------------------------------------------------------------------
- * OptionValue
- */
-
-OptionValue::OptionValue()
- : type_(ValueNone), integer_(0)
-{
-}
-
-OptionValue::OptionValue(int value)
- : type_(ValueInteger), integer_(value)
-{
-}
-
-OptionValue::OptionValue(const char *value)
- : type_(ValueString), integer_(0), string_(value)
-{
-}
-
-OptionValue::OptionValue(const std::string &value)
- : type_(ValueString), integer_(0), string_(value)
-{
-}
-
-OptionValue::OptionValue(const KeyValueParser::Options &value)
- : type_(ValueKeyValue), integer_(0), keyValues_(value)
-{
-}
-
-void OptionValue::addValue(const OptionValue &value)
-{
- assert(type_ == ValueNone || type_ == ValueArray);
-
- type_ = ValueArray;
- array_.push_back(value);
-}
-
-OptionValue::operator int() const
-{
- return toInteger();
-}
-
-OptionValue::operator std::string() const
-{
- return toString();
-}
-
-OptionValue::operator KeyValueParser::Options() const
-{
- return toKeyValues();
-}
-
-OptionValue::operator std::vector<OptionValue>() const
-{
- return toArray();
-}
-
-int OptionValue::toInteger() const
-{
- if (type_ != ValueInteger)
- return 0;
-
- return integer_;
-}
-
-std::string OptionValue::toString() const
-{
- if (type_ != ValueString)
- return std::string();
-
- return string_;
-}
-
-KeyValueParser::Options OptionValue::toKeyValues() const
-{
- if (type_ != ValueKeyValue)
- return KeyValueParser::Options();
-
- return keyValues_;
-}
-
-std::vector<OptionValue> OptionValue::toArray() const
-{
- if (type_ != ValueArray)
- return std::vector<OptionValue>{};
-
- return array_;
-}
-
-/* -----------------------------------------------------------------------------
- * OptionsParser
- */
-
-bool OptionsParser::addOption(int opt, OptionType type, const char *help,
- const char *name, OptionArgument argument,
- const char *argumentName, bool array)
-{
- /*
- * Options must have at least a short or long name, and a text message.
- * If an argument is accepted, it must be described by argumentName.
- */
- if (!isalnum(opt) && !name)
- return false;
- if (!help || help[0] == '\0')
- return false;
- if (argument != ArgumentNone && !argumentName)
- return false;
-
- /* Reject duplicate options. */
- if (optionsMap_.find(opt) != optionsMap_.end())
- return false;
-
- options_.push_back(Option({ opt, type, name, argument, argumentName,
- help, nullptr, array }));
- optionsMap_[opt] = &options_.back();
- return true;
-}
-
-bool OptionsParser::addOption(int opt, KeyValueParser *parser, const char *help,
- const char *name, bool array)
-{
- if (!addOption(opt, OptionKeyValue, help, name, ArgumentRequired,
- "key=value[,key=value,...]", array))
- return false;
-
- options_.back().keyValueParser = parser;
- return true;
-}
-
-OptionsParser::Options OptionsParser::parse(int argc, char **argv)
-{
- OptionsParser::Options options;
-
- /*
- * Allocate short and long options arrays large enough to contain all
- * options.
- */
- char shortOptions[options_.size() * 3 + 2];
- struct option longOptions[options_.size() + 1];
- unsigned int ids = 0;
- unsigned int idl = 0;
-
- shortOptions[ids++] = ':';
-
- for (const Option &option : options_) {
- if (option.hasShortOption()) {
- shortOptions[ids++] = option.opt;
- if (option.argument != ArgumentNone)
- shortOptions[ids++] = ':';
- if (option.argument == ArgumentOptional)
- shortOptions[ids++] = ':';
- }
-
- if (option.hasLongOption()) {
- longOptions[idl].name = option.name;
-
- switch (option.argument) {
- case ArgumentNone:
- longOptions[idl].has_arg = no_argument;
- break;
- case ArgumentRequired:
- longOptions[idl].has_arg = required_argument;
- break;
- case ArgumentOptional:
- longOptions[idl].has_arg = optional_argument;
- break;
- }
-
- longOptions[idl].flag = 0;
- longOptions[idl].val = option.opt;
- idl++;
- }
- }
-
- shortOptions[ids] = '\0';
- memset(&longOptions[idl], 0, sizeof(longOptions[idl]));
-
- opterr = 0;
-
- while (true) {
- int c = getopt_long(argc, argv, shortOptions, longOptions, nullptr);
-
- if (c == -1)
- break;
-
- if (c == '?' || c == ':') {
- if (c == '?')
- std::cerr << "Invalid option ";
- else
- std::cerr << "Missing argument for option ";
- std::cerr << argv[optind - 1] << std::endl;
-
- usage();
- return options;
- }
-
- const Option &option = *optionsMap_[c];
- if (!options.parseValue(c, option, optarg)) {
- parseValueError(option);
- usage();
- return options;
- }
- }
-
- options.valid_ = true;
- return options;
-}
-
-void OptionsParser::usage()
-{
- std::cerr << "Options:" << std::endl;
-
- unsigned int indent = 0;
-
- for (const Option &option : options_) {
- unsigned int length = 14;
- if (option.hasLongOption())
- length += 2 + strlen(option.name);
- if (option.argument != ArgumentNone)
- length += 1 + strlen(option.argumentName);
- if (option.argument == ArgumentOptional)
- length += 2;
- if (option.isArray)
- length += 4;
-
- if (length > indent)
- indent = length;
- }
-
- indent = (indent + 7) / 8 * 8;
-
- for (const Option &option : options_) {
- std::string argument;
- if (option.hasShortOption())
- argument = std::string(" -")
- + static_cast<char>(option.opt);
- else
- argument = " ";
-
- if (option.hasLongOption()) {
- if (option.hasShortOption())
- argument += ", ";
- else
- argument += " ";
- argument += std::string("--") + option.name;
- }
-
- if (option.argument != ArgumentNone) {
- if (option.argument == ArgumentOptional)
- argument += "[=";
- else
- argument += " ";
- argument += option.argumentName;
- if (option.argument == ArgumentOptional)
- argument += "]";
- }
-
- if (option.isArray)
- argument += " ...";
-
- std::cerr << std::setw(indent) << std::left << argument;
-
- for (const char *help = option.help, *end = help; end; ) {
- end = strchr(help, '\n');
- if (end) {
- std::cerr << std::string(help, end - help + 1);
- std::cerr << std::setw(indent) << " ";
- help = end + 1;
- } else {
- std::cerr << help << std::endl;
- }
- }
-
- if (option.keyValueParser)
- option.keyValueParser->usage(indent);
- }
-}
-
-void OptionsParser::parseValueError(const Option &option)
-{
- std::string optionName;
-
- if (option.name)
- optionName = "--" + std::string(option.name);
- else
- optionName = "-" + std::string(1, option.opt);
-
- std::cerr << "Can't parse " << option.typeName()
- << " argument for option " << optionName << std::endl;
-}
diff --git a/src/cam/options.h b/src/cam/options.h
deleted file mode 100644
index 5e346b47..00000000
--- a/src/cam/options.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * options.h - cam - Options parsing
- */
-#ifndef __CAM_OPTIONS_H__
-#define __CAM_OPTIONS_H__
-
-#include <ctype.h>
-#include <list>
-#include <map>
-#include <vector>
-
-class KeyValueParser;
-class OptionValue;
-
-enum OptionArgument {
- ArgumentNone,
- ArgumentRequired,
- ArgumentOptional,
-};
-
-enum OptionType {
- OptionNone,
- OptionInteger,
- OptionString,
- OptionKeyValue,
-};
-
-struct Option {
- int opt;
- OptionType type;
- const char *name;
- OptionArgument argument;
- const char *argumentName;
- const char *help;
- KeyValueParser *keyValueParser;
- bool isArray;
-
- bool hasShortOption() const { return isalnum(opt); }
- bool hasLongOption() const { return name != nullptr; }
- const char *typeName() const;
-};
-
-template<typename T>
-class OptionsBase
-{
-public:
- OptionsBase() : valid_(false) {}
-
- bool empty() const;
- bool valid() const;
- bool isSet(const T &opt) const;
- const OptionValue &operator[](const T &opt) const;
-
-private:
- friend class KeyValueParser;
- friend class OptionsParser;
-
- bool parseValue(const T &opt, const Option &option, const char *value);
-
- std::map<T, OptionValue> values_;
- bool valid_;
-};
-
-class KeyValueParser
-{
-public:
- class Options : public OptionsBase<std::string>
- {
- };
-
- bool addOption(const char *name, OptionType type, const char *help,
- OptionArgument argument = ArgumentNone);
-
- Options parse(const char *arguments);
- void usage(int indent);
-
-private:
- std::map<std::string, Option> optionsMap_;
-};
-
-class OptionValue
-{
-public:
- enum ValueType {
- ValueNone,
- ValueInteger,
- ValueString,
- ValueKeyValue,
- ValueArray,
- };
-
- OptionValue();
- OptionValue(int value);
- OptionValue(const char *value);
- OptionValue(const std::string &value);
- OptionValue(const KeyValueParser::Options &value);
-
- void addValue(const OptionValue &value);
-
- ValueType type() const { return type_; }
-
- operator int() const;
- operator std::string() const;
- operator KeyValueParser::Options() const;
- operator std::vector<OptionValue>() const;
-
- int toInteger() const;
- std::string toString() const;
- KeyValueParser::Options toKeyValues() const;
- std::vector<OptionValue> toArray() const;
-
-private:
- ValueType type_;
- int integer_;
- std::string string_;
- KeyValueParser::Options keyValues_;
- std::vector<OptionValue> array_;
-};
-
-class OptionsParser
-{
-public:
- class Options : public OptionsBase<int>
- {
- };
-
- bool addOption(int opt, OptionType type, const char *help,
- const char *name = nullptr,
- OptionArgument argument = ArgumentNone,
- const char *argumentName = nullptr, bool array = false);
- bool addOption(int opt, KeyValueParser *parser, const char *help,
- const char *name = nullptr, bool array = false);
-
- Options parse(int argc, char *argv[]);
- void usage();
-
-private:
- void parseValueError(const Option &option);
-
- std::list<Option> options_;
- std::map<unsigned int, Option *> optionsMap_;
-};
-
-#endif /* __CAM_OPTIONS_H__ */
diff --git a/src/gstreamer/gstlibcamera-controls.cpp.in b/src/gstreamer/gstlibcamera-controls.cpp.in
new file mode 100644
index 00000000..ace36b71
--- /dev/null
+++ b/src/gstreamer/gstlibcamera-controls.cpp.in
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Jaslo Ziska
+ *
+ * GStreamer Camera Controls
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include <vector>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+
+#include "gstlibcamera-controls.h"
+
+using namespace libcamera;
+
+static void value_set_rectangle(GValue *value, const Rectangle &rect)
+{
+ Point top_left = rect.topLeft();
+ Size size = rect.size();
+
+ GValue x = G_VALUE_INIT;
+ g_value_init(&x, G_TYPE_INT);
+ g_value_set_int(&x, top_left.x);
+ gst_value_array_append_and_take_value(value, &x);
+
+ GValue y = G_VALUE_INIT;
+ g_value_init(&y, G_TYPE_INT);
+ g_value_set_int(&y, top_left.y);
+ gst_value_array_append_and_take_value(value, &y);
+
+ GValue width = G_VALUE_INIT;
+ g_value_init(&width, G_TYPE_INT);
+ g_value_set_int(&width, size.width);
+ gst_value_array_append_and_take_value(value, &width);
+
+ GValue height = G_VALUE_INIT;
+ g_value_init(&height, G_TYPE_INT);
+	g_value_set_int(&height, size.height);
+ gst_value_array_append_and_take_value(value, &height);
+}
+
+static Rectangle value_get_rectangle(const GValue *value)
+{
+ const GValue *r;
+ r = gst_value_array_get_value(value, 0);
+ int x = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 1);
+ int y = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 2);
+ int w = g_value_get_int(r);
+ r = gst_value_array_get_value(value, 3);
+ int h = g_value_get_int(r);
+
+ return Rectangle(x, y, w, h);
+}
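+
+/*
+ * Rectangle values cross the property boundary as flat GstValueArrays of
+ * four integers in x, y, width, height order. From gst-launch-1.0 this
+ * looks like (assuming the camera exposes the ScalerCrop control):
+ *
+ *   libcamerasrc scaler-crop="<0, 0, 640, 480>"
+ */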
+
+{% for vendor, ctrls in controls %}
+{%- for ctrl in ctrls if ctrl.is_enum %}
+static const GEnumValue {{ ctrl.name|snake_case }}_types[] = {
+{%- for enum in ctrl.enum_values %}
+ {
+ controls::{{ ctrl.namespace }}{{ enum.name }},
+ {{ enum.description|format_description|indent_str('\t\t') }},
+ "{{ enum.gst_name }}"
+ },
+{%- endfor %}
+ {0, NULL, NULL}
+};
+
+#define TYPE_{{ ctrl.name|snake_case|upper }} \
+ ({{ ctrl.name|snake_case }}_get_type())
+static GType {{ ctrl.name|snake_case }}_get_type()
+{
+ static GType {{ ctrl.name|snake_case }}_type = 0;
+
+ if (!{{ ctrl.name|snake_case }}_type)
+ {{ ctrl.name|snake_case }}_type =
+ g_enum_register_static("{{ ctrl.name }}",
+ {{ ctrl.name|snake_case }}_types);
+
+ return {{ ctrl.name|snake_case }}_type;
+}
+{% endfor %}
+{%- endfor %}
+
+void GstCameraControls::installProperties(GObjectClass *klass, int lastPropId)
+{
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+{%- set spec %}
+{%- if ctrl.is_rectangle -%}
+gst_param_spec_array(
+{%- else -%}
+g_param_spec_{{ ctrl.gtype }}(
+{%- endif -%}
+{%- if ctrl.is_array %}
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}-value",
+ "{{ ctrl.name }} Value",
+ "One {{ ctrl.name }} element value",
+{%- else %}
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}",
+ "{{ ctrl.name }}",
+ {{ ctrl.description|format_description|indent_str('\t') }},
+{%- endif %}
+{%- if ctrl.is_enum %}
+ TYPE_{{ ctrl.name|snake_case|upper }},
+ {{ ctrl.default }},
+{%- elif ctrl.is_rectangle %}
+ g_param_spec_int(
+ "rectangle-value",
+ "Rectangle Value",
+ "One rectangle value, either x, y, width or height.",
+ {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }},
+ (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+ ),
+{%- elif ctrl.gtype == 'boolean' %}
+ {{ ctrl.default }},
+{%- elif ctrl.gtype in ['float', 'int', 'int64', 'uchar'] %}
+ {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }},
+{%- endif %}
+ (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+)
+{%- endset %}
+
+ g_object_class_install_property(
+ klass,
+ lastPropId + controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }},
+{%- if ctrl.is_array %}
+ gst_param_spec_array(
+ "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}",
+ "{{ ctrl.name }}",
+ {{ ctrl.description|format_description|indent_str('\t\t\t') }},
+ {{ spec|indent_str('\t\t\t') }},
+ (GParamFlags) (GST_PARAM_CONTROLLABLE |
+ G_PARAM_READWRITE |
+ G_PARAM_STATIC_STRINGS)
+ )
+{%- else %}
+ {{ spec|indent_str('\t\t') }}
+{%- endif %}
+ );
+{%- endfor %}
+{%- endfor %}
+}
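+
+/*
+ * For a scalar boolean control such as AeEnable, the loop above expands to
+ * roughly the following (illustrative, not verbatim generator output):
+ *
+ *   g_object_class_install_property(
+ *           klass, lastPropId + controls::AE_ENABLE,
+ *           g_param_spec_boolean("ae-enable", "AeEnable", "...", TRUE,
+ *                                (GParamFlags)(GST_PARAM_CONTROLLABLE |
+ *                                              G_PARAM_READWRITE |
+ *                                              G_PARAM_STATIC_STRINGS)));
+ */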
+
+bool GstCameraControls::getProperty(guint propId, GValue *value,
+ [[maybe_unused]] GParamSpec *pspec)
+{
+ if (!controls_acc_.contains(propId)) {
+ GST_WARNING("Control '%s' is not available, default value will "
+ "be returned",
+ controls::controls.at(propId)->name().c_str());
+ return true;
+ }
+ const ControlValue &cv = controls_acc_.get(propId);
+
+ switch (propId) {
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+ case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: {
+ auto control = cv.get<{{ ctrl.type }}>();
+
+{%- if ctrl.is_array %}
+ for (size_t i = 0; i < control.size(); ++i) {
+ GValue element = G_VALUE_INIT;
+{%- if ctrl.is_rectangle %}
+			g_value_init(&element, GST_TYPE_ARRAY);
+ value_set_rectangle(&element, control[i]);
+{%- else %}
+ g_value_init(&element, G_TYPE_{{ ctrl.gtype|upper }});
+ g_value_set_{{ ctrl.gtype }}(&element, control[i]);
+{%- endif %}
+ gst_value_array_append_and_take_value(value, &element);
+ }
+{%- else %}
+{%- if ctrl.is_rectangle %}
+ value_set_rectangle(value, control);
+{%- else %}
+ g_value_set_{{ ctrl.gtype }}(value, control);
+{%- endif %}
+{%- endif %}
+
+ return true;
+ }
+{%- endfor %}
+{%- endfor %}
+
+ default:
+ return false;
+ }
+}
+
+bool GstCameraControls::setProperty(guint propId, const GValue *value,
+ [[maybe_unused]] GParamSpec *pspec)
+{
+ /*
+ * Check whether the camera capabilities are already available.
+ * They might not be available if the pipeline has not started yet.
+ */
+ if (!capabilities_.empty()) {
+ /* If so, check that the control is supported by the camera. */
+ const ControlId *cid = capabilities_.idmap().at(propId);
+ auto info = capabilities_.find(cid);
+
+ if (info == capabilities_.end()) {
+ GST_WARNING("Control '%s' is not supported by the "
+ "camera and will be ignored",
+ cid->name().c_str());
+ return true;
+ }
+ }
+
+ switch (propId) {
+{%- for vendor, ctrls in controls %}
+{%- for ctrl in ctrls %}
+
+ case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: {
+ ControlValue control;
+{%- if ctrl.is_array %}
+ size_t size = gst_value_array_get_size(value);
+{%- if ctrl.size != 0 %}
+ if (size != {{ ctrl.size }}) {
+ GST_ERROR("Incorrect array size for control "
+ "'{{ ctrl.name|kebab_case }}', must be of "
+ "size {{ ctrl.size }}");
+ return true;
+ }
+{%- endif %}
+
+ std::vector<{{ ctrl.element_type }}> values(size);
+ for (size_t i = 0; i < size; ++i) {
+ const GValue *element =
+ gst_value_array_get_value(value, i);
+{%- if ctrl.is_rectangle %}
+ if (gst_value_array_get_size(element) != 4) {
+ GST_ERROR("Rectangle in control "
+					  "'{{ ctrl.name|kebab_case }}' at "
+ "index %zu must be an array of size 4",
+ i);
+ return true;
+ }
+ values[i] = value_get_rectangle(element);
+{%- else %}
+ values[i] = g_value_get_{{ ctrl.gtype }}(element);
+{%- endif %}
+ }
+
+{%- if ctrl.size == 0 %}
+ control.set(Span<const {{ ctrl.element_type }}>(values.data(),
+ size));
+{%- else %}
+ control.set(Span<const {{ ctrl.element_type }},
+ {{ ctrl.size }}>(values.data(),
+ {{ ctrl.size }}));
+{%- endif %}
+{%- else %}
+{%- if ctrl.is_rectangle %}
+ if (gst_value_array_get_size(value) != 4) {
+ GST_ERROR("Rectangle in control "
+ "'{{ ctrl.name|kebab_case }}' must be an "
+ "array of size 4");
+ return true;
+ }
+ Rectangle val = value_get_rectangle(value);
+{%- else %}
+ auto val = g_value_get_{{ ctrl.gtype }}(value);
+{%- endif %}
+ control.set(val);
+{%- endif %}
+ controls_.set(propId, control);
+ controls_acc_.set(propId, control);
+ return true;
+ }
+{%- endfor %}
+{%- endfor %}
+
+ default:
+ return false;
+ }
+}
+
+void GstCameraControls::setCamera(const std::shared_ptr<libcamera::Camera> &cam)
+{
+ capabilities_ = cam->controls();
+
+ /*
+ * Check the controls which were set before the camera capabilities were
+ * known. This is required because GStreamer may set properties before
+ * the pipeline has started and thus before the camera was known.
+ */
+ ControlList new_controls;
+ for (auto control = controls_acc_.begin();
+ control != controls_acc_.end();
+ ++control) {
+ unsigned int id = control->first;
+ ControlValue value = control->second;
+
+ const ControlId *cid = capabilities_.idmap().at(id);
+ auto info = capabilities_.find(cid);
+
+ /* Only add controls which are supported. */
+ if (info != capabilities_.end())
+ new_controls.set(id, value);
+ else
+ GST_WARNING("Control '%s' is not supported by the "
+ "camera and will be ignored",
+ cid->name().c_str());
+ }
+
+ controls_acc_ = new_controls;
+ controls_ = new_controls;
+}
+
+void GstCameraControls::applyControls(std::unique_ptr<libcamera::Request> &request)
+{
+ request->controls().merge(controls_);
+ controls_.clear();
+}
+
+void GstCameraControls::readMetadata(libcamera::Request *request)
+{
+ controls_acc_.merge(request->metadata(),
+ ControlList::MergePolicy::OverwriteExisting);
+}
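
/*
 * A minimal usage sketch of the control lifecycle implemented above (the
 * `camera`, `propId`, `value` and `pspec` variables are hypothetical
 * stand-ins): properties may be set before the camera is known, are
 * re-validated once it is, and pending values are merged into each request.
 */
GstCameraControls controls;

controls.setProperty(propId, value, pspec);   /* accumulated in controls_acc_ */
controls.setCamera(camera);                   /* drops unsupported controls */

std::unique_ptr<libcamera::Request> request = camera->createRequest();
controls.applyControls(request);              /* merges and clears controls_ */
camera->queueRequest(request.get());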
diff --git a/src/gstreamer/gstlibcamera-controls.h b/src/gstreamer/gstlibcamera-controls.h
new file mode 100644
index 00000000..749220b5
--- /dev/null
+++ b/src/gstreamer/gstlibcamera-controls.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Collabora Ltd.
+ * Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+ *
+ * GStreamer Camera Controls
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/camera.h>
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "gstlibcamerasrc.h"
+
+namespace libcamera {
+
+class GstCameraControls
+{
+public:
+ static void installProperties(GObjectClass *klass, int lastProp);
+
+ bool getProperty(guint propId, GValue *value, GParamSpec *pspec);
+ bool setProperty(guint propId, const GValue *value, GParamSpec *pspec);
+
+ void setCamera(const std::shared_ptr<libcamera::Camera> &cam);
+
+ void applyControls(std::unique_ptr<libcamera::Request> &request);
+ void readMetadata(libcamera::Request *request);
+
+private:
+	/* Supported controls of the camera and their limits. */
+	ControlInfoMap capabilities_;
+	/* Set of user-modified controls. */
+	ControlList controls_;
+	/* Accumulator of all controls ever set and of metadata returned by the camera. */
+	ControlList controls_acc_;
+};
+
+} /* namespace libcamera */
diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp
index a3cb0746..a466b305 100644
--- a/src/gstreamer/gstlibcamera-utils.cpp
+++ b/src/gstreamer/gstlibcamera-utils.cpp
@@ -3,61 +3,311 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.c - GStreamer libcamera Utility Function
+ * GStreamer libcamera Utility Functions
*/
#include "gstlibcamera-utils.h"
-#include <linux/drm_fourcc.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
using namespace libcamera;
static struct {
GstVideoFormat gst_format;
- guint drm_fourcc;
+ PixelFormat format;
} format_map[] = {
- { GST_VIDEO_FORMAT_ENCODED, DRM_FORMAT_MJPEG },
- { GST_VIDEO_FORMAT_RGB, DRM_FORMAT_BGR888 },
- { GST_VIDEO_FORMAT_BGR, DRM_FORMAT_RGB888 },
- { GST_VIDEO_FORMAT_ARGB, DRM_FORMAT_BGRA8888 },
- { GST_VIDEO_FORMAT_NV12, DRM_FORMAT_NV12 },
- { GST_VIDEO_FORMAT_NV21, DRM_FORMAT_NV21 },
- { GST_VIDEO_FORMAT_NV16, DRM_FORMAT_NV16 },
- { GST_VIDEO_FORMAT_NV61, DRM_FORMAT_NV61 },
- { GST_VIDEO_FORMAT_NV24, DRM_FORMAT_NV24 },
- { GST_VIDEO_FORMAT_UYVY, DRM_FORMAT_UYVY },
- { GST_VIDEO_FORMAT_VYUY, DRM_FORMAT_VYUY },
- { GST_VIDEO_FORMAT_YUY2, DRM_FORMAT_YUYV },
- { GST_VIDEO_FORMAT_YVYU, DRM_FORMAT_YVYU },
+ /* Compressed */
+ { GST_VIDEO_FORMAT_ENCODED, formats::MJPEG },
+
+ /* Bayer formats, gstreamer only supports 8-bit */
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB8 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB10 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB12 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB14 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SBGGR16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGBRG16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SGRBG16 },
+ { GST_VIDEO_FORMAT_ENCODED, formats::SRGGB16 },
+
+ /* Monochrome */
+ { GST_VIDEO_FORMAT_GRAY8, formats::R8 },
+ { GST_VIDEO_FORMAT_GRAY16_LE, formats::R16 },
+
+ /* RGB16 */
+ { GST_VIDEO_FORMAT_RGB16, formats::RGB565 },
+
+ /* RGB24 */
+ { GST_VIDEO_FORMAT_RGB, formats::BGR888 },
+ { GST_VIDEO_FORMAT_BGR, formats::RGB888 },
+
+ /* RGB32 */
+ { GST_VIDEO_FORMAT_BGRx, formats::XRGB8888 },
+ { GST_VIDEO_FORMAT_RGBx, formats::XBGR8888 },
+ { GST_VIDEO_FORMAT_xBGR, formats::RGBX8888 },
+ { GST_VIDEO_FORMAT_xRGB, formats::BGRX8888 },
+ { GST_VIDEO_FORMAT_BGRA, formats::ARGB8888 },
+ { GST_VIDEO_FORMAT_RGBA, formats::ABGR8888 },
+ { GST_VIDEO_FORMAT_ABGR, formats::RGBA8888 },
+ { GST_VIDEO_FORMAT_ARGB, formats::BGRA8888 },
+
+ /* YUV Semiplanar */
+ { GST_VIDEO_FORMAT_NV12, formats::NV12 },
+ { GST_VIDEO_FORMAT_NV21, formats::NV21 },
+ { GST_VIDEO_FORMAT_NV16, formats::NV16 },
+ { GST_VIDEO_FORMAT_NV61, formats::NV61 },
+ { GST_VIDEO_FORMAT_NV24, formats::NV24 },
+
+ /* YUV Planar */
+ { GST_VIDEO_FORMAT_I420, formats::YUV420 },
+ { GST_VIDEO_FORMAT_YV12, formats::YVU420 },
+ { GST_VIDEO_FORMAT_Y42B, formats::YUV422 },
+
+ /* YUV Packed */
+ { GST_VIDEO_FORMAT_UYVY, formats::UYVY },
+ { GST_VIDEO_FORMAT_VYUY, formats::VYUY },
+ { GST_VIDEO_FORMAT_YUY2, formats::YUYV },
+ { GST_VIDEO_FORMAT_YVYU, formats::YVYU },
+
/* \todo NV42 is used in libcamera but is not mapped in GStreamer yet. */
};
+static GstVideoColorimetry
+colorimetry_from_colorspace(const ColorSpace &colorSpace, GstVideoTransferFunction transfer)
+{
+ GstVideoColorimetry colorimetry;
+
+ switch (colorSpace.primaries) {
+ case ColorSpace::Primaries::Raw:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
+ break;
+ case ColorSpace::Primaries::Smpte170m:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
+ break;
+ case ColorSpace::Primaries::Rec709:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
+ break;
+ case ColorSpace::Primaries::Rec2020:
+ colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT2020;
+ break;
+ }
+
+ switch (colorSpace.transferFunction) {
+ case ColorSpace::TransferFunction::Linear:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
+ break;
+ case ColorSpace::TransferFunction::Srgb:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_SRGB;
+ break;
+ case ColorSpace::TransferFunction::Rec709:
+ colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
+ if (transfer != GST_VIDEO_TRANSFER_UNKNOWN)
+ colorimetry.transfer = transfer;
+ break;
+ }
+
+ switch (colorSpace.ycbcrEncoding) {
+ case ColorSpace::YcbcrEncoding::None:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec601:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec709:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
+ break;
+ case ColorSpace::YcbcrEncoding::Rec2020:
+ colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT2020;
+ break;
+ }
+
+ switch (colorSpace.range) {
+ case ColorSpace::Range::Full:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
+ break;
+ case ColorSpace::Range::Limited:
+ colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
+ break;
+ }
+
+ return colorimetry;
+}
+
+static std::optional<ColorSpace>
+colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry,
+ GstVideoTransferFunction *transfer)
+{
+ std::optional<ColorSpace> colorspace = ColorSpace::Raw;
+
+ switch (colorimetry.primaries) {
+ case GST_VIDEO_COLOR_PRIMARIES_UNKNOWN:
+ /* Unknown primaries map to raw colorspace in gstreamer */
+ return ColorSpace::Raw;
+ case GST_VIDEO_COLOR_PRIMARIES_SMPTE170M:
+ colorspace->primaries = ColorSpace::Primaries::Smpte170m;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT709:
+ colorspace->primaries = ColorSpace::Primaries::Rec709;
+ break;
+ case GST_VIDEO_COLOR_PRIMARIES_BT2020:
+ colorspace->primaries = ColorSpace::Primaries::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry primaries %d not mapped in gstlibcamera",
+ colorimetry.primaries);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.transfer) {
+	/* Transfer function mappings inspired by the v4l2src plugin */
+ case GST_VIDEO_TRANSFER_GAMMA18:
+ case GST_VIDEO_TRANSFER_GAMMA20:
+ case GST_VIDEO_TRANSFER_GAMMA22:
+ case GST_VIDEO_TRANSFER_GAMMA28:
+ GST_WARNING("GAMMA 18, 20, 22, 28 transfer functions not supported");
+ [[fallthrough]];
+ case GST_VIDEO_TRANSFER_GAMMA10:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Linear;
+ break;
+ case GST_VIDEO_TRANSFER_SRGB:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Srgb;
+ break;
+#if GST_CHECK_VERSION(1, 18, 0)
+ case GST_VIDEO_TRANSFER_BT601:
+ case GST_VIDEO_TRANSFER_BT2020_10:
+#endif
+ case GST_VIDEO_TRANSFER_BT2020_12:
+ case GST_VIDEO_TRANSFER_BT709:
+ colorspace->transferFunction = ColorSpace::TransferFunction::Rec709;
+ *transfer = colorimetry.transfer;
+ break;
+ default:
+ GST_WARNING("Colorimetry transfer function %d not mapped in gstlibcamera",
+ colorimetry.transfer);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.matrix) {
+ case GST_VIDEO_COLOR_MATRIX_RGB:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ break;
+	/* FCC is nearly identical to BT601, differing only in coefficient precision */
+ case GST_VIDEO_COLOR_MATRIX_FCC:
+ case GST_VIDEO_COLOR_MATRIX_BT601:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec601;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT709:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec709;
+ break;
+ case GST_VIDEO_COLOR_MATRIX_BT2020:
+ colorspace->ycbcrEncoding = ColorSpace::YcbcrEncoding::Rec2020;
+ break;
+ default:
+ GST_WARNING("Colorimetry matrix %d not mapped in gstlibcamera",
+ colorimetry.matrix);
+ return std::nullopt;
+ }
+
+ switch (colorimetry.range) {
+ case GST_VIDEO_COLOR_RANGE_0_255:
+ colorspace->range = ColorSpace::Range::Full;
+ break;
+ case GST_VIDEO_COLOR_RANGE_16_235:
+ colorspace->range = ColorSpace::Range::Limited;
+ break;
+ default:
+ GST_WARNING("Colorimetry range %d not mapped in gstlibcamera",
+ colorimetry.range);
+ return std::nullopt;
+ }
+
+ return colorspace;
+}
+
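
/*
 * A short round-trip sketch for the two mappings above: parse a GStreamer
 * colorimetry string into a libcamera ColorSpace and convert it back.
 */
GstVideoColorimetry colorimetry;
GstVideoTransferFunction transfer = GST_VIDEO_TRANSFER_UNKNOWN;

if (gst_video_colorimetry_from_string(&colorimetry, "bt709")) {
	/* Rec709 primaries/encoding/transfer, limited range. */
	std::optional<ColorSpace> cs =
		colorspace_from_colorimetry(colorimetry, &transfer);
	if (cs)
		colorimetry = colorimetry_from_colorspace(*cs, transfer);
}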
static GstVideoFormat
-drm_to_gst_format(guint drm_fourcc)
+pixel_format_to_gst_format(const PixelFormat &format)
{
for (const auto &item : format_map) {
- if (item.drm_fourcc == drm_fourcc)
+ if (item.format == format)
return item.gst_format;
}
return GST_VIDEO_FORMAT_UNKNOWN;
}
-static guint
-gst_format_to_drm(GstVideoFormat gst_format)
+static PixelFormat
+gst_format_to_pixel_format(GstVideoFormat gst_format)
{
if (gst_format == GST_VIDEO_FORMAT_ENCODED)
- return DRM_FORMAT_INVALID;
+ return PixelFormat{};
for (const auto &item : format_map)
if (item.gst_format == gst_format)
- return item.drm_fourcc;
- return DRM_FORMAT_INVALID;
+ return item.format;
+ return PixelFormat{};
+}
+
+static const struct {
+ PixelFormat format;
+ const gchar *name;
+} bayer_map[]{
+ { formats::SBGGR8, "bggr" },
+ { formats::SGBRG8, "gbrg" },
+ { formats::SGRBG8, "grbg" },
+ { formats::SRGGB8, "rggb" },
+ { formats::SBGGR10, "bggr10le" },
+ { formats::SGBRG10, "gbrg10le" },
+ { formats::SGRBG10, "grbg10le" },
+ { formats::SRGGB10, "rggb10le" },
+ { formats::SBGGR12, "bggr12le" },
+ { formats::SGBRG12, "gbrg12le" },
+ { formats::SGRBG12, "grbg12le" },
+ { formats::SRGGB12, "rggb12le" },
+ { formats::SBGGR14, "bggr14le" },
+ { formats::SGBRG14, "gbrg14le" },
+ { formats::SGRBG14, "grbg14le" },
+ { formats::SRGGB14, "rggb14le" },
+ { formats::SBGGR16, "bggr16le" },
+ { formats::SGBRG16, "gbrg16le" },
+ { formats::SGRBG16, "grbg16le" },
+ { formats::SRGGB16, "rggb16le" },
+};
+
+static const gchar *
+bayer_format_to_string(PixelFormat format)
+{
+ for (auto &b : bayer_map) {
+ if (b.format == format)
+ return b.name;
+ }
+ return nullptr;
+}
+
+static PixelFormat
+bayer_format_from_string(const gchar *name)
+{
+ for (auto &b : bayer_map) {
+ if (strcmp(b.name, name) == 0)
+ return b.format;
+ }
+ return PixelFormat{};
}
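
/*
 * Usage sketch: the helpers above translate between libcamera PixelFormat
 * and the GStreamer video/x-bayer format strings, e.g.:
 */
const gchar *name = bayer_format_to_string(formats::SGRBG10); /* "grbg10le" */
PixelFormat fmt = bayer_format_from_string("rggb12le");       /* formats::SRGGB12 */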
static GstStructure *
-bare_structure_from_fourcc(guint fourcc)
+bare_structure_from_format(const PixelFormat &format)
{
- GstVideoFormat gst_format = drm_to_gst_format(fourcc);
+ GstVideoFormat gst_format = pixel_format_to_gst_format(format);
if (gst_format == GST_VIDEO_FORMAT_UNKNOWN)
return nullptr;
@@ -66,9 +316,17 @@ bare_structure_from_fourcc(guint fourcc)
return gst_structure_new("video/x-raw", "format", G_TYPE_STRING,
gst_video_format_to_string(gst_format), nullptr);
- switch (fourcc) {
- case DRM_FORMAT_MJPEG:
+ switch (format) {
+ case formats::MJPEG:
return gst_structure_new_empty("image/jpeg");
+
+ case formats::SBGGR8:
+ case formats::SGBRG8:
+ case formats::SGRBG8:
+ case formats::SRGGB8:
+ return gst_structure_new("video/x-bayer", "format", G_TYPE_STRING,
+ bayer_format_to_string(format), nullptr);
+
default:
return nullptr;
}
@@ -80,7 +338,7 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
GstCaps *caps = gst_caps_new_empty();
for (PixelFormat pixelformat : formats.pixelformats()) {
- g_autoptr(GstStructure) bare_s = bare_structure_from_fourcc(pixelformat);
+ g_autoptr(GstStructure) bare_s = bare_structure_from_format(pixelformat);
if (!bare_s) {
GST_WARNING("Unsupported DRM format %" GST_FOURCC_FORMAT,
@@ -103,13 +361,21 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
GValue val = G_VALUE_INIT;
g_value_init(&val, GST_TYPE_INT_RANGE);
- gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep);
- gst_structure_set_value(s, "width", &val);
- gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep);
- gst_structure_set_value(s, "height", &val);
+ if (range.min.width == range.max.width) {
+ gst_structure_set(s, "width", G_TYPE_INT, range.min.width, nullptr);
+ } else {
+ gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep);
+ gst_structure_set_value(s, "width", &val);
+ }
+ if (range.min.height == range.max.height) {
+ gst_structure_set(s, "height", G_TYPE_INT, range.min.height, nullptr);
+ } else {
+ gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep);
+ gst_structure_set_value(s, "height", &val);
+ }
g_value_unset(&val);
- gst_caps_append_structure(caps, s);
+ caps = gst_caps_merge_structure(caps, s);
}
}
@@ -117,33 +383,88 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
}
GstCaps *
-gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg)
+gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg,
+ GstVideoTransferFunction transfer)
{
GstCaps *caps = gst_caps_new_empty();
- GstStructure *s = bare_structure_from_fourcc(stream_cfg.pixelFormat);
+ GstStructure *s = bare_structure_from_format(stream_cfg.pixelFormat);
gst_structure_set(s,
"width", G_TYPE_INT, stream_cfg.size.width,
"height", G_TYPE_INT, stream_cfg.size.height,
nullptr);
+
+ if (stream_cfg.colorSpace) {
+ GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value(), transfer);
+ g_autofree gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry);
+
+ if (colorimetry_str)
+ gst_structure_set(s, "colorimetry", G_TYPE_STRING, colorimetry_str, nullptr);
+ else
+ g_error("Got invalid colorimetry from ColorSpace: %s",
+ ColorSpace::toString(stream_cfg.colorSpace).c_str());
+ }
+
gst_caps_append_structure(caps, s);
return caps;
}
-void
-gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
- GstCaps *caps)
+void gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
+ GstCaps *caps, GstVideoTransferFunction *transfer)
{
- GstVideoFormat gst_format = drm_to_gst_format(stream_cfg.pixelFormat);
+ GstVideoFormat gst_format = pixel_format_to_gst_format(stream_cfg.pixelFormat);
+ guint i;
+ gint best_fixed = -1, best_in_range = -1;
+ GstStructure *s;
+
+ /*
+	 * These are delta weights, computed as:
+ * ABS(width - stream_cfg.size.width) * ABS(height - stream_cfg.size.height)
+ */
+ guint best_fixed_delta = G_MAXUINT;
+ guint best_in_range_delta = G_MAXUINT;
/* First fixate the caps using default configuration value. */
g_assert(gst_caps_is_writable(caps));
- caps = gst_caps_truncate(caps);
- GstStructure *s = gst_caps_get_structure(caps, 0);
- gst_structure_fixate_field_nearest_int(s, "width", stream_cfg.size.width);
- gst_structure_fixate_field_nearest_int(s, "height", stream_cfg.size.height);
+ /* Lookup the structure for a close match to the stream_cfg.size */
+ for (i = 0; i < gst_caps_get_size(caps); i++) {
+ s = gst_caps_get_structure(caps, i);
+ gint width, height;
+ guint delta;
+
+ if (gst_structure_has_field_typed(s, "width", G_TYPE_INT) &&
+ gst_structure_has_field_typed(s, "height", G_TYPE_INT)) {
+ gst_structure_get_int(s, "width", &width);
+ gst_structure_get_int(s, "height", &height);
+
+ delta = ABS(width - (gint)stream_cfg.size.width) * ABS(height - (gint)stream_cfg.size.height);
+
+ if (delta < best_fixed_delta) {
+ best_fixed_delta = delta;
+ best_fixed = i;
+ }
+ } else {
+ gst_structure_fixate_field_nearest_int(s, "width", stream_cfg.size.width);
+ gst_structure_fixate_field_nearest_int(s, "height", stream_cfg.size.height);
+ gst_structure_get_int(s, "width", &width);
+ gst_structure_get_int(s, "height", &height);
+
+ delta = ABS(width - (gint)stream_cfg.size.width) * ABS(height - (gint)stream_cfg.size.height);
+
+ if (delta < best_in_range_delta) {
+ best_in_range_delta = delta;
+ best_in_range = i;
+ }
+ }
+ }
+
+ /* Prefer reliable fixed value over ranges */
+ if (best_fixed >= 0)
+ s = gst_caps_get_structure(caps, best_fixed);
+ else
+ s = gst_caps_get_structure(caps, best_in_range);
if (gst_structure_has_name(s, "video/x-raw")) {
const gchar *format = gst_video_format_to_string(gst_format);
@@ -154,9 +475,12 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
if (gst_structure_has_name(s, "video/x-raw")) {
const gchar *format = gst_structure_get_string(s, "format");
gst_format = gst_video_format_from_string(format);
- stream_cfg.pixelFormat = PixelFormat(gst_format_to_drm(gst_format));
+ stream_cfg.pixelFormat = gst_format_to_pixel_format(gst_format);
+ } else if (gst_structure_has_name(s, "video/x-bayer")) {
+ const gchar *format = gst_structure_get_string(s, "format");
+ stream_cfg.pixelFormat = bayer_format_from_string(format);
} else if (gst_structure_has_name(s, "image/jpeg")) {
- stream_cfg.pixelFormat = PixelFormat(DRM_FORMAT_MJPEG);
+ stream_cfg.pixelFormat = formats::MJPEG;
} else {
g_critical("Unsupported media type: %s", gst_structure_get_name(s));
}
@@ -166,15 +490,131 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
gst_structure_get_int(s, "height", &height);
stream_cfg.size.width = width;
stream_cfg.size.height = height;
+
+ /* Configure colorimetry */
+ if (gst_structure_has_field(s, "colorimetry")) {
+ const gchar *colorimetry_str = gst_structure_get_string(s, "colorimetry");
+ GstVideoColorimetry colorimetry;
+
+ if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str))
+ g_critical("Invalid colorimetry %s", colorimetry_str);
+
+ stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry, transfer);
+ }
}
-void
-gst_libcamera_resume_task(GstTask *task)
+void gst_libcamera_get_framerate_from_caps(GstCaps *caps,
+ GstStructure *element_caps)
+{
+ GstStructure *s = gst_caps_get_structure(caps, 0);
+ /*
+	 * Default to 30 fps: if the "framerate" fraction below is missing or
+	 * invalid, libcamerasrc falls back to this value.
+ */
+ gint fps_n = 30, fps_d = 1;
+
+ if (gst_structure_has_field_typed(s, "framerate", GST_TYPE_FRACTION)) {
+ if (!gst_structure_get_fraction(s, "framerate", &fps_n, &fps_d))
+ GST_WARNING("Invalid framerate in the caps");
+ }
+
+ gst_structure_set(element_caps, "framerate", GST_TYPE_FRACTION,
+ fps_n, fps_d, nullptr);
+}
+
+void gst_libcamera_clamp_and_set_frameduration(ControlList &initCtrls,
+ const ControlInfoMap &cam_ctrls,
+ GstStructure *element_caps)
+{
+ gint fps_caps_n, fps_caps_d;
+
+ if (!gst_structure_has_field_typed(element_caps, "framerate", GST_TYPE_FRACTION))
+ return;
+
+ auto iterFrameDuration = cam_ctrls.find(&controls::FrameDurationLimits);
+ if (iterFrameDuration == cam_ctrls.end()) {
+ GST_WARNING("FrameDurationLimits not found in camera controls.");
+ return;
+ }
+
+ const GValue *framerate = gst_structure_get_value(element_caps, "framerate");
+
+ fps_caps_n = gst_value_get_fraction_numerator(framerate);
+ fps_caps_d = gst_value_get_fraction_denominator(framerate);
+
+ int64_t target_duration = (fps_caps_d * 1000000.0) / fps_caps_n;
+ int64_t min_frame_duration = iterFrameDuration->second.min().get<int64_t>();
+ int64_t max_frame_duration = iterFrameDuration->second.max().get<int64_t>();
+
+ int64_t frame_duration = std::clamp(target_duration,
+ min_frame_duration,
+ max_frame_duration);
+
+ if (frame_duration != target_duration) {
+ gint framerate_clamped = 1000000 / frame_duration;
+
+ /*
+ * Update the clamped framerate which then will be exposed in
+ * downstream caps.
+ */
+ gst_structure_set(element_caps, "framerate", GST_TYPE_FRACTION,
+ framerate_clamped, 1, nullptr);
+ }
+
+ initCtrls.set(controls::FrameDurationLimits,
+ { frame_duration, frame_duration });
+}
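
/*
 * Worked example for the clamping above, assuming a camera whose
 * FrameDurationLimits are [33333, 1000000] microseconds (30 fps down to
 * 1 fps): a caps framerate of 120/1 gives a target duration of
 * (1 * 1000000.0) / 120 = 8333 us, which is clamped to 33333 us, and the
 * caps framerate is rewritten to 1000000 / 33333 = 30/1.
 */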
+
+void gst_libcamera_framerate_to_caps(GstCaps *caps, const GstStructure *element_caps)
+{
+ const GValue *framerate = gst_structure_get_value(element_caps, "framerate");
+ if (!GST_VALUE_HOLDS_FRACTION(framerate))
+ return;
+
+ GstStructure *s = gst_caps_get_structure(caps, 0);
+ gint fps_caps_n, fps_caps_d;
+
+ fps_caps_n = gst_value_get_fraction_numerator(framerate);
+ fps_caps_d = gst_value_get_fraction_denominator(framerate);
+
+ gst_structure_set(s, "framerate", GST_TYPE_FRACTION, fps_caps_n, fps_caps_d, nullptr);
+}
+
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean
+gst_task_resume(GstTask *task)
{
/* We only want to resume the task if it's paused. */
GLibLocker lock(GST_OBJECT(task));
- if (GST_TASK_STATE(task) == GST_TASK_PAUSED) {
- GST_TASK_STATE(task) = GST_TASK_STARTED;
- GST_TASK_SIGNAL(task);
+ if (GST_TASK_STATE(task) != GST_TASK_PAUSED)
+ return FALSE;
+
+ GST_TASK_STATE(task) = GST_TASK_STARTED;
+ GST_TASK_SIGNAL(task);
+ return TRUE;
+}
+#endif
+
+G_LOCK_DEFINE_STATIC(cm_singleton_lock);
+static std::weak_ptr<CameraManager> cm_singleton_ptr;
+
+std::shared_ptr<CameraManager>
+gst_libcamera_get_camera_manager(int &ret)
+{
+ std::shared_ptr<CameraManager> cm;
+
+ G_LOCK(cm_singleton_lock);
+
+ cm = cm_singleton_ptr.lock();
+ if (!cm) {
+ cm = std::make_shared<CameraManager>();
+ cm_singleton_ptr = cm;
+ ret = cm->start();
+ } else {
+ ret = 0;
}
+
+ G_UNLOCK(cm_singleton_lock);
+
+ return cm;
}
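
/*
 * Usage sketch for the singleton above: every caller shares one
 * CameraManager, and since only a weak_ptr is kept globally, the manager
 * (and libcamera itself) is torn down when the last shared_ptr is dropped.
 */
int ret = 0;
std::shared_ptr<CameraManager> cm = gst_libcamera_get_camera_manager(ret);
if (ret)
	GST_ERROR("Failed to start the camera manager: %s", g_strerror(-ret));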
diff --git a/src/gstreamer/gstlibcamera-utils.h b/src/gstreamer/gstlibcamera-utils.h
index 2b3f26b6..4978987c 100644
--- a/src/gstreamer/gstlibcamera-utils.h
+++ b/src/gstreamer/gstlibcamera-utils.h
@@ -3,22 +3,40 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera-utils.h - GStreamer libcamera Utility Functions
+ * GStreamer libcamera Utility Functions
*/
-#ifndef __GST_LIBCAMERA_UTILS_H__
-#define __GST_LIBCAMERA_UTILS_H__
+#pragma once
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/controls.h>
+#include <libcamera/stream.h>
#include <gst/gst.h>
#include <gst/video/video.h>
-#include <libcamera/stream.h>
-
GstCaps *gst_libcamera_stream_formats_to_caps(const libcamera::StreamFormats &formats);
-GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg);
+GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg,
+ GstVideoTransferFunction transfer);
void gst_libcamera_configure_stream_from_caps(libcamera::StreamConfiguration &stream_cfg,
- GstCaps *caps);
-void gst_libcamera_resume_task(GstTask *task);
+ GstCaps *caps, GstVideoTransferFunction *transfer);
+void gst_libcamera_get_framerate_from_caps(GstCaps *caps, GstStructure *element_caps);
+void gst_libcamera_clamp_and_set_frameduration(libcamera::ControlList &controls,
+ const libcamera::ControlInfoMap &camera_controls,
+ GstStructure *element_caps);
+void gst_libcamera_framerate_to_caps(GstCaps *caps, const GstStructure *element_caps);
+
+#if !GST_CHECK_VERSION(1, 16, 0)
+static inline void gst_clear_event(GstEvent **event_ptr)
+{
+ g_clear_pointer(event_ptr, gst_mini_object_unref);
+}
+#endif
+
+#if !GST_CHECK_VERSION(1, 17, 1)
+gboolean gst_task_resume(GstTask *task);
+#endif
+std::shared_ptr<libcamera::CameraManager> gst_libcamera_get_camera_manager(int &ret);
/**
* \class GLibLocker
@@ -69,5 +87,3 @@ public:
private:
GRecMutex *mutex_;
};
-
-#endif /* __GST_LIBCAMERA_UTILS_H__ */
diff --git a/src/gstreamer/gstlibcamera.cpp b/src/gstreamer/gstlibcamera.cpp
index 81c7bb19..bff98979 100644
--- a/src/gstreamer/gstlibcamera.cpp
+++ b/src/gstreamer/gstlibcamera.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamera.c - GStreamer plugin
+ * GStreamer plugin
*/
#include "gstlibcameraprovider.h"
@@ -24,4 +24,4 @@ plugin_init(GstPlugin *plugin)
GST_PLUGIN_DEFINE(GST_VERSION_MAJOR, GST_VERSION_MINOR,
libcamera, "libcamera capture plugin",
- plugin_init, VERSION, "LGPL", PACKAGE, "https://libcamera.org");
+ plugin_init, VERSION, "LGPL", PACKAGE, "https://libcamera.org")
diff --git a/src/gstreamer/gstlibcameraallocator.cpp b/src/gstreamer/gstlibcameraallocator.cpp
index 1d5959c0..d4492d99 100644
--- a/src/gstreamer/gstlibcameraallocator.cpp
+++ b/src/gstreamer/gstlibcameraallocator.cpp
@@ -3,11 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.cpp - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
#include "gstlibcameraallocator.h"
+#include <utility>
+
#include <libcamera/camera.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/stream.h>
@@ -52,8 +54,10 @@ FrameWrap::FrameWrap(GstAllocator *allocator, FrameBuffer *buffer,
outstandingPlanes_(0)
{
for (const FrameBuffer::Plane &plane : buffer->planes()) {
- GstMemory *mem = gst_fd_allocator_alloc(allocator, plane.fd.fd(), plane.length,
+ GstMemory *mem = gst_fd_allocator_alloc(allocator, plane.fd.get(),
+ plane.offset + plane.length,
GST_FD_MEMORY_FLAG_DONT_CLOSE);
+ gst_memory_resize(mem, plane.offset, plane.length);
gst_mini_object_set_qdata(GST_MINI_OBJECT(mem), getQuark(), this, nullptr);
GST_MINI_OBJECT(mem)->dispose = gst_libcamera_allocator_release;
g_object_unref(mem->allocator);
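
/*
 * Worked example for the offset handling above, assuming a hypothetical
 * 640x480 NV12 buffer whose second plane starts at offset 307200 within the
 * same dmabuf and is 153600 bytes long: the GstMemory is allocated to cover
 * offset + length bytes, then resized so mapping yields only the plane.
 */
GstMemory *mem = gst_fd_allocator_alloc(allocator, fd, 307200 + 153600,
					GST_FD_MEMORY_FLAG_DONT_CLOSE);
gst_memory_resize(mem, 307200, 153600);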
@@ -70,7 +74,7 @@ FrameWrap::~FrameWrap()
}
}
-GQuark FrameWrap::getQuark(void)
+GQuark FrameWrap::getQuark()
{
static gsize frame_quark = 0;
@@ -98,25 +102,33 @@ struct _GstLibcameraAllocator {
* FrameWrap.
*/
GHashTable *pools;
+ /*
+ * The camera manager represents the library, which needs to be kept
+ * alive until all the memory has been released.
+ */
+ std::shared_ptr<CameraManager> *cm_ptr;
};
G_DEFINE_TYPE(GstLibcameraAllocator, gst_libcamera_allocator,
- GST_TYPE_DMABUF_ALLOCATOR);
+ GST_TYPE_DMABUF_ALLOCATOR)
static gboolean
gst_libcamera_allocator_release(GstMiniObject *mini_object)
{
GstMemory *mem = GST_MEMORY_CAST(mini_object);
GstLibcameraAllocator *self = GST_LIBCAMERA_ALLOCATOR(mem->allocator);
- GLibLocker lock(GST_OBJECT(self));
- auto *frame = reinterpret_cast<FrameWrap *>(gst_mini_object_get_qdata(mini_object, FrameWrap::getQuark()));
- gst_memory_ref(mem);
+ {
+ GLibLocker lock(GST_OBJECT(self));
+ auto *frame = reinterpret_cast<FrameWrap *>(gst_mini_object_get_qdata(mini_object, FrameWrap::getQuark()));
- if (frame->releasePlane()) {
- auto *pool = reinterpret_cast<GQueue *>(g_hash_table_lookup(self->pools, frame->stream_));
- g_return_val_if_fail(pool, TRUE);
- g_queue_push_tail(pool, frame);
+ gst_memory_ref(mem);
+
+ if (frame->releasePlane()) {
+ auto *pool = reinterpret_cast<GQueue *>(g_hash_table_lookup(self->pools, frame->stream_));
+ g_return_val_if_fail(pool, TRUE);
+ g_queue_push_tail(pool, frame);
+ }
}
/* Keep last in case we are holding on the last allocator ref. */
@@ -168,6 +180,9 @@ gst_libcamera_allocator_finalize(GObject *object)
delete self->fb_allocator;
+ /* Keep last. */
+ delete self->cm_ptr;
+
G_OBJECT_CLASS(gst_libcamera_allocator_parent_class)->finalize(object);
}
@@ -183,17 +198,23 @@ gst_libcamera_allocator_class_init(GstLibcameraAllocatorClass *klass)
}
GstLibcameraAllocator *
-gst_libcamera_allocator_new(std::shared_ptr<Camera> camera)
+gst_libcamera_allocator_new(std::shared_ptr<Camera> camera,
+ CameraConfiguration *config_)
{
- auto *self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
- nullptr));
+ g_autoptr(GstLibcameraAllocator) self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
+ nullptr));
+ gint ret;
+
+ self->cm_ptr = new std::shared_ptr<CameraManager>(gst_libcamera_get_camera_manager(ret));
+ if (ret)
+ return nullptr;
self->fb_allocator = new FrameBufferAllocator(camera);
- for (Stream *stream : camera->streams()) {
- gint ret;
+ for (StreamConfiguration &streamCfg : *config_) {
+ Stream *stream = streamCfg.stream();
ret = self->fb_allocator->allocate(stream);
- if (ret == 0)
+ if (ret <= 0)
return nullptr;
GQueue *pool = g_queue_new();
@@ -207,7 +228,7 @@ gst_libcamera_allocator_new(std::shared_ptr<Camera> camera)
g_hash_table_insert(self->pools, stream, pool);
}
- return self;
+ return std::exchange(self, nullptr);
}
bool
diff --git a/src/gstreamer/gstlibcameraallocator.h b/src/gstreamer/gstlibcameraallocator.h
index befdcad6..1a6ba346 100644
--- a/src/gstreamer/gstlibcameraallocator.h
+++ b/src/gstreamer/gstlibcameraallocator.h
@@ -3,22 +3,23 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraallocator.h - GStreamer Custom Allocator
+ * GStreamer Custom Allocator
*/
-#ifndef __GST_LIBCAMERA_ALLOCATOR_H__
-#define __GST_LIBCAMERA_ALLOCATOR_H__
+#pragma once
#include <gst/gst.h>
#include <gst/allocators/allocators.h>
+#include <libcamera/camera.h>
#include <libcamera/stream.h>
#define GST_TYPE_LIBCAMERA_ALLOCATOR gst_libcamera_allocator_get_type()
G_DECLARE_FINAL_TYPE(GstLibcameraAllocator, gst_libcamera_allocator,
GST_LIBCAMERA, ALLOCATOR, GstDmaBufAllocator)
-GstLibcameraAllocator *gst_libcamera_allocator_new(std::shared_ptr<libcamera::Camera> camera);
+GstLibcameraAllocator *gst_libcamera_allocator_new(std::shared_ptr<libcamera::Camera> camera,
+ libcamera::CameraConfiguration *config_);
bool gst_libcamera_allocator_prepare_buffer(GstLibcameraAllocator *self,
libcamera::Stream *stream,
@@ -28,5 +29,3 @@ gsize gst_libcamera_allocator_get_pool_size(GstLibcameraAllocator *allocator,
libcamera::Stream *stream);
libcamera::FrameBuffer *gst_libcamera_memory_get_frame_buffer(GstMemory *mem);
-
-#endif /* __GST_LIBCAMERA_ALLOCATOR_H__ */
diff --git a/src/gstreamer/gstlibcamerapad.cpp b/src/gstreamer/gstlibcamerapad.cpp
index e184495a..7b22aebe 100644
--- a/src/gstreamer/gstlibcamerapad.cpp
+++ b/src/gstreamer/gstlibcamerapad.cpp
@@ -3,7 +3,7 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.cpp - GStreamer Capture Pad
+ * GStreamer Capture Pad
*/
#include "gstlibcamerapad.h"
@@ -18,7 +18,6 @@ struct _GstLibcameraPad {
GstPad parent;
StreamRole role;
GstLibcameraPool *pool;
- GQueue pending_buffers;
GstClockTime latency;
};
@@ -27,7 +26,7 @@ enum {
PROP_STREAM_ROLE
};
-G_DEFINE_TYPE(GstLibcameraPad, gst_libcamera_pad, GST_TYPE_PAD);
+G_DEFINE_TYPE(GstLibcameraPad, gst_libcamera_pad, GST_TYPE_PAD)
static void
gst_libcamera_pad_set_property(GObject *object, guint prop_id,
@@ -55,7 +54,7 @@ gst_libcamera_pad_get_property(GObject *object, guint prop_id, GValue *value,
switch (prop_id) {
case PROP_STREAM_ROLE:
- g_value_set_enum(value, self->role);
+ g_value_set_enum(value, static_cast<gint>(self->role));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
@@ -84,13 +83,23 @@ gst_libcamera_pad_init(GstLibcameraPad *self)
}
static GType
-gst_libcamera_stream_role_get_type(void)
+gst_libcamera_stream_role_get_type()
{
static GType type = 0;
static const GEnumValue values[] = {
- { StillCapture, "libcamera::StillCapture", "still-capture" },
- { VideoRecording, "libcamera::VideoRecording", "video-recording" },
- { Viewfinder, "libcamera::Viewfinder", "view-finder" },
+ {
+ static_cast<gint>(StreamRole::StillCapture),
+ "libcamera::StillCapture",
+ "still-capture",
+ }, {
+ static_cast<gint>(StreamRole::VideoRecording),
+ "libcamera::VideoRecording",
+ "video-recording",
+ }, {
+ static_cast<gint>(StreamRole::Viewfinder),
+ "libcamera::Viewfinder",
+ "view-finder",
+ },
{ 0, NULL, NULL }
};
@@ -111,7 +120,7 @@ gst_libcamera_pad_class_init(GstLibcameraPadClass *klass)
auto *spec = g_param_spec_enum("stream-role", "Stream Role",
"The selected stream role",
gst_libcamera_stream_role_get_type(),
- VideoRecording,
+ static_cast<gint>(StreamRole::VideoRecording),
(GParamFlags)(GST_PARAM_MUTABLE_READY
| G_PARAM_CONSTRUCT
| G_PARAM_READWRITE
@@ -156,40 +165,6 @@ gst_libcamera_pad_get_stream(GstPad *pad)
}
void
-gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
-
- g_queue_push_head(&self->pending_buffers, buffer);
-}
-
-GstFlowReturn
-gst_libcamera_pad_push_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GstBuffer *buffer;
-
- {
- GLibLocker lock(GST_OBJECT(self));
- buffer = GST_BUFFER(g_queue_pop_tail(&self->pending_buffers));
- }
-
- if (!buffer)
- return GST_FLOW_OK;
-
- return gst_pad_push(pad, buffer);
-}
-
-bool
-gst_libcamera_pad_has_pending(GstPad *pad)
-{
- auto *self = GST_LIBCAMERA_PAD(pad);
- GLibLocker lock(GST_OBJECT(self));
- return self->pending_buffers.length > 0;
-}
-
-void
gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency)
{
auto *self = GST_LIBCAMERA_PAD(pad);
diff --git a/src/gstreamer/gstlibcamerapad.h b/src/gstreamer/gstlibcamerapad.h
index 779f2d13..630c168a 100644
--- a/src/gstreamer/gstlibcamerapad.h
+++ b/src/gstreamer/gstlibcamerapad.h
@@ -3,11 +3,10 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapad.h - GStreamer Capture Element
+ * GStreamer Capture Element
*/
-#ifndef __GST_LIBCAMERA_PAD_H__
-#define __GST_LIBCAMERA_PAD_H__
+#pragma once
#include "gstlibcamerapool.h"
@@ -26,12 +25,4 @@ void gst_libcamera_pad_set_pool(GstPad *pad, GstLibcameraPool *pool);
libcamera::Stream *gst_libcamera_pad_get_stream(GstPad *pad);
-void gst_libcamera_pad_queue_buffer(GstPad *pad, GstBuffer *buffer);
-
-GstFlowReturn gst_libcamera_pad_push_pending(GstPad *pad);
-
-bool gst_libcamera_pad_has_pending(GstPad *pad);
-
void gst_libcamera_pad_set_latency(GstPad *pad, GstClockTime latency);
-
-#endif /* __GST_LIBCAMERA_PAD_H__ */
diff --git a/src/gstreamer/gstlibcamerapool.cpp b/src/gstreamer/gstlibcamerapool.cpp
index 8f536169..9cd7eccb 100644
--- a/src/gstreamer/gstlibcamerapool.cpp
+++ b/src/gstreamer/gstlibcamerapool.cpp
@@ -3,11 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.cpp - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*/
#include "gstlibcamerapool.h"
+#include <deque>
+
#include <libcamera/stream.h>
#include "gstlibcamera-utils.h"
@@ -24,24 +26,43 @@ static guint signals[N_SIGNALS];
struct _GstLibcameraPool {
GstBufferPool parent;
- GstAtomicQueue *queue;
+ std::deque<GstBuffer *> *queue;
GstLibcameraAllocator *allocator;
Stream *stream;
};
-G_DEFINE_TYPE(GstLibcameraPool, gst_libcamera_pool, GST_TYPE_BUFFER_POOL);
+G_DEFINE_TYPE(GstLibcameraPool, gst_libcamera_pool, GST_TYPE_BUFFER_POOL)
+
+static GstBuffer *
+gst_libcamera_pool_pop_buffer(GstLibcameraPool *self)
+{
+ GLibLocker lock(GST_OBJECT(self));
+ GstBuffer *buf;
+
+ if (self->queue->empty())
+ return nullptr;
+
+ buf = self->queue->front();
+ self->queue->pop_front();
+
+ return buf;
+}
static GstFlowReturn
gst_libcamera_pool_acquire_buffer(GstBufferPool *pool, GstBuffer **buffer,
- GstBufferPoolAcquireParams *params)
+ [[maybe_unused]] GstBufferPoolAcquireParams *params)
{
GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool);
- GstBuffer *buf = GST_BUFFER(gst_atomic_queue_pop(self->queue));
+ GstBuffer *buf = gst_libcamera_pool_pop_buffer(self);
+
if (!buf)
return GST_FLOW_ERROR;
- if (!gst_libcamera_allocator_prepare_buffer(self->allocator, self->stream, buf))
+ if (!gst_libcamera_allocator_prepare_buffer(self->allocator, self->stream, buf)) {
+ GLibLocker lock(GST_OBJECT(self));
+ self->queue->push_back(buf);
return GST_FLOW_ERROR;
+ }
*buffer = buf;
return GST_FLOW_OK;
@@ -62,9 +83,13 @@ static void
gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer)
{
GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool);
- bool do_notify = gst_atomic_queue_length(self->queue) == 0;
+ bool do_notify;
- gst_atomic_queue_push(self->queue, buffer);
+ {
+ GLibLocker lock(GST_OBJECT(self));
+ do_notify = self->queue->empty();
+ self->queue->push_back(buffer);
+ }
if (do_notify)
g_signal_emit(self, signals[SIGNAL_BUFFER_NOTIFY], 0);
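
/*
 * The buffer-notify signal is emitted only on the empty -> non-empty
 * transition; the source element connects it to gst_task_resume() so the
 * streaming task wakes up exactly when a buffer becomes available again:
 */
g_signal_connect_swapped(pool, "buffer-notify",
			 G_CALLBACK(gst_task_resume), self->task);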
@@ -73,7 +98,7 @@ gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer)
static void
gst_libcamera_pool_init(GstLibcameraPool *self)
{
- self->queue = gst_atomic_queue_new(4);
+ self->queue = new std::deque<GstBuffer *>();
}
static void
@@ -82,10 +107,10 @@ gst_libcamera_pool_finalize(GObject *object)
GstLibcameraPool *self = GST_LIBCAMERA_POOL(object);
GstBuffer *buf;
- while ((buf = GST_BUFFER(gst_atomic_queue_pop(self->queue))))
+ while ((buf = gst_libcamera_pool_pop_buffer(self)))
gst_buffer_unref(buf);
- gst_atomic_queue_unref(self->queue);
+ delete self->queue;
g_object_unref(self->allocator);
G_OBJECT_CLASS(gst_libcamera_pool_parent_class)->finalize(object);
@@ -120,7 +145,7 @@ gst_libcamera_pool_new(GstLibcameraAllocator *allocator, Stream *stream)
gsize pool_size = gst_libcamera_allocator_get_pool_size(allocator, stream);
for (gsize i = 0; i < pool_size; i++) {
GstBuffer *buffer = gst_buffer_new();
- gst_atomic_queue_push(pool->queue, buffer);
+ pool->queue->push_back(buffer);
}
return pool;
@@ -132,13 +157,6 @@ gst_libcamera_pool_get_stream(GstLibcameraPool *self)
return self->stream;
}
-Stream *
-gst_libcamera_buffer_get_stream(GstBuffer *buffer)
-{
- auto *self = (GstLibcameraPool *)buffer->pool;
- return self->stream;
-}
-
FrameBuffer *
gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer)
{
diff --git a/src/gstreamer/gstlibcamerapool.h b/src/gstreamer/gstlibcamerapool.h
index a3f1b685..2a7a9c77 100644
--- a/src/gstreamer/gstlibcamerapool.h
+++ b/src/gstreamer/gstlibcamerapool.h
@@ -3,14 +3,13 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerapool.h - GStreamer Buffer Pool
+ * GStreamer Buffer Pool
*
* This is a partial implementation of GstBufferPool intended for internal use
* only. This pool cannot be configured or activated.
*/
-#ifndef __GST_LIBCAMERA_POOL_H__
-#define __GST_LIBCAMERA_POOL_H__
+#pragma once
#include "gstlibcameraallocator.h"
@@ -26,9 +25,4 @@ GstLibcameraPool *gst_libcamera_pool_new(GstLibcameraAllocator *allocator,
libcamera::Stream *gst_libcamera_pool_get_stream(GstLibcameraPool *self);
-libcamera::Stream *gst_libcamera_buffer_get_stream(GstBuffer *buffer);
-
libcamera::FrameBuffer *gst_libcamera_buffer_get_frame_buffer(GstBuffer *buffer);
-
-
-#endif /* __GST_LIBCAMERA_POOL_H__ */
diff --git a/src/gstreamer/gstlibcameraprovider.cpp b/src/gstreamer/gstlibcameraprovider.cpp
index 914ed4fb..5da96ea3 100644
--- a/src/gstreamer/gstlibcameraprovider.cpp
+++ b/src/gstreamer/gstlibcameraprovider.cpp
@@ -3,9 +3,11 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.c - GStreamer Device Provider
+ * GStreamer Device Provider
*/
+#include <array>
+
#include "gstlibcameraprovider.h"
#include <libcamera/camera.h>
@@ -35,14 +37,14 @@ enum {
#define GST_TYPE_LIBCAMERA_DEVICE gst_libcamera_device_get_type()
G_DECLARE_FINAL_TYPE(GstLibcameraDevice, gst_libcamera_device,
- GST_LIBCAMERA, DEVICE, GstDevice);
+ GST_LIBCAMERA, DEVICE, GstDevice)
struct _GstLibcameraDevice {
GstDevice parent;
gchar *name;
};
-G_DEFINE_TYPE(GstLibcameraDevice, gst_libcamera_device, GST_TYPE_DEVICE);
+G_DEFINE_TYPE(GstLibcameraDevice, gst_libcamera_device, GST_TYPE_DEVICE)
static GstElement *
gst_libcamera_device_create_element(GstDevice *device, const gchar *name)
@@ -89,7 +91,7 @@ gst_libcamera_device_set_property(GObject *object, guint prop_id,
}
static void
-gst_libcamera_device_init(GstLibcameraDevice *self)
+gst_libcamera_device_init([[maybe_unused]] GstLibcameraDevice *self)
{
}
@@ -101,7 +103,7 @@ gst_libcamera_device_finalize(GObject *object)
g_free(self->name);
- G_OBJECT_GET_CLASS(klass)->finalize(object);
+ G_OBJECT_CLASS(klass)->finalize(object);
}
static void
@@ -126,12 +128,15 @@ gst_libcamera_device_class_init(GstLibcameraDeviceClass *klass)
static GstDevice *
gst_libcamera_device_new(const std::shared_ptr<Camera> &camera)
{
+ static const std::array roles{ StreamRole::VideoRecording };
g_autoptr(GstCaps) caps = gst_caps_new_empty();
- const gchar *name = camera->name().c_str();
- StreamRoles roles;
+ const gchar *name = camera->id().c_str();
- roles.push_back(StreamRole::VideoRecording);
std::unique_ptr<CameraConfiguration> config = camera->generateConfiguration(roles);
+ if (!config || config->size() != roles.size()) {
+ GST_ERROR("Failed to generate a default configuration for %s", name);
+ return nullptr;
+ }
for (const StreamConfiguration &stream_cfg : *config) {
GstCaps *sub_caps = gst_libcamera_stream_formats_to_caps(stream_cfg.formats());
@@ -158,19 +163,18 @@ gst_libcamera_device_new(const std::shared_ptr<Camera> &camera)
struct _GstLibcameraProvider {
GstDeviceProvider parent;
- CameraManager *cm;
};
G_DEFINE_TYPE_WITH_CODE(GstLibcameraProvider, gst_libcamera_provider,
GST_TYPE_DEVICE_PROVIDER,
GST_DEBUG_CATEGORY_INIT(provider_debug, "libcamera-provider", 0,
- "libcamera Device Provider"));
+ "libcamera Device Provider"))
static GList *
gst_libcamera_provider_probe(GstDeviceProvider *provider)
{
GstLibcameraProvider *self = GST_LIBCAMERA_PROVIDER(provider);
- CameraManager *cm = self->cm;
+ std::shared_ptr<CameraManager> cm;
GList *devices = nullptr;
gint ret;
@@ -181,7 +185,7 @@ gst_libcamera_provider_probe(GstDeviceProvider *provider)
* gains monitoring support. Meanwhile we need to cycle start()/stop()
 * to ensure every probe() call returns the latest list.
*/
- ret = cm->start();
+ cm = gst_libcamera_get_camera_manager(ret);
if (ret) {
GST_ERROR_OBJECT(self, "Failed to retrieve device list: %s",
g_strerror(-ret));
@@ -189,13 +193,19 @@ gst_libcamera_provider_probe(GstDeviceProvider *provider)
}
for (const std::shared_ptr<Camera> &camera : cm->cameras()) {
- GST_INFO_OBJECT(self, "Found camera '%s'", camera->name().c_str());
+ GST_INFO_OBJECT(self, "Found camera '%s'", camera->id().c_str());
+
+ GstDevice *dev = gst_libcamera_device_new(camera);
+ if (!dev) {
+ GST_ERROR_OBJECT(self, "Failed to add camera '%s'",
+ camera->id().c_str());
+ return nullptr;
+ }
+
devices = g_list_append(devices,
- g_object_ref_sink(gst_libcamera_device_new(camera)));
+ g_object_ref_sink(dev));
}
- cm->stop();
-
return devices;
}
@@ -204,31 +214,16 @@ gst_libcamera_provider_init(GstLibcameraProvider *self)
{
GstDeviceProvider *provider = GST_DEVICE_PROVIDER(self);
- self->cm = new CameraManager();
-
/* Avoid devices being duplicated. */
gst_device_provider_hide_provider(provider, "v4l2deviceprovider");
}
static void
-gst_libcamera_provider_finalize(GObject *object)
-{
- GstLibcameraProvider *self = GST_LIBCAMERA_PROVIDER(object);
- gpointer klass = gst_libcamera_provider_parent_class;
-
- delete self->cm;
-
- return G_OBJECT_GET_CLASS(klass)->finalize(object);
-}
-
-static void
gst_libcamera_provider_class_init(GstLibcameraProviderClass *klass)
{
GstDeviceProviderClass *provider_class = GST_DEVICE_PROVIDER_CLASS(klass);
- GObjectClass *object_class = G_OBJECT_CLASS(klass);
provider_class->probe = gst_libcamera_provider_probe;
- object_class->finalize = gst_libcamera_provider_finalize;
gst_device_provider_class_set_metadata(provider_class,
"libcamera Device Provider",
diff --git a/src/gstreamer/gstlibcameraprovider.h b/src/gstreamer/gstlibcameraprovider.h
index bdd19db8..19708b9d 100644
--- a/src/gstreamer/gstlibcameraprovider.h
+++ b/src/gstreamer/gstlibcameraprovider.h
@@ -3,11 +3,10 @@
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcameraprovider.h - GStreamer Device Provider
+ * GStreamer Device Provider
*/
-#ifndef __GST_LIBCAMERA_PROVIDER_H__
-#define __GST_LIBCAMERA_PROVIDER_H__
+#pragma once
#include <gst/gst.h>
@@ -18,6 +17,3 @@ G_DECLARE_FINAL_TYPE(GstLibcameraProvider, gst_libcamera_provider,
GST_LIBCAMERA, PROVIDER, GstDeviceProvider)
G_END_DECLS
-
-#endif /* __GST_LIBCAMERA_PROVIDER_H__ */
-
diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp
index 9755922a..5e9e843d 100644
--- a/src/gstreamer/gstlibcamerasrc.cpp
+++ b/src/gstreamer/gstlibcamerasrc.cpp
@@ -3,16 +3,14 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.cpp - GStreamer Capture Element
+ * GStreamer Capture Element
*/
/**
* \todo The following is a list of items that needs implementation in the GStreamer plugin
* - Implement GstElement::send_event
- * + Allowing application to send EOS
* + Allowing application to use FLUSH/FLUSH_STOP
* + Prevent the main thread from accessing streaming thread
- * - Implement renegotiation (even if slow)
* - Implement GstElement::request-new-pad (multi stream)
* + Evaluate if a single streaming thread is fine
* - Add application driven request (snapshot)
@@ -25,26 +23,25 @@
* - Add timestamp support
* - Use unique names to select the camera devices
* - Add GstVideoMeta support (strides and offsets)
- *
- * \todo libcamera UVC drivers picks the lowest possible resolution first, this
- * should be fixed so that we get a decent resolution and framerate for the
- * role by default.
*/
#include "gstlibcamerasrc.h"
+#include <atomic>
#include <queue>
#include <vector>
-#include <gst/base/base.h>
-
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
+#include <libcamera/control_ids.h>
+#include <gst/base/base.h>
+
+#include "gstlibcamera-controls.h"
+#include "gstlibcamera-utils.h"
#include "gstlibcameraallocator.h"
#include "gstlibcamerapad.h"
#include "gstlibcamerapool.h"
-#include "gstlibcamera-utils.h"
using namespace libcamera;
@@ -52,19 +49,21 @@ GST_DEBUG_CATEGORY_STATIC(source_debug);
#define GST_CAT_DEFAULT source_debug
struct RequestWrap {
- RequestWrap(Request *request);
+ RequestWrap(std::unique_ptr<Request> request);
~RequestWrap();
- void attachBuffer(GstBuffer *buffer);
+ void attachBuffer(Stream *stream, GstBuffer *buffer);
GstBuffer *detachBuffer(Stream *stream);
- /* For ptr comparison only. */
- Request *request_;
+ std::unique_ptr<Request> request_;
std::map<Stream *, GstBuffer *> buffers_;
+
+ GstClockTime latency_;
+ GstClockTime pts_;
};
-RequestWrap::RequestWrap(Request *request)
- : request_(request)
+RequestWrap::RequestWrap(std::unique_ptr<Request> request)
+ : request_(std::move(request)), latency_(0), pts_(GST_CLOCK_TIME_NONE)
{
}
@@ -76,10 +75,9 @@ RequestWrap::~RequestWrap()
}
}
-void RequestWrap::attachBuffer(GstBuffer *buffer)
+void RequestWrap::attachBuffer(Stream *stream, GstBuffer *buffer)
{
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- Stream *stream = gst_libcamera_buffer_get_stream(buffer);
request_->addBuffer(stream, fb);
@@ -109,13 +107,34 @@ GstBuffer *RequestWrap::detachBuffer(Stream *stream)
struct GstLibcameraSrcState {
GstLibcameraSrc *src_;
- std::unique_ptr<CameraManager> cm_;
+ std::shared_ptr<CameraManager> cm_;
std::shared_ptr<Camera> cam_;
std::unique_ptr<CameraConfiguration> config_;
- std::vector<GstPad *> srcpads_;
- std::queue<std::unique_ptr<RequestWrap>> requests_;
+ std::vector<GstPad *> srcpads_; /* Protected by stream_lock */
+
+ /*
+ * Contention on this lock_ must be minimized, as it has to be taken in
+ * the realtime-sensitive requestCompleted() handler to protect
+ * queuedRequests_ and completedRequests_.
+ *
+ * stream_lock must be taken before lock_ in contexts where both locks
+ * need to be taken. In particular, this means that the lock_ must not
+ * be held while calling into other graph elements (e.g. when calling
+ * gst_pad_query()).
+ */
+ GMutex lock_;
+ std::queue<std::unique_ptr<RequestWrap>> queuedRequests_;
+ std::queue<std::unique_ptr<RequestWrap>> completedRequests_;
+
+ ControlList initControls_;
+ guint group_id_;
+ GstCameraControls controls_;
+
+ int queueRequest();
void requestCompleted(Request *request);
+ int processRequest();
+ void clearRequests();
};
struct _GstLibcameraSrc {
@@ -126,6 +145,8 @@ struct _GstLibcameraSrc {
gchar *camera_name;
+ std::atomic<GstEvent *> pending_eos;
+
GstLibcameraSrcState *state;
GstLibcameraAllocator *allocator;
GstFlowCombiner *flow_combiner;
@@ -133,14 +154,20 @@ struct _GstLibcameraSrc {
enum {
PROP_0,
- PROP_CAMERA_NAME
+ PROP_CAMERA_NAME,
+ PROP_LAST
};
+static void gst_libcamera_src_child_proxy_init(gpointer g_iface,
+ gpointer iface_data);
+
G_DEFINE_TYPE_WITH_CODE(GstLibcameraSrc, gst_libcamera_src, GST_TYPE_ELEMENT,
+ G_IMPLEMENT_INTERFACE(GST_TYPE_CHILD_PROXY,
+ gst_libcamera_src_child_proxy_init)
GST_DEBUG_CATEGORY_INIT(source_debug, "libcamerasrc", 0,
- "libcamera Source"));
+ "libcamera Source"))
-#define TEMPLATE_CAPS GST_STATIC_CAPS("video/x-raw; image/jpeg")
+#define TEMPLATE_CAPS GST_STATIC_CAPS("video/x-raw; image/jpeg; video/x-bayer")
/* For the simple case, we have a src pad that is always present. */
GstStaticPadTemplate src_template = {
@@ -149,43 +176,131 @@ GstStaticPadTemplate src_template = {
/* More pads can be requested in state < PAUSED */
GstStaticPadTemplate request_src_template = {
- "src_%s", GST_PAD_SRC, GST_PAD_REQUEST, TEMPLATE_CAPS
+ "src_%u", GST_PAD_SRC, GST_PAD_REQUEST, TEMPLATE_CAPS
};
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::queueRequest()
+{
+ std::unique_ptr<Request> request = cam_->createRequest();
+ if (!request)
+ return -ENOMEM;
+
+ /* Apply controls */
+ controls_.applyControls(request);
+
+ std::unique_ptr<RequestWrap> wrap =
+ std::make_unique<RequestWrap>(std::move(request));
+
+ for (GstPad *srcpad : srcpads_) {
+ Stream *stream = gst_libcamera_pad_get_stream(srcpad);
+ GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
+ GstBuffer *buffer;
+ GstFlowReturn ret;
+
+ ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
+ &buffer, nullptr);
+ if (ret != GST_FLOW_OK) {
+ /*
+ * RequestWrap has ownership of the request, and we
+ * won't be queueing this one due to lack of buffers.
+ */
+ return -ENOBUFS;
+ }
+
+ wrap->attachBuffer(stream, buffer);
+ }
+
+ GST_TRACE_OBJECT(src_, "Requesting buffers");
+ cam_->queueRequest(wrap->request_.get());
+
+ {
+ GLibLocker locker(&lock_);
+ queuedRequests_.push(std::move(wrap));
+ }
+
+ /* The RequestWrap will be deleted in the completion handler. */
+ return 0;
+}
+
void
GstLibcameraSrcState::requestCompleted(Request *request)
{
- GLibLocker lock(GST_OBJECT(src_));
-
GST_DEBUG_OBJECT(src_, "buffers are ready");
- std::unique_ptr<RequestWrap> wrap = std::move(requests_.front());
- requests_.pop();
+ std::unique_ptr<RequestWrap> wrap;
- g_return_if_fail(wrap->request_ == request);
+ {
+ GLibLocker locker(&lock_);
+
+ controls_.readMetadata(request);
+
+ wrap = std::move(queuedRequests_.front());
+ queuedRequests_.pop();
+ }
+
+ g_return_if_fail(wrap->request_.get() == request);
if ((request->status() == Request::RequestCancelled)) {
GST_DEBUG_OBJECT(src_, "Request was cancelled");
return;
}
- GstBuffer *buffer;
+ if (GST_ELEMENT_CLOCK(src_)) {
+ int64_t timestamp = request->metadata().get(controls::SensorTimestamp).value_or(0);
+
+ GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
+ GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
+ /* \todo Need to expose which reference clock the timestamp relates to. */
+ GstClockTime sys_now = g_get_monotonic_time() * 1000;
+
+ /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
+ GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
+ wrap->pts_ = timestamp - sys_base_time;
+ wrap->latency_ = sys_now - timestamp;
+ }
+
+ {
+ GLibLocker locker(&lock_);
+ completedRequests_.push(std::move(wrap));
+ }
+
+ gst_task_resume(src_->task);
+}
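
/*
 * Worked example for the timestamp derivation above, assuming the sensor
 * timestamp and CLOCK_MONOTONIC share the same epoch (all values in ns):
 *
 *   gst_base_time = 1000, gst_now = 5000  ->  elapsed = 4000
 *   sys_now       = 9000                  ->  sys_base_time = 9000 - 4000 = 5000
 *   timestamp     = 8000                  ->  pts = 8000 - 5000 = 3000
 *                                             latency = 9000 - 8000 = 1000
 */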
+
+/* Must be called with stream_lock held. */
+int GstLibcameraSrcState::processRequest()
+{
+ std::unique_ptr<RequestWrap> wrap;
+ int err = 0;
+
+ {
+ GLibLocker locker(&lock_);
+
+ if (!completedRequests_.empty()) {
+ wrap = std::move(completedRequests_.front());
+ completedRequests_.pop();
+ }
+
+ if (completedRequests_.empty())
+ err = -ENOBUFS;
+ }
+
+ if (!wrap)
+ return -ENOBUFS;
+
+ GstFlowReturn ret = GST_FLOW_OK;
+ gst_flow_combiner_reset(src_->flow_combiner);
+
for (GstPad *srcpad : srcpads_) {
Stream *stream = gst_libcamera_pad_get_stream(srcpad);
- buffer = wrap->detachBuffer(stream);
+ GstBuffer *buffer = wrap->detachBuffer(stream);
FrameBuffer *fb = gst_libcamera_buffer_get_frame_buffer(buffer);
- if (GST_ELEMENT_CLOCK(src_)) {
- GstClockTime gst_base_time = GST_ELEMENT(src_)->base_time;
- GstClockTime gst_now = gst_clock_get_time(GST_ELEMENT_CLOCK(src_));
- /* \todo Need to expose which reference clock the timestamp relates to. */
- GstClockTime sys_now = g_get_monotonic_time() * 1000;
-
- /* Deduced from: sys_now - sys_base_time == gst_now - gst_base_time */
- GstClockTime sys_base_time = sys_now - (gst_now - gst_base_time);
- GST_BUFFER_PTS(buffer) = fb->metadata().timestamp - sys_base_time;
- gst_libcamera_pad_set_latency(srcpad, sys_now - fb->metadata().timestamp);
+ if (GST_CLOCK_TIME_IS_VALID(wrap->pts_)) {
+ GST_BUFFER_PTS(buffer) = wrap->pts_;
+ gst_libcamera_pad_set_latency(srcpad, wrap->latency_);
} else {
GST_BUFFER_PTS(buffer) = 0;
}
@@ -193,22 +308,72 @@ GstLibcameraSrcState::requestCompleted(Request *request)
GST_BUFFER_OFFSET(buffer) = fb->metadata().sequence;
GST_BUFFER_OFFSET_END(buffer) = fb->metadata().sequence;
- gst_libcamera_pad_queue_buffer(srcpad, buffer);
+ ret = gst_pad_push(srcpad, buffer);
+ ret = gst_flow_combiner_update_pad_flow(src_->flow_combiner,
+ srcpad, ret);
+ }
+
+ switch (ret) {
+ case GST_FLOW_OK:
+ break;
+
+ case GST_FLOW_NOT_NEGOTIATED: {
+ bool reconfigure = false;
+ for (GstPad *srcpad : srcpads_) {
+ if (gst_pad_needs_reconfigure(srcpad)) {
+ reconfigure = true;
+ break;
+ }
+ }
+
+ /* If no pads need a reconfiguration something went wrong. */
+ if (!reconfigure)
+ err = -EPIPE;
+
+ break;
+ }
+
+ case GST_FLOW_EOS: {
+ g_autoptr(GstEvent) eos = gst_event_new_eos();
+ guint32 seqnum = gst_util_seqnum_next();
+ gst_event_set_seqnum(eos, seqnum);
+ for (GstPad *srcpad : srcpads_)
+ gst_pad_push_event(srcpad, gst_event_ref(eos));
+
+ err = -EPIPE;
+ break;
}
- gst_libcamera_resume_task(this->src_->task);
+ case GST_FLOW_FLUSHING:
+ err = -EPIPE;
+ break;
+
+ default:
+ GST_ELEMENT_FLOW_ERROR(src_, ret);
+
+ err = -EPIPE;
+ break;
+ }
+
+ return err;
+}
+
+void GstLibcameraSrcState::clearRequests()
+{
+ GLibLocker locker(&lock_);
+ completedRequests_ = {};
}
static bool
gst_libcamera_src_open(GstLibcameraSrc *self)
{
- std::unique_ptr<CameraManager> cm = std::make_unique<CameraManager>();
+ std::shared_ptr<CameraManager> cm;
std::shared_ptr<Camera> cam;
- gint ret = 0;
+ gint ret;
GST_DEBUG_OBJECT(self, "Opening camera device ...");
- ret = cm->start();
+ cm = gst_libcamera_get_camera_manager(ret);
if (ret) {
GST_ELEMENT_ERROR(self, LIBRARY, INIT,
("Failed listing cameras."),
@@ -224,137 +389,257 @@ gst_libcamera_src_open(GstLibcameraSrc *self)
}
if (camera_name) {
- cam = cm->get(self->camera_name);
+ cam = cm->get(camera_name);
if (!cam) {
GST_ELEMENT_ERROR(self, RESOURCE, NOT_FOUND,
- ("Could not find a camera named '%s'.", self->camera_name),
+ ("Could not find a camera named '%s'.", camera_name),
("libcamera::CameraMananger::get() returned nullptr"));
return false;
}
} else {
- if (cm->cameras().empty()) {
+ auto cameras = cm->cameras();
+ if (cameras.empty()) {
GST_ELEMENT_ERROR(self, RESOURCE, NOT_FOUND,
("Could not find any supported camera on this system."),
("libcamera::CameraMananger::cameras() is empty"));
return false;
}
- cam = cm->cameras()[0];
+ cam = cameras[0];
}
- GST_INFO_OBJECT(self, "Using camera named '%s'", cam->name().c_str());
+ GST_INFO_OBJECT(self, "Using camera '%s'", cam->id().c_str());
ret = cam->acquire();
if (ret) {
GST_ELEMENT_ERROR(self, RESOURCE, BUSY,
- ("Camera name '%s' is already in use.", cam->name().c_str()),
+ ("Camera '%s' is already in use.", cam->id().c_str()),
("libcamera::Camera::acquire() failed: %s", g_strerror(ret)));
return false;
}
+ self->state->controls_.setCamera(cam);
+
cam->requestCompleted.connect(self->state, &GstLibcameraSrcState::requestCompleted);
/* No need to lock here, we didn't start our threads yet. */
- self->state->cm_ = std::move(cm);
+ self->state->cm_ = cm;
self->state->cam_ = cam;
return true;
}
+/* Must be called with stream_lock held. */
+static bool
+gst_libcamera_src_negotiate(GstLibcameraSrc *self)
+{
+ GstLibcameraSrcState *state = self->state;
+ std::vector<GstVideoTransferFunction> transfer(state->srcpads_.size(),
+ GST_VIDEO_TRANSFER_UNKNOWN);
+
+ g_autoptr(GstStructure) element_caps = gst_structure_new_empty("caps");
+
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ StreamConfiguration &stream_cfg = state->config_->at(i);
+
+ /* Retrieve the supported caps. */
+ g_autoptr(GstCaps) filter = gst_libcamera_stream_formats_to_caps(stream_cfg.formats());
+ g_autoptr(GstCaps) caps = gst_pad_peer_query_caps(srcpad, filter);
+ if (gst_caps_is_empty(caps))
+ return false;
+
+ /* Fixate caps and configure the stream. */
+ caps = gst_caps_make_writable(caps);
+ gst_libcamera_configure_stream_from_caps(stream_cfg, caps, &transfer[i]);
+ gst_libcamera_get_framerate_from_caps(caps, element_caps);
+ }
+
+ /* Validate the configuration. */
+ if (state->config_->validate() == CameraConfiguration::Invalid)
+ return false;
+
+ int ret = state->cam_->configure(state->config_.get());
+ if (ret) {
+ GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
+ ("Failed to configure camera: %s", g_strerror(-ret)),
+ ("Camera::configure() failed with error code %i", ret));
+ return false;
+ }
+
+ /* Check frame duration bounds within controls::FrameDurationLimits */
+ gst_libcamera_clamp_and_set_frameduration(state->initControls_,
+ state->cam_->controls(), element_caps);
+
+ /*
+	 * Regardless of whether it has been modified, create clean caps and
+	 * push the caps event. Downstream will decide if the caps are
+	 * acceptable.
+ */
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ const StreamConfiguration &stream_cfg = state->config_->at(i);
+
+ g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg, transfer[i]);
+ gst_libcamera_framerate_to_caps(caps, element_caps);
+
+ if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps)))
+ return false;
+ }
+
+ if (self->allocator)
+ g_clear_object(&self->allocator);
+
+ self->allocator = gst_libcamera_allocator_new(state->cam_, state->config_.get());
+ if (!self->allocator) {
+ GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
+ ("Failed to allocate memory"),
+ ("gst_libcamera_allocator_new() failed."));
+ return false;
+ }
+
+ for (gsize i = 0; i < state->srcpads_.size(); i++) {
+ GstPad *srcpad = state->srcpads_[i];
+ const StreamConfiguration &stream_cfg = state->config_->at(i);
+
+ GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator,
+ stream_cfg.stream());
+ g_signal_connect_swapped(pool, "buffer-notify",
+ G_CALLBACK(gst_task_resume), self->task);
+
+ gst_libcamera_pad_set_pool(srcpad, pool);
+
+ /* Clear all reconfigure flags. */
+ gst_pad_check_reconfigure(srcpad);
+ }
+
+ return true;
+}
+
static void
gst_libcamera_src_task_run(gpointer user_data)
{
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GstLibcameraSrcState *state = self->state;
- Request *request = state->cam_->createRequest();
- auto wrap = std::make_unique<RequestWrap>(request);
- for (GstPad *srcpad : state->srcpads_) {
- GstLibcameraPool *pool = gst_libcamera_pad_get_pool(srcpad);
- GstBuffer *buffer;
- GstFlowReturn ret;
+ /*
+ * Start by pausing the task. The task may also get resumed by the
+ * buffer-notify signal when new buffers are queued back to the pool,
+ * or by the request completion handler when a new request has
+ * completed. Both will resume the task after adding the buffers or
+ * request to their respective lists, which are checked below to decide
+ * if the task needs to be resumed for another iteration. This is thus
+ * guaranteed to be race-free, the lock taken by gst_task_pause() and
+	 * guaranteed to be race-free; the lock taken by gst_task_pause() and
+	 * gst_task_resume() serves as a memory barrier.
+ gst_task_pause(self->task);
- ret = gst_buffer_pool_acquire_buffer(GST_BUFFER_POOL(pool),
- &buffer, nullptr);
- if (ret != GST_FLOW_OK) {
- /*
- * RequestWrap does not take ownership, and we won't be
- * queueing this one due to lack of buffers.
- */
- delete request;
- request = nullptr;
- break;
- }
+ bool doResume = false;
- wrap->attachBuffer(buffer);
- }
+ g_autoptr(GstEvent) event = self->pending_eos.exchange(nullptr);
+ if (event) {
+ for (GstPad *srcpad : state->srcpads_)
+ gst_pad_push_event(srcpad, gst_event_ref(event));
- if (request) {
- GLibLocker lock(GST_OBJECT(self));
- GST_TRACE_OBJECT(self, "Requesting buffers");
- state->cam_->queueRequest(request);
- state->requests_.push(std::move(wrap));
+ return;
}
- GstFlowReturn ret = GST_FLOW_OK;
- gst_flow_combiner_reset(self->flow_combiner);
+ /* Check if a srcpad requested a renegotiation. */
+ bool reconfigure = false;
for (GstPad *srcpad : state->srcpads_) {
- ret = gst_libcamera_pad_push_pending(srcpad);
- ret = gst_flow_combiner_update_pad_flow(self->flow_combiner,
- srcpad, ret);
+ if (gst_pad_check_reconfigure(srcpad)) {
+ /* Check if the caps even need changing. */
+ g_autoptr(GstCaps) caps = gst_pad_get_current_caps(srcpad);
+ if (!gst_pad_peer_query_accept_caps(srcpad, caps)) {
+ reconfigure = true;
+ break;
+ }
+ }
}
- {
- /*
- * Here we need to decide if we want to pause or stop the task. This
- * needs to happen in lock step with the callback thread which may want
- * to resume the task.
- */
- GLibLocker lock(GST_OBJECT(self));
- if (ret != GST_FLOW_OK) {
- if (ret == GST_FLOW_EOS) {
- g_autoptr(GstEvent) eos = gst_event_new_eos();
- guint32 seqnum = gst_util_seqnum_next();
- gst_event_set_seqnum(eos, seqnum);
- for (GstPad *srcpad : state->srcpads_)
- gst_pad_push_event(srcpad, gst_event_ref(eos));
- } else if (ret != GST_FLOW_FLUSHING) {
- GST_ELEMENT_FLOW_ERROR(self, ret);
- }
+ if (reconfigure) {
+ state->cam_->stop();
+ state->clearRequests();
+
+ if (!gst_libcamera_src_negotiate(self)) {
+ GST_ELEMENT_FLOW_ERROR(self, GST_FLOW_NOT_NEGOTIATED);
gst_task_stop(self->task);
- return;
}
- bool do_pause = true;
- for (GstPad *srcpad : state->srcpads_) {
- if (gst_libcamera_pad_has_pending(srcpad)) {
- do_pause = false;
- break;
- }
- }
+ state->cam_->start(&state->initControls_);
+ }
- if (do_pause)
- gst_task_pause(self->task);
+ /*
+ * Create and queue one request. If no buffers are available the
+ * function returns -ENOBUFS, which we ignore here as that's not a
+ * fatal error.
+ */
+ int ret = state->queueRequest();
+ switch (ret) {
+ case 0:
+ /*
+ * The request was successfully queued, there may be enough
+ * buffers to create a new one. Don't pause the task to give it
+ * another try.
+ */
+ doResume = true;
+ break;
+
+ case -ENOMEM:
+ GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
+ ("Failed to allocate request for camera '%s'.",
+ state->cam_->id().c_str()),
+ ("libcamera::Camera::createRequest() failed"));
+ gst_task_stop(self->task);
+ return;
+
+ case -ENOBUFS:
+ default:
+ break;
}
+
+ /*
+ * Process one completed request, if available, and record if further
+ * requests are ready for processing.
+ */
+ ret = state->processRequest();
+ switch (ret) {
+ case 0:
+ /* Another completed request is available, resume the task. */
+ doResume = true;
+ break;
+
+ case -EPIPE:
+ gst_task_stop(self->task);
+ return;
+
+ case -ENOBUFS:
+ default:
+ break;
+ }
+
+ /* Resume the task for another iteration if needed. */
+ if (doResume)
+ gst_task_resume(self->task);
}
static void
-gst_libcamera_src_task_enter(GstTask *task, GThread *thread, gpointer user_data)
+gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread,
+ gpointer user_data)
{
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GLibRecLocker lock(&self->stream_lock);
GstLibcameraSrcState *state = self->state;
- GstFlowReturn flow_ret = GST_FLOW_OK;
gint ret;
GST_DEBUG_OBJECT(self, "Streaming thread has started");
- guint group_id = gst_util_group_id_next();
- StreamRoles roles;
+ gint stream_id_num = 0;
+ std::vector<StreamRole> roles;
for (GstPad *srcpad : state->srcpads_) {
/* Create stream-id and push stream-start. */
- g_autofree gchar *stream_id = gst_pad_create_stream_id(srcpad, GST_ELEMENT(self), nullptr);
+ g_autofree gchar *stream_id_intermediate = g_strdup_printf("%i%i", state->group_id_, stream_id_num++);
+ g_autofree gchar *stream_id = gst_pad_create_stream_id(srcpad, GST_ELEMENT(self), stream_id_intermediate);
GstEvent *event = gst_event_new_stream_start(stream_id);
- gst_event_set_group_id(event, group_id);
+ gst_event_set_group_id(event, state->group_id_);
gst_pad_push_event(srcpad, event);
/* Collect the streams roles for the next iteration. */
@@ -363,90 +648,33 @@ gst_libcamera_src_task_enter(GstTask *task, GThread *thread, gpointer user_data)
/* Generate the stream configurations, there should be one per pad. */
state->config_ = state->cam_->generateConfiguration(roles);
- /*
- * \todo Check if camera may increase or decrease the number of streams
- * regardless of the number of roles.
- */
- g_assert(state->config_->size() == state->srcpads_.size());
-
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- StreamConfiguration &stream_cfg = state->config_->at(i);
-
- /* Retrieve the supported caps. */
- g_autoptr(GstCaps) filter = gst_libcamera_stream_formats_to_caps(stream_cfg.formats());
- g_autoptr(GstCaps) caps = gst_pad_peer_query_caps(srcpad, filter);
- if (gst_caps_is_empty(caps)) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- break;
- }
-
- /* Fixate caps and configure the stream. */
- caps = gst_caps_make_writable(caps);
- gst_libcamera_configure_stream_from_caps(stream_cfg, caps);
- }
-
- if (flow_ret != GST_FLOW_OK)
- goto done;
-
- /* Validate the configuration. */
- if (state->config_->validate() == CameraConfiguration::Invalid) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- goto done;
- }
-
- /*
- * Regardless if it has been modified, create clean caps and push the
- * caps event. Downstream will decide if the caps are acceptable.
- */
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- const StreamConfiguration &stream_cfg = state->config_->at(i);
-
- g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg);
- if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps))) {
- flow_ret = GST_FLOW_NOT_NEGOTIATED;
- break;
- }
-
- /* Send an open segment event with time format. */
- GstSegment segment;
- gst_segment_init(&segment, GST_FORMAT_TIME);
- gst_pad_push_event(srcpad, gst_event_new_segment(&segment));
- }
-
- ret = state->cam_->configure(state->config_.get());
- if (ret) {
+ if (state->config_ == nullptr) {
GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
- ("Failed to configure camera: %s", g_strerror(-ret)),
- ("Camera::configure() failed with error code %i", ret));
+ ("Failed to generate camera configuration from roles"),
+ ("Camera::generateConfiguration() returned nullptr"));
gst_task_stop(task);
return;
}
+ g_assert(state->config_->size() == state->srcpads_.size());
- self->allocator = gst_libcamera_allocator_new(state->cam_);
- if (!self->allocator) {
- GST_ELEMENT_ERROR(self, RESOURCE, NO_SPACE_LEFT,
- ("Failed to allocate memory"),
- ("gst_libcamera_allocator_new() failed."));
+ if (!gst_libcamera_src_negotiate(self)) {
+ state->initControls_.clear();
+ GST_ELEMENT_FLOW_ERROR(self, GST_FLOW_NOT_NEGOTIATED);
gst_task_stop(task);
return;
}
self->flow_combiner = gst_flow_combiner_new();
- for (gsize i = 0; i < state->srcpads_.size(); i++) {
- GstPad *srcpad = state->srcpads_[i];
- const StreamConfiguration &stream_cfg = state->config_->at(i);
- GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator,
- stream_cfg.stream());
- g_signal_connect_swapped(pool, "buffer-notify",
- G_CALLBACK(gst_libcamera_resume_task), task);
-
- gst_libcamera_pad_set_pool(srcpad, pool);
+ for (GstPad *srcpad : state->srcpads_) {
gst_flow_combiner_add_pad(self->flow_combiner, srcpad);
+
+ /* Send an open segment event with time format. */
+ GstSegment segment;
+ gst_segment_init(&segment, GST_FORMAT_TIME);
+ gst_pad_push_event(srcpad, gst_event_new_segment(&segment));
}
- ret = state->cam_->start();
+ ret = state->cam_->start(&state->initControls_);
if (ret) {
GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS,
("Failed to start the camera: %s", g_strerror(-ret)),
@@ -454,20 +682,12 @@ gst_libcamera_src_task_enter(GstTask *task, GThread *thread, gpointer user_data)
gst_task_stop(task);
return;
}
-
-done:
- switch (flow_ret) {
- case GST_FLOW_NOT_NEGOTIATED:
- GST_ELEMENT_FLOW_ERROR(self, flow_ret);
- gst_task_stop(task);
- break;
- default:
- break;
- }
}
static void
-gst_libcamera_src_task_leave(GstTask *task, GThread *thread, gpointer user_data)
+gst_libcamera_src_task_leave([[maybe_unused]] GstTask *task,
+ [[maybe_unused]] GThread *thread,
+ gpointer user_data)
{
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(user_data);
GstLibcameraSrcState *state = self->state;
@@ -475,9 +695,13 @@ gst_libcamera_src_task_leave(GstTask *task, GThread *thread, gpointer user_data)
GST_DEBUG_OBJECT(self, "Streaming thread is about to stop");
state->cam_->stop();
+ state->clearRequests();
- for (GstPad *srcpad : state->srcpads_)
- gst_libcamera_pad_set_pool(srcpad, nullptr);
+ {
+ GLibRecLocker locker(&self->stream_lock);
+ for (GstPad *srcpad : state->srcpads_)
+ gst_libcamera_pad_set_pool(srcpad, nullptr);
+ }
g_clear_object(&self->allocator);
g_clear_pointer(&self->flow_combiner,
@@ -492,15 +716,16 @@ gst_libcamera_src_close(GstLibcameraSrc *self)
GST_DEBUG_OBJECT(self, "Releasing resources");
+ state->config_.reset();
+
ret = state->cam_->release();
if (ret) {
GST_ELEMENT_WARNING(self, RESOURCE, BUSY,
- ("Camera name '%s' is still in use.", state->cam_->name().c_str()),
+ ("Camera '%s' is still in use.", state->cam_->id().c_str()),
("libcamera::Camera.release() failed: %s", g_strerror(-ret)));
}
state->cam_.reset();
- state->cm_->stop();
state->cm_.reset();
}
@@ -510,6 +735,7 @@ gst_libcamera_src_set_property(GObject *object, guint prop_id,
{
GLibLocker lock(GST_OBJECT(object));
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(object);
+ GstLibcameraSrcState *state = self->state;
switch (prop_id) {
case PROP_CAMERA_NAME:
@@ -517,7 +743,8 @@ gst_libcamera_src_set_property(GObject *object, guint prop_id,
self->camera_name = g_value_dup_string(value);
break;
default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ if (!state->controls_.setProperty(prop_id - PROP_LAST, value, pspec))
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
@@ -528,13 +755,15 @@ gst_libcamera_src_get_property(GObject *object, guint prop_id, GValue *value,
{
GLibLocker lock(GST_OBJECT(object));
GstLibcameraSrc *self = GST_LIBCAMERA_SRC(object);
+ GstLibcameraSrcState *state = self->state;
switch (prop_id) {
case PROP_CAMERA_NAME:
g_value_set_string(value, self->camera_name);
break;
default:
- G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
+ if (!state->controls_.getProperty(prop_id - PROP_LAST, value, pspec))
+ G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
break;
}
}
@@ -557,6 +786,7 @@ gst_libcamera_src_change_state(GstElement *element, GstStateChange transition)
break;
case GST_STATE_CHANGE_READY_TO_PAUSED:
		/* This needs to be called after pad activation. */
+ self->state->group_id_ = gst_util_group_id_next();
if (!gst_task_pause(self->task))
return GST_STATE_CHANGE_FAILURE;
ret = GST_STATE_CHANGE_NO_PREROLL;
@@ -587,6 +817,27 @@ gst_libcamera_src_change_state(GstElement *element, GstStateChange transition)
return ret;
}
+static gboolean
+gst_libcamera_src_send_event(GstElement *element, GstEvent *event)
+{
+ GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element);
+ gboolean ret = FALSE;
+
+ switch (GST_EVENT_TYPE(event)) {
+ case GST_EVENT_EOS: {
+ GstEvent *oldEvent = self->pending_eos.exchange(event);
+ gst_clear_event(&oldEvent);
+ ret = TRUE;
+ break;
+ }
+ default:
+ gst_event_unref(event);
+ break;
+ }
+
+ return ret;
+}
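
/*
 * Editor's note, not part of the patch: with the send_event() handler
 * above, an application can request a clean shutdown with
 *
 *	gst_element_send_event(src, gst_event_new_eos());
 *
 * The event is stored in pending_eos and fanned out to every source pad on
 * the next iteration of the streaming task.
 */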
+
static void
gst_libcamera_src_finalize(GObject *object)
{
@@ -595,6 +846,7 @@ gst_libcamera_src_finalize(GObject *object)
g_rec_mutex_clear(&self->stream_lock);
g_clear_object(&self->task);
+ g_mutex_clear(&self->state->lock_);
g_free(self->camera_name);
delete self->state;
@@ -613,14 +865,71 @@ gst_libcamera_src_init(GstLibcameraSrc *self)
gst_task_set_leave_callback(self->task, gst_libcamera_src_task_leave, self, nullptr);
gst_task_set_lock(self->task, &self->stream_lock);
- state->srcpads_.push_back(gst_pad_new_from_template(templ, "src"));
- gst_element_add_pad(GST_ELEMENT(self), state->srcpads_[0]);
+ g_mutex_init(&state->lock_);
+
+ GstPad *pad = gst_pad_new_from_template(templ, "src");
+ state->srcpads_.push_back(pad);
+ gst_element_add_pad(GST_ELEMENT(self), pad);
+ gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
+
+ GST_OBJECT_FLAG_SET(self, GST_ELEMENT_FLAG_SOURCE);
/* C-style friend. */
state->src_ = self;
self->state = state;
}
+static GstPad *
+gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ,
+ const gchar *name, [[maybe_unused]] const GstCaps *caps)
+{
+ GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element);
+ g_autoptr(GstPad) pad = NULL;
+
+ GST_DEBUG_OBJECT(self, "new request pad created");
+
+ pad = gst_pad_new_from_template(templ, name);
+ g_object_ref_sink(pad);
+
+ if (gst_element_add_pad(element, pad)) {
+ GLibRecLocker lock(&self->stream_lock);
+ self->state->srcpads_.push_back(reinterpret_cast<GstPad *>(g_object_ref(pad)));
+ } else {
+ GST_ELEMENT_ERROR(element, STREAM, FAILED,
+ ("Internal data stream error."),
+ ("Could not add pad to element"));
+ return NULL;
+ }
+
+ gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
+
+ return reinterpret_cast<GstPad *>(g_steal_pointer(&pad));
+}
+
+static void
+gst_libcamera_src_release_pad(GstElement *element, GstPad *pad)
+{
+ GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element);
+
+ gst_child_proxy_child_removed(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad));
+
+ GST_DEBUG_OBJECT(self, "Pad %" GST_PTR_FORMAT " being released", pad);
+
+ {
+ GLibRecLocker lock(&self->stream_lock);
+ std::vector<GstPad *> &pads = self->state->srcpads_;
+ auto begin_iterator = pads.begin();
+ auto end_iterator = pads.end();
+ auto pad_iterator = std::find(begin_iterator, end_iterator, pad);
+
+ if (pad_iterator != end_iterator) {
+ g_object_unref(*pad_iterator);
+ pads.erase(pad_iterator);
+ }
+ }
+ gst_element_remove_pad(element, pad);
+}
+
static void
gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
{
@@ -631,12 +940,15 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
object_class->get_property = gst_libcamera_src_get_property;
object_class->finalize = gst_libcamera_src_finalize;
+ element_class->request_new_pad = gst_libcamera_src_request_new_pad;
+ element_class->release_pad = gst_libcamera_src_release_pad;
element_class->change_state = gst_libcamera_src_change_state;
+ element_class->send_event = gst_libcamera_src_send_event;
gst_element_class_set_metadata(element_class,
"libcamera Source", "Source/Video",
"Linux Camera source using libcamera",
- "Nicolas Dufresne <nicolas.dufresne@collabora.com");
+ "Nicolas Dufresne <nicolas.dufresne@collabora.com>");
gst_element_class_add_static_pad_template_with_gtype(element_class,
&src_template,
GST_TYPE_LIBCAMERA_PAD);
@@ -651,4 +963,36 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass)
| G_PARAM_READWRITE
| G_PARAM_STATIC_STRINGS));
g_object_class_install_property(object_class, PROP_CAMERA_NAME, spec);
+
+ GstCameraControls::installProperties(object_class, PROP_LAST);
+}
+
+/* GstChildProxy implementation */
+static GObject *
+gst_libcamera_src_child_proxy_get_child_by_index(GstChildProxy *child_proxy,
+ guint index)
+{
+ GLibLocker lock(GST_OBJECT(child_proxy));
+ GObject *obj = nullptr;
+
+ obj = reinterpret_cast<GObject *>(g_list_nth_data(GST_ELEMENT(child_proxy)->srcpads, index));
+ if (obj)
+ gst_object_ref(obj);
+
+ return obj;
+}
+
+static guint
+gst_libcamera_src_child_proxy_get_children_count(GstChildProxy *child_proxy)
+{
+ GLibLocker lock(GST_OBJECT(child_proxy));
+ return GST_ELEMENT_CAST(child_proxy)->numsrcpads;
+}
+
+static void
+gst_libcamera_src_child_proxy_init(gpointer g_iface, [[maybe_unused]] gpointer iface_data)
+{
+ GstChildProxyInterface *iface = reinterpret_cast<GstChildProxyInterface *>(g_iface);
+ iface->get_child_by_index = gst_libcamera_src_child_proxy_get_child_by_index;
+ iface->get_children_count = gst_libcamera_src_child_proxy_get_children_count;
}
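
Editor's note, not part of the patch: a quick sketch of how the new request
pads and the GstChildProxy interface could be exercised from application
code. The request pad template name "src_%u" is an assumption (check
gst-inspect-1.0 libcamerasrc), and gst_element_request_pad_simple() needs
GStreamer >= 1.20; older versions use gst_element_get_request_pad().

	#include <gst/gst.h>

	static void
	list_libcamerasrc_pads(void)
	{
		/* Assumes gst_init() has already been called. */
		GstElement *src = gst_element_factory_make("libcamerasrc", NULL);
		GstPad *extra = gst_element_request_pad_simple(src, "src_%u");

		/* Pads are exposed as children through GstChildProxy. */
		guint count = gst_child_proxy_get_children_count(GST_CHILD_PROXY(src));
		for (guint i = 0; i < count; i++) {
			GObject *child = gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(src), i);
			g_print("pad: %s\n", GST_OBJECT_NAME(child));
			g_object_unref(child);
		}

		gst_element_release_request_pad(src, extra);
		gst_object_unref(extra);
		gst_object_unref(src);
	}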
diff --git a/src/gstreamer/gstlibcamerasrc.h b/src/gstreamer/gstlibcamerasrc.h
index 0144cbc4..a27db9ca 100644
--- a/src/gstreamer/gstlibcamerasrc.h
+++ b/src/gstreamer/gstlibcamerasrc.h
@@ -3,11 +3,10 @@
* Copyright (C) 2019, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
- * gstlibcamerasrc.h - GStreamer Capture Element
+ * GStreamer Capture Element
*/
-#ifndef __GST_LIBCAMERA_SRC_H__
-#define __GST_LIBCAMERA_SRC_H__
+#pragma once
#include <gst/gst.h>
@@ -18,5 +17,3 @@ G_DECLARE_FINAL_TYPE(GstLibcameraSrc, gst_libcamera_src,
GST_LIBCAMERA, SRC, GstElement)
G_END_DECLS
-
-#endif /* __GST_LIBCAMERA_SRC_H__ */
diff --git a/src/gstreamer/meson.build b/src/gstreamer/meson.build
index e119e472..6b7e53b5 100644
--- a/src/gstreamer/meson.build
+++ b/src/gstreamer/meson.build
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: CC0-1.0
+
+glib_dep = dependency('glib-2.0', required : get_option('gstreamer'))
+
+gst_dep_version = '>=1.14.0'
+gstvideo_dep = dependency('gstreamer-video-1.0', version : gst_dep_version,
+ required : get_option('gstreamer'))
+gstallocator_dep = dependency('gstreamer-allocators-1.0', version : gst_dep_version,
+ required : get_option('gstreamer'))
+
+if not glib_dep.found() or not gstvideo_dep.found() or not gstallocator_dep.found()
+ gst_enabled = false
+ subdir_done()
+endif
+
+gst_enabled = true
+
libcamera_gst_sources = [
'gstlibcamera-utils.cpp',
'gstlibcamera.cpp',
@@ -8,33 +25,46 @@ libcamera_gst_sources = [
'gstlibcamerasrc.cpp',
]
+# Generate gstreamer control properties
+
+gen_gst_controls_template = files('gstlibcamera-controls.cpp.in')
+libcamera_gst_sources += custom_target('gstlibcamera-controls.cpp',
+ input : controls_files,
+ output : 'gstlibcamera-controls.cpp',
+ command : [gen_gst_controls, '-o', '@OUTPUT@',
+ '-t', gen_gst_controls_template, '@INPUT@'],
+ env : py_build_env)
+
libcamera_gst_cpp_args = [
'-DVERSION="@0@"'.format(libcamera_git_version),
'-DPACKAGE="@0@"'.format(meson.project_name()),
+ '-DGLIB_VERSION_MIN_REQUIRED=GLIB_VERSION_2_40',
]
-glib_dep = dependency('glib-2.0', required : get_option('gstreamer'))
+# The G_DECLARE_FINAL_TYPE macro creates static inline functions that were
+# not marked as possibly unused prior to GLib v2.63.0. This causes clang to
+# complain about the ones we are not using. Silence the -Wunused-function
+# warning in that case.
+if cc.get_id() == 'clang' and glib_dep.version().version_compare('<2.63.0')
+ libcamera_gst_cpp_args += ['-Wno-unused-function']
+endif
-gst_dep_version = '>=1.14.0'
-gstvideo_dep = dependency('gstreamer-video-1.0', version : gst_dep_version,
- required : get_option('gstreamer'))
-gstallocator_dep = dependency('gstreamer-allocators-1.0', version : gst_dep_version,
- required : get_option('gstreamer'))
+libcamera_gst = shared_library('gstlibcamera',
+ libcamera_gst_sources,
+ cpp_args : libcamera_gst_cpp_args,
+ dependencies : [libcamera_public, gstvideo_dep, gstallocator_dep],
+ install : true,
+ install_dir : '@0@/gstreamer-1.0'.format(get_option('libdir')),
+)
-if glib_dep.found() and gstvideo_dep.found() and gstallocator_dep.found()
- # The G_DECLARE_FINAL_TYPE macro creates static inline functions that were
- # not marked as possibly unused prior to GLib v2.63.0. This causes clang to
- # complain about the ones we are not using. Silence the -Wunused-function
- # warning in that case.
- if cc.get_id() == 'clang' and glib_dep.version().version_compare('<2.63.0')
- libcamera_gst_cpp_args += [ '-Wno-unused-function' ]
- endif
-
- libcamera_gst = shared_library('gstlibcamera',
- libcamera_gst_sources,
- cpp_args : libcamera_gst_cpp_args,
- dependencies : [libcamera_dep, gstvideo_dep, gstallocator_dep],
- install: true,
- install_dir : '@0@/gstreamer-1.0'.format(get_option('libdir')),
- )
-endif
+# Make the plugin visible to GStreamer inside meson devenv.
+fs = import('fs')
+gst_plugin_path = fs.parent(libcamera_gst.full_path())
+
+gst_env = environment()
+gst_env.prepend('GST_PLUGIN_PATH', gst_plugin_path)
+
+# Avoid polluting the system registry.
+gst_env.set('GST_REGISTRY', gst_plugin_path / 'registry.data')
+
+meson.add_devenv(gst_env)
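
Editor's note, not part of the patch: with the devenv wiring above, the
freshly built plugin can be inspected without installing it (assuming a
build directory named 'build'):

	$ meson setup build
	$ meson compile -C build
	$ meson devenv -C build gst-inspect-1.0 libcamerasrc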
diff --git a/src/ipa/ipa-sign-install.sh b/src/ipa/ipa-sign-install.sh
new file mode 100755
index 00000000..71696d5a
--- /dev/null
+++ b/src/ipa/ipa-sign-install.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2020, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Regenerate IPA module signatures when installing
+
+key=$1
+shift
+modules=$*
+
+ipa_sign=$(dirname "$0")/ipa-sign.sh
+
+echo "Regenerating IPA modules signatures"
+
+for module in ${modules} ; do
+ module="${MESON_INSTALL_DESTDIR_PREFIX}/${module}"
+ if [ -f "${module}" ] ; then
+ "${ipa_sign}" "${key}" "${module}" "${module}.sign"
+ fi
+done
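
Editor's note, not part of the patch: a script like this is hooked up as a
Meson install script, which is what sets MESON_INSTALL_DESTDIR_PREFIX. A
hypothetical registration, with illustrative variable names, would look
like:

	# In some meson.build (illustrative only).
	meson.add_install_script('src/ipa/ipa-sign-install.sh',
	                         ipa_priv_key, ipa_module_paths)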
diff --git a/src/ipa/ipa-sign.sh b/src/ipa/ipa-sign.sh
new file mode 100755
index 00000000..69024213
--- /dev/null
+++ b/src/ipa/ipa-sign.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2020, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Generate a signature for an IPA module
+
+key="$1"
+input="$2"
+output="$3"
+
+openssl dgst -sha256 -sign "${key}" -out "${output}" "${input}"
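
Editor's note, not part of the patch: the matching verification step uses
the public half of the key. For an RSA key (file names illustrative):

	$ openssl rsa -in private_key.pem -pubout -out public_key.pem
	$ openssl dgst -sha256 -verify public_key.pem \
	      -signature module.so.sign module.so
	Verified OK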
diff --git a/src/ipa/ipu3/algorithms/af.cpp b/src/ipa/ipu3/algorithms/af.cpp
new file mode 100644
index 00000000..cf68fb59
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/af.cpp
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Red Hat
+ *
+ * IPU3 auto focus algorithm
+ */
+
+#include "af.h"
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/videodev2.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+/**
+ * \file af.h
+ */
+
+/*
+ * Static variables from ChromiumOS Intel Camera HAL and ia_imaging library:
+ * - https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/hal/intel/psl/ipu3/statsConverter/ipu3-stats.h
+ * - https://chromium.googlesource.com/chromiumos/platform/camera/+/refs/heads/main/hal/intel/ipu3/include/ia_imaging/af_public.h
+ */
+
+/** The minimum horizontal grid dimension. */
+static constexpr uint8_t kAfMinGridWidth = 16;
+/** The minimum vertical grid dimension. */
+static constexpr uint8_t kAfMinGridHeight = 16;
+/** The maximum horizontal grid dimension. */
+static constexpr uint8_t kAfMaxGridWidth = 32;
+/** The maximum vertical grid dimension. */
+static constexpr uint8_t kAfMaxGridHeight = 24;
+/** The minimum value of Log2 of the width of the grid cell. */
+static constexpr uint16_t kAfMinGridBlockWidth = 4;
+/** The minimum value of Log2 of the height of the grid cell. */
+static constexpr uint16_t kAfMinGridBlockHeight = 3;
+/** The maximum value of Log2 of the width of the grid cell. */
+static constexpr uint16_t kAfMaxGridBlockWidth = 6;
+/** The maximum value of Log2 of the height of the grid cell. */
+static constexpr uint16_t kAfMaxGridBlockHeight = 6;
+/** The number of blocks in vertical axis per slice. */
+static constexpr uint16_t kAfDefaultHeightPerSlice = 2;
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::ipu3::algorithms {
+
+LOG_DEFINE_CATEGORY(IPU3Af)
+
+/**
+ * Maximum focus steps of the VCM control
+ * \todo should be obtained from the VCM driver
+ */
+static constexpr uint32_t kMaxFocusSteps = 1023;
+
+/* Minimum focus step for searching appropriate focus */
+static constexpr uint32_t kCoarseSearchStep = 30;
+static constexpr uint32_t kFineSearchStep = 1;
+
+/* Max ratio of variance change, 0.0 < kMaxChange < 1.0 */
+static constexpr double kMaxChange = 0.5;
+
+/* The number of frames to be ignored before performing the focus scan. */
+static constexpr uint32_t kIgnoreFrame = 10;
+
+/* Fine scan range 0 < kFineRange < 1 */
+static constexpr double kFineRange = 0.05;
+
+/* Settings for IPU3 AF filter */
+static struct ipu3_uapi_af_filter_config afFilterConfigDefault = {
+ .y1_coeff_0 = { 0, 1, 3, 7 },
+ .y1_coeff_1 = { 11, 13, 1, 2 },
+ .y1_coeff_2 = { 8, 19, 34, 242 },
+ .y1_sign_vec = 0x7fdffbfe,
+ .y2_coeff_0 = { 0, 1, 6, 6 },
+ .y2_coeff_1 = { 13, 25, 3, 0 },
+ .y2_coeff_2 = { 25, 3, 177, 254 },
+ .y2_sign_vec = 0x4e53ca72,
+ .y_calc = { 8, 8, 8, 8 },
+ .nf = { 0, 9, 0, 9, 0 },
+};
+
+/**
+ * \class Af
+ * \brief An auto-focus algorithm based on IPU3 statistics
+ *
+ * This algorithm determines the lens position that produces a focused
+ * image. The IPU3 AF processing block computes statistics composed of two
+ * types of filtered values and stores them in an AF buffer. A sharp image
+ * typically has a higher contrast than a blurred one, so the lens position
+ * that yields the highest contrast found during the scan corresponds to
+ * the sharpest image.
+ */
+Af::Af()
+ : focus_(0), bestFocus_(0), currentVariance_(0.0), previousVariance_(0.0),
+ coarseCompleted_(false), fineCompleted_(false)
+{
+}
+
+/**
+ * \brief Configure the Af given a configInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data
+ * \return 0 on success, a negative error code otherwise
+ */
+int Af::configure(IPAContext &context, const IPAConfigInfo &configInfo)
+{
+ struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
+ grid.width = kAfMinGridWidth;
+ grid.height = kAfMinGridHeight;
+ grid.block_width_log2 = kAfMinGridBlockWidth;
+ grid.block_height_log2 = kAfMinGridBlockHeight;
+
+ /*
+ * \todo - while this clamping code is effectively a no-op, it satisfies
+ * the compiler that the constant definitions of the hardware limits
+ * are used, and paves the way to support dynamic grid sizing in the
+ * future. While the block_{width,height}_log2 remain assigned to the
+ * minimum, this code should be optimized out by the compiler.
+ */
+ grid.width = std::clamp(grid.width, kAfMinGridWidth, kAfMaxGridWidth);
+ grid.height = std::clamp(grid.height, kAfMinGridHeight, kAfMaxGridHeight);
+
+ grid.block_width_log2 = std::clamp(grid.block_width_log2,
+ kAfMinGridBlockWidth,
+ kAfMaxGridBlockWidth);
+
+ grid.block_height_log2 = std::clamp(grid.block_height_log2,
+ kAfMinGridBlockHeight,
+ kAfMaxGridBlockHeight);
+
+ grid.height_per_slice = kAfDefaultHeightPerSlice;
+
+ /* Position the AF grid in the center of the BDS output. */
+ Rectangle bds(configInfo.bdsOutputSize);
+ Size gridSize(grid.width << grid.block_width_log2,
+ grid.height << grid.block_height_log2);
+
+ /*
+ * \todo - Support request metadata
+ * - Set the ROI based on any input controls in the request
+ * - Return the AF ROI as metadata in the Request
+ */
+ Rectangle roi = gridSize.centeredTo(bds.center());
+ Point start = roi.topLeft();
+
+ /* x_start and y_start should be even */
+ grid.x_start = utils::alignDown(start.x, 2);
+ grid.y_start = utils::alignDown(start.y, 2);
+ grid.y_start |= IPU3_UAPI_GRID_Y_START_EN;
+
+ /* Initial max focus step */
+ maxStep_ = kMaxFocusSteps;
+
+ /* Initial frame ignore counter */
+ afIgnoreFrameReset();
+
+ /* Initial focus value */
+ context.activeState.af.focus = 0;
+ /* Maximum variance of the AF statistics */
+ context.activeState.af.maxVariance = 0;
+	/* The stable AF value flag. If true, the AF is in a stable state. */
+ context.activeState.af.stable = false;
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Af::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ const struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
+ params->acc_param.af.grid_cfg = grid;
+ params->acc_param.af.filter_config = afFilterConfigDefault;
+
+ /* Enable AF processing block */
+ params->use.acc_af = 1;
+}
+
+/**
+ * \brief AF coarse scan
+ * \param[in] context The shared IPA context
+ *
+ * Find a near focused image using a coarse step. The step is determined by
+ * kCoarseSearchStep.
+ */
+void Af::afCoarseScan(IPAContext &context)
+{
+ if (coarseCompleted_)
+ return;
+
+ if (afNeedIgnoreFrame())
+ return;
+
+ if (afScan(context, kCoarseSearchStep)) {
+ coarseCompleted_ = true;
+ context.activeState.af.maxVariance = 0;
+ focus_ = context.activeState.af.focus -
+ (context.activeState.af.focus * kFineRange);
+ context.activeState.af.focus = focus_;
+ previousVariance_ = 0;
+ maxStep_ = std::clamp(focus_ + static_cast<uint32_t>((focus_ * kFineRange)),
+ 0U, kMaxFocusSteps);
+ }
+}
+
+/**
+ * \brief AF fine scan
+ * \param[in] context The shared IPA context
+ *
+ * Find the optimum lens position, moving one step per iteration.
+ */
+void Af::afFineScan(IPAContext &context)
+{
+ if (!coarseCompleted_)
+ return;
+
+ if (afNeedIgnoreFrame())
+ return;
+
+ if (afScan(context, kFineSearchStep)) {
+ context.activeState.af.stable = true;
+ fineCompleted_ = true;
+ }
+}
+
+/**
+ * \brief AF reset
+ * \param[in] context The shared IPA context
+ *
+ * Reset all parameters to restart the AF process.
+ */
+void Af::afReset(IPAContext &context)
+{
+ if (afNeedIgnoreFrame())
+ return;
+
+ context.activeState.af.maxVariance = 0;
+ context.activeState.af.focus = 0;
+ focus_ = 0;
+ context.activeState.af.stable = false;
+ ignoreCounter_ = kIgnoreFrame;
+ previousVariance_ = 0.0;
+ coarseCompleted_ = false;
+ fineCompleted_ = false;
+ maxStep_ = kMaxFocusSteps;
+}
+
+/**
+ * \brief AF variance comparison
+ * \param[in] context The IPA context
+ * \param[in] min_step The VCM movement step
+ *
+ * We always keep the largest variance to replace the previous one, as an
+ * image with a larger variance is sharper than the previous one. If a
+ * negative derivative is found, the peak has been passed and we return
+ * immediately.
+ *
+ * \return True if an optimal focus position is found
+ */
+bool Af::afScan(IPAContext &context, int min_step)
+{
+ if (focus_ > maxStep_) {
+		/* If the max step is reached, move the lens to the best position. */
+ context.activeState.af.focus = bestFocus_;
+ return true;
+ } else {
+ /*
+ * Find the maximum of the variance by estimating its
+ * derivative. If the direction changes, it means we have
+ * passed a maximum one step before.
+ */
+ if ((currentVariance_ - context.activeState.af.maxVariance) >=
+ -(context.activeState.af.maxVariance * 0.1)) {
+ /*
+ * Positive and zero derivative:
+ * The variance is still increasing. The focus could be
+ * increased for the next comparison. Also, the max variance
+ * and previous focus value are updated.
+ */
+ bestFocus_ = focus_;
+ focus_ += min_step;
+ context.activeState.af.focus = focus_;
+ context.activeState.af.maxVariance = currentVariance_;
+ } else {
+ /*
+ * Negative derivative:
+ * The variance starts to decrease which means the maximum
+ * variance is found. Set focus step to previous good one
+ * then return immediately.
+ */
+ context.activeState.af.focus = bestFocus_;
+ return true;
+ }
+ }
+
+ previousVariance_ = currentVariance_;
+ LOG(IPU3Af, Debug) << " Previous step is "
+ << bestFocus_
+ << " Current step is "
+ << focus_;
+ return false;
+}
+
+/**
+ * \brief Determine the frame to be ignored
+ * \return True if the frame should be ignored, false otherwise
+ */
+bool Af::afNeedIgnoreFrame()
+{
+ if (ignoreCounter_ == 0)
+ return false;
+ else
+ ignoreCounter_--;
+ return true;
+}
+
+/**
+ * \brief Reset frame ignore counter
+ */
+void Af::afIgnoreFrameReset()
+{
+ ignoreCounter_ = kIgnoreFrame;
+}
+
+/**
+ * \brief Estimate variance
+ * \param[in] y_items The AF filter data set from the IPU3 statistics buffer
+ * \param[in] isY1 Selects between filter Y1 or Y2 to calculate the variance
+ *
+ * Calculate the mean of the data set provided by \a y_item, and then calculate
+ * the variance of that data set from the mean.
+ *
+ * The operation can work on one of two sets of values contained within the
+ * y_item data set supplied by the IPU3. The two data sets are the results of
+ * both the Y1 and Y2 filters which are used to support coarse (Y1) and fine
+ * (Y2) calculations of the contrast.
+ *
+ * \return The variance of the values in the data set \a y_item selected by \a isY1
+ */
+double Af::afEstimateVariance(Span<const y_table_item_t> y_items, bool isY1)
+{
+ uint32_t total = 0;
+ double mean;
+ double var_sum = 0;
+
+ for (auto y : y_items)
+ total += isY1 ? y.y1_avg : y.y2_avg;
+
+	mean = static_cast<double>(total) / y_items.size();
+
+ for (auto y : y_items) {
+ double avg = isY1 ? y.y1_avg : y.y2_avg;
+ var_sum += pow(avg - mean, 2);
+ }
+
+ return var_sum / y_items.size();
+}
+
+/**
+ * \brief Determine out-of-focus situation
+ * \param[in] context The IPA context
+ *
+ * Out of focus means that the rate of change between the maximum (focused)
+ * variance and the current variance exceeds a threshold.
+ *
+ * \return True if the variance threshold is crossed indicating lost focus,
+ * false otherwise
+ */
+bool Af::afIsOutOfFocus(IPAContext &context)
+{
+ const uint32_t diff_var = std::abs(currentVariance_ -
+ context.activeState.af.maxVariance);
+ const double var_ratio = diff_var / context.activeState.af.maxVariance;
+
+ LOG(IPU3Af, Debug) << "Variance change rate: "
+ << var_ratio
+ << " Current VCM step: "
+ << context.activeState.af.focus;
+
+	return var_ratio > kMaxChange;
+}
+
+/**
+ * \brief Determine the max contrast image and lens position
+ * \param[in] context The IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The current frame context
+ * \param[in] stats The statistics buffer of IPU3
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * Ideally, a sharp image also has a relatively high contrast, so the image
+ * produced at each focus step is evaluated to find the optimal focus step.
+ *
+ * The Hill Climbing Algorithm[1] is used to find the maximum variance of the
+ * AF statistics, which are the AF output of the IPU3. The focus step is
+ * increased, then the variance of the AF statistics is estimated. If a
+ * negative derivative is found, the peak has just been passed and the best
+ * focus has been found.
+ *
+ * [1] Hill Climbing Algorithm, https://en.wikipedia.org/wiki/Hill_climbing
+ */
+void Af::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ /* Evaluate the AF buffer length */
+ uint32_t afRawBufferLen = context.configuration.af.afGrid.width *
+ context.configuration.af.afGrid.height;
+
+ ASSERT(afRawBufferLen < IPU3_UAPI_AF_Y_TABLE_MAX_SIZE);
+
+ Span<const y_table_item_t> y_items(reinterpret_cast<const y_table_item_t *>(&stats->af_raw_buffer.y_table),
+ afRawBufferLen);
+
+ /*
+ * Calculate the mean and the variance of AF statistics for a given grid.
+ * For coarse: y1 are used.
+ * For fine: y2 results are used.
+ */
+ currentVariance_ = afEstimateVariance(y_items, !coarseCompleted_);
+
+ if (!context.activeState.af.stable) {
+ afCoarseScan(context);
+ afFineScan(context);
+ } else {
+ if (afIsOutOfFocus(context))
+ afReset(context);
+ else
+ afIgnoreFrameReset();
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Af, "Af")
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
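
Editor's note, not part of the patch: the scan logic above reduces to a
textbook hill climb. A self-contained sketch with hypothetical helper
names, where measure() stands in for a contrast metric such as
afEstimateVariance():

	#include <cstdint>

	/*
	 * Step the lens while the measured variance keeps increasing; one
	 * step past the peak, settle on the best position seen so far.
	 */
	uint32_t hillClimb(double (*measure)(uint32_t), uint32_t maxStep,
			   uint32_t step)
	{
		uint32_t bestPos = 0;
		double bestVar = 0.0;

		for (uint32_t pos = 0; pos <= maxStep; pos += step) {
			double var = measure(pos);
			if (var < bestVar)
				break; /* Negative derivative: peak passed. */

			bestVar = var;
			bestPos = pos;
		}

		return bestPos;
	}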
diff --git a/src/ipa/ipu3/algorithms/af.h b/src/ipa/ipu3/algorithms/af.h
new file mode 100644
index 00000000..68126d46
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/af.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Red Hat
+ *
+ * IPU3 Af algorithm
+ */
+
+#pragma once
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/geometry.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+class Af : public Algorithm
+{
+ /* The format of y_table. From ipu3-ipa repo */
+ typedef struct __attribute__((packed)) y_table_item {
+ uint16_t y1_avg;
+ uint16_t y2_avg;
+ } y_table_item_t;
+public:
+ Af();
+ ~Af() = default;
+
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
+
+private:
+ void afCoarseScan(IPAContext &context);
+ void afFineScan(IPAContext &context);
+ bool afScan(IPAContext &context, int min_step);
+ void afReset(IPAContext &context);
+ bool afNeedIgnoreFrame();
+ void afIgnoreFrameReset();
+ double afEstimateVariance(Span<const y_table_item_t> y_items, bool isY1);
+
+ bool afIsOutOfFocus(IPAContext &context);
+
+ /* VCM step configuration. It is the current setting of the VCM step. */
+ uint32_t focus_;
+ /* The best VCM step. It is a local optimum VCM step during scanning. */
+ uint32_t bestFocus_;
+ /* Current AF statistic variance. */
+ double currentVariance_;
+	/* The number of frames to ignore before starting to measure. */
+ uint32_t ignoreCounter_;
+ /* It is used to determine the derivative during scanning */
+ double previousVariance_;
+ /* The designated maximum range of focus scanning. */
+ uint32_t maxStep_;
+ /* If the coarse scan completes, it is set to true. */
+ bool coarseCompleted_;
+ /* If the fine scan completes, it is set to true. */
+ bool fineCompleted_;
+};
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp
new file mode 100644
index 00000000..39d0aebb
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/agc.cpp
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * AGC/AEC mean-based control algorithm
+ */
+
+#include "agc.h"
+
+#include <algorithm>
+#include <chrono>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libipa/colours.h"
+#include "libipa/histogram.h"
+
+/**
+ * \file agc.h
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::ipu3::algorithms {
+
+/**
+ * \class Agc
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates an exposure time and an analogue gain so that the
+ * average value of the green channel of the brightest 2% of pixels approaches
+ * 0.5. The AWB gains are not used here, and all cells in the grid have the same
+ * weight, like an average-metering case. In this metering mode, the camera uses
+ * light information from the entire scene and creates an average for the final
+ * exposure setting, giving no weighting to any particular portion of the
+ * metered area.
+ *
+ * Reference: Battiato, Messina & Castorina. (2008). Exposure
+ * Correction for Imaging Devices: An Overview. 10.1201/9781420054538.ch12.
+ */
+
+LOG_DEFINE_CATEGORY(IPU3Agc)
+
+/* Minimum limit for analogue gain value */
+static constexpr double kMinAnalogueGain = 1.0;
+
+/* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */
+static constexpr utils::Duration kMaxExposureTime = 60ms;
+
+/* Histogram constants */
+static constexpr uint32_t knumHistogramBins = 256;
+
+Agc::Agc()
+ : minExposureTime_(0s), maxExposureTime_(0s)
+{
+}
+
+/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
+ *
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
+ *
+ * \return 0 on success or errors from the base class
+ */
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
+{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap.merge(controls());
+
+ return 0;
+}
+
+/**
+ * \brief Configure the AGC given a configInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data
+ *
+ * \return 0
+ */
+int Agc::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ const IPASessionConfiguration &configuration = context.configuration;
+ IPAActiveState &activeState = context.activeState;
+
+ stride_ = configuration.grid.stride;
+ bdsGrid_ = configuration.grid.bdsGrid;
+
+ minExposureTime_ = configuration.agc.minExposureTime;
+ maxExposureTime_ = std::min(configuration.agc.maxExposureTime,
+ kMaxExposureTime);
+
+ minAnalogueGain_ = std::max(configuration.agc.minAnalogueGain, kMinAnalogueGain);
+ maxAnalogueGain_ = configuration.agc.maxAnalogueGain;
+
+ /* Configure the default exposure and gain. */
+ activeState.agc.gain = minAnalogueGain_;
+ activeState.agc.exposure = 10ms / configuration.sensor.lineDuration;
+
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
+
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(minExposureTime_, maxExposureTime_, minAnalogueGain_,
+ maxAnalogueGain_);
+ resetFrameCount();
+
+ return 0;
+}
+
+Histogram Agc::parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid)
+{
+ uint32_t hist[knumHistogramBins] = { 0 };
+
+ rgbTriples_.clear();
+
+ for (unsigned int cellY = 0; cellY < grid.height; cellY++) {
+ for (unsigned int cellX = 0; cellX < grid.width; cellX++) {
+ uint32_t cellPosition = cellY * stride_ + cellX;
+
+ const ipu3_uapi_awb_set_item *cell =
+ reinterpret_cast<const ipu3_uapi_awb_set_item *>(
+ &stats->awb_raw_buffer.meta_data[cellPosition]);
+
+ rgbTriples_.push_back({
+ cell->R_avg,
+ (cell->Gr_avg + cell->Gb_avg) / 2,
+ cell->B_avg
+ });
+
+ /*
+ * Store the average green value to estimate the
+ * brightness. Even the overexposed pixels are
+ * taken into account.
+ */
+ hist[(cell->Gr_avg + cell->Gb_avg) / 2]++;
+ }
+ }
+
+ return Histogram(Span<uint32_t>(hist));
+}
+
+/**
+ * \brief Estimate the relative luminance of the frame with a given gain
+ * \param[in] gain The gain to apply in estimating luminance
+ *
+ * The estimation is based on the AWB statistics for the current frame. Red,
+ * green and blue averages for all cells are first multiplied by the gain, and
+ * then saturated to approximate the sensor behaviour at high brightness
+ * values. The approximation is quite rough, as it doesn't take into account
+ * non-linearities when approaching saturation.
+ *
+ * The relative luminance (Y) is computed from the linear RGB components using
+ * the Rec. 601 formula. The values are normalized to the [0.0, 1.0] range,
+ * where 1.0 corresponds to a theoretical perfect reflector of 100% reference
+ * white.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Relative_luminance
+ *
+ * \return The relative luminance of the frame
+ */
+double Agc::estimateLuminance(double gain) const
+{
+ RGB<double> sum{ 0.0 };
+
+ for (unsigned int i = 0; i < rgbTriples_.size(); i++) {
+ sum.r() += std::min(std::get<0>(rgbTriples_[i]) * gain, 255.0);
+ sum.g() += std::min(std::get<1>(rgbTriples_[i]) * gain, 255.0);
+ sum.b() += std::min(std::get<2>(rgbTriples_[i]) * gain, 255.0);
+ }
+
+ RGB<double> gains{{ rGain_, gGain_, bGain_ }};
+ double ySum = rec601LuminanceFromRGB(sum * gains);
+ return ySum / (bdsGrid_.height * bdsGrid_.width) / 255;
+}
+
+/**
+ * \brief Process IPU3 statistics, and run AGC operations
+ * \param[in] context The shared IPA context
+ * \param[in] frame The current frame sequence number
+ * \param[in] frameContext The current frame context
+ * \param[in] stats The IPU3 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * Identify the current image brightness, and use that to estimate the optimal
+ * new exposure and gain for the scene.
+ */
+void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata)
+{
+ Histogram hist = parseStatistics(stats, context.configuration.grid.bdsGrid);
+ rGain_ = context.activeState.awb.gains.red;
+	gGain_ = context.activeState.awb.gains.green;
+	bGain_ = context.activeState.awb.gains.blue;
+
+ /*
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
+ */
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
+
+ utils::Duration newExposureTime;
+ double aGain, dGain;
+ std::tie(newExposureTime, aGain, dGain) =
+ calculateNewEv(context.activeState.agc.constraintMode,
+ context.activeState.agc.exposureMode, hist,
+ effectiveExposureValue);
+
+ LOG(IPU3Agc, Debug)
+ << "Divided up exposure time, analogue gain and digital gain are "
+ << newExposureTime << ", " << aGain << " and " << dGain;
+
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure time and gain. */
+ activeState.agc.exposure = newExposureTime / context.configuration.sensor.lineDuration;
+ activeState.agc.gain = aGain;
+
+ metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
+ metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+
+ /* \todo Use VBlank value calculated from each frame exposure. */
+ uint32_t vTotal = context.configuration.sensor.size.height
+ + context.configuration.sensor.defVBlank;
+ utils::Duration frameDuration = context.configuration.sensor.lineDuration
+ * vTotal;
+ metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
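
Editor's note, not part of the patch: the luminance estimation described
above is a gain-scaled, saturated Rec. 601 weighted sum. A stripped-down
sketch with standard Rec. 601 coefficients (helper name hypothetical):

	#include <algorithm>

	/*
	 * Relative luminance of one cell's RGB averages after applying a
	 * gain, normalised so that a saturated (255) white cell yields 1.0.
	 */
	double cellLuminance(double r, double g, double b, double gain)
	{
		r = std::min(r * gain, 255.0);
		g = std::min(g * gain, 255.0);
		b = std::min(b * gain, 255.0);

		return (0.299 * r + 0.587 * g + 0.114 * b) / 255.0;
	}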
diff --git a/src/ipa/ipu3/algorithms/agc.h b/src/ipa/ipu3/algorithms/agc.h
new file mode 100644
index 00000000..890c271b
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/agc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * IPU3 AGC/AEC mean-based control algorithm
+ */
+
+#pragma once
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/geometry.h>
+
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+struct IPACameraSensorInfo;
+
+namespace ipa::ipu3::algorithms {
+
+class Agc : public Algorithm, public AgcMeanLuminance
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
+
+private:
+ double estimateLuminance(double gain) const override;
+ Histogram parseStatistics(const ipu3_uapi_stats_3a *stats,
+ const ipu3_uapi_grid_config &grid);
+
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
+
+ double minAnalogueGain_;
+ double maxAnalogueGain_;
+
+ uint32_t stride_;
+ double rGain_;
+ double gGain_;
+ double bGain_;
+ ipu3_uapi_grid_config bdsGrid_;
+ std::vector<std::tuple<uint8_t, uint8_t, uint8_t>> rgbTriples_;
+};
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/algorithm.h b/src/ipa/ipu3/algorithms/algorithm.h
new file mode 100644
index 00000000..c7801f93
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/algorithm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * IPU3 control algorithm interface
+ */
+
+#pragma once
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3 {
+
+using Algorithm = libcamera::ipa::Algorithm<Module>;
+
+} /* namespace ipa::ipu3 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp
new file mode 100644
index 00000000..55de05d9
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/awb.cpp
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * AWB control algorithm
+ */
+#include "awb.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/colours.h"
+
+/**
+ * \file awb.h
+ */
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+LOG_DEFINE_CATEGORY(IPU3Awb)
+
+/*
+ * When zones are used for the grey world algorithm, they are only considered if
+ * their average green value is at least 16/255 (after black level subtraction)
+ * to exclude zones that are too dark and don't provide relevant colour
+ * information (on the opposite side of the spectrum, saturated regions are
+ * excluded by the ImgU statistics engine).
+ */
+static constexpr uint32_t kMinGreenLevelInZone = 16;
+
+/*
+ * Minimum proportion of non-saturated cells in a zone for the zone to be used
+ * by the AWB algorithm.
+ */
+static constexpr double kMaxCellSaturationRatio = 0.8;
+
+/*
+ * Maximum ratio of saturated pixels in a cell for the cell to be considered
+ * non-saturated and counted by the AWB algorithm.
+ */
+static constexpr uint32_t kMaxCellSaturationRatio = 255 * 90 / 100;
+
+/**
+ * \struct Accumulator
+ * \brief RGB statistics for a given zone
+ *
+ * Accumulate red, green and blue values for each non-saturated item over a
+ * zone. Items can be, for instance, individual pixels or the averages of
+ * groups of pixels, depending on how the accumulator is used.
+ * \todo move this description and structure into a common header
+ *
+ * Zones which are saturated beyond the threshold defined in
+ * ipu3_uapi_awb_config_s are not included in the average.
+ *
+ * \var Accumulator::counted
+ * \brief Number of unsaturated cells used to calculate the sums
+ *
+ * \var Accumulator::sum
+ * \brief A structure containing the average red, green and blue sums
+ *
+ * \var Accumulator::sum.red
+ * \brief Sum of the average red values of each unsaturated cell in the zone
+ *
+ * \var Accumulator::sum.green
+ * \brief Sum of the average green values of each unsaturated cell in the zone
+ *
+ * \var Accumulator::sum.blue
+ * \brief Sum of the average blue values of each unsaturated cell in the zone
+ */
+
+/**
+ * \struct Awb::AwbStatus
+ * \brief AWB parameters calculated
+ *
+ * The AwbStatus structure is intended to store the AWB
+ * parameters calculated by the algorithm
+ *
+ * \var AwbStatus::temperatureK
+ * \brief Color temperature calculated
+ *
+ * \var AwbStatus::redGain
+ * \brief Gain calculated for the red channel
+ *
+ * \var AwbStatus::greenGain
+ * \brief Gain calculated for the green channel
+ *
+ * \var AwbStatus::blueGain
+ * \brief Gain calculated for the blue channel
+ */
+
+/* Default settings for Bayer noise reduction replicated from the Kernel */
+static const struct ipu3_uapi_bnr_static_config imguCssBnrDefaults = {
+ .wb_gains = { 16, 16, 16, 16 },
+ .wb_gains_thr = { 255, 255, 255, 255 },
+ .thr_coeffs = { 1700, 0, 31, 31, 0, 16 },
+ .thr_ctrl_shd = { 26, 26, 26, 26 },
+ .opt_center = { -648, 0, -366, 0 },
+ .lut = {
+ { 17, 23, 28, 32, 36, 39, 42, 45,
+ 48, 51, 53, 55, 58, 60, 62, 64,
+ 66, 68, 70, 72, 73, 75, 77, 78,
+ 80, 82, 83, 85, 86, 88, 89, 90 } },
+ .bp_ctrl = { 20, 0, 1, 40, 0, 6, 0, 6, 0 },
+ .dn_detect_ctrl = { 9, 3, 4, 0, 8, 0, 1, 1, 1, 1, 0 },
+ .column_size = 1296,
+ .opt_center_sqr = { 419904, 133956 },
+};
+
+/* Default color correction matrix defined as an identity matrix */
+static const struct ipu3_uapi_ccm_mat_config imguCssCcmDefault = {
+ 8191, 0, 0, 0,
+ 0, 8191, 0, 0,
+ 0, 0, 8191, 0
+};
+
+/**
+ * \class Awb
+ * \brief A Grey world white balance correction algorithm
+ *
+ * The Grey World algorithm assumes that the scene, on average, is neutral grey.
+ * Reference: Lam, Edmund & Fung, George. (2008). Automatic White Balancing in
+ * Digital Photography. 10.1201/9781420054538.ch10.
+ *
+ * The IPU3 generates statistics from the Bayer Down Scaler output into a grid
+ * defined in the ipu3_uapi_awb_config_s structure.
+ *
+ * - Cells are defined in Pixels
+ * - Zones are defined in Cells
+ *
+ * 80 cells
+ * /───────────── 1280 pixels ───────────\
+ * 16 zones
+ * 16
+ * ┌────┬────┬────┬────┬────┬─ ──────┬────┐ \
+ * │Cell│ │ │ │ │ | │ │ │
+ * 16 │ px │ │ │ │ │ | │ │ │
+ * ├────┼────┼────┼────┼────┼─ ──────┼────┤ │
+ * │ │ │ │ │ │ | │ │
+ * │ │ │ │ │ │ | │ │ 7
+ * │ ── │ ── │ ── │ ── │ ── │ ── ── ─┤ ── │ 1 2 4
+ * │ │ │ │ │ │ | │ │ 2 0 5
+ *
+ * │ │ │ │ │ │ | │ │ z p c
+ * ├────┼────┼────┼────┼────┼─ ──────┼────┤ o i e
+ * │ │ │ │ │ │ | │ │ n x l
+ * │ │ | │ │ e e l
+ * ├─── ───┼─ ──────┼────┤ s l s
+ * │ │ | │ │ s
+ * │ │ | │ │
+ * ├─── Zone of Cells ───┼─ ──────┼────┤ │
+ * │ (5 x 4) │ | │ │ │
+ * │ │ | │ │ │
+ * ├── ───┼─ ──────┼────┤ │
+ * │ │ │ | │ │ │
+ * │ │ │ │ │ │ | │ │ │
+ * └────┴────┴────┴────┴────┴─ ──────┴────┘ /
+ *
+ *
+ * In each cell, the ImgU computes for each colour component the average of all
+ * unsaturated pixels (below a programmable threshold). It also provides the
+ * ratio of saturated pixels in the cell.
+ *
+ * The AWB algorithm operates on a coarser grid, made by grouping cells from the
+ * hardware grid into zones. The number of zones is fixed to \a kAwbStatsSizeX x
+ * \a kAwbStatsSizeY. For example, a frame of 1280x720 is divided into 80x45
+ * cells of [16x16] pixels and 16x12 zones of [5x4] cells each
+ * (\a kAwbStatsSizeX=16 and \a kAwbStatsSizeY=12). If the number of cells isn't
+ * an exact multiple of the number of zones, the right-most and bottom-most
+ * cells are ignored. The grid configuration is computed by
+ * IPAIPU3::calculateBdsGrid().
+ *
+ * Before calculating the gains, the algorithm aggregates the cell averages for
+ * each zone in generateAwbStats(). Cells that have a too high ratio of
+ * saturated pixels are ignored, and only zones that contain enough
+ * non-saturated cells are then used by the algorithm.
+ *
+ * The Grey World algorithm then estimates the red and blue gains to apply,
+ * and stores the results in the metadata. The green gain is always set to 1.
+ */
+
+Awb::Awb()
+ : Algorithm()
+{
+ asyncResults_.blueGain = 1.0;
+ asyncResults_.greenGain = 1.0;
+ asyncResults_.redGain = 1.0;
+ asyncResults_.temperatureK = 4500;
+
+ zones_.reserve(kAwbStatsSizeX * kAwbStatsSizeY);
+}
+
+Awb::~Awb() = default;
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int Awb::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ const ipu3_uapi_grid_config &grid = context.configuration.grid.bdsGrid;
+ stride_ = context.configuration.grid.stride;
+
+ cellsPerZoneX_ = std::round(grid.width / static_cast<double>(kAwbStatsSizeX));
+ cellsPerZoneY_ = std::round(grid.height / static_cast<double>(kAwbStatsSizeY));
+
+ /*
+ * Configure the minimum proportion of cells counted within a zone
+ * for it to be relevant for the grey world algorithm.
+ * \todo This proportion could be configured.
+ */
+	cellsPerZoneThreshold_ = cellsPerZoneX_ * cellsPerZoneY_ * kMinCellsPerZoneRatio;
+ LOG(IPU3Awb, Debug) << "Threshold for AWB is set to " << cellsPerZoneThreshold_;
+
+ return 0;
+}
+
+constexpr uint16_t Awb::threshold(float value)
+{
+ /* AWB thresholds are in the range [0, 8191] */
+ return value * 8191;
+}
+
+constexpr uint16_t Awb::gainValue(double gain)
+{
+ /*
+ * The colour gains applied by the BNR for the four channels (Gr, R, B
+ * and Gb) are expressed in the parameters structure as 16-bit integers
+	 * that store a fixed-point U3.13 value in the range [0, 8).
+	 *
+	 * The real gain value is equal to the gain parameter divided by 8192,
+	 * plus one, i.e.
+	 *
+	 * Pout = Pin * (1 + gain / 8192)
+ *
+ * where 'Pin' is the input pixel value, 'Pout' the output pixel value,
+ * and 'gain' the gain in the parameters structure as a 16-bit integer.
+ */
+ return std::clamp((gain - 1.0) * 8192, 0.0, 65535.0);
+}
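+
+/*
+ * Worked example (illustration only, not part of the algorithm): a requested
+ * gain of 2.0 encodes to (2.0 - 1.0) * 8192 = 8192, and the BNR then applies
+ * Pout = Pin * (1 + 8192 / 8192) = 2 * Pin. The largest encodable gain is
+ * 1.0 + 65535 / 8192, slightly below 9.
+ */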
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Awb::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ /*
+	 * The green saturation thresholds are lowered because only the green
+	 * channel is used in the exposure computation.
+ */
+ params->acc_param.awb.config.rgbs_thr_r = threshold(1.0);
+ params->acc_param.awb.config.rgbs_thr_gr = threshold(0.9);
+ params->acc_param.awb.config.rgbs_thr_gb = threshold(0.9);
+ params->acc_param.awb.config.rgbs_thr_b = threshold(1.0);
+
+ /*
+ * Enable saturation inclusion on thr_b for ImgU to update the
+ * ipu3_uapi_awb_set_item->sat_ratio field.
+ */
+ params->acc_param.awb.config.rgbs_thr_b |= IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT |
+ IPU3_UAPI_AWB_RGBS_THR_B_EN;
+
+ const ipu3_uapi_grid_config &grid = context.configuration.grid.bdsGrid;
+
+ params->acc_param.awb.config.grid = context.configuration.grid.bdsGrid;
+
+ /*
+ * Optical center is column start (respectively row start) of the
+ * cell of interest minus its X center (respectively Y center).
+ *
+ * For the moment use BDS as a first approximation, but it should
+ * be calculated based on Shading (SHD) parameters.
+ */
+ params->acc_param.bnr = imguCssBnrDefaults;
+ Size &bdsOutputSize = context.configuration.grid.bdsOutputSize;
+ params->acc_param.bnr.column_size = bdsOutputSize.width;
+ params->acc_param.bnr.opt_center.x_reset = grid.x_start - (bdsOutputSize.width / 2);
+ params->acc_param.bnr.opt_center.y_reset = grid.y_start - (bdsOutputSize.height / 2);
+ params->acc_param.bnr.opt_center_sqr.x_sqr_reset = params->acc_param.bnr.opt_center.x_reset
+ * params->acc_param.bnr.opt_center.x_reset;
+ params->acc_param.bnr.opt_center_sqr.y_sqr_reset = params->acc_param.bnr.opt_center.y_reset
+ * params->acc_param.bnr.opt_center.y_reset;
+
+ params->acc_param.bnr.wb_gains.gr = gainValue(context.activeState.awb.gains.green);
+ params->acc_param.bnr.wb_gains.r = gainValue(context.activeState.awb.gains.red);
+ params->acc_param.bnr.wb_gains.b = gainValue(context.activeState.awb.gains.blue);
+ params->acc_param.bnr.wb_gains.gb = gainValue(context.activeState.awb.gains.green);
+
+ LOG(IPU3Awb, Debug) << "Color temperature estimated: " << asyncResults_.temperatureK;
+
+	/* The CCM matrix may change once the colour temperature is taken into account */
+ params->acc_param.ccm = imguCssCcmDefault;
+
+ params->use.acc_awb = 1;
+ params->use.acc_bnr = 1;
+ params->use.acc_ccm = 1;
+}
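+
+/*
+ * Illustration of the optical centre arithmetic above (example values, not
+ * part of the algorithm): with a grid starting at (0, 0) on a 1296x732 BDS
+ * output, x_reset = 0 - 1296 / 2 = -648 and y_reset = 0 - 732 / 2 = -366,
+ * and the squared values are 648^2 = 419904 and 366^2 = 133956, matching the
+ * kernel defaults in imguCssBnrDefaults.
+ */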
+
+/* Generate an RGB vector with the average values for each zone */
+void Awb::generateZones()
+{
+ zones_.clear();
+
+ for (unsigned int i = 0; i < kAwbStatsSizeX * kAwbStatsSizeY; i++) {
+ double counted = awbStats_[i].counted;
+ if (counted >= cellsPerZoneThreshold_) {
+ RGB<double> zone{{
+ static_cast<double>(awbStats_[i].sum.red),
+ static_cast<double>(awbStats_[i].sum.green),
+ static_cast<double>(awbStats_[i].sum.blue)
+ }};
+
+ zone /= counted;
+
+ if (zone.g() >= kMinGreenLevelInZone)
+ zones_.push_back(zone);
+ }
+ }
+}
+
+/* Translate the IPU3 statistics into the default statistics zone array */
+void Awb::generateAwbStats(const ipu3_uapi_stats_3a *stats)
+{
+ /*
+ * Generate a (kAwbStatsSizeX x kAwbStatsSizeY) array from the IPU3 grid which is
+ * (grid.width x grid.height).
+ */
+ for (unsigned int cellY = 0; cellY < kAwbStatsSizeY * cellsPerZoneY_; cellY++) {
+ for (unsigned int cellX = 0; cellX < kAwbStatsSizeX * cellsPerZoneX_; cellX++) {
+ uint32_t cellPosition = cellY * stride_ + cellX;
+ uint32_t zoneX = cellX / cellsPerZoneX_;
+ uint32_t zoneY = cellY / cellsPerZoneY_;
+
+ uint32_t awbZonePosition = zoneY * kAwbStatsSizeX + zoneX;
+
+ /* Cast the initial IPU3 structure to simplify the reading */
+ const ipu3_uapi_awb_set_item *currentCell =
+ reinterpret_cast<const ipu3_uapi_awb_set_item *>(
+ &stats->awb_raw_buffer.meta_data[cellPosition]
+ );
+
+ /*
+ * Use cells which have less than 90%
+ * saturation as an initial means to include
+ * otherwise bright cells which are not fully
+ * saturated.
+ *
+ * \todo The 90% saturation rate may require
+ * further empirical measurements and
+ * optimisation during camera tuning phases.
+ */
+			if (currentCell->sat_ratio <= kMaxCellSaturationRatio) {
+ /* The cell is not saturated, use the current cell */
+ awbStats_[awbZonePosition].counted++;
+ uint32_t greenValue = currentCell->Gr_avg + currentCell->Gb_avg;
+ awbStats_[awbZonePosition].sum.green += greenValue / 2;
+ awbStats_[awbZonePosition].sum.red += currentCell->R_avg;
+ awbStats_[awbZonePosition].sum.blue += currentCell->B_avg;
+ }
+ }
+ }
+}
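+
+/*
+ * Illustration of the cell-to-zone mapping above (example values): with
+ * cellsPerZoneX_ = 5 and cellsPerZoneY_ = 4, as in the 1280x720 case, the
+ * cell at (cellX, cellY) = (12, 9) is accumulated into zone
+ * (12 / 5, 9 / 4) = (2, 2), i.e. awbStats_[2 * kAwbStatsSizeX + 2].
+ */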
+
+void Awb::clearAwbStats()
+{
+ for (unsigned int i = 0; i < kAwbStatsSizeX * kAwbStatsSizeY; i++) {
+ awbStats_[i].sum.blue = 0;
+ awbStats_[i].sum.red = 0;
+ awbStats_[i].sum.green = 0;
+ awbStats_[i].counted = 0;
+ }
+}
+
+void Awb::awbGreyWorld()
+{
+ LOG(IPU3Awb, Debug) << "Grey world AWB";
+ /*
+ * Make a separate list of the derivatives for each of red and blue, so
+ * that we can sort them to exclude the extreme gains. We could
+ * consider some variations, such as normalising all the zones first, or
+ * doing an L2 average etc.
+ */
+ std::vector<RGB<double>> &redDerivative(zones_);
+ std::vector<RGB<double>> blueDerivative(redDerivative);
+ std::sort(redDerivative.begin(), redDerivative.end(),
+ [](RGB<double> const &a, RGB<double> const &b) {
+ return a.g() * b.r() < b.g() * a.r();
+ });
+ std::sort(blueDerivative.begin(), blueDerivative.end(),
+ [](RGB<double> const &a, RGB<double> const &b) {
+ return a.g() * b.b() < b.g() * a.b();
+ });
+
+ /* Average the middle half of the values. */
+ int discard = redDerivative.size() / 4;
+
+ RGB<double> sumRed{ 0.0 };
+ RGB<double> sumBlue{ 0.0 };
+ for (auto ri = redDerivative.begin() + discard,
+ bi = blueDerivative.begin() + discard;
+ ri != redDerivative.end() - discard; ri++, bi++)
+ sumRed += *ri, sumBlue += *bi;
+
+ double redGain = sumRed.g() / (sumRed.r() + 1),
+ blueGain = sumBlue.g() / (sumBlue.b() + 1);
+
+	/*
+	 * Colour temperature is not used by the grey world algorithm, but
+	 * estimating it is still useful to report in the metadata.
+	 */
+ asyncResults_.temperatureK = estimateCCT({{ sumRed.r(), sumRed.g(), sumBlue.b() }});
+
+ /*
+ * Gain values are unsigned integer value ranging [0, 8) with 13 bit
+ * fractional part.
+ */
+ redGain = std::clamp(redGain, 0.0, 65535.0 / 8192);
+ blueGain = std::clamp(blueGain, 0.0, 65535.0 / 8192);
+
+ asyncResults_.redGain = redGain;
+ /* Hardcode the green gain to 1.0. */
+ asyncResults_.greenGain = 1.0;
+ asyncResults_.blueGain = blueGain;
+}
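+
+/*
+ * Illustration of the trimmed mean above (example values): with 16 valid
+ * zones, discard = 16 / 4 = 4, so each loop averages the 8 zones ranked
+ * 4 to 11 by their G/R (respectively G/B) ratio. If the averaged sums give
+ * G = 1200 and R = 800, the red gain is 1200 / 801 ~= 1.5; the + 1 in the
+ * denominator only guards against division by zero.
+ */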
+
+void Awb::calculateWBGains(const ipu3_uapi_stats_3a *stats)
+{
+ ASSERT(stats->stats_3a_status.awb_en);
+
+ clearAwbStats();
+ generateAwbStats(stats);
+ generateZones();
+
+ LOG(IPU3Awb, Debug) << "Valid zones: " << zones_.size();
+
+ if (zones_.size() > 10) {
+ awbGreyWorld();
+ LOG(IPU3Awb, Debug) << "Gain found for red: " << asyncResults_.redGain
+ << " and for blue: " << asyncResults_.blueGain;
+ }
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Awb::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ calculateWBGains(stats);
+
+ /*
+	 * The gains are only recalculated if enough zones were detected.
+	 * asyncResults_ always holds the most recent results, so the active
+	 * state is updated from it unconditionally.
+ */
+ context.activeState.awb.gains.blue = asyncResults_.blueGain;
+ context.activeState.awb.gains.green = asyncResults_.greenGain;
+ context.activeState.awb.gains.red = asyncResults_.redGain;
+ context.activeState.awb.temperatureK = asyncResults_.temperatureK;
+
+ metadata.set(controls::AwbEnable, true);
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(context.activeState.awb.gains.red),
+ static_cast<float>(context.activeState.awb.gains.blue)
+ });
+ metadata.set(controls::ColourTemperature,
+ context.activeState.awb.temperatureK);
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/awb.h b/src/ipa/ipu3/algorithms/awb.h
new file mode 100644
index 00000000..1916990a
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/awb.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * IPU3 AWB control algorithm
+ */
+
+#pragma once
+
+#include <vector>
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/geometry.h>
+
+#include "libipa/vector.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+/* Region size for the statistics generation algorithm */
+static constexpr uint32_t kAwbStatsSizeX = 16;
+static constexpr uint32_t kAwbStatsSizeY = 12;
+
+struct Accumulator {
+ unsigned int counted;
+ struct {
+ uint64_t red;
+ uint64_t green;
+ uint64_t blue;
+ } sum;
+};
+
+class Awb : public Algorithm
+{
+public:
+ Awb();
+ ~Awb();
+
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
+
+private:
+ struct AwbStatus {
+ double temperatureK;
+ double redGain;
+ double greenGain;
+ double blueGain;
+ };
+
+private:
+ void calculateWBGains(const ipu3_uapi_stats_3a *stats);
+ void generateZones();
+ void generateAwbStats(const ipu3_uapi_stats_3a *stats);
+ void clearAwbStats();
+ void awbGreyWorld();
+ static constexpr uint16_t threshold(float value);
+ static constexpr uint16_t gainValue(double gain);
+
+ std::vector<RGB<double>> zones_;
+ Accumulator awbStats_[kAwbStatsSizeX * kAwbStatsSizeY];
+ AwbStatus asyncResults_;
+
+ uint32_t stride_;
+ uint32_t cellsPerZoneX_;
+ uint32_t cellsPerZoneY_;
+ uint32_t cellsPerZoneThreshold_;
+};
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/blc.cpp b/src/ipa/ipu3/algorithms/blc.cpp
new file mode 100644
index 00000000..35748fb2
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/blc.cpp
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 Black Level Correction control
+ */
+
+#include "blc.h"
+
+/**
+ * \file blc.h
+ * \brief IPU3 Black Level Correction control
+ */
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+/**
+ * \class BlackLevelCorrection
+ * \brief A class to handle black level correction
+ *
+ * The pixels output by the camera normally include a black level, because
+ * sensors do not always report a signal level of '0' for black. Pixels at or
+ * below this level should be considered black. To achieve that, the ImgU BLC
+ * algorithm subtracts a configurable offset from all pixels.
+ *
+ * The black level can be measured at runtime from an optical dark region of the
+ * camera sensor, or measured during the camera tuning process. The first option
+ * isn't currently supported.
+ */
+
+BlackLevelCorrection::BlackLevelCorrection()
+{
+}
+
+/**
+ * \brief Fill in the parameter structure, and enable black level correction
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The IPU3 parameters
+ *
+ * Populate the IPU3 parameter structure with the correction values for each
+ * channel and enable the corresponding ImgU block processing.
+ */
+void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ /*
+ * The Optical Black Level correction values
+ * \todo The correction values should come from sensor specific
+ * tuning processes. This is a first rough approximation.
+ */
+ params->obgrid_param.gr = 64;
+ params->obgrid_param.r = 64;
+ params->obgrid_param.b = 64;
+ params->obgrid_param.gb = 64;
+
+ /* Enable the custom black level correction processing */
+ params->use.obgrid = 1;
+ params->use.obgrid_param = 1;
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/blc.h b/src/ipa/ipu3/algorithms/blc.h
new file mode 100644
index 00000000..62748045
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/blc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 Black Level Correction control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+class BlackLevelCorrection : public Algorithm
+{
+public:
+ BlackLevelCorrection();
+
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ ipu3_uapi_params *params) override;
+};
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/meson.build b/src/ipa/ipu3/algorithms/meson.build
new file mode 100644
index 00000000..b70a551c
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+ipu3_ipa_algorithms = files([
+ 'af.cpp',
+ 'agc.cpp',
+ 'awb.cpp',
+ 'blc.cpp',
+ 'tone_mapping.cpp',
+])
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.cpp b/src/ipa/ipu3/algorithms/tone_mapping.cpp
new file mode 100644
index 00000000..160338c1
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/tone_mapping.cpp
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 ToneMapping and Gamma control
+ */
+
+#include "tone_mapping.h"
+
+#include <cmath>
+#include <string.h>
+
+/**
+ * \file tone_mapping.h
+ */
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+/**
+ * \class ToneMapping
+ * \brief A class to handle tone mapping based on gamma
+ *
+ * This algorithm improves the image dynamic range using a look-up table
+ * generated from a gamma parameter.
+ */
+
+ToneMapping::ToneMapping()
+ : gamma_(1.0)
+{
+}
+
+/**
+ * \brief Configure the tone mapping given a configInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data
+ *
+ * \return 0
+ */
+int ToneMapping::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ /* Initialise tone mapping gamma value. */
+ context.activeState.toneMapping.gamma = 0.0;
+
+ return 0;
+}
+
+/**
+ * \brief Fill in the parameter structure, and enable gamma control
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The IPU3 parameters
+ *
+ * Populate the IPU3 parameter structure with our tone mapping look up table and
+ * enable the gamma control module in the processing blocks.
+ */
+void ToneMapping::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ ipu3_uapi_params *params)
+{
+ /* Copy the calculated LUT into the parameters buffer. */
+ memcpy(params->acc_param.gamma.gc_lut.lut,
+ context.activeState.toneMapping.gammaCorrection.lut,
+ IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES *
+ sizeof(params->acc_param.gamma.gc_lut.lut[0]));
+
+ /* Enable the custom gamma table. */
+ params->use.acc_gamma = 1;
+ params->acc_param.gamma.gc_ctrl.enable = 1;
+}
+
+/**
+ * \brief Calculate the tone mapping look up table
+ * \param[in] context The shared IPA context
+ * \param[in] frame The current frame sequence number
+ * \param[in] frameContext The current frame context
+ * \param[in] stats The IPU3 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * The tone mapping look up table is generated as an inverse power curve from
+ * our gamma setting.
+ */
+void ToneMapping::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const ipu3_uapi_stats_3a *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ /*
+ * Hardcode gamma to 1.1 as a default for now.
+ *
+ * \todo Expose gamma control setting through the libcamera control API
+ */
+ gamma_ = 1.1;
+
+ if (context.activeState.toneMapping.gamma == gamma_)
+ return;
+
+ struct ipu3_uapi_gamma_corr_lut &lut =
+ context.activeState.toneMapping.gammaCorrection;
+
+ for (uint32_t i = 0; i < std::size(lut.lut); i++) {
+ double j = static_cast<double>(i) / (std::size(lut.lut) - 1);
+ double gamma = std::pow(j, 1.0 / gamma_);
+
+ /* The output value is expressed on 13 bits. */
+ lut.lut[i] = gamma * 8191;
+ }
+
+ context.activeState.toneMapping.gamma = gamma_;
+}
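+
+/*
+ * Worked example for the LUT above (illustration only): with gamma_ = 1.1,
+ * the mid-point j = 0.5 maps to 0.5^(1 / 1.1) ~= 0.53, i.e. a LUT value of
+ * roughly 4360 out of 8191, slightly brightening the mid-tones.
+ */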
+
+REGISTER_IPA_ALGORITHM(ToneMapping, "ToneMapping")
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.h b/src/ipa/ipu3/algorithms/tone_mapping.h
new file mode 100644
index 00000000..b2b38010
--- /dev/null
+++ b/src/ipa/ipu3/algorithms/tone_mapping.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 ToneMapping and Gamma control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3::algorithms {
+
+class ToneMapping : public Algorithm
+{
+public:
+ ToneMapping();
+
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, ipu3_uapi_params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ipu3_uapi_stats_3a *stats,
+ ControlList &metadata) override;
+
+private:
+ double gamma_;
+};
+
+} /* namespace ipa::ipu3::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/data/meson.build b/src/ipa/ipu3/data/meson.build
new file mode 100644
index 00000000..0f7cd5c6
--- /dev/null
+++ b/src/ipa/ipu3/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'ipu3',
+ install_tag : 'runtime')
diff --git a/src/ipa/ipu3/data/uncalibrated.yaml b/src/ipa/ipu3/data/uncalibrated.yaml
new file mode 100644
index 00000000..794ab3ed
--- /dev/null
+++ b/src/ipa/ipu3/data/uncalibrated.yaml
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Af:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - ToneMapping:
+...
diff --git a/src/ipa/ipu3/ipa_context.cpp b/src/ipa/ipu3/ipa_context.cpp
new file mode 100644
index 00000000..3b22f791
--- /dev/null
+++ b/src/ipa/ipu3/ipa_context.cpp
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::ipu3 {
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief The active state of the IPA algorithms
+ *
+ * The IPA is fed with the statistics generated from the latest frame captured
+ * by the hardware. The statistics are then processed by the IPA algorithms to
+ * compute ISP parameters required for the next frame capture. The current
+ * state of the algorithms is reflected in the IPAActiveState, which stores
+ * the values most recently computed by the IPA algorithms.
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \fn IPAContext::IPAContext
+ * \brief Initialize the instance with the given number of frame contexts
+ * \param[in] frameContextSize Size of the frame context ring buffer
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of the IPAFrameContext(s)
+ *
+ * \var IPAContext::activeState
+ * \brief The current state of IPA algorithms
+ *
+ * \var IPAContext::ctrlMap
+ * \brief A ControlInfoMap::Map of controls populated by the algorithms
+ */
+
+/**
+ * \var IPASessionConfiguration::grid
+ * \brief Grid configuration of the IPA
+ *
+ * \var IPASessionConfiguration::grid.bdsGrid
+ * \brief Bayer Down Scaler grid plane config used by the kernel
+ *
+ * \var IPASessionConfiguration::grid.bdsOutputSize
+ * \brief BDS output size configured by the pipeline handler
+ *
+ * \var IPASessionConfiguration::grid.stride
+ * \brief Number of cells on one line including the ImgU padding
+ */
+
+/**
+ * \var IPASessionConfiguration::af
+ * \brief AF grid configuration of the IPA
+ *
+ * \var IPASessionConfiguration::af.afGrid
+ * \brief AF scene grid configuration
+ */
+
+/**
+ * \var IPAActiveState::af
+ * \brief Context for the Automatic Focus algorithm
+ *
+ * \var IPAActiveState::af.focus
+ * \brief Current position of the lens
+ *
+ * \var IPAActiveState::af.maxVariance
+ * \brief The maximum variance of the current image
+ *
+ * \var IPAActiveState::af.stable
+ * \brief Set to true when the best focus is found
+ */
+
+/**
+ * \var IPASessionConfiguration::agc
+ * \brief AGC parameters configuration of the IPA
+ *
+ * \var IPASessionConfiguration::agc.minExposureTime
+ * \brief Minimum exposure time supported with the configured sensor
+ *
+ * \var IPASessionConfiguration::agc.maxExposureTime
+ * \brief Maximum exposure time supported with the configured sensor
+ *
+ * \var IPASessionConfiguration::agc.minAnalogueGain
+ * \brief Minimum analogue gain supported with the configured sensor
+ *
+ * \var IPASessionConfiguration::agc.maxAnalogueGain
+ * \brief Maximum analogue gain supported with the configured sensor
+ */
+
+/**
+ * \var IPASessionConfiguration::sensor
+ * \brief Sensor-specific configuration of the IPA
+ *
+ * \var IPASessionConfiguration::sensor.lineDuration
+ * \brief Line duration in microseconds
+ *
+ * \var IPASessionConfiguration::sensor.defVBlank
+ * \brief The default vblank value of the sensor
+ *
+ * \var IPASessionConfiguration::sensor.size
+ * \brief Sensor output resolution
+ */
+
+/**
+ * \var IPAActiveState::agc
+ * \brief Context for the Automatic Gain Control algorithm
+ *
+ * The exposure and gain determined are expected to be applied to the sensor
+ * at the earliest opportunity.
+ *
+ * \var IPAActiveState::agc.exposure
+ * \brief Exposure time expressed as a number of lines
+ *
+ * \var IPAActiveState::agc.gain
+ * \brief Analogue gain multiplier
+ *
+ * The gain should be adapted to the sensor specific gain code before applying.
+ */
+
+/**
+ * \var IPAActiveState::awb
+ * \brief Context for the Automatic White Balance algorithm
+ *
+ * \var IPAActiveState::awb.gains
+ * \brief White balance gains
+ *
+ * \var IPAActiveState::awb.gains.red
+ * \brief White balance gain for R channel
+ *
+ * \var IPAActiveState::awb.gains.green
+ * \brief White balance gain for G channel
+ *
+ * \var IPAActiveState::awb.gains.blue
+ * \brief White balance gain for B channel
+ *
+ * \var IPAActiveState::awb.temperatureK
+ * \brief Estimated color temperature
+ */
+
+/**
+ * \var IPAActiveState::toneMapping
+ * \brief Context for ToneMapping and Gamma control
+ *
+ * \var IPAActiveState::toneMapping.gamma
+ * \brief Gamma value for the LUT
+ *
+ * \var IPAActiveState::toneMapping.gammaCorrection
+ * \brief Per-pixel tone mapping implemented as a LUT
+ *
+ * The LUT structure is defined by the IPU3 kernel interface. See
+ * <linux/intel-ipu3.h> struct ipu3_uapi_gamma_corr_lut for further details.
+ */
+
+/**
+ * \struct IPAFrameContext
+ * \brief IPU3-specific FrameContext
+ *
+ * \var IPAFrameContext::sensor
+ * \brief Effective sensor values that were applied for the frame
+ *
+ * \var IPAFrameContext::sensor.exposure
+ * \brief Exposure time expressed as a number of lines
+ *
+ * \var IPAFrameContext::sensor.gain
+ * \brief Analogue gain multiplier
+ */
+
+} /* namespace libcamera::ipa::ipu3 */
diff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h
new file mode 100644
index 00000000..97fcf06c
--- /dev/null
+++ b/src/ipa/ipu3/ipa_context.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * IPU3 IPA Context
+ */
+
+#pragma once
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+
+#include <libipa/fc_queue.h>
+
+namespace libcamera {
+
+namespace ipa::ipu3 {
+
+struct IPASessionConfiguration {
+ struct {
+ ipu3_uapi_grid_config bdsGrid;
+ Size bdsOutputSize;
+ uint32_t stride;
+ } grid;
+
+ struct {
+ ipu3_uapi_grid_config afGrid;
+ } af;
+
+ struct {
+ utils::Duration minExposureTime;
+ utils::Duration maxExposureTime;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+ } agc;
+
+ struct {
+ int32_t defVBlank;
+ utils::Duration lineDuration;
+ Size size;
+ } sensor;
+};
+
+struct IPAActiveState {
+ struct {
+ uint32_t focus;
+ double maxVariance;
+ bool stable;
+ } af;
+
+ struct {
+ uint32_t exposure;
+ double gain;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
+ } agc;
+
+ struct {
+ struct {
+ double red;
+ double green;
+ double blue;
+ } gains;
+
+ double temperatureK;
+ } awb;
+
+ struct {
+ double gamma;
+ struct ipu3_uapi_gamma_corr_lut gammaCorrection;
+ } toneMapping;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ uint32_t exposure;
+ double gain;
+ } sensor;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
+};
+
+} /* namespace ipa::ipu3 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/ipu3-ipa-design-guide.rst b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
new file mode 100644
index 00000000..85d735c6
--- /dev/null
+++ b/src/ipa/ipu3/ipu3-ipa-design-guide.rst
@@ -0,0 +1,162 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+IPU3 IPA Architecture Design and Overview
+=========================================
+
+The IPU3 IPA is built as a modular and extensible framework with an
+upper layer to manage the interactions with the pipeline handler, and
+the image processing algorithms split to compartmentalise the processing
+required for each processing block, making use of the fixed-function
+accelerators provided by the ImgU ISP.
+
+The core IPU3 class is responsible for initialisation and construction
+of the algorithm components, processing controls set by the requests
+from applications, and managing events from the pipeline handler.
+
+::
+
+ ┌───────────────────────────────────────────┐
+ │ IPU3 Pipeline Handler │
+ │ ┌────────┐ ┌────────┐ ┌────────┐ │
+ │ │ │ │ │ │ │ │
+ │ │ Sensor ├───►│ CIO2 ├───►│ ImgU ├──►
+ │ │ │ │ │ │ │ │
+ │ └────────┘ └────────┘ └─▲────┬─┘ │ P: Parameter Buffer
+ │ │P │ │ S: Statistics Buffer
+ │ │ │S │
+ └─┬───┬───┬──────┬────┬────┬────┬─┴────▼─┬──┘ 1: init()
+ │ │ │ │ ▲ │ ▲ │ ▲ │ ▲ │ 2: configure()
+ │1 │2 │3 │4│ │4│ │4│ │4│ │5 3: mapBuffers(), start()
+ │ │ │ │ │ │ │ │ │ │ │ │ 4: (▼) queueRequest(), computeParams(), processStats()
+ ▼ ▼ ▼ ▼ │ ▼ │ ▼ │ ▼ │ ▼ (▲) setSensorControls, paramsComputed, metadataReady Signals
+ ┌──────────────────┴────┴────┴────┴─────────┐ 5: stop(), unmapBuffers()
+ │ IPU3 IPA │
+ │ ┌───────────────────────┐ │
+ │ ┌───────────┐ │ Algorithms │ │
+ │ │IPAContext │ │ ┌─────────┐ │ │
+ │ │ ┌───────┐ │ │ │ ... │ │ │
+ │ │ │ │ │ │ ┌─┴───────┐ │ │ │
+ │ │ │ SC │ │ │ │ Tonemap ├─┘ │ │
+ │ │ │ │ ◄───► ┌─┴───────┐ │ │ │
+ │ │ ├───────┤ │ │ │ AWB ├─┘ │ │
+ │ │ │ │ │ │ ┌─┴───────┐ │ │ │
+ │ │ │ FC │ │ │ │ AGC ├─┘ │ │
+ │ │ │ │ │ │ │ │ │ │
+ │ │ └───────┘ │ │ └─────────┘ │ │
+ │ └───────────┘ └───────────────────────┘ │
+ └───────────────────────────────────────────┘
+ SC: IPASessionConfiguration
+ FC: IPAFrameContext(s)
+
+The IPA instance is constructed and initialised at the point a Camera is
+created by the IPU3 pipeline handler. The initialisation call provides
+details about which camera sensor is being used, and the controls that
+it has available, along with their default values and ranges.
+
+Buffers
+~~~~~~~
+
+The IPA will have Parameter and Statistics buffers shared with it from
+the IPU3 Pipeline handler. These buffers will be passed to the IPA using
+the ``mapBuffers()`` call before the ``start()`` operation occurs.
+
+The IPA will map the buffers into CPU-accessible memory, associated with
+a buffer ID, and further events for sending or receiving parameter and
+statistics buffers will reference the ID to avoid expensive memory
+mapping operations, or the passing of file handles during streaming.
+
+After the ``stop()`` operation occurs, these buffers will be unmapped
+when requested by the pipeline handler using the ``unmapBuffers()`` call
+and no further access to the buffers is permitted.
+
+Context
+~~~~~~~
+
+Algorithm calls will always have the ``IPAContext`` available to them.
+This context comprises two parts:
+
+- IPA Session Configuration
+- IPA Frame Context
+
+The session configuration structure ``IPASessionConfiguration`` represents
+the constant parameters determined during ``configure()``, before streaming
+commences.
+
+The IPA Frame Context provides the storage for algorithms for a single
+frame operation.
+
+The ``IPAFrameContext`` structure may be extended to an array, list, or
+queue to store historical state for each frame, allowing algorithms to
+obtain and reference results of calculations which are deeply pipelined.
+This is only needed when an algorithm must know the context that was
+applied at the frame the statistics were produced for, rather than the
+previous or current frame.
+
+Presently the ``IPAFrameContext`` instances are stored in a ring buffer
+(``FCQueue``), and each context is maintained and updated through successive
+processing operations.
+
+Operating
+~~~~~~~~~
+
+There are four main interactions with the algorithms for the IPU3 IPA
+to operate when running:
+
+- configure()
+- queueRequest()
+- computeParams()
+- processStats()
+
+The configuration phase allows the pipeline handler to inform the IPA of
+the current stream configurations, which are then passed into each
+algorithm to provide an opportunity to identify and track the state of the
+hardware, such as image size or ImgU pipeline configurations.
+
+Pre-frame preparation
+~~~~~~~~~~~~~~~~~~~~~
+
+When configured, the IPA is notified by the pipeline handler of the
+Camera ``start()`` event, after which incoming requests will be queued
+for processing, requiring a parameter buffer (``ipu3_uapi_params``) to
+be populated for the ImgU. This is given to the IPA through
+``computeParams()``, and then passed directly to each algorithm
+through the ``prepare()`` call allowing the ISP configuration to be
+updated for the needs of each component that the algorithm is
+responsible for.
+
+The algorithm should set the use flag (``ipu3_uapi_flags``) for any
+structure that it modifies, and it should take care to ensure that any
+structure set by a use flag is fully initialised to suitable values.
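+
+For illustration, a minimal ``prepare()`` implementation following this
+convention could look as follows (``MyBlock`` is a hypothetical algorithm
+name, shown only to sketch the use-flag pattern; the fields mirror the black
+level correction code in this series)::
+
+    void MyBlock::prepare([[maybe_unused]] IPAContext &context,
+                          [[maybe_unused]] const uint32_t frame,
+                          [[maybe_unused]] IPAFrameContext &frameContext,
+                          ipu3_uapi_params *params)
+    {
+        /* Fully initialise the structure covered by the use flags. */
+        params->obgrid_param = {};
+        params->obgrid_param.r = 64;
+
+        /* Mark the structure as valid for the ImgU to consume. */
+        params->use.obgrid = 1;
+        params->use.obgrid_param = 1;
+    }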
+
+The parameter buffer is returned to the pipeline handler through the
+``paramsComputed`` signal, and from there queued to the ImgU along
+with a raw frame captured with the CIO2.
+
+Post-frame completion
+~~~~~~~~~~~~~~~~~~~~~
+
+When the capture of an image is completed, and successfully processed
+through the ImgU, the generated statistics buffer
+(``ipu3_uapi_stats_3a``) is given to the IPA through
+``processStats()``. This provides the IPA with an opportunity to
+examine the results of the ISP and run the calculations required by each
+algorithm on the new data. The algorithms may require context from the
+operations of other algorithms, for example, the AWB might choose to use
+a scene brightness determined by the AGC. It is important that the
+algorithms are ordered to ensure that required results are determined
+before they are needed.
+
+The ordering of the algorithm processing is determined by their
+placement in the ``IPU3::algorithms_`` ordered list.
+
+Finally, the IPA metadata for the completed frame is returned via the
+``metadataReady`` signal.
+
+Sensor Controls
+~~~~~~~~~~~~~~~
+
+The AutoExposure and AutoGain (AGC) algorithm differs slightly from the
+others as it requires operating directly on the sensor, as opposed to
+through the ImgU ISP. To support this, there is a ``setSensorControls``
+signal to allow the IPA to request controls to be set on the camera
+sensor through the pipeline handler.
diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp
new file mode 100644
index 00000000..1cae08bf
--- /dev/null
+++ b/src/ipa/ipu3/ipu3.cpp
@@ -0,0 +1,692 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * IPU3 Image Processing Algorithms
+ */
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <limits>
+#include <map>
+#include <memory>
+#include <stdint.h>
+#include <utility>
+#include <vector>
+
+#include <linux/intel-ipu3.h>
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/request.h>
+
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/camera_sensor_helper.h"
+
+#include "ipa_context.h"
+#include "module.h"
+
+/* Minimum grid width, expressed as a number of cells */
+static constexpr uint32_t kMinGridWidth = 16;
+/* Maximum grid width, expressed as a number of cells */
+static constexpr uint32_t kMaxGridWidth = 80;
+/* Minimum grid height, expressed as a number of cells */
+static constexpr uint32_t kMinGridHeight = 16;
+/* Maximum grid height, expressed as a number of cells */
+static constexpr uint32_t kMaxGridHeight = 60;
+/* log2 of the minimum grid cell width and height, in pixels */
+static constexpr uint32_t kMinCellSizeLog2 = 3;
+/* log2 of the maximum grid cell width and height, in pixels */
+static constexpr uint32_t kMaxCellSizeLog2 = 6;
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAIPU3)
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::ipu3 {
+
+/**
+ * \brief The IPU3 IPA implementation
+ *
+ * The IPU3 Pipeline defines an IPU3-specific interface for communication
+ * between the PipelineHandler and the IPA module.
+ *
+ * We extend the IPAIPU3Interface to implement our algorithms and handle
+ * calls from the IPU3 PipelineHandler to satisfy requests from the
+ * application.
+ *
+ * At initialisation time, a CameraSensorHelper is instantiated to support
+ * camera-specific calculations, the default controls are computed, and
+ * the algorithms are instantiated from the tuning data file.
+ *
+ * The IPU3 ImgU operates with a grid layout to divide the overall frame into
+ * rectangular cells of pixels. When the IPA is configured, we determine the
+ * best grid for the statistics based on the pipeline handler Bayer Down Scaler
+ * output size.
+ *
+ * Two main events are then handled to operate the IPU3 ImgU by populating its
+ * parameter buffer, and adapting the settings of the sensor attached to the
+ * IPU3 CIO2 through sensor-specific V4L2 controls.
+ *
+ * In computeParams(), we populate the ImgU parameter buffer with
+ * settings to configure the device in preparation for handling the frame
+ * queued in the Request.
+ *
+ * When the frame has completed processing, the ImgU will generate a statistics
+ * buffer which is given to the IPA with processStats(). There we run the
+ * algorithms to parse the statistics and cache any results for the next
+ * computeParams() call.
+ *
+ * The individual algorithms are split into modular components that are called
+ * iteratively to allow them to process statistics from the ImgU in the order
+ * defined in the tuning data file.
+ *
+ * The current implementation supports five core algorithms:
+ *
+ * - Auto focus (AF)
+ * - Automatic gain and exposure control (AGC)
+ * - Automatic white balance (AWB)
+ * - Black level correction (BLC)
+ * - Tone mapping (Gamma)
+ *
+ * AWB is implemented using a Greyworld algorithm, and calculates the red and
+ * blue gains to apply to generate a neutral grey frame overall.
+ *
+ * AGC is handled by calculating a histogram of the green channel to estimate an
+ * analogue gain and exposure time which will provide a well exposed frame. A
+ * low-pass IIR filter is used to smooth the changes to the sensor to reduce
+ * perceptible steps.
+ *
+ * The tone mapping algorithm provides a gamma correction table to improve the
+ * contrast of the scene.
+ *
+ * The black level compensation algorithm subtracts a hardcoded black level from
+ * all pixels.
+ *
+ * The IPU3 ImgU has further processing blocks to support image quality
+ * improvements through Bayer and temporal noise reduction. Those blocks are
+ * not supported in the current implementation, and use the default settings
+ * provided by the kernel driver.
+ *
+ * Demosaicing operates with the default parameters and could be further
+ * optimised to provide improved sharpening coefficients, checker artifact
+ * removal, and false color correction.
+ *
+ * Additional image enhancements can be made by providing lens and
+ * sensor-specific tuning to adapt for Black Level compensation (BLC), Lens
+ * shading correction (SHD) and Color correction (CCM).
+ */
+class IPAIPU3 : public IPAIPU3Interface, public Module
+{
+public:
+ IPAIPU3();
+
+ int init(const IPASettings &settings,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls) override;
+
+ int start() override;
+ void stop() override;
+
+ int configure(const IPAConfigInfo &configInfo,
+ ControlInfoMap *ipaControls) override;
+
+ void mapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void unmapBuffers(const std::vector<unsigned int> &ids) override;
+
+ void queueRequest(const uint32_t frame, const ControlList &controls) override;
+ void computeParams(const uint32_t frame, const uint32_t bufferId) override;
+ void processStats(const uint32_t frame, const int64_t frameTimestamp,
+ const uint32_t bufferId,
+ const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls);
+ void updateSessionConfiguration(const ControlInfoMap &sensorControls);
+
+ void setControls(unsigned int frame);
+ void calculateBdsGrid(const Size &bdsOutputSize);
+
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
+
+ ControlInfoMap sensorCtrls_;
+ ControlInfoMap lensCtrls_;
+
+ IPACameraSensorInfo sensorInfo_;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+IPAIPU3::IPAIPU3()
+ : context_(kMaxFrameContexts)
+{
+}
+
+std::string IPAIPU3::logPrefix() const
+{
+ return "ipu3";
+}
+
+/**
+ * \brief Compute IPASessionConfiguration using the sensor information and the
+ * sensor V4L2 controls
+ */
+void IPAIPU3::updateSessionConfiguration(const ControlInfoMap &sensorControls)
+{
+ const ControlInfo vBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ context_.configuration.sensor.defVBlank = vBlank.def().get<int32_t>();
+
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>();
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>();
+
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ int32_t minGain = v4l2Gain.min().get<int32_t>();
+ int32_t maxGain = v4l2Gain.max().get<int32_t>();
+
+ /*
+ * When the AGC computes the new exposure values for a frame, it needs
+ * to know the limits for exposure time and analogue gain.
+ * As it depends on the sensor, update it with the controls.
+ *
+ * \todo take VBLANK into account for maximum exposure time
+ */
+ context_.configuration.agc.minExposureTime = minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.maxExposureTime = maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain);
+ context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain);
+}
+
+/**
+ * \brief Compute camera controls using the sensor information and the sensor
+ * V4L2 controls
+ *
+ * Some of the camera controls are computed by the pipeline handler, some others
+ * by the IPA module which is in charge of handling, for example, the exposure
+ * time and the frame duration.
+ *
+ * This function computes:
+ * - controls::ExposureTime
+ * - controls::FrameDurationLimits
+ */
+void IPAIPU3::updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
+{
+ ControlInfoMap::Map controls{};
+ double lineDuration = context_.configuration.sensor.lineDuration.get<std::micro>();
+
+ /*
+ * Compute exposure time limits by using line length and pixel rate
+ * converted to microseconds. Use the V4L2_CID_EXPOSURE control to get
+ * exposure min, max and default and convert it from lines to
+ * microseconds.
+ */
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+ controls[&controls::ExposureTime] = ControlInfo(minExposure, maxExposure,
+ defExposure);
+
+ /*
+ * Compute the frame duration limits.
+ *
+ * The frame length is computed assuming a fixed line length combined
+ * with the vertical frame sizes.
+ */
+ const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+ uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+ uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+ const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ std::array<uint32_t, 3> frameHeights{
+ v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+ };
+
+ std::array<int64_t, 3> frameDurations;
+ for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+ uint64_t frameSize = lineLength * frameHeights[i];
+ frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+ }
+
+ controls[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+ frameDurations[1],
+ frameDurations[2]);
+
+ controls.merge(context_.ctrlMap);
+ *ipaControls = ControlInfoMap(std::move(controls), controls::controls);
+}
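+
+/*
+ * Worked example for the frame duration computation above (hypothetical
+ * sensor values): with a line length of 2400 pixels, a frame height of
+ * 1250 lines and a pixel rate of 240 MHz, the frame size is
+ * 2400 * 1250 = 3000000 pixels and the duration is
+ * 3000000 / 240 = 12500 us, i.e. 80 frames per second.
+ */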
+
+/**
+ * \brief Initialize the IPA module and its controls
+ *
+ * This function receives the camera sensor information from the pipeline
+ * handler, computes the limits of the controls it handles and returns
+ * them in the \a ipaControls output parameter.
+ */
+int IPAIPU3::init(const IPASettings &settings,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (camHelper_ == nullptr) {
+ LOG(IPAIPU3, Error)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ return -ENODEV;
+ }
+
+ /* Clean context */
+ context_.configuration = {};
+ context_.configuration.sensor.lineDuration =
+ sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate;
+
+ /* Load the tuning data file. */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPAIPU3, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ if (version != 1) {
+ LOG(IPAIPU3, Error)
+ << "Invalid tuning file version " << version;
+ return -EINVAL;
+ }
+
+ if (!data->contains("algorithms")) {
+ LOG(IPAIPU3, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ /* Initialize controls. */
+ updateControls(sensorInfo, sensorControls, ipaControls);
+
+ return 0;
+}
+
+/**
+ * \brief Perform any processing required before the first frame
+ */
+int IPAIPU3::start()
+{
+ /*
+ * Set the sensors V4L2 controls before the first frame to ensure that
+ * we have an expected and known configuration from the start.
+ */
+ setControls(0);
+
+ return 0;
+}
+
+/**
+ * \brief Ensure that all processing has completed
+ */
+void IPAIPU3::stop()
+{
+ context_.frameContexts.clear();
+}
+
+/**
+ * \brief Calculate a grid for the AWB statistics
+ *
+ * This function calculates a grid for the AWB algorithm in the IPU3 firmware.
+ * Its input is the BDS output size calculated in the ImgU.
+ * It is limited for now to the simplest method: find the cell size (and its
+ * log2 value) whose resulting grid width and height give the smallest error
+ * against the BDS output size.
+ *
+ * \todo The frame is divided into cells whose size ranges from 8x8 to
+ * 64x64 pixels. As a smaller cell improves the algorithm precision, adapting
+ * the x_start and y_start parameters of the grid would provoke a loss of some
+ * pixels but would also result in more accurate algorithms.
+ */
+void IPAIPU3::calculateBdsGrid(const Size &bdsOutputSize)
+{
+ Size best;
+ Size bestLog2;
+
+ /* Set the BDS output size in the IPAConfiguration structure */
+ context_.configuration.grid.bdsOutputSize = bdsOutputSize;
+
+ uint32_t minError = std::numeric_limits<uint32_t>::max();
+ for (uint32_t shift = kMinCellSizeLog2; shift <= kMaxCellSizeLog2; ++shift) {
+ uint32_t width = std::clamp(bdsOutputSize.width >> shift,
+ kMinGridWidth,
+ kMaxGridWidth);
+
+ width = width << shift;
+ uint32_t error = utils::abs_diff(width, bdsOutputSize.width);
+ if (error >= minError)
+ continue;
+
+ minError = error;
+ best.width = width;
+ bestLog2.width = shift;
+ }
+
+ minError = std::numeric_limits<uint32_t>::max();
+ for (uint32_t shift = kMinCellSizeLog2; shift <= kMaxCellSizeLog2; ++shift) {
+ uint32_t height = std::clamp(bdsOutputSize.height >> shift,
+ kMinGridHeight,
+ kMaxGridHeight);
+
+ height = height << shift;
+ uint32_t error = utils::abs_diff(height, bdsOutputSize.height);
+ if (error >= minError)
+ continue;
+
+ minError = error;
+ best.height = height;
+ bestLog2.height = shift;
+ }
+
+ struct ipu3_uapi_grid_config &bdsGrid = context_.configuration.grid.bdsGrid;
+ bdsGrid.x_start = 0;
+ bdsGrid.y_start = 0;
+ bdsGrid.width = best.width >> bestLog2.width;
+ bdsGrid.block_width_log2 = bestLog2.width;
+ bdsGrid.height = best.height >> bestLog2.height;
+ bdsGrid.block_height_log2 = bestLog2.height;
+
+ /* The ImgU pads the lines to a multiple of 4 cells. */
+ context_.configuration.grid.stride = utils::alignUp(bdsGrid.width, 4);
+
+ LOG(IPAIPU3, Debug) << "Best grid found is: ("
+ << (int)bdsGrid.width << " << " << (int)bdsGrid.block_width_log2 << ") x ("
+ << (int)bdsGrid.height << " << " << (int)bdsGrid.block_height_log2 << ")";
+}
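+
+/*
+ * Worked example for the grid search above (illustration only): for a
+ * 1280x720 BDS output, a cell size of 16x16 pixels (log2 = 4) gives
+ * 1280 >> 4 = 80 and 720 >> 4 = 45 cells with zero error on both axes,
+ * within the [16, 80] x [16, 60] grid limits. The resulting grid is 80x45
+ * cells and the stride is alignUp(80, 4) = 80.
+ */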
+
+/**
+ * \brief Configure the IPU3 IPA
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ * \param[in] ipaControls The IPA controls to update
+ *
+ * Calculate the best grid for the statistics based on the pipeline handler BDS
+ * output, and parse the minimum and maximum exposure and analogue gain control
+ * values.
+ *
+ * \todo Document what the BDS is, ideally in a block diagram of the ImgU.
+ *
+ * All algorithm modules are called to allow them to prepare the
+ * \a IPASessionConfiguration structure for the \a IPAContext.
+ */
+int IPAIPU3::configure(const IPAConfigInfo &configInfo,
+ ControlInfoMap *ipaControls)
+{
+ if (configInfo.sensorControls.empty()) {
+ LOG(IPAIPU3, Error) << "No sensor controls provided";
+ return -ENODATA;
+ }
+
+ sensorInfo_ = configInfo.sensorInfo;
+
+ lensCtrls_ = configInfo.lensControls;
+
+ /* Clear the IPA context for the new streaming session. */
+ context_.activeState = {};
+ context_.configuration = {};
+ context_.frameContexts.clear();
+
+ /* Initialise the sensor configuration. */
+ context_.configuration.sensor.lineDuration =
+ sensorInfo_.minLineLength * 1.0s / sensorInfo_.pixelRate;
+ context_.configuration.sensor.size = sensorInfo_.outputSize;
+
+ /*
+ * Compute the sensor V4L2 controls to be used by the algorithms and
+ * to be set on the sensor.
+ */
+ sensorCtrls_ = configInfo.sensorControls;
+
+ calculateBdsGrid(configInfo.bdsOutputSize);
+
+ /* Update the camera controls using the new sensor settings. */
+ updateControls(sensorInfo_, sensorCtrls_, ipaControls);
+
+ /* Update the IPASessionConfiguration using the sensor settings. */
+ updateSessionConfiguration(sensorCtrls_);
+
+ for (auto const &algo : algorithms()) {
+ int ret = algo->configure(context_, configInfo);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Map the parameters and stats buffers allocated in the pipeline handler
+ * \param[in] buffers The buffers to map
+ */
+void IPAIPU3::mapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(buffer.id,
+ MappedFrameBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite));
+ }
+}
+
+/**
+ * \brief Unmap the parameters and stats buffers
+ * \param[in] ids The IDs of the buffers to unmap
+ */
+void IPAIPU3::unmapBuffers(const std::vector<unsigned int> &ids)
+{
+ for (unsigned int id : ids) {
+ auto it = buffers_.find(id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(it);
+ }
+}
+
+/**
+ * \brief Fill and return a buffer with ISP processing parameters for a frame
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the parameter buffer to fill
+ *
+ * Algorithms are expected to fill the IPU3 parameter buffer for the next
+ * frame given their most recent processing of the ImgU statistics.
+ */
+void IPAIPU3::computeParams(const uint32_t frame, const uint32_t bufferId)
+{
+ auto it = buffers_.find(bufferId);
+ if (it == buffers_.end()) {
+ LOG(IPAIPU3, Error) << "Could not find param buffer!";
+ return;
+ }
+
+ Span<uint8_t> mem = it->second.planes()[0];
+ ipu3_uapi_params *params =
+ reinterpret_cast<ipu3_uapi_params *>(mem.data());
+
+ /*
+ * The incoming params buffer may contain uninitialised data, or the
+ * parameters of previously queued frames. Clearing the entire buffer
+ * may be an expensive operation, and the kernel will only read from
+ * structures which have their associated use-flag set.
+ *
+ * It is the responsibility of the algorithms to set the use flags
+ * accordingly for any data structure they update during prepare().
+ */
+ params->use = {};
+
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ for (auto const &algo : algorithms())
+ algo->prepare(context_, frame, frameContext, params);
+
+ paramsComputed.emit(frame);
+}
+
+/**
+ * \brief Process the statistics generated by the ImgU
+ * \param[in] frame The frame number
+ * \param[in] frameTimestamp Timestamp of the frame
+ * \param[in] bufferId ID of the statistics buffer
+ * \param[in] sensorControls Sensor controls
+ *
+ * Parse the most recently processed image statistics from the ImgU. The
+ * statistics are passed to each algorithm module to run their calculations and
+ * update their state accordingly.
+ */
+void IPAIPU3::processStats(const uint32_t frame,
+ [[maybe_unused]] const int64_t frameTimestamp,
+ const uint32_t bufferId, const ControlList &sensorControls)
+{
+ auto it = buffers_.find(bufferId);
+ if (it == buffers_.end()) {
+ LOG(IPAIPU3, Error) << "Could not find stats buffer!";
+ return;
+ }
+
+ Span<uint8_t> mem = it->second.planes()[0];
+ const ipu3_uapi_stats_3a *stats =
+ reinterpret_cast<ipu3_uapi_stats_3a *>(mem.data());
+
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ frameContext.sensor.exposure = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ frameContext.sensor.gain = camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+
+ ControlList metadata(controls::controls);
+
+ for (auto const &algo : algorithms())
+ algo->process(context_, frame, frameContext, stats, metadata);
+
+ setControls(frame);
+
+ /*
+ * \todo The metadata provides a path to getting extended data
+ * out to the application. Further data such as a simplified histogram
+ * might have value to be exposed; however, such data may be
+ * difficult to report in a generically parsable way, and we
+ * likely want to avoid putting platform-specific metadata in.
+ */
+
+ metadataReady.emit(frame, metadata);
+}
+
+/**
+ * \brief Queue a request and process the control list from the application
+ * \param[in] frame The number of the frame which will be processed next
+ * \param[in] controls The controls for the \a frame
+ *
+ * Parse the request to handle any IPA-managed controls that were set from the
+ * application such as manual sensor settings.
+ */
+void IPAIPU3::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+
+ for (auto const &algo : algorithms())
+ algo->queueRequest(context_, frame, frameContext, controls);
+}
+
+/**
+ * \brief Handle sensor controls for a given \a frame number
+ * \param[in] frame The frame on which the sensor controls should be set
+ *
+ * Send the desired sensor control values to the pipeline handler to request
+ * that they are applied on the camera sensor.
+ */
+void IPAIPU3::setControls(unsigned int frame)
+{
+ int32_t exposure = context_.activeState.agc.exposure;
+ int32_t gain = camHelper_->gainCode(context_.activeState.agc.gain);
+
+ ControlList ctrls(sensorCtrls_);
+ ctrls.set(V4L2_CID_EXPOSURE, exposure);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, gain);
+
+ ControlList lensCtrls(lensCtrls_);
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE,
+ static_cast<int32_t>(context_.activeState.af.focus));
+
+ setSensorControls.emit(frame, ctrls, lensCtrls);
+}
+
+} /* namespace ipa::ipu3 */
+
+/**
+ * \brief External IPA module interface
+ *
+ * The IPAModuleInfo is required to match an IPA module construction against the
+ * intended pipeline handler for the module. The API and pipeline handler
+ * versions must match the corresponding IPA interface and pipeline handler.
+ *
+ * \sa struct IPAModuleInfo
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 1,
+ "ipu3",
+ "ipu3",
+};
+
+/**
+ * \brief Create an instance of the IPA interface
+ *
+ * This function is the entry point of the IPA module. It is called by the IPA
+ * manager to create an instance of the IPA interface for each camera. When
+ * matched with a pipeline handler, the IPAManager will construct an IPA
+ * instance for each associated Camera.
+ */
+IPAInterface *ipaCreate()
+{
+ return new ipa::ipu3::IPAIPU3();
+}
+}
+
+} /* namespace libcamera */
diff --git a/src/ipa/ipu3/meson.build b/src/ipa/ipu3/meson.build
new file mode 100644
index 00000000..34de6213
--- /dev/null
+++ b/src/ipa/ipu3/meson.build
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_ipu3'
+
+ipu3_ipa_sources = files([
+ 'ipa_context.cpp',
+ 'ipu3.cpp',
+])
+
+ipu3_ipa_sources += ipu3_ipa_algorithms
+
+mod = shared_module(ipa_name, ipu3_ipa_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/ipu3/module.h b/src/ipa/ipu3/module.h
new file mode 100644
index 00000000..60f65cc4
--- /dev/null
+++ b/src/ipa/ipu3/module.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * IPU3 IPA Module
+ */
+
+#pragma once
+
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::ipu3 {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
+ ipu3_uapi_params, ipu3_uapi_stats_3a>;
+
+} /* namespace ipa::ipu3 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp
new file mode 100644
index 00000000..02555a44
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.cpp
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#include "agc_mean_luminance.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "exposure_mode_helper.h"
+
+using namespace libcamera::controls;
+
+/**
+ * \file agc_mean_luminance.h
+ * \brief Base class implementing mean luminance AEGC
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(AgcMeanLuminance)
+
+namespace ipa {
+
+/*
+ * Number of frames for which to run the algorithm at full speed, before slowing
+ * down to prevent large and jarring changes in exposure from frame to frame.
+ */
+static constexpr uint32_t kNumStartupFrames = 10;
+
+/*
+ * Default relative luminance target
+ *
+ * This value should be chosen so that when the camera points at a grey target,
+ * the resulting image brightness looks "right". Custom values can be passed
+ * as the relativeLuminanceTarget value in sensor tuning files.
+ */
+static constexpr double kDefaultRelativeLuminanceTarget = 0.16;
+
+/**
+ * \struct AgcMeanLuminance::AgcConstraint
+ * \brief The boundaries and target for an AeConstraintMode constraint
+ *
+ * This structure describes an AeConstraintMode constraint for the purposes of
+ * this algorithm. These constraints are expressed as a pair of quantile
+ * boundaries for a histogram, along with a luminance target and a bound type.
+ * The algorithm uses the constraints by ensuring that the defined portion of a
+ * luminance histogram (i.e. lying between the two quantiles) is above or below
+ * the given luminance value.
+ */
+
+/**
+ * \enum AgcMeanLuminance::AgcConstraint::Bound
+ * \brief Specify whether the constraint defines a lower or upper bound
+ * \var AgcMeanLuminance::AgcConstraint::Lower
+ * \brief The constraint defines a lower bound
+ * \var AgcMeanLuminance::AgcConstraint::Upper
+ * \brief The constraint defines an upper bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::bound
+ * \brief The type of constraint bound
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qLo
+ * \brief The lower quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::qHi
+ * \brief The upper quantile to use for the constraint
+ */
+
+/**
+ * \var AgcMeanLuminance::AgcConstraint::yTarget
+ * \brief The luminance target for the constraint
+ */
+
+/**
+ * \class AgcMeanLuminance
+ * \brief A mean-based auto-exposure algorithm
+ *
+ * This algorithm calculates an exposure time, analogue and digital gain such
+ * that the normalised mean luminance value of an image is driven towards a
+ * target, which itself is discovered from tuning data. The algorithm is a
+ * two-stage process.
+ *
+ * In the first stage, an initial gain value is derived by iteratively comparing
+ * the gain-adjusted mean luminance across the entire image against a target,
+ * and selecting a value which pushes it as closely as possible towards the
+ * target.
+ *
+ * In the second stage we calculate the gain required to drive the average of a
+ * section of a histogram to a target value, where the target and the boundaries
+ * of the section of the histogram used in the calculation are taken from the
+ * values defined for the currently configured AeConstraintMode within the
+ * tuning data. This class provides a helper function to parse those tuning data
+ * to discover the constraints, and so requires a specific format for those
+ * data which is described in \ref parseTuningData(). The gain from the first
+ * stage is then clamped to the gain from this stage.
+ *
+ * The final gain is used to adjust the effective exposure value of the image,
+ * and that new exposure value is divided into exposure time, analogue gain and
+ * digital gain according to the selected AeExposureMode. This class uses the
+ * \ref ExposureModeHelper class to assist in that division, and expects the
+ * data needed to initialise that class to be present in tuning data in a
+ * format described in \ref parseTuningData().
+ *
+ * In order to be able to use this algorithm an IPA module needs to be able to
+ * do the following:
+ *
+ * 1. Provide a luminance estimation across an entire image.
+ * 2. Provide a luminance Histogram for the image to use in calculating
+ * constraint compliance. The precision of the Histogram that is available
+ * will determine the supportable precision of the constraints.
+ *
+ * IPA modules that want to use this class to implement their AEGC algorithm
+ * should derive it and provide an overriding estimateLuminance() function for
+ * this class to use. They must call parseTuningData() in init(), and must also
+ * call setLimits() and resetFrameCount() in configure(). They may then use
+ * calculateNewEv() in process(). If the limits passed to setLimits() change for
+ * any reason (for example, in response to a FrameDurationLimit control being
+ * passed in queueRequest()) then setLimits() must be called again with the new
+ * values.
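+ *
+ * A minimal sketch of such a derived class is shown below. It assumes a
+ * hypothetical module whose statistics can provide a luminance Histogram;
+ * all names are illustrative:
+ *
+ * \code{.cpp}
+ * class Agc : public AgcMeanLuminance
+ * {
+ * public:
+ *     int init(IPAContext &context, const YamlObject &tuningData)
+ *     {
+ *         return parseTuningData(tuningData);
+ *     }
+ *
+ *     int configure(IPAContext &context, const IPAConfigInfo &configInfo)
+ *     {
+ *         setLimits(minExposureTime_, maxExposureTime_, minGain_, maxGain_);
+ *         resetFrameCount();
+ *         return 0;
+ *     }
+ *
+ * private:
+ *     double estimateLuminance(double gain) const override;
+ *
+ *     utils::Duration minExposureTime_;
+ *     utils::Duration maxExposureTime_;
+ *     double minGain_;
+ *     double maxGain_;
+ * };
+ * \endcode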
+ */
+
+AgcMeanLuminance::AgcMeanLuminance()
+ : frameCount_(0), filteredExposure_(0s), relativeLuminanceTarget_(0)
+{
+}
+
+AgcMeanLuminance::~AgcMeanLuminance() = default;
+
+void AgcMeanLuminance::parseRelativeLuminanceTarget(const YamlObject &tuningData)
+{
+ relativeLuminanceTarget_ =
+ tuningData["relativeLuminanceTarget"].get<double>(kDefaultRelativeLuminanceTarget);
+}
+
+void AgcMeanLuminance::parseConstraint(const YamlObject &modeDict, int32_t id)
+{
+ for (const auto &[boundName, content] : modeDict.asDict()) {
+ if (boundName != "upper" && boundName != "lower") {
+ LOG(AgcMeanLuminance, Warning)
+ << "Ignoring unknown constraint bound '" << boundName << "'";
+ continue;
+ }
+
+ unsigned int idx = static_cast<unsigned int>(boundName == "upper");
+ AgcConstraint::Bound bound = static_cast<AgcConstraint::Bound>(idx);
+ double qLo = content["qLo"].get<double>().value_or(0.98);
+ double qHi = content["qHi"].get<double>().value_or(1.0);
+ double yTarget =
+ content["yTarget"].getList<double>().value_or(std::vector<double>{ 0.5 }).at(0);
+
+ AgcConstraint constraint = { bound, qLo, qHi, yTarget };
+
+ if (!constraintModes_.count(id))
+ constraintModes_[id] = {};
+
+ if (idx)
+ constraintModes_[id].push_back(constraint);
+ else
+ constraintModes_[id].insert(constraintModes_[id].begin(), constraint);
+ }
+}
+
+int AgcMeanLuminance::parseConstraintModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableConstraintModes;
+
+ const YamlObject &yamlConstraintModes = tuningData[controls::AeConstraintMode.name()];
+ if (yamlConstraintModes.isDictionary()) {
+ for (const auto &[modeName, modeDict] : yamlConstraintModes.asDict()) {
+ if (AeConstraintModeNameValueMap.find(modeName) ==
+ AeConstraintModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown constraint mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeDict.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid constraint mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ parseConstraint(modeDict,
+ AeConstraintModeNameValueMap.at(modeName));
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If the tuning data file contains no constraints then we use the
+ * default constraint that the IPU3/RkISP1 Agc algorithms were adhering
+ * to anyway before centralisation; this constraint forces the top 2% of
+ * the histogram to be at least 0.5.
+ */
+ if (constraintModes_.empty()) {
+ AgcConstraint constraint = {
+ AgcConstraint::Bound::Lower,
+ 0.98,
+ 1.0,
+ 0.5
+ };
+
+ constraintModes_[controls::ConstraintNormal].insert(
+ constraintModes_[controls::ConstraintNormal].begin(),
+ constraint);
+ availableConstraintModes.push_back(
+ AeConstraintModeNameValueMap.at("ConstraintNormal"));
+ }
+
+ controls_[&controls::AeConstraintMode] = ControlInfo(availableConstraintModes);
+
+ return 0;
+}
+
+int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData)
+{
+ std::vector<ControlValue> availableExposureModes;
+
+ const YamlObject &yamlExposureModes = tuningData[controls::AeExposureMode.name()];
+ if (yamlExposureModes.isDictionary()) {
+ for (const auto &[modeName, modeValues] : yamlExposureModes.asDict()) {
+ if (AeExposureModeNameValueMap.find(modeName) ==
+ AeExposureModeNameValueMap.end()) {
+ LOG(AgcMeanLuminance, Warning)
+ << "Skipping unknown exposure mode '" << modeName << "'";
+ continue;
+ }
+
+ if (!modeValues.isDictionary()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Invalid exposure mode '" << modeName << "'";
+ return -EINVAL;
+ }
+
+ std::vector<uint32_t> exposureTimes =
+ modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{});
+ std::vector<double> gains =
+ modeValues["gain"].getList<double>().value_or(std::vector<double>{});
+
+ if (exposureTimes.size() != gains.size()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain array sizes unequal";
+ return -EINVAL;
+ }
+
+ if (exposureTimes.empty()) {
+ LOG(AgcMeanLuminance, Error)
+ << "Exposure time and gain arrays are empty";
+ return -EINVAL;
+ }
+
+ std::vector<std::pair<utils::Duration, double>> stages;
+ for (unsigned int i = 0; i < exposureTimes.size(); i++) {
+ stages.push_back({
+ std::chrono::microseconds(exposureTimes[i]),
+ gains[i]
+ });
+ }
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[AeExposureModeNameValueMap.at(modeName)] = helper;
+ availableExposureModes.push_back(AeExposureModeNameValueMap.at(modeName));
+ }
+ }
+
+ /*
+ * If we don't have any exposure modes in the tuning data we create an
+ * ExposureModeHelper using an empty vector of stages. This will result
+ * in the ExposureModeHelper simply driving the exposure time as high as
+ * possible before touching gain.
+ */
+ if (availableExposureModes.empty()) {
+ int32_t exposureModeId = AeExposureModeNameValueMap.at("ExposureNormal");
+ std::vector<std::pair<utils::Duration, double>> stages = { };
+
+ std::shared_ptr<ExposureModeHelper> helper =
+ std::make_shared<ExposureModeHelper>(stages);
+
+ exposureModeHelpers_[exposureModeId] = helper;
+ availableExposureModes.push_back(exposureModeId);
+ }
+
+ controls_[&controls::AeExposureMode] = ControlInfo(availableExposureModes);
+
+ return 0;
+}
+
+/**
+ * \brief Parse tuning data for AeConstraintMode and AeExposureMode controls
+ * \param[in] tuningData the YamlObject representing the tuning data
+ *
+ * This function parses tuning data to build the list of allowed values for the
+ * AeConstraintMode and AeExposureMode controls. Those tuning data must provide
+ * the data in a specific format; the Agc algorithm's tuning data should contain
+ * a dictionary called AeConstraintMode containing per-mode setting dictionaries
+ * with the key being a value from \ref controls::AeConstraintModeNameValueMap.
+ * Each mode dict may contain either a "lower" or "upper" key or both, for
+ * example:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeConstraintMode:
+ * ConstraintNormal:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * ConstraintHighlight:
+ * lower:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.5
+ * upper:
+ * qLo: 0.98
+ * qHi: 1.0
+ * yTarget: 0.8
+ *
+ * \endcode
+ *
+ * For the AeExposureMode control the data should contain a dictionary called
+ * AeExposureMode containing per-mode setting dictionaries with the key being a
+ * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should
+ * contain an array of exposure times with the key "exposureTime" and an array
+ * of gain values with the key "gain", in this format:
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Agc:
+ * AeExposureMode:
+ * ExposureNormal:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ * ExposureShort:
+ * exposureTime: [ 100, 10000, 30000, 60000, 120000 ]
+ * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ]
+ *
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData)
+{
+ int ret;
+
+ parseRelativeLuminanceTarget(tuningData);
+
+ ret = parseConstraintModes(tuningData);
+ if (ret)
+ return ret;
+
+ return parseExposureModes(tuningData);
+}
+
+/**
+ * \brief Set the ExposureModeHelper limits for this class
+ * \param[in] minExposureTime Minimum exposure time to allow
+ * \param[in] maxExposureTime Maximum exposure time to allow
+ * \param[in] minGain Minimum gain to allow
+ * \param[in] maxGain Maximum gain to allow
+ *
+ * This function calls \ref ExposureModeHelper::setLimits() for each
+ * ExposureModeHelper that has been created for this class.
+ */
+void AgcMeanLuminance::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ for (auto &[id, helper] : exposureModeHelpers_)
+ helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain);
+}
+
+/**
+ * \fn AgcMeanLuminance::constraintModes()
+ * \brief Get the constraint modes that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::exposureModeHelpers()
+ * \brief Get the ExposureModeHelpers that have been parsed from tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::controls()
+ * \brief Get the controls that have been generated after parsing tuning data
+ */
+
+/**
+ * \fn AgcMeanLuminance::estimateLuminance(const double gain)
+ * \brief Estimate the luminance of an image, adjusted by a given gain
+ * \param[in] gain The gain with which to adjust the luminance estimate
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied. It is a
+ * pure virtual function because estimation of luminance is a hardware-specific
+ * operation, which depends wholly on the format of the stats that are delivered
+ * to libcamera from the ISP. Derived classes must override this function with
+ * one that calculates the normalised mean luminance value across the entire
+ * image.
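+ *
+ * A sketch of a possible override, assuming hypothetical per-channel mean
+ * statistics stored by the module (all names illustrative):
+ *
+ * \code{.cpp}
+ * double Agc::estimateLuminance(double gain) const
+ * {
+ *     // Saturate the gain-adjusted 8-bit channel means, then apply the
+ *     // Rec. 601 weights and normalise to the [0, 1] range.
+ *     double red = std::min(rMean_ * gain, 255.0);
+ *     double green = std::min(gMean_ * gain, 255.0);
+ *     double blue = std::min(bMean_ * gain, 255.0);
+ *
+ *     return (0.299 * red + 0.587 * green + 0.114 * blue) / 255.0;
+ * }
+ * \endcode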
+ *
+ * \return The normalised relative luminance of the image
+ */
+
+/**
+ * \brief Estimate the initial gain needed to achieve a relative luminance
+ * target
+ * \return The calculated initial gain
+ */
+double AgcMeanLuminance::estimateInitialGain() const
+{
+ double yTarget = relativeLuminanceTarget_;
+ double yGain = 1.0;
+
+ /*
+ * To account for non-linearity caused by saturation, the value needs to
+ * be estimated in an iterative process, as multiplying by a gain will
+ * not increase the relative luminance by the same factor if some image
+ * regions are saturated.
+ */
+ for (unsigned int i = 0; i < 8; i++) {
+ double yValue = estimateLuminance(yGain);
+ double extra_gain = std::min(10.0, yTarget / (yValue + .001));
+
+ yGain *= extra_gain;
+ LOG(AgcMeanLuminance, Debug) << "Y value: " << yValue
+ << ", Y target: " << yTarget
+ << ", gives gain " << yGain;
+
+ if (utils::abs_diff(extra_gain, 1.0) < 0.01)
+ break;
+ }
+
+ return yGain;
+}
+
+/**
+ * \brief Clamp gain within the bounds of a defined constraint
+ * \param[in] constraintModeIndex The index of the constraint to adhere to
+ * \param[in] hist A histogram over which to calculate inter-quantile means
+ * \param[in] gain The gain to clamp
+ *
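+ * As a worked example, consider a 256-bin histogram whose inter-quantile mean
+ * over the [0.98, 1.0] quantile range is 102.4, and a lower-bound constraint
+ * with a yTarget of 0.5: newGain = 0.5 * 256 / 102.4 = 1.25, so any input
+ * gain below 1.25 is raised to that value.
+ *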
+ * \return The gain clamped within the constraint bounds
+ */
+double AgcMeanLuminance::constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain)
+{
+ std::vector<AgcConstraint> &constraints = constraintModes_[constraintModeIndex];
+ for (const AgcConstraint &constraint : constraints) {
+ double newGain = constraint.yTarget * hist.bins() /
+ hist.interQuantileMean(constraint.qLo, constraint.qHi);
+
+ if (constraint.bound == AgcConstraint::Bound::Lower &&
+ newGain > gain)
+ gain = newGain;
+
+ if (constraint.bound == AgcConstraint::Bound::Upper &&
+ newGain < gain)
+ gain = newGain;
+ }
+
+ return gain;
+}
+
+/**
+ * \brief Apply a filter on the exposure value to limit the speed of changes
+ * \param[in] exposureValue The target exposure from the AGC algorithm
+ *
+ * The speed of the filter is adaptive, and will converge towards the target
+ * more quickly during startup, or when the target exposure is within 20% of
+ * the most recent filter output.
+ *
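+ * The filter is a first-order low-pass: with a speed of 0.2, a previous
+ * output of 10ms and a new target of 20ms, the next output is
+ * 0.2 * 20ms + 0.8 * 10ms = 12ms.
+ *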
+ * \return The filtered exposure
+ */
+utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue)
+{
+ double speed = 0.2;
+
+ /* Adapt instantly if we are in startup phase. */
+ if (frameCount_ < kNumStartupFrames)
+ speed = 1.0;
+
+ /*
+ * If we are close to the desired result, go faster to avoid making
+ * multiple micro-adjustments.
+ * \todo Make this customisable?
+ */
+ if (filteredExposure_ < 1.2 * exposureValue &&
+ filteredExposure_ > 0.8 * exposureValue)
+ speed = sqrt(speed);
+
+ filteredExposure_ = speed * exposureValue +
+ filteredExposure_ * (1.0 - speed);
+
+ return filteredExposure_;
+}
+
+/**
+ * \brief Calculate the new exposure value and split it between exposure time
+ * and gain
+ * \param[in] constraintModeIndex The index of the current constraint mode
+ * \param[in] exposureModeIndex The index of the current exposure mode
+ * \param[in] yHist A Histogram from the ISP statistics to use in constraining
+ * the calculated gain
+ * \param[in] effectiveExposureValue The EV applied to the frame from which the
+ * statistics in use derive
+ *
+ * Calculate a new exposure value to try to obtain the target. The calculated
+ * exposure value is filtered to prevent rapid changes from frame to frame, and
+ * divided into exposure time, analogue and digital gain.
+ *
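+ * A sketch of a typical call site in a derived class's process() function,
+ * with the context field names purely illustrative:
+ *
+ * \code{.cpp}
+ * auto [exposureTime, aGain, dGain] =
+ *     calculateNewEv(activeState.agc.constraintMode,
+ *                    activeState.agc.exposureMode, yHist,
+ *                    effectiveExposureValue);
+ *
+ * activeState.agc.exposureTime = exposureTime;
+ * activeState.agc.gain = aGain;
+ * \endcode
+ *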
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex,
+ uint32_t exposureModeIndex,
+ const Histogram &yHist,
+ utils::Duration effectiveExposureValue)
+{
+ /*
+ * The pipeline handler should validate that we have received an allowed
+ * value for AeExposureMode.
+ */
+ std::shared_ptr<ExposureModeHelper> exposureModeHelper =
+ exposureModeHelpers_.at(exposureModeIndex);
+
+ double gain = estimateInitialGain();
+ gain = constraintClampGain(constraintModeIndex, yHist, gain);
+
+ /*
+ * We don't check whether we're already close to the target, because
+ * even if the effective exposure value is the same as the last frame's
+ * we could have switched to an exposure mode that would require a new
+ * pass through the splitExposure() function.
+ */
+
+ utils::Duration newExposureValue = effectiveExposureValue * gain;
+
+ /*
+ * We filter the exposure value to make sure changes are not too jarring
+ * from frame to frame.
+ */
+ newExposureValue = filterExposure(newExposureValue);
+
+ frameCount_++;
+ return exposureModeHelper->splitExposure(newExposureValue);
+}
+
+/**
+ * \fn AgcMeanLuminance::resetFrameCount()
+ * \brief Reset the frame counter
+ *
+ * This function resets the internal frame counter, which exists to help the
+ * algorithm decide whether it should respond instantly or not. The expectation
+ * is for derived classes to call this function before each camera start call in
+ * their configure() function.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h
new file mode 100644
index 00000000..c41391cb
--- /dev/null
+++ b/src/ipa/libipa/agc_mean_luminance.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Base class for mean luminance AGC algorithms
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "exposure_mode_helper.h"
+#include "histogram.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class AgcMeanLuminance
+{
+public:
+ AgcMeanLuminance();
+ virtual ~AgcMeanLuminance();
+
+ struct AgcConstraint {
+ enum class Bound {
+ Lower = 0,
+ Upper = 1
+ };
+ Bound bound;
+ double qLo;
+ double qHi;
+ double yTarget;
+ };
+
+ int parseTuningData(const YamlObject &tuningData);
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes()
+ {
+ return constraintModes_;
+ }
+
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers()
+ {
+ return exposureModeHelpers_;
+ }
+
+ ControlInfoMap::Map controls()
+ {
+ return controls_;
+ }
+
+ std::tuple<utils::Duration, double, double>
+ calculateNewEv(uint32_t constraintModeIndex, uint32_t exposureModeIndex,
+ const Histogram &yHist, utils::Duration effectiveExposureValue);
+
+ void resetFrameCount()
+ {
+ frameCount_ = 0;
+ }
+
+private:
+ virtual double estimateLuminance(const double gain) const = 0;
+
+ void parseRelativeLuminanceTarget(const YamlObject &tuningData);
+ void parseConstraint(const YamlObject &modeDict, int32_t id);
+ int parseConstraintModes(const YamlObject &tuningData);
+ int parseExposureModes(const YamlObject &tuningData);
+ double estimateInitialGain() const;
+ double constraintClampGain(uint32_t constraintModeIndex,
+ const Histogram &hist,
+ double gain);
+ utils::Duration filterExposure(utils::Duration exposureValue);
+
+ uint64_t frameCount_;
+ utils::Duration filteredExposure_;
+ double relativeLuminanceTarget_;
+
+ std::map<int32_t, std::vector<AgcConstraint>> constraintModes_;
+ std::map<int32_t, std::shared_ptr<ExposureModeHelper>> exposureModeHelpers_;
+ ControlInfoMap::Map controls_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.cpp b/src/ipa/libipa/algorithm.cpp
new file mode 100644
index 00000000..201efdfd
--- /dev/null
+++ b/src/ipa/libipa/algorithm.cpp
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * IPA control algorithm interface
+ */
+
+#include "algorithm.h"
+
+/**
+ * \file algorithm.h
+ * \brief Algorithm common interface
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Algorithm
+ * \brief The base class for all IPA algorithms
+ * \tparam Module The IPA module type for this class of algorithms
+ *
+ * The Algorithm class defines a standard interface for IPA algorithms
+ * compatible with the \a Module. By abstracting algorithms, it makes possible
+ * the implementation of generic code to manage algorithms regardless of their
+ * specific type.
+ *
+ * To specialize the Algorithm class template, an IPA module shall specialize
+ * the Module class template with module-specific context and configuration
+ * types, and pass the specialized Module class as the \a Module template
+ * argument.
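+ *
+ * A minimal sketch of an algorithm built on this interface, for a module
+ * whose specialization is named Module (the algorithm name and tuning key
+ * are illustrative):
+ *
+ * \code{.cpp}
+ * class BlackLevel : public Algorithm<Module>
+ * {
+ * public:
+ *     int init(IPAContext &context, const YamlObject &tuningData) override
+ *     {
+ *         blackLevel_ = tuningData["blackLevel"].get<uint32_t>(0);
+ *         return 0;
+ *     }
+ *
+ * private:
+ *     uint32_t blackLevel_;
+ * };
+ *
+ * REGISTER_IPA_ALGORITHM(BlackLevel, "BlackLevel")
+ * \endcode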
+ */
+
+/**
+ * \typedef Algorithm::Module
+ * \brief The IPA module type for this class of algorithms
+ */
+
+/**
+ * \fn Algorithm::init()
+ * \brief Initialize the Algorithm with tuning data
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The tuning data for the algorithm
+ *
+ * This function is called once, when the IPA module is initialized, to
+ * initialize the algorithm. The \a tuningData YamlObject contains the tuning
+ * data for the algorithm.
+ *
+ * \return 0 if successful, an error code otherwise
+ */
+
+/**
+ * \fn Algorithm::configure()
+ * \brief Configure the Algorithm given an IPAConfigInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ *
+ * Algorithms may implement a configure operation to pre-calculate
+ * parameters prior to commencing streaming.
+ *
+ * Configuration state may be stored in the IPASessionConfiguration structure of
+ * the IPAContext.
+ *
+ * \return 0 if successful, an error code otherwise
+ */
+
+/**
+ * \fn Algorithm::queueRequest()
+ * \brief Provide control values to the algorithm
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame number to apply the control values
+ * \param[in] frameContext The current frame's context
+ * \param[in] controls The list of user controls
+ *
+ * This function is called for each request queued to the camera. It provides
+ * the controls stored in the request to the algorithm. The \a frame number
+ * is the Request sequence number and identifies the frame on which the
+ * controls are to take effect.
+ *
+ * Algorithms shall read the applicable controls and store their value for later
+ * use during frame processing.
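+ *
+ * A sketch of a typical implementation, with the handled control and the
+ * frame context fields purely illustrative:
+ *
+ * \code{.cpp}
+ * void Agc::queueRequest(IPAContext &context, const uint32_t frame,
+ *                        IPAFrameContext &frameContext,
+ *                        const ControlList &controls)
+ * {
+ *     const auto &exposureTime = controls.get(controls::ExposureTime);
+ *     if (exposureTime)
+ *         frameContext.agc.exposureTime = *exposureTime;
+ * }
+ * \endcode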
+ */
+
+/**
+ * \fn Algorithm::prepare()
+ * \brief Fill the \a params buffer with ISP processing parameters for a frame
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The FrameContext for this frame
+ * \param[out] params The ISP specific parameters
+ *
+ * This function is called for every frame when the camera is running before it
+ * is processed by the ISP to prepare the ISP processing parameters for that
+ * frame.
+ *
+ * Algorithms shall fill in the parameter structure fields appropriately to
+ * configure the ISP processing blocks that they are responsible for. This
+ * includes setting fields and flags that enable those processing blocks.
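+ *
+ * A sketch for an IPU3-based algorithm; the AWB block and the configuration
+ * value are illustrative, the important point being that the matching use
+ * flag is raised for every structure written:
+ *
+ * \code{.cpp}
+ * void Awb::prepare(IPAContext &context, const uint32_t frame,
+ *                   IPAFrameContext &frameContext, ipu3_uapi_params *params)
+ * {
+ *     // Fill the block's parameters...
+ *     params->acc_param.awb.config = config_;
+ *
+ *     // ...and raise the matching use flag so the kernel reads them.
+ *     params->use.acc_awb = 1;
+ * }
+ * \endcode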
+ */
+
+/**
+ * \fn Algorithm::process()
+ * \brief Process ISP statistics, and run algorithm operations
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The current frame's context
+ * \param[in] stats The IPA statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * This function is called, while the camera is running, for every frame
+ * processed by the ISP, to process the statistics generated from that frame.
+ * Algorithms shall use this data to run calculations, update their state
+ * accordingly, and fill the frame metadata.
+ *
+ * Processing shall not take an undue amount of time, and any extended or
+ * computationally expensive calculations or operations must be handled
+ * asynchronously in a separate thread.
+ *
+ * Algorithms can store state in their respective IPAFrameContext structures,
+ * and reference state from the IPAFrameContext of other algorithms.
+ *
+ * \todo Historical data may be required as part of the processing.
+ * Either the previous frame, or the IPAFrameContext state of the frame
+ * that generated the statistics for this operation may be required for
+ * some advanced algorithms to prevent oscillations or support control
+ * loops correctly. Only a single IPAFrameContext is available currently,
+ * and so any data stored may represent the results of the previously
+ * completed operations.
+ *
+ * Care shall be taken to ensure the ordering of access to the information
+ * such that the algorithms use up-to-date state as required.
+ */
+
+/**
+ * \class AlgorithmFactory
+ * \brief Registration of Algorithm classes and creation of instances
+ * \tparam _Algorithm The algorithm class type for this factory
+ *
+ * To facilitate instantiation of Algorithm classes, the AlgorithmFactory class
+ * implements auto-registration of algorithms with the IPA Module class. Each
+ * Algorithm subclass shall register itself using the REGISTER_IPA_ALGORITHM()
+ * macro, which will create a corresponding instance of an AlgorithmFactory and
+ * register it with the IPA Module.
+ */
+
+/**
+ * \fn AlgorithmFactory::AlgorithmFactory()
+ * \brief Construct an algorithm factory
+ * \param[in] name Name of the algorithm class
+ *
+ * Creating an instance of the factory automatically registers it with the IPA
+ * Module class, enabling creation of algorithm instances through
+ * Module::createAlgorithm().
+ *
+ * The factory \a name identifies the algorithm and shall be unique.
+ */
+
+/**
+ * \fn AlgorithmFactory::create()
+ * \brief Create an instance of the Algorithm corresponding to the factory
+ * \return A pointer to a newly constructed instance of the Algorithm subclass
+ * corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_IPA_ALGORITHM
+ * \brief Register an algorithm with the IPA module
+ * \param[in] algorithm Class name of Algorithm derived class to register
+ * \param[in] name Name of the algorithm
+ *
+ * Register an Algorithm subclass with the IPA module to make it available for
+ * instantiation through Module::createAlgorithm(). The \a name identifies the
+ * algorithm and must be unique across all algorithms registered for the IPA
+ * module.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/algorithm.h b/src/ipa/libipa/algorithm.h
new file mode 100644
index 00000000..9a19dbd6
--- /dev/null
+++ b/src/ipa/libipa/algorithm.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * ISP control algorithm interface
+ */
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <string>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+template<typename _Module>
+class Algorithm
+{
+public:
+ using Module = _Module;
+
+ virtual ~Algorithm() {}
+
+ virtual int init([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+ {
+ return 0;
+ }
+
+ virtual int configure([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const typename Module::Config &configInfo)
+ {
+ return 0;
+ }
+
+ virtual void queueRequest([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const ControlList &controls)
+ {
+ }
+
+ virtual void prepare([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] typename Module::Params *params)
+ {
+ }
+
+ virtual void process([[maybe_unused]] typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ [[maybe_unused]] const typename Module::Stats *stats,
+ [[maybe_unused]] ControlList &metadata)
+ {
+ }
+};
+
+template<typename _Module>
+class AlgorithmFactoryBase
+{
+public:
+ AlgorithmFactoryBase(const char *name)
+ : name_(name)
+ {
+ _Module::registerAlgorithm(this);
+ }
+
+ virtual ~AlgorithmFactoryBase() = default;
+
+ const std::string &name() const { return name_; }
+
+ virtual std::unique_ptr<Algorithm<_Module>> create() const = 0;
+
+private:
+ std::string name_;
+};
+
+template<typename _Algorithm>
+class AlgorithmFactory : public AlgorithmFactoryBase<typename _Algorithm::Module>
+{
+public:
+ AlgorithmFactory(const char *name)
+ : AlgorithmFactoryBase<typename _Algorithm::Module>(name)
+ {
+ }
+
+ ~AlgorithmFactory() = default;
+
+ std::unique_ptr<Algorithm<typename _Algorithm::Module>> create() const override
+ {
+ return std::make_unique<_Algorithm>();
+ }
+};
+
+#define REGISTER_IPA_ALGORITHM(algorithm, name) \
+static AlgorithmFactory<algorithm> global_##algorithm##Factory(name);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
new file mode 100644
index 00000000..7c66cd57
--- /dev/null
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -0,0 +1,752 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Helper class that performs sensor-specific
+ * parameter computations
+ */
+#include "camera_sensor_helper.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file camera_sensor_helper.h
+ * \brief Helper class that performs sensor-specific parameter computations
+ *
+ * Computation of sensor configuration parameters is a sensor-specific
+ * operation. Each CameraSensorHelper derived class computes the value of
+ * configuration parameters, for example the analogue gain value, using
+ * sensor-specific functions and constants.
+ *
+ * Every subclass of CameraSensorHelper shall be registered with libipa using
+ * the REGISTER_CAMERA_SENSOR_HELPER() macro.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensorHelper)
+
+namespace ipa {
+
+/**
+ * \class CameraSensorHelper
+ * \brief Base class for computing sensor tuning parameters using
+ * sensor-specific constants
+ *
+ * Instances derived from CameraSensorHelper class are sensor-specific.
+ * Each supported sensor will have an associated derived class defined.
+ */
+
+/**
+ * \fn CameraSensorHelper::CameraSensorHelper()
+ * \brief Construct a CameraSensorHelper instance
+ *
+ * CameraSensorHelper derived class instances shall never be constructed
+ * manually but always through the CameraSensorHelperFactoryBase::create()
+ * function.
+ */
+
+/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor, scaled to a 16-bit
+ * pixel width. If it is unknown, an empty optional is returned.
+ *
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - Pedestal added by the sensor to pixel values. They are typically fixed,
+ * sometimes programmable and should be reported in datasheets (but
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depend on the
+ * sensor gains.
+ *
+ * The pedestal is usually the value with the biggest contribution to the
+ * overall black level. In most cases it is known in advance; in rare cases it
+ * could be queried from the sensor, although not a single driver in the Linux
+ * kernel currently exposes such a control. This function provides that fixed,
+ * known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
+ * \brief Compute gain code from the analogue gain absolute value
+ * \param[in] gain The real gain to pass
+ *
+ * This function aims to abstract the calculation of the gain, letting the IPA
+ * use the real gain for its estimations.
+ *
+ * \return The gain code to pass to V4L2
+ */
+uint32_t CameraSensorHelper::gainCode(double gain) const
+{
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
+
+ return (l->c0 - l->c1 * gain) /
+ (l->m1 * gain - l->m0);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
+
+ return std::log2(gain / e->a) / e->m;
+ } else {
+ ASSERT(false);
+ return 0;
+ }
+}
+
+/**
+ * \brief Compute the real gain from the V4L2 subdev control gain code
+ * \param[in] gainCode The V4L2 subdev control gain
+ *
+ * This function aims to abstract the calculation of the gain, letting the IPA
+ * use the real gain for its estimations. It is the counterpart of the function
+ * CameraSensorHelper::gainCode.
+ *
+ * \return The real gain
+ */
+double CameraSensorHelper::gain(uint32_t gainCode) const
+{
+ double gain = static_cast<double>(gainCode);
+
+ if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) {
+ ASSERT(l->m0 == 0 || l->m1 == 0);
+
+ return (l->m0 * gain + l->c0) /
+ (l->m1 * gain + l->c1);
+ } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) {
+ ASSERT(e->a != 0 && e->m != 0);
+
+ return e->a * std::exp2(e->m * gain);
+ } else {
+ ASSERT(false);
+ return 0.0;
+ }
+}
+
+/**
+ * \struct CameraSensorHelper::AnalogueGainLinear
+ * \brief Analogue gain constants for the linear gain model
+ *
+ * The relationship between the integer gain parameter and the resulting gain
+ * multiplier is given by the following equation:
+ *
+ * \f$gain=\frac{m0x+c0}{m1x+c1}\f$
+ *
+ * Where 'x' is the gain control parameter, and m0, m1, c0 and c1 are
+ * image-sensor-specific constants of the sensor.
+ * These constants are static parameters, and for any given image sensor either
+ * m0 or m1 shall be zero.
+ *
+ * The full Gain equation therefore reduces to either:
+ *
+ * \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$
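+ *
+ * For example, the IMX219 helper below uses
+ * AnalogueGainLinear{ 0, 256, -1, 256 }, which reduces to
+ * \f$gain=\frac{256}{256-x}\f$: a gain code of 128 decodes to a gain of 2.0,
+ * and gainCode(2.0) recovers the code 128.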
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c0
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::m1
+ * \brief Constant used in the linear gain coding/decoding
+ *
+ * \note Either m0 or m1 shall be zero.
+ *
+ * \var CameraSensorHelper::AnalogueGainLinear::c1
+ * \brief Constant used in the linear gain coding/decoding
+ */
+
+/**
+ * \struct CameraSensorHelper::AnalogueGainExp
+ * \brief Analogue gain constants for the exponential gain model
+ *
+ * The relationship between the integer gain parameter and the resulting gain
+ * multiplier is given by the following equation:
+ *
+ * \f$gain = a \cdot 2^{m \cdot x}\f$
+ *
+ * Where 'x' is the gain control parameter, and 'a' and 'm' are image
+ * sensor-specific constants.
+ *
+ * This is a subset of the MIPI CCS exponential gain model with the linear
+ * factor 'a' being a constant, but with the exponent being configurable
+ * through the 'm' coefficient.
+ *
+ * When the gain is expressed in dB, 'a' is equal to 1 and 'm' to
+ * \f$log_{2}{10^{\frac{1}{20}}}\f$.
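+ *
+ * For example, the IMX290 helper below uses expGainDb(0.3), i.e. gain codes
+ * in 0.3 dB steps: a code of 100 corresponds to 30 dB and decodes to a gain
+ * of \f$10^{30/20} \approx 31.6\f$.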
+ *
+ * \var CameraSensorHelper::AnalogueGainExp::a
+ * \brief Constant used in the exponential gain coding/decoding
+ *
+ * \var CameraSensorHelper::AnalogueGainExp::m
+ * \brief Constant used in the exponential gain coding/decoding
+ */
+
+/**
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ * \sa CameraSensorHelper::blackLevel()
+ */
+
+/**
+ * \var CameraSensorHelper::gain_
+ * \brief The analogue gain parameters used for calculation
+ *
+ * The analogue gain is calculated through a formula, and its parameters are
+ * sensor specific. Use this variable to store the values at init time.
+ */
+
+/**
+ * \class CameraSensorHelperFactoryBase
+ * \brief Base class for camera sensor helper factories
+ *
+ * The CameraSensorHelperFactoryBase class is the base of all specializations of
+ * the CameraSensorHelperFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
+
+/**
+ * \brief Construct a camera sensor helper factory base
+ * \param[in] name Name of the camera sensor helper class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
+ */
+CameraSensorHelperFactoryBase::CameraSensorHelperFactoryBase(const std::string name)
+ : name_(name)
+{
+ registerType(this);
+}
+
+/**
+ * \brief Create an instance of the CameraSensorHelper corresponding to
+ * a named factory
+ * \param[in] name Name of the factory
+ *
+ * \return A unique pointer to a new instance of the CameraSensorHelper subclass
+ * corresponding to the named factory or a null pointer if no such factory
+ * exists
+ */
+std::unique_ptr<CameraSensorHelper> CameraSensorHelperFactoryBase::create(const std::string &name)
+{
+ const std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
+
+ for (const CameraSensorHelperFactoryBase *factory : factories) {
+ if (name != factory->name_)
+ continue;
+
+ return factory->createInstance();
+ }
+
+ return nullptr;
+}
+
+/**
+ * \brief Add a camera sensor helper class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor helper
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the camera
+ * sensor helper name.
+ */
+void CameraSensorHelperFactoryBase::registerType(CameraSensorHelperFactoryBase *factory)
+{
+ std::vector<CameraSensorHelperFactoryBase *> &factories =
+ CameraSensorHelperFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all camera sensor helper factories
+ * \return The list of camera sensor helper factories
+ */
+std::vector<CameraSensorHelperFactoryBase *> &CameraSensorHelperFactoryBase::factories()
+{
+ /*
+ * The static factories list is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorHelperFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \class CameraSensorHelperFactory
+ * \brief Registration of CameraSensorHelperFactory classes and creation of instances
+ * \tparam _Helper The camera sensor helper class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensorHelper classes, the
+ * CameraSensorHelperFactory class implements auto-registration of camera sensor
+ * helpers. Each CameraSensorHelper subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR_HELPER() macro, which will create a corresponding
+ * instance of a CameraSensorHelperFactory subclass and register it with the
+ * static list of factories.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::CameraSensorHelperFactory(const char *name)
+ * \brief Construct a camera sensor helper factory
+ * \param[in] name Name of the camera sensor helper class
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorHelperFactoryBase::factories()
+ * function.
+ *
+ * The factory \a name is used to look up factories and shall be unique.
+ */
+
+/**
+ * \fn CameraSensorHelperFactory::createInstance() const
+ * \brief Create an instance of the CameraSensorHelper corresponding to the
+ * factory
+ *
+ * \return A unique pointer to a newly constructed instance of the
+ * CameraSensorHelper subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR_HELPER
+ * \brief Register a camera sensor helper with the camera sensor helper factory
+ * \param[in] name Sensor model name used to register the class
+ * \param[in] helper Class name of CameraSensorHelper derived class to register
+ *
+ * Register a CameraSensorHelper subclass with the factory and make it available
+ * to try and match sensors.
+ */
+
+/* -----------------------------------------------------------------------------
+ * Sensor-specific subclasses
+ */
+
+#ifndef __DOXYGEN__
+
+/*
+ * Helper function to compute the m parameter of the exponential gain model
+ * when the gain code is expressed in dB.
+ */
+static constexpr double expGainDb(double step)
+{
+ constexpr double log2_10 = 3.321928094887362;
+
+ /*
+ * The gain code is expressed in step * dB (e.g. in 0.1 dB steps):
+ *
+ * G_code = G_dB/step = 20/step*log10(G_linear)
+ *
+ * Inverting the formula, we get
+ *
+ * G_linear = 10^(step/20*G_code) = 2^(log2(10)*step/20*G_code)
+ */
+ return log2_10 * step / 20;
+}
+
+class CameraSensorHelperAr0144 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperAr0144()
+ {
+ /* Power-on default value: 168 at 12bits. */
+ blackLevel_ = 2688;
+ }
+
+ uint32_t gainCode(double gain) const override
+ {
+ /* The recommended minimum gain is 1.6842 to avoid artifacts. */
+ gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45);
+
+ /*
+ * The analogue gain is made of a coarse exponential gain in
+ * the range [2^0, 2^4] and a fine inversely linear gain in the
+ * range [1.0, 2.0[. There is an additional fixed 1.153125
+ * multiplier when the coarse gain reaches 2^2.
+ */
+
+ if (gain > 4.0)
+ gain /= 1.153125;
+
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (1 - (1 << coarse) / gain) * 32;
+
+ /* The fine gain rounding depends on the coarse gain. */
+ if (coarse == 1 || coarse == 3)
+ fine &= ~1;
+ else if (coarse == 4)
+ fine &= ~3;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+ unsigned int d1;
+ double d2, m;
+
+ switch (coarse) {
+ default:
+ case 0:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.0;
+ break;
+ case 1:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.0;
+ break;
+ case 2:
+ d1 = 1;
+ d2 = 32.0;
+ m = 1.153125;
+ break;
+ case 3:
+ d1 = 2;
+ d2 = 16.0;
+ m = 1.153125;
+ break;
+ case 4:
+ d1 = 4;
+ d2 = 8.0;
+ m = 1.153125;
+ break;
+ }
+
+ /*
+ * With infinite precision, the calculated gain would be exact,
+ * and the reverse conversion with gainCode() would produce the
+ * same gain code. In the real world, rounding errors may cause
+ * the calculated gain to be lower by an amount negligible for
+ * all purposes, except for the reverse conversion. Converting
+ * the gain to a gain code could then return the quantized value
+ * just lower than the original gain code. To avoid this, tests
+ * showed that adding the machine epsilon to the multiplier m is
+ * sufficient.
+ */
+ m += std::numeric_limits<decltype(m)>::epsilon();
+
+ return m * (1 << coarse) / (1.0 - (fine / d1) / d2);
+ }
+
+private:
+ static constexpr double kStep_ = 16;
+};
+REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144)
+
+class CameraSensorHelperAr0521 : public CameraSensorHelper
+{
+public:
+ uint32_t gainCode(double gain) const override
+ {
+ gain = std::clamp(gain, 1.0, 15.5);
+ unsigned int coarse = std::log2(gain);
+ unsigned int fine = (gain / (1 << coarse) - 1) * kStep_;
+
+ return (coarse << 4) | (fine & 0xf);
+ }
+
+ double gain(uint32_t gainCode) const override
+ {
+ unsigned int coarse = gainCode >> 4;
+ unsigned int fine = gainCode & 0xf;
+
+ return (1 << coarse) * (1 + fine / kStep_);
+ }
+
+private:
+ static constexpr double kStep_ = 16;
+};
+REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521)
+
+class CameraSensorHelperGc05a2 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc05a2()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2)
+
+class CameraSensorHelperGc08a3 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperGc08a3()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3)
+
+class CameraSensorHelperImx214 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx214()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214)
+
+class CameraSensorHelperImx219 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx219()
+ {
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 256, -1, 256 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219)
+
+class CameraSensorHelperImx258 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx258()
+ {
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 0, 512, -1, 512 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258)
+
+class CameraSensorHelperImx283 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx283()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283)
+
+class CameraSensorHelperImx290 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx290()
+ {
+ /* From datasheet: 0xf0 at 12bits. */
+ blackLevel_ = 3840;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290)
+
+class CameraSensorHelperImx296 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx296()
+ {
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296)
+
+class CameraSensorHelperImx327 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx327", CameraSensorHelperImx327)
+
+class CameraSensorHelperImx335 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx335()
+ {
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335)
+
+class CameraSensorHelperImx415 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx415()
+ {
+ gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415)
+
+class CameraSensorHelperImx462 : public CameraSensorHelperImx290
+{
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462)
+
+class CameraSensorHelperImx477 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperImx477()
+ {
+ gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477)
+
+class CameraSensorHelperOv2685 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2685()
+ {
+ /*
+ * The Sensor Manual doesn't appear to document the gain model.
+ * This has been validated with some empirical testing only.
+ */
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685)
+
+class CameraSensorHelperOv2740 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv2740()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740)
+
+class CameraSensorHelperOv4689 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv4689()
+ {
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689)
+
+class CameraSensorHelperOv5640 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5640()
+ {
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640)
+
+class CameraSensorHelperOv5647 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5647()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647)
+
+class CameraSensorHelperOv5670 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5670()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670)
+
+class CameraSensorHelperOv5675 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5675()
+ {
+ /* From Linux kernel driver: 0x40 at 10bits. */
+ blackLevel_ = 4096;
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675)
+
+class CameraSensorHelperOv5693 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv5693()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 16 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693)
+
+class CameraSensorHelperOv64a40 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv64a40()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40)
+
+class CameraSensorHelperOv8858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8858()
+ {
+ /*
+ * \todo Validate the selected 1/128 step value as it differs
+ * from what the sensor manual describes.
+ *
+ * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267
+ */
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858)
+
+class CameraSensorHelperOv8865 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv8865()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865)
+
+class CameraSensorHelperOv13858 : public CameraSensorHelper
+{
+public:
+ CameraSensorHelperOv13858()
+ {
+ gain_ = AnalogueGainLinear{ 1, 0, 0, 128 };
+ }
+};
+REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858)
+
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
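
As a cross-check on the parameter sets above, here is a minimal sketch of the two gain models they feed, assuming the equations presumably documented earlier in this file for AnalogueGainLinear (gain(code) = (m0 * code + c0) / (m1 * code + c1)) and AnalogueGainExp (gain(code) = a * exp(code * m)), and assuming expGainDb(step) evaluates to log(10) * step / 20. The standalone functions below are illustrative only, not part of libipa.

#include <cmath>
#include <cstdio>
#include <stdint.h>

/* Linear model sketch: gain(code) = (m0 * code + c0) / (m1 * code + c1). */
static double linearGain(int16_t m0, int16_t c0, int16_t m1, int16_t c1,
			 uint32_t code)
{
	return (m0 * static_cast<double>(code) + c0) /
	       (m1 * static_cast<double>(code) + c1);
}

/* Exponential model sketch: gain(code) = a * exp(code * m). */
static double expGain(double a, double m, uint32_t code)
{
	return a * std::exp(code * m);
}

int main()
{
	/* imx283: AnalogueGainLinear{ 0, 2048, -1, 2048 } -> 2048 / (2048 - code). */
	printf("imx283 code 1024 -> x%.2f\n",
	       linearGain(0, 2048, -1, 2048, 1024)); /* x2.00 */

	/* imx290: AnalogueGainExp{ 1.0, expGainDb(0.3) }, i.e. 0.3 dB per code step. */
	double m = std::log(10.0) * 0.3 / 20.0;
	printf("imx290 code 100 -> x%.2f\n", expGain(1.0, m, 100)); /* ~x31.6, i.e. 30 dB */

	return 0;
}
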
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
new file mode 100644
index 00000000..a9300a64
--- /dev/null
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Helper class that performs sensor-specific parameter computations
+ */
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <stdint.h>
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/base/class.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class CameraSensorHelper
+{
+public:
+ CameraSensorHelper() = default;
+ virtual ~CameraSensorHelper() = default;
+
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
+ virtual uint32_t gainCode(double gain) const;
+ virtual double gain(uint32_t gainCode) const;
+
+protected:
+ struct AnalogueGainLinear {
+ int16_t m0;
+ int16_t c0;
+ int16_t m1;
+ int16_t c1;
+ };
+
+ struct AnalogueGainExp {
+ double a;
+ double m;
+ };
+
+ std::optional<int16_t> blackLevel_;
+ std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_;
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper)
+};
+
+class CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactoryBase(const std::string name);
+ virtual ~CameraSensorHelperFactoryBase() = default;
+
+ static std::unique_ptr<CameraSensorHelper> create(const std::string &name);
+
+ static std::vector<CameraSensorHelperFactoryBase *> &factories();
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelperFactoryBase)
+
+ static void registerType(CameraSensorHelperFactoryBase *factory);
+
+ virtual std::unique_ptr<CameraSensorHelper> createInstance() const = 0;
+
+ std::string name_;
+};
+
+template<typename _Helper>
+class CameraSensorHelperFactory final : public CameraSensorHelperFactoryBase
+{
+public:
+ CameraSensorHelperFactory(const char *name)
+ : CameraSensorHelperFactoryBase(name)
+ {
+ }
+
+private:
+ std::unique_ptr<CameraSensorHelper> createInstance() const override
+ {
+ return std::make_unique<_Helper>();
+ }
+};
+
+#define REGISTER_CAMERA_SENSOR_HELPER(name, helper) \
+static CameraSensorHelperFactory<helper> global_##helper##Factory(name);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
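
For reference, a short usage sketch of the factory machinery declared above, as it might be driven from an IPA module; the sensor name matches one of the helpers registered in camera_sensor_helper.cpp, and the surrounding function is hypothetical.

#include <memory>
#include <optional>
#include <stdint.h>

#include "camera_sensor_helper.h"

using namespace libcamera::ipa;

void example()
{
	/* Look up the helper registered for the sensor by name. */
	std::unique_ptr<CameraSensorHelper> helper =
		CameraSensorHelperFactoryBase::create("imx283");
	if (!helper)
		return; /* No helper registered for this sensor. */

	/* Convert between analogue gain values and sensor gain codes. */
	uint32_t code = helper->gainCode(2.0);
	double gain = helper->gain(code);
	static_cast<void>(gain);

	/* The black level is optional; not all helpers provide one. */
	std::optional<int16_t> black = helper->blackLevel();
	static_cast<void>(black);
}
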
diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp
new file mode 100644
index 00000000..97124cf4
--- /dev/null
+++ b/src/ipa/libipa/colours.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#include "colours.h"
+
+#include <algorithm>
+#include <cmath>
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \file colours.h
+ * \brief Functions to reduce code duplication between IPA modules
+ */
+
+/**
+ * \brief Estimate luminance from RGB values following ITU-R BT.601
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates a luminance value from a triplet of Red, Green and
+ * Blue values, following the formula defined by ITU-R Recommendation BT.601-7
+ * which can be found at https://www.itu.int/rec/R-REC-BT.601
+ *
+ * \return The estimated luminance value
+ */
+double rec601LuminanceFromRGB(const RGB<double> &rgb)
+{
+ static const Vector<double, 3> rgb2y{{
+ 0.299, 0.587, 0.114
+ }};
+
+ return rgb.dot(rgb2y);
+}
+
+/**
+ * \brief Estimate correlated colour temperature from RGB color space input
+ * \param[in] rgb The RGB value
+ *
+ * This function estimates the correlated color temperature from the RGB color
+ * space input. In physics and color science, the Planckian locus or black body
+ * locus is the path that the color of an incandescent black body would take in
+ * a particular chromaticity space as the black body temperature changes.
+ *
+ * If a narrow range of color temperatures is considered (those encapsulating
+ * daylight being the most practical case) one can approximate the Planckian
+ * locus in order to calculate the CCT in terms of chromaticity coordinates.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Color_temperature#Approximation
+ *
+ * \return The estimated color temperature
+ */
+uint32_t estimateCCT(const RGB<double> &rgb)
+{
+ /*
+ * Convert the RGB values to CIE tristimulus values (XYZ) and divide by
+ * the sum of X, Y and Z to calculate the CIE xy chromaticity.
+ */
+ static const Matrix<double, 3, 3> rgb2xyz({
+ -0.14282, 1.54924, -0.95641,
+ -0.32466, 1.57837, -0.73191,
+ -0.68202, 0.77073, 0.56332
+ });
+
+ Vector<double, 3> xyz = rgb2xyz * rgb;
+ xyz /= xyz.sum();
+
+ /* Calculate CCT */
+ double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y());
+ return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
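
A small usage sketch of these two helpers, assuming the RGB vector type from vector.h behaves as it is used above (brace initialisation, dot product); the input values are arbitrary.

#include <cstdio>

#include "colours.h"

using namespace libcamera::ipa;

int main()
{
	/* Normalised RGB averages for a scene; arbitrary example values. */
	RGB<double> rgb{{ 0.5, 0.45, 0.35 }};

	double y = rec601LuminanceFromRGB(rgb); /* 0.299 R + 0.587 G + 0.114 B */
	uint32_t cct = estimateCCT(rgb);        /* correlated colour temperature */

	printf("luminance %.3f, CCT %uK\n", y, cct);
	return 0;
}
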
diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h
new file mode 100644
index 00000000..fa6a8b57
--- /dev/null
+++ b/src/ipa/libipa/colours.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * libipa miscellaneous colour helpers
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include "vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+double rec601LuminanceFromRGB(const RGB<double> &rgb);
+uint32_t estimateCCT(const RGB<double> &rgb);
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
new file mode 100644
index 00000000..f235316d
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+#include "exposure_mode_helper.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file exposure_mode_helper.h
+ * \brief Helper class that performs computations relating to exposure
+ *
+ * AEGC algorithms need to split exposure between exposure time and analogue
+ * and digital gain. Multiple implementations do so based on paired stages of
+ * exposure time and gain limits; this helper avoids duplicating that code.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(ExposureModeHelper)
+
+namespace ipa {
+
+/**
+ * \class ExposureModeHelper
+ * \brief Class for splitting exposure into exposure time and total gain
+ *
+ * The ExposureModeHelper class provides a standard interface through which an
+ * AEGC algorithm can divide exposure between exposure time and gain. It is
+ * configured with a set of exposure time and gain pairs and works by initially
+ * fixing gain at 1.0 and increasing exposure time up to the exposure time value
+ * from the first pair in the set in an attempt to meet the required exposure
+ * value.
+ *
+ * If the required exposure is not achievable by the first exposure time value
+ * alone it ramps gain up to the value from the first pair in the set. If the
+ * required exposure is still not met it then allows exposure time to ramp up to
+ * the exposure time value from the second pair in the set, and continues in this
+ * vein until either the required exposure time is met, or else the hardware's
+ * exposure time or gain limits are reached.
+ *
+ * This method allows users to strike a balance between a well-exposed image and
+ * an acceptable frame-rate, as opposed to simply maximising exposure time
+ * followed by gain. The same helpers can be used to perform the latter
+ * operation if needed by passing an empty set of pairs to the initialisation
+ * function.
+ *
+ * The gain values may exceed a camera sensor's analogue gain limits if either
+ * it or the IPA is also capable of digital gain. The configure() function must
+ * be called with the hardware's limits to inform the helper of those
+ * constraints. Any gain that is needed will be applied as analogue gain first
+ * until the hardware's limit is reached, following which digital gain will be
+ * used.
+ */
+
+/**
+ * \brief Construct an ExposureModeHelper instance
+ * \param[in] stages The vector of paired exposure time and gain limits
+ *
+ * The input stages are exposure time and _total_ gain pairs; the gain
+ * encompasses both analogue and digital gain.
+ *
+ * The vector of stages may be empty. In that case, the helper will simply use
+ * the runtime limits set through setLimits() instead.
+ */
+ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages)
+{
+ minExposureTime_ = 0us;
+ maxExposureTime_ = 0us;
+ minGain_ = 0;
+ maxGain_ = 0;
+
+ for (const auto &[s, g] : stages) {
+ exposureTimes_.push_back(s);
+ gains_.push_back(g);
+ }
+}
+
+/**
+ * \brief Set the exposure time and gain limits
+ * \param[in] minExposureTime The minimum exposure time supported
+ * \param[in] maxExposureTime The maximum exposure time supported
+ * \param[in] minGain The minimum analogue gain supported
+ * \param[in] maxGain The maximum analogue gain supported
+ *
+ * This function configures the exposure time and analogue gain limits that need
+ * to be adhered to as the helper divides up exposure. Note that this function
+ * *must* be called whenever those limits change and before splitExposure() is
+ * used.
+ *
+ * If the algorithm using the helpers needs to indicate that either exposure time
+ * or analogue gain or both should be fixed it can do so by setting both the
+ * minima and maxima to the same value.
+ */
+void ExposureModeHelper::setLimits(utils::Duration minExposureTime,
+ utils::Duration maxExposureTime,
+ double minGain, double maxGain)
+{
+ minExposureTime_ = minExposureTime;
+ maxExposureTime_ = maxExposureTime;
+ minGain_ = minGain;
+ maxGain_ = maxGain;
+}
+
+utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const
+{
+ return std::clamp(exposureTime, minExposureTime_, maxExposureTime_);
+}
+
+double ExposureModeHelper::clampGain(double gain) const
+{
+ return std::clamp(gain, minGain_, maxGain_);
+}
+
+/**
+ * \brief Split exposure into exposure time and gain
+ * \param[in] exposure Exposure value
+ *
+ * This function divides a given exposure into exposure time, analogue and
+ * digital gain by iterating through stages of exposure time and gain limits.
+ * At each stage the current stage's exposure time limit is multiplied by the
+ * previous stage's gain limit (or 1.0 initially) to see if the combination of
+ * the two can meet the required exposure. If they cannot then the current
+ * stage's exposure time limit is multiplied by the same stage's gain limit to
+ * see if that combination can meet the required exposure. If they cannot
+ * then the function moves to consider the next stage.
+ *
+ * When a combination of exposure time and gain _stage_ limits is found that
+ * is sufficient to meet the required exposure, the function attempts to reduce
+ * exposure time as much as possible whilst fixing gain and still meeting the
+ * exposure. If a _runtime_ limit prevents exposure time from being lowered
+ * enough to meet the exposure with gain fixed at the stage limit, gain is also
+ * lowered to compensate.
+ *
+ * Once the exposure time and gain values are ascertained, gain is assigned as
+ * analogue gain as much as possible, with digital gain only in use if the
+ * maximum analogue gain runtime limit is unable to accommodate the exposure
+ * value.
+ *
+ * If no combination of exposure time and gain limits is found that meets the
+ * required exposure, the helper falls back to simply maximising the exposure
+ * time first, followed by analogue gain, followed by digital gain.
+ *
+ * \return Tuple of exposure time, analogue gain, and digital gain
+ */
+std::tuple<utils::Duration, double, double>
+ExposureModeHelper::splitExposure(utils::Duration exposure) const
+{
+ ASSERT(maxExposureTime_);
+ ASSERT(maxGain_);
+
+ bool gainFixed = minGain_ == maxGain_;
+ bool exposureTimeFixed = minExposureTime_ == maxExposureTime_;
+
+ /*
+ * There's no point entering the loop if we can change neither gain
+ * nor exposure time anyway.
+ */
+ if (exposureTimeFixed && gainFixed)
+ return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) };
+
+ utils::Duration exposureTime;
+ double stageGain = 1.0;
+ double gain;
+
+ for (unsigned int stage = 0; stage < gains_.size(); stage++) {
+ double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]);
+ utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]);
+ stageGain = clampGain(gains_[stage]);
+
+ /*
+ * We perform the clamping on both exposure time and gain in
+ * case the helper has had limits set that prevent those values
+ * being lowered beyond a certain minimum; this can happen at
+ * runtime for various reasons and so would not be known when
+ * the stage limits are initialised.
+ */
+
+ if (stageExposureTime * lastStageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(lastStageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+
+ if (stageExposureTime * stageGain >= exposure) {
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+ }
+ }
+
+ /*
+ * From here on all we can do is max out the exposure time, followed by
+ * the analogue gain. If we still haven't achieved the target we send
+ * the rest of the exposure to digital gain. If we were given no
+ * stages to use then the default stageGain of 1.0 is used so that
+ * exposure time is maxed before gain is touched at all.
+ */
+ exposureTime = clampExposureTime(exposure / clampGain(stageGain));
+ gain = clampGain(exposure / exposureTime);
+
+ return { exposureTime, gain, exposure / (exposureTime * gain) };
+}
+
+/**
+ * \fn ExposureModeHelper::minExposureTime()
+ * \brief Retrieve the configured minimum exposure time limit set through
+ * setLimits()
+ * \return The minExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxExposureTime()
+ * \brief Retrieve the configured maximum exposure time set through setLimits()
+ * \return The maxExposureTime_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::minGain()
+ * \brief Retrieve the configured minimum gain set through setLimits()
+ * \return The minGain_ value
+ */
+
+/**
+ * \fn ExposureModeHelper::maxGain()
+ * \brief Retrieve the configured maximum gain set through setLimits()
+ * \return The maxGain_ value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
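
To make the staging behaviour concrete, a sketch of driving the helper; the stage values and hardware limits below are illustrative, not taken from any real tuning.

#include <utility>
#include <vector>

#include <libcamera/base/utils.h>

#include "exposure_mode_helper.h"

using namespace libcamera;
using namespace libcamera::ipa;
using namespace std::literals::chrono_literals;

void example()
{
	/*
	 * Two stages: run exposure time up to 10ms before allowing gain up
	 * to x2.0, then exposure time up to 33ms before gain up to x8.0.
	 */
	std::vector<std::pair<utils::Duration, double>> stages = {
		{ 10ms, 2.0 },
		{ 33ms, 8.0 },
	};
	ExposureModeHelper helper(stages);

	/* Hardware limits; must be refreshed whenever they change. */
	helper.setLimits(100us, 33ms, 1.0, 16.0);

	/* Split a 60ms exposure (exposure time x total gain). */
	auto [exposureTime, analogueGain, digitalGain] = helper.splitExposure(60ms);
	/* Stage 1 satisfies it: 30ms at x2.0 analogue gain, x1.0 digital gain. */
	static_cast<void>(exposureTime);
	static_cast<void>(analogueGain);
	static_cast<void>(digitalGain);
}
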
diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h
new file mode 100644
index 00000000..c5be1b67
--- /dev/null
+++ b/src/ipa/libipa/exposure_mode_helper.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that performs computations relating to exposure
+ */
+
+#pragma once
+
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class ExposureModeHelper
+{
+public:
+ ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages);
+ ~ExposureModeHelper() = default;
+
+ void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime,
+ double minGain, double maxGain);
+
+ std::tuple<utils::Duration, double, double>
+ splitExposure(utils::Duration exposure) const;
+
+ utils::Duration minExposureTime() const { return minExposureTime_; }
+ utils::Duration maxExposureTime() const { return maxExposureTime_; }
+ double minGain() const { return minGain_; }
+ double maxGain() const { return maxGain_; }
+
+private:
+ utils::Duration clampExposureTime(utils::Duration exposureTime) const;
+ double clampGain(double gain) const;
+
+ std::vector<utils::Duration> exposureTimes_;
+ std::vector<double> gains_;
+
+ utils::Duration minExposureTime_;
+ utils::Duration maxExposureTime_;
+ double minGain_;
+ double maxGain_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.cpp b/src/ipa/libipa/fc_queue.cpp
new file mode 100644
index 00000000..0365e919
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#include "fc_queue.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+/**
+ * \file fc_queue.h
+ * \brief Queue of per-frame contexts
+ */
+
+/**
+ * \struct FrameContext
+ * \brief Context for a frame
+ *
+ * The frame context stores data specific to a single frame processed by the
+ * IPA module. Each frame processed by the IPA module has a context associated
+ * with it, accessible through the Frame Context Queue.
+ *
+ * Fields in the frame context should reflect values and controls associated
+ * with the specific frame as requested by the application, and as configured by
+ * the hardware. Fields can be read by algorithms to determine if they should
+ * update any specific action for this frame, and finally to update the metadata
+ * control lists when the frame is fully completed.
+ *
+ * \var FrameContext::frame
+ * \brief The frame number
+ */
+
+/**
+ * \class FCQueue
+ * \brief A support class for managing FrameContext instances in IPA modules
+ * \tparam FrameContext The IPA module-specific FrameContext derived class type
+ *
+ * Along with the Module and Algorithm classes, the frame context queue is a
+ * core component of the libipa infrastructure. It stores per-frame contexts
+ * used by the Algorithm operations. By centralizing the lifetime management of
+ * the contexts and implementing safeguards against underflows and overflows, it
+ * simplifies IPA modules and improves their reliability.
+ *
+ * The queue references frame contexts by a monotonically increasing sequence
+ * number. The FCQueue design assumes that this number matches both the sequence
+ * number of the corresponding frame, as generated by the camera sensor, and the
+ * sequence number of the request. This allows IPA modules to obtain the frame
+ * context from any location where a request or a frame is available.
+ *
+ * A frame context normally begins its lifetime when the corresponding request
+ * is queued, way before the frame is captured by the camera sensor. IPA modules
+ * allocate the context from the queue at that point, calling alloc() using the
+ * request number. The queue initializes the context, and the IPA module then
+ * populates it with data from the request. The context can be later retrieved
+ * with a call to get(), typically when the IPA module is requested to provide
+ * sensor or ISP parameters or receives statistics for a frame. The frame number
+ * is used at that point to identify the context.
+ *
+ * If an application fails to queue requests to the camera fast enough, frames
+ * may be produced by the camera sensor and processed by the IPA module without
+ * a corresponding request having been queued to the IPA module. This creates an
+ * underrun condition, where the IPA module will try to get a frame context that
+ * hasn't been allocated. In this case, the get() function will allocate and
+ * initialize a context for the frame, and log a message. Algorithms will not
+ * apply the controls associated with the late request, but should otherwise
+ * behave correctly.
+ *
+ * \todo Mark the frame context with a per-frame control error flag in case of
+ * underrun, and research how algorithms should handle this.
+ *
+ * At its core, the queue uses a circular buffer to avoid dynamic memory
+ * allocation at runtime. The buffer is pre-allocated with a maximum number of
+ * entries when the FCQueue instance is constructed. Entries are initialized on
+ * first use by alloc() or, in underrun conditions, get(). The queue is not
+ * allowed to overflow, which must be ensured by pipeline handlers never
+ * queuing more in-flight requests to the IPA module than the queue size. If an
+ * overflow condition is detected, the queue will log a fatal error.
+ *
+ * IPA module-specific frame context implementations shall inherit from the
+ * FrameContext base class to support the minimum required features for a
+ * FrameContext.
+ */
+
+/**
+ * \fn FCQueue::FCQueue(unsigned int size)
+ * \brief Construct a frame contexts queue of a specified size
+ * \param[in] size The number of contexts in the queue
+ */
+
+/**
+ * \fn FCQueue::clear()
+ * \brief Clear the contexts queue
+ *
+ * IPA modules must clear the frame context queue at the beginning of a new
+ * streaming session, in IPAModule::start().
+ *
+ * \todo Fix any issue this may cause with requests queued before the camera is
+ * started.
+ */
+
+/**
+ * \fn FCQueue::alloc(uint32_t frame)
+ * \brief Allocate and return a FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * The first call to obtain a FrameContext from the FCQueue should be handled
+ * through this function. The FrameContext will be initialised, if not
+ * initialised already, and returned to the caller.
+ *
+ * If the FrameContext was already initialised for this \a frame, a warning will
+ * be reported and the previously initialised FrameContext is returned.
+ *
+ * Frame contexts are expected to be initialised when a Request is first passed
+ * to the IPA module in IPAModule::queueRequest().
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+/**
+ * \fn FCQueue::get(uint32_t frame)
+ * \brief Obtain the FrameContext for the \a frame
+ * \param[in] frame The frame context sequence number
+ *
+ * If the FrameContext is not correctly initialised for the \a frame, it will be
+ * initialised.
+ *
+ * \return A reference to the FrameContext for sequence \a frame
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h
new file mode 100644
index 00000000..a1d13652
--- /dev/null
+++ b/src/ipa/libipa/fc_queue.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * IPA Frame context queue
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(FCQueue)
+
+namespace ipa {
+
+template<typename FrameContext>
+class FCQueue;
+
+struct FrameContext {
+private:
+ template<typename T> friend class FCQueue;
+ uint32_t frame;
+ bool initialised = false;
+};
+
+template<typename FrameContext>
+class FCQueue
+{
+public:
+ FCQueue(unsigned int size)
+ : contexts_(size)
+ {
+ }
+
+ void clear()
+ {
+ for (FrameContext &ctx : contexts_) {
+ ctx.initialised = false;
+ ctx.frame = 0;
+ }
+ }
+
+ FrameContext &alloc(const uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * Do not re-initialise if a get() call has already fetched this
+ * frame context, to preserve the context.
+ *
+ * \todo If the sequence number of the context to initialise
+ * is smaller than the sequence number of the queue slot to use,
+ * it means that we had a serious request underrun and more
+ * frames than the queue size have been produced since the last
+ * time the application queued a request. Does this deserve an
+ * error condition?
+ */
+ if (frame != 0 && frame <= frameContext.frame)
+ LOG(FCQueue, Warning)
+ << "Frame " << frame << " already initialised";
+ else
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ FrameContext &get(uint32_t frame)
+ {
+ FrameContext &frameContext = contexts_[frame % contexts_.size()];
+
+ /*
+ * If the IPA algorithms try to access a frame context slot which
+ * has been already overwritten by a newer context, it means the
+ * frame context queue has overflowed and the desired context
+ * has been forever lost. The pipeline handler shall avoid
+ * queueing more requests to the IPA than the frame context
+ * queue size.
+ */
+ if (frame < frameContext.frame)
+ LOG(FCQueue, Fatal) << "Frame context for " << frame
+ << " has been overwritten by "
+ << frameContext.frame;
+
+ if (frame == 0 && !frameContext.initialised) {
+ /*
+ * If the IPA calls get() at start() time it would get an
+ * uninitialised FrameContext, as the "frame ==
+ * frameContext.frame" check below succeeds because
+ * FrameContexts are zeroed at creation time.
+ *
+ * Make sure the FrameContext gets initialised if get()
+ * is called before alloc() by the IPA for frame#0.
+ */
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+ if (frame == frameContext.frame)
+ return frameContext;
+
+ /*
+ * The frame context has been retrieved before it was
+ * initialised through an alloc() call. This indicates an
+ * algorithm attempted to access a Frame context before it was
+ * queued to the IPA. Controls applied for this request may be
+ * left unhandled.
+ *
+ * \todo Set an error flag for per-frame control errors.
+ */
+ LOG(FCQueue, Warning)
+ << "Obtained an uninitialised FrameContext for " << frame;
+
+ init(frameContext, frame);
+
+ return frameContext;
+ }
+
+private:
+ void init(FrameContext &frameContext, const uint32_t frame)
+ {
+ frameContext = {};
+ frameContext.frame = frame;
+ frameContext.initialised = true;
+ }
+
+ std::vector<FrameContext> contexts_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
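
A sketch of the expected calling pattern for the queue, using a hypothetical module-specific context type; the sequence numbers would normally come from the request and from the frame metadata.

#include <stdint.h>

#include "fc_queue.h"

namespace example {

using namespace libcamera;

/* Hypothetical module-specific per-frame data, inheriting the base. */
struct MyFrameContext : public ipa::FrameContext {
	uint32_t exposure = 0;
	double gain = 1.0;
};

class MyIPA
{
public:
	MyIPA()
		: queue_(16) /* Must cover the maximum in-flight requests. */
	{
	}

	void start() { queue_.clear(); }

	void queueRequest(uint32_t frame, uint32_t exposure)
	{
		/* Allocate the context when the request is queued... */
		MyFrameContext &ctx = queue_.alloc(frame);
		ctx.exposure = exposure;
	}

	void processStats(uint32_t frame)
	{
		/* ...and retrieve it when the frame's statistics arrive. */
		MyFrameContext &ctx = queue_.get(frame);
		static_cast<void>(ctx.exposure);
	}

private:
	ipa::FCQueue<MyFrameContext> queue_;
};

} /* namespace example */
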
diff --git a/src/ipa/libipa/fixedpoint.cpp b/src/ipa/libipa/fixedpoint.cpp
new file mode 100644
index 00000000..6b698fc5
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.cpp
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#include "fixedpoint.h"
+
+/**
+ * \file fixedpoint.h
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \fn R floatingToFixedPoint(T number)
+ * \brief Convert a floating point number to a fixed-point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the fixed-point representation
+ * \tparam T Input type of the floating point representation
+ * \param number The floating point number to convert to fixed point
+ * \return The converted value
+ */
+
+/**
+ * \fn R fixedToFloatingPoint(T number)
+ * \brief Convert a fixed-point number to a floating point representation
+ * \tparam I Bit width of the integer part of the fixed-point
+ * \tparam F Bit width of the fractional part of the fixed-point
+ * \tparam R Return type of the floating point representation
+ * \tparam T Input type of the fixed-point representation
+ * \param number The fixed point number to convert to floating point
+ * \return The converted value
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/fixedpoint.h b/src/ipa/libipa/fixedpoint.h
new file mode 100644
index 00000000..709cf50f
--- /dev/null
+++ b/src/ipa/libipa/fixedpoint.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Fixed / floating point conversions
+ */
+
+#pragma once
+
+#include <cmath>
+#include <type_traits>
+
+namespace libcamera {
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_integral_v<R> &&
+ std::is_floating_point_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R floatingToFixedPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(R));
+ static_assert(I + F <= sizeof(R) * 8);
+
+ /*
+ * The intermediate cast to int is needed on arm platforms to properly
+ * cast negative values. See
+ * https://embeddeduse.com/2013/08/25/casting-a-negative-float-to-an-unsigned-int/
+ */
+ R mask = (1 << (F + I)) - 1;
+ R frac = static_cast<R>(static_cast<int>(std::round(number * (1 << F)))) & mask;
+
+ return frac;
+}
+
+#ifndef __DOXYGEN__
+template<unsigned int I, unsigned int F, typename R, typename T,
+ std::enable_if_t<std::is_floating_point_v<R> &&
+ std::is_integral_v<T>> * = nullptr>
+#else
+template<unsigned int I, unsigned int F, typename R, typename T>
+#endif
+constexpr R fixedToFloatingPoint(T number)
+{
+ static_assert(sizeof(int) >= sizeof(T));
+ static_assert(I + F <= sizeof(T) * 8);
+
+ /*
+ * Recreate the upper bits of a negative number by shifting the sign
+ * bit from the fixed-point position up to the most significant bit of
+ * the unsigned value, then right-shifting by the same amount, which
+ * sign-extends the result. The compiler can optimize this quite well.
+ */
+ int remaining_bits = sizeof(int) * 8 - (I + F);
+ int t = static_cast<int>(static_cast<unsigned>(number) << remaining_bits) >> remaining_bits;
+ return static_cast<R>(t) / static_cast<R>(1 << F);
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
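
A worked round trip through the two conversions; the Q2.7 layout (2 integer bits, 7 fractional bits in a 16-bit register) is an arbitrary choice for illustration.

#include <cassert>
#include <stdint.h>

#include "fixedpoint.h"

using namespace libcamera::ipa;

int main()
{
	/* -1.5 in Q2.7: round(-1.5 * 2^7) = -192, masked to 9 bits = 0x140. */
	uint16_t fixed = floatingToFixedPoint<2, 7, uint16_t, double>(-1.5);
	assert(fixed == 0x140);

	/* Sign-extend the 9-bit value and divide by 2^7 to get -1.5 back. */
	double back = fixedToFloatingPoint<2, 7, double, uint16_t>(fixed);
	assert(back == -1.5);

	return 0;
}
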
diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp
new file mode 100644
index 00000000..10e44b54
--- /dev/null
+++ b/src/ipa/libipa/histogram.cpp
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculations
+ */
+#include "histogram.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file histogram.h
+ * \brief Class to represent Histograms and manipulate them
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Histogram
+ * \brief The base class for creating histograms
+ *
+ * This class stores a cumulative frequency histogram, which is a mapping that
+ * counts the cumulative number of observations in all of the bins up to the
+ * specified bin. It can be used to find quantiles and averages between quantiles.
+ */
+
+/**
+ * \fn Histogram::Histogram()
+ * \brief Construct an empty Histogram
+ *
+ * This empty constructor exists largely to allow Histograms to be embedded in
+ * other classes which may be created before the contents of the Histogram are
+ * known.
+ */
+
+/**
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ */
+Histogram::Histogram(Span<const uint32_t> data)
+{
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + value;
+}
+
+/**
+ * \fn Histogram::Histogram(Span<const uint32_t> data, Transform transform)
+ * \brief Create a cumulative histogram
+ * \param[in] data A (non-cumulative) histogram
+ * \param[in] transform The transformation function to apply to every bin
+ */
+
+/**
+ * \fn Histogram::bins()
+ * \brief Retrieve the number of bins currently used by the Histogram
+ * \return Number of bins
+ */
+
+/**
+ * \fn Histogram::data()
+ * \brief Retrieve the internal data
+ * \return The data
+ */
+
+/**
+ * \fn Histogram::total()
+ * \brief Retrieve the total number of values in the data set
+ * \return Number of values
+ */
+
+/**
+ * \brief Cumulative frequency up to a (fractional) point in a bin
+ * \param[in] bin The bin up to which to cumulate
+ *
+ * With F(p) the cumulative frequency of the histogram, the value is 0 at
+ * the bottom of the histogram, and the maximum is the number of bins.
+ * The pixels are spread evenly throughout the “bin” in which they lie, so that
+ * F(p) is a continuous (monotonically increasing) function.
+ *
+ * \return The cumulative frequency from 0 up to the specified bin
+ */
+uint64_t Histogram::cumulativeFrequency(double bin) const
+{
+ if (bin <= 0)
+ return 0;
+ else if (bin >= bins())
+ return total();
+ int b = static_cast<int32_t>(bin);
+ return cumulative_[b] +
+ (bin - b) * (cumulative_[b + 1] - cumulative_[b]);
+}
+
+/**
+ * \brief Return the (fractional) bin of the point \a q through the histogram
+ * \param[in] q The desired point (0 <= q <= 1)
+ * \param[in] first The low limit (default is 0)
+ * \param[in] last The high limit (default is UINT_MAX)
+ *
+ * A quantile gives us the point p = Q(q) in the range such that a proportion
+ * q of the pixels lie below p. A familiar quantile is Q(0.5) which is the median
+ * of a distribution.
+ *
+ * \return The fractional bin of the point
+ */
+double Histogram::quantile(double q, uint32_t first, uint32_t last) const
+{
+ if (last == UINT_MAX)
+ last = cumulative_.size() - 2;
+ ASSERT(first <= last);
+
+ uint64_t item = q * total();
+ /* Binary search to find the right bin */
+ while (first < last) {
+ int middle = (first + last) / 2;
+ /* Is it between first and middle? */
+ if (cumulative_[middle + 1] > item)
+ last = middle;
+ else
+ first = middle + 1;
+ }
+ ASSERT(item >= cumulative_[first] && item <= cumulative_[last + 1]);
+
+ double frac;
+ if (cumulative_[first + 1] == cumulative_[first])
+ frac = 0;
+ else
+ frac = (item - cumulative_[first]) / (cumulative_[first + 1] - cumulative_[first]);
+ return first + frac;
+}
+
+/**
+ * \brief Calculate the mean between two quantiles
+ * \param[in] lowQuantile The low quantile
+ * \param[in] highQuantile The high quantile
+ *
+ * Quantiles are not ideal for metering as they suffer several limitations.
+ * Instead, a concept is introduced here: inter-quantile mean.
+ * It returns the mean of all pixels between lowQuantile and highQuantile.
+ *
+ * \return The mean histogram bin value between the two quantiles
+ */
+double Histogram::interQuantileMean(double lowQuantile, double highQuantile) const
+{
+ ASSERT(highQuantile > lowQuantile);
+ /* Proportion of pixels which lies below lowQuantile */
+ double lowPoint = quantile(lowQuantile);
+ /* Proportion of pixels which lies below highQuantile */
+ double highPoint = quantile(highQuantile, static_cast<uint32_t>(lowPoint));
+ double sumBinFreq = 0, cumulFreq = 0;
+
+ for (double p_next = floor(lowPoint) + 1.0;
+ p_next <= ceil(highPoint);
+ lowPoint = p_next, p_next += 1.0) {
+ int bin = floor(lowPoint);
+ double freq = (cumulative_[bin + 1] - cumulative_[bin])
+ * (std::min(p_next, highPoint) - lowPoint);
+
+ /* Accumulate weighted bin */
+ sumBinFreq += bin * freq;
+ /* Accumulate weights */
+ cumulFreq += freq;
+ }
+ /* Add 0.5 to give an average for bin mid-points. */
+ return sumBinFreq / cumulFreq + 0.5;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h
new file mode 100644
index 00000000..a926002c
--- /dev/null
+++ b/src/ipa/libipa/histogram.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculation interface
+ */
+
+#pragma once
+
+#include <limits.h>
+#include <stdint.h>
+#include <type_traits>
+#include <vector>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+namespace ipa {
+
+class Histogram
+{
+public:
+ Histogram() { cumulative_.push_back(0); }
+ Histogram(Span<const uint32_t> data);
+
+ template<typename Transform,
+ std::enable_if_t<std::is_invocable_v<Transform, uint32_t>> * = nullptr>
+ Histogram(Span<const uint32_t> data, Transform transform)
+ {
+ cumulative_.resize(data.size() + 1);
+ cumulative_[0] = 0;
+ for (const auto &[i, value] : utils::enumerate(data))
+ cumulative_[i + 1] = cumulative_[i] + transform(value);
+ }
+
+ size_t bins() const { return cumulative_.size() - 1; }
+ const Span<const uint64_t> data() const { return cumulative_; }
+ uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
+ uint64_t cumulativeFrequency(double bin) const;
+ double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const;
+ double interQuantileMean(double lowQuantile, double highQuantile) const;
+
+private:
+ std::vector<uint64_t> cumulative_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
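
A usage sketch with a small hand-made histogram; the numbers are chosen so the results are easy to verify against the formulas above.

#include <array>
#include <stdint.h>

#include "histogram.h"

using namespace libcamera;
using namespace libcamera::ipa;

void example()
{
	/* 4 bins, 100 pixels in total; cumulative: 0, 10, 30, 70, 100. */
	std::array<uint32_t, 4> data{ 10, 20, 40, 30 };
	Histogram h(Span<const uint32_t>(data));

	double median = h.quantile(0.5);             /* 2.5: the 50th pixel lies halfway into bin 2 */
	uint64_t below = h.cumulativeFrequency(2.0); /* 30 pixels below bin 2 */
	double mean = h.interQuantileMean(0.25, 0.75); /* 2.5 for this data */

	static_cast<void>(median);
	static_cast<void>(below);
	static_cast<void>(mean);
}
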
diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp
new file mode 100644
index 00000000..73e8d3b7
--- /dev/null
+++ b/src/ipa/libipa/interpolator.cpp
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating objects
+ */
+#include "interpolator.h"
+
+#include <algorithm>
+#include <string>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "interpolator.h"
+
+/**
+ * \file interpolator.h
+ * \brief Helper class for linear interpolating a set of objects
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+/**
+ * \class Interpolator
+ * \brief Class for storing, retrieving, and interpolating objects
+ * \tparam T Type of objects stored in the interpolator
+ *
+ * The main use case is to pass a map from color temperatures to corresponding
+ * objects (e.g. matrices for color correction), and then request an
+ * interpolated object for a specific color temperature. This class abstracts
+ * away the interpolation portion.
+ */
+
+/**
+ * \fn Interpolator::Interpolator()
+ * \brief Construct an empty interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data)
+ * \brief Construct an interpolator from a map of objects
+ * \param data Map from which to construct the interpolator
+ */
+
+/**
+ * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ * \brief Initialize an Interpolator instance from yaml
+ * \tparam T Type of data stored in the interpolator
+ * \param[in] yaml The yaml object that contains the map of unsigned integers to
+ * objects
+ * \param[in] key_name The name of the key in the yaml object
+ * \param[in] value_name The name of the value in the yaml object
+ *
+ * The yaml object is expected to be a list of maps. Each map has two or more
+ * pairs: one of \a key_name to the key value (usually color temperature), and
+ * one or more of \a value_name to the object. This is a bit difficult to
+ * explain, so here is an example (in Python, as it is easier to read than
+ * YAML):
+ * [
+ * {
+ * 'ct': 2860,
+ * 'ccm': [ 2.12089, -0.52461, -0.59629,
+ * -0.85342, 2.80445, -0.95103,
+ * -0.26897, -1.14788, 2.41685 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 2960,
+ * 'ccm': [ 2.26962, -0.54174, -0.72789,
+ * -0.77008, 2.60271, -0.83262,
+ * -0.26036, -1.51254, 2.77289 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ *
+ * {
+ * 'ct': 3603,
+ * 'ccm': [ 2.18644, -0.66148, -0.52496,
+ * -0.77828, 2.69474, -0.91645,
+ * -0.25239, -0.83059, 2.08298 ],
+ * 'offsets': [ 0, 0, 0 ]
+ * },
+ * ]
+ *
+ * In this case, \a key_name would be 'ct', and \a value_name can be either
+ * 'ccm' or 'offsets'. This way multiple interpolators can be defined in
+ * one set of color temperature ranges in the tuning file, and they can be
+ * retrieved separately with the \a value_name parameter.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \fn void Interpolator<T>::setQuantization(const unsigned int q)
+ * \brief Set the quantization value
+ * \param[in] q The quantization value
+ *
+ * Sets the quantization value. When set, the key is rounded to the nearest
+ * multiple of this value before the interpolation. This can help in reducing
+ * the number of updates pushed to the hardware.
+ *
+ * Note that normally a threshold needs to be combined with quantization.
+ * Otherwise a value that swings around the edge of the quantization step will
+ * lead to constant updates.
+ */
+
+/**
+ * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data)
+ * \brief Set the internal map
+ *
+ * Overwrites the internal map using move semantics.
+ */
+
+/**
+ * \fn const T& Interpolator<T>::getInterpolated()
+ * \brief Retrieve an interpolated value for the given key
+ * \param[in] key The unsigned integer key of the object to retrieve
+ * \param[out] quantizedKey If provided, the key value after quantization
+ * \return The object corresponding to the key. The object is cached internally,
+ * so on successive calls with the same key (after quantization) interpolation
+ * is not recalculated.
+ */
+
+/**
+ * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double
+ * lambda)
+ * \brief Interpolate between two instances of T
+ * \param a The first value to interpolate
+ * \param b The second value to interpolate
+ * \param dest The destination for the interpolated value
+ * \param lambda The interpolation factor (0..1)
+ *
+ * Interpolates between \a a and \a b according to \a lambda. It calculates
+ * dest = a * (1.0 - lambda) + b * lambda;
+ *
+ * If T supports multiplication with double and addition, this function can be
+ * used as is. For other types this function can be overridden using partial
+ * template specialization.
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h
new file mode 100644
index 00000000..fffce214
--- /dev/null
+++ b/src/ipa/libipa/interpolator.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class for interpolating maps of objects
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <map>
+#include <string>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Interpolator)
+
+namespace ipa {
+
+template<typename T>
+class Interpolator
+{
+public:
+ Interpolator() = default;
+ Interpolator(const std::map<unsigned int, T> &data)
+ : data_(data)
+ {
+ }
+ Interpolator(std::map<unsigned int, T> &&data)
+ : data_(std::move(data))
+ {
+ }
+
+ ~Interpolator() = default;
+
+ int readYaml(const libcamera::YamlObject &yaml,
+ const std::string &key_name,
+ const std::string &value_name)
+ {
+ data_.clear();
+ lastInterpolatedKey_.reset();
+
+ if (!yaml.isList()) {
+ LOG(Interpolator, Error) << "yaml object must be a list";
+ return -EINVAL;
+ }
+
+ for (const auto &value : yaml.asList()) {
+ unsigned int ct = std::stoul(value[key_name].get<std::string>(""));
+ std::optional<T> data =
+ value[value_name].get<T>();
+ if (!data) {
+ return -EINVAL;
+ }
+
+ data_[ct] = *data;
+ }
+
+ if (data_.size() < 1) {
+ LOG(Interpolator, Error) << "Need at least one element";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ void setQuantization(const unsigned int q)
+ {
+ quantization_ = q;
+ }
+
+ void setData(std::map<unsigned int, T> &&data)
+ {
+ data_ = std::move(data);
+ lastInterpolatedKey_.reset();
+ }
+
+ const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr)
+ {
+ ASSERT(data_.size() > 0);
+
+ if (quantization_ > 0)
+ key = std::lround(key / static_cast<double>(quantization_)) * quantization_;
+
+ if (quantizedKey)
+ *quantizedKey = key;
+
+ if (lastInterpolatedKey_.has_value() &&
+ *lastInterpolatedKey_ == key)
+ return lastInterpolatedValue_;
+
+ auto it = data_.lower_bound(key);
+
+ if (it == data_.begin())
+ return it->second;
+
+ if (it == data_.end())
+ return std::prev(it)->second;
+
+ if (it->first == key)
+ return it->second;
+
+ auto it2 = std::prev(it);
+ double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first);
+ interpolate(it2->second, it->second, lastInterpolatedValue_, lambda);
+ lastInterpolatedKey_ = key;
+
+ return lastInterpolatedValue_;
+ }
+
+ void interpolate(const T &a, const T &b, T &dest, double lambda)
+ {
+ dest = a * (1.0 - lambda) + b * lambda;
+ }
+
+private:
+ std::map<unsigned int, T> data_;
+ T lastInterpolatedValue_;
+ std::optional<unsigned int> lastInterpolatedKey_;
+ unsigned int quantization_ = 0;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
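
A short sketch of the interpolation behaviour; the keys stand in for colour temperatures and the double values for whatever object would actually be interpolated (a CCM, offsets, etc.).

#include <map>

#include "interpolator.h"

using namespace libcamera::ipa;

void example()
{
	/* Keys are colour temperatures; values are the objects to interpolate. */
	Interpolator<double> interp(std::map<unsigned int, double>{
		{ 2000, 1.0 },
		{ 6000, 3.0 },
	});

	/* Quantize lookups to 100K steps to limit hardware updates. */
	interp.setQuantization(100);

	double v = interp.getInterpolated(4000);  /* halfway between keys: 2.0 */
	double lo = interp.getInterpolated(1500); /* clamped to the first entry: 1.0 */

	static_cast<void>(v);
	static_cast<void>(lo);
}
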
diff --git a/src/ipa/libipa/ipa_interface_wrapper.cpp b/src/ipa/libipa/ipa_interface_wrapper.cpp
deleted file mode 100644
index b93c1c1f..00000000
--- a/src/ipa/libipa/ipa_interface_wrapper.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_interface_wrapper.cpp - Image Processing Algorithm interface wrapper
- */
-
-#include "ipa_interface_wrapper.h"
-
-#include <map>
-#include <string.h>
-#include <unistd.h>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-
-#include "byte_stream_buffer.h"
-
-/**
- * \file ipa_interface_wrapper.h
- * \brief Image Processing Algorithm interface wrapper
- */
-
-namespace libcamera {
-
-/**
- * \class IPAInterfaceWrapper
- * \brief Wrap an IPAInterface and expose it as an ipa_context
- *
- * This class implements the ipa_context API based on a provided IPAInterface.
- * It helps IPAs that implement the IPAInterface API to provide the external
- * ipa_context API.
- *
- * To use the wrapper, an IPA module simple creates a new instance of its
- * IPAInterface implementation, and passes it to the constructor of the
- * IPAInterfaceWrapper. As IPAInterfaceWrapper inherits from ipa_context, the
- * constructed wrapper can then be directly returned from the IPA module's
- * ipaCreate() function.
- *
- * \code{.cpp}
- * class MyIPA : public IPAInterface
- * {
- * ...
- * };
- *
- * struct ipa_context *ipaCreate()
- * {
- * return new IPAInterfaceWrapper(std::make_unique<MyIPA>());
- * }
- * \endcode
- *
- * The wrapper takes ownership of the IPAInterface and will automatically
- * delete it when the wrapper is destroyed.
- */
-
-/**
- * \brief Construct an IPAInterfaceWrapper wrapping \a interface
- * \param[in] interface The interface to wrap
- */
-IPAInterfaceWrapper::IPAInterfaceWrapper(std::unique_ptr<IPAInterface> interface)
- : ipa_(std::move(interface)), callbacks_(nullptr), cb_ctx_(nullptr)
-{
- ops = &operations_;
-
- ipa_->queueFrameAction.connect(this, &IPAInterfaceWrapper::queueFrameAction);
-}
-
-void IPAInterfaceWrapper::destroy(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- delete ctx;
-}
-
-void *IPAInterfaceWrapper::get_interface(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- return ctx->ipa_.get();
-}
-
-void IPAInterfaceWrapper::init(struct ipa_context *_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->ipa_->init();
-}
-
-void IPAInterfaceWrapper::register_callbacks(struct ipa_context *_ctx,
- const struct ipa_callback_ops *callbacks,
- void *cb_ctx)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->callbacks_ = callbacks;
- ctx->cb_ctx_ = cb_ctx;
-}
-
-void IPAInterfaceWrapper::configure(struct ipa_context *_ctx,
- const struct ipa_stream *streams,
- unsigned int num_streams,
- const struct ipa_control_info_map *maps,
- unsigned int num_maps)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
-
- ctx->serializer_.reset();
-
- /* Translate the IPA stream configurations map. */
- std::map<unsigned int, IPAStream> ipaStreams;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- const struct ipa_stream &stream = streams[i];
-
- ipaStreams[stream.id] = {
- stream.pixel_format,
- Size(stream.width, stream.height),
- };
- }
-
- /* Translate the IPA entity controls map. */
- std::map<unsigned int, const ControlInfoMap &> entityControls;
- std::map<unsigned int, ControlInfoMap> infoMaps;
-
- for (unsigned int i = 0; i < num_maps; ++i) {
- const struct ipa_control_info_map &ipa_map = maps[i];
- ByteStreamBuffer byteStream(ipa_map.data, ipa_map.size);
- unsigned int id = ipa_map.id;
-
- infoMaps[id] = ctx->serializer_.deserialize<ControlInfoMap>(byteStream);
- entityControls.emplace(id, infoMaps[id]);
- }
-
- ctx->ipa_->configure(ipaStreams, entityControls);
-}
-
-void IPAInterfaceWrapper::map_buffers(struct ipa_context *_ctx,
- const struct ipa_buffer *_buffers,
- size_t num_buffers)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- std::vector<IPABuffer> buffers(num_buffers);
-
- for (unsigned int i = 0; i < num_buffers; ++i) {
- const struct ipa_buffer &_buffer = _buffers[i];
- IPABuffer &buffer = buffers[i];
- std::vector<FrameBuffer::Plane> &planes = buffer.planes;
-
- buffer.id = _buffer.id;
-
- planes.resize(_buffer.num_planes);
- for (unsigned int j = 0; j < _buffer.num_planes; ++j) {
- planes[j].fd = FileDescriptor(_buffer.planes[j].dmabuf);
- planes[j].length = _buffer.planes[j].length;
- }
- }
-
- ctx->ipa_->mapBuffers(buffers);
-}
-
-void IPAInterfaceWrapper::unmap_buffers(struct ipa_context *_ctx,
- const unsigned int *_ids,
- size_t num_buffers)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- std::vector<unsigned int> ids(_ids, _ids + num_buffers);
- ctx->ipa_->unmapBuffers(ids);
-}
-
-void IPAInterfaceWrapper::process_event(struct ipa_context *_ctx,
- const struct ipa_operation_data *data)
-{
- IPAInterfaceWrapper *ctx = static_cast<IPAInterfaceWrapper *>(_ctx);
- IPAOperationData opData;
-
- opData.operation = data->operation;
-
- opData.data.resize(data->num_data);
- memcpy(opData.data.data(), data->data,
- data->num_data * sizeof(*data->data));
-
- opData.controls.resize(data->num_lists);
- for (unsigned int i = 0; i < data->num_lists; ++i) {
- const struct ipa_control_list *c_list = &data->lists[i];
- ByteStreamBuffer byteStream(c_list->data, c_list->size);
- opData.controls[i] = ctx->serializer_.deserialize<ControlList>(byteStream);
- }
-
- ctx->ipa_->processEvent(opData);
-}
-
-void IPAInterfaceWrapper::queueFrameAction(unsigned int frame,
- const IPAOperationData &data)
-{
- if (!callbacks_)
- return;
-
- struct ipa_operation_data c_data;
- c_data.operation = data.operation;
- c_data.data = data.data.data();
- c_data.num_data = data.data.size();
-
- struct ipa_control_list control_lists[data.controls.size()];
- c_data.lists = control_lists;
- c_data.num_lists = data.controls.size();
-
- std::size_t listsSize = 0;
- for (const auto &list : data.controls)
- listsSize += serializer_.binarySize(list);
-
- std::vector<uint8_t> binaryData(listsSize);
- ByteStreamBuffer byteStreamBuffer(binaryData.data(), listsSize);
-
- unsigned int i = 0;
- for (const auto &list : data.controls) {
- struct ipa_control_list &c_list = control_lists[i];
- c_list.size = serializer_.binarySize(list);
-
- ByteStreamBuffer b = byteStreamBuffer.carveOut(c_list.size);
- serializer_.serialize(list, b);
-
- c_list.data = b.base();
- }
-
- callbacks_->queue_frame_action(cb_ctx_, frame, c_data);
-}
-
-#ifndef __DOXYGEN__
-/*
- * This construct confuses Doygen and makes it believe that all members of the
- * operations is a member of IPAInterfaceWrapper. It must thus be hidden.
- */
-const struct ipa_context_ops IPAInterfaceWrapper::operations_ = {
- .destroy = &IPAInterfaceWrapper::destroy,
- .get_interface = &IPAInterfaceWrapper::get_interface,
- .init = &IPAInterfaceWrapper::init,
- .register_callbacks = &IPAInterfaceWrapper::register_callbacks,
- .configure = &IPAInterfaceWrapper::configure,
- .map_buffers = &IPAInterfaceWrapper::map_buffers,
- .unmap_buffers = &IPAInterfaceWrapper::unmap_buffers,
- .process_event = &IPAInterfaceWrapper::process_event,
-};
-#endif
-
-} /* namespace libcamera */
diff --git a/src/ipa/libipa/ipa_interface_wrapper.h b/src/ipa/libipa/ipa_interface_wrapper.h
deleted file mode 100644
index 3fb7b447..00000000
--- a/src/ipa/libipa/ipa_interface_wrapper.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_interface_wrapper.h - Image Processing Algorithm interface wrapper
- */
-#ifndef __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__
-#define __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__
-
-#include <memory>
-
-#include <ipa/ipa_interface.h>
-
-#include "control_serializer.h"
-
-namespace libcamera {
-
-class IPAInterfaceWrapper : public ipa_context
-{
-public:
- IPAInterfaceWrapper(std::unique_ptr<IPAInterface> interface);
-
-private:
- static void destroy(struct ipa_context *ctx);
- static void *get_interface(struct ipa_context *ctx);
- static void init(struct ipa_context *ctx);
- static void register_callbacks(struct ipa_context *ctx,
- const struct ipa_callback_ops *callbacks,
- void *cb_ctx);
- static void configure(struct ipa_context *ctx,
- const struct ipa_stream *streams,
- unsigned int num_streams,
- const struct ipa_control_info_map *maps,
- unsigned int num_maps);
- static void map_buffers(struct ipa_context *ctx,
- const struct ipa_buffer *c_buffers,
- size_t num_buffers);
- static void unmap_buffers(struct ipa_context *ctx,
- const unsigned int *ids,
- size_t num_buffers);
- static void process_event(struct ipa_context *ctx,
- const struct ipa_operation_data *data);
-
- static const struct ipa_context_ops operations_;
-
- void queueFrameAction(unsigned int frame, const IPAOperationData &data);
-
- std::unique_ptr<IPAInterface> ipa_;
- const struct ipa_callback_ops *callbacks_;
- void *cb_ctx_;
-
- ControlSerializer serializer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_INTERFACE_WRAPPER_H__ */
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp
new file mode 100644
index 00000000..f607d86c
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.cpp
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Polynomial class to represent lens shading correction
+ */
+
+#include "lsc_polynomial.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file lsc_polynomial.h
+ * \brief LscPolynomial class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+/**
+ * \class LscPolynomial
+ * \brief Class for handling even polynomials used in lens shading correction
+ *
+ * Shading artifacts of camera lenses can be modeled using even radial
+ * polynomials. This class implements a polynomial with 5 coefficients which
+ * follows the definition of the FixVignetteRadial opcode in the Adobe DNG
+ * specification.
+ */
+
+/**
+ * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ * \brief Construct a polynomial using the given coefficients
+ * \param cx Center-x relative to the image in normalized coordinates (0..1)
+ * \param cy Center-y relative to the image in normalized coordinates (0..1)
+ * \param k0 Coefficient of the polynomial
+ * \param k1 Coefficient of the polynomial
+ * \param k2 Coefficient of the polynomial
+ * \param k3 Coefficient of the polynomial
+ * \param k4 Coefficient of the polynomial
+ */
+
+/**
+ * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y)
+ * \brief Sample the polynomial at the given normalized pixel position
+ *
+ * This function samples the polynomial at the given pixel position divided by
+ * the value returned by getM().
+ *
+ * \param x x position in normalized coordinates
+ * \param y y position in normalized coordinates
+ * \return The sampled value
+ */
+
+/**
+ * \fn LscPolynomial::getM()
+ * \brief Get the value m as described in the DNG specification
+ *
+ * Returns m according to the DNG specification. m represents the Euclidean
+ * distance (in pixels) from the optical center to the farthest pixel in the
+ * image.
+ *
+ * \return The value of m
+ */
+
+/**
+ * \fn LscPolynomial::setReferenceImageSize(const Size &size)
+ * \brief Set the reference image size
+ *
+ * Set the reference image size that is used for subsequent calls to getM() and
+ * sampleAtNormalizedPixelPos()
+ *
+ * \param size The size of the reference image
+ */
+
+} /* namespace ipa */
+} /* namespace libcamera */
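A minimal usage sketch for the class above (not part of the patch; the
coefficient values are invented for illustration, real ones come from the
camera tuning process):

#include <libcamera/geometry.h>

#include "lsc_polynomial.h"

static double cornerShading()
{
	/* Hypothetical coefficients; real values come from tuning. */
	libcamera::ipa::LscPolynomial poly(0.5, 0.5, 0.1, 0.05,
					   0.02, 0.01, 0.005);
	poly.setReferenceImageSize(libcamera::Size(4056, 3040));

	/* Pixel coordinates are divided by getM() before sampling. */
	double m = poly.getM();

	/*
	 * With the center at the middle of the image, the corner is at
	 * radius 1.0, so this returns 1.0 + k0 + k1 + k2 + k3 + k4.
	 */
	return poly.sampleAtNormalizedPixelPos(0.0 / m, 0.0 / m);
}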
diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h
new file mode 100644
index 00000000..c898faeb
--- /dev/null
+++ b/src/ipa/libipa/lsc_polynomial.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Helper for radial polynomial used in lens shading correction.
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <assert.h>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(LscPolynomial)
+
+namespace ipa {
+
+class LscPolynomial
+{
+public:
+ LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0,
+ double k1 = 0.0, double k2 = 0.0, double k3 = 0.0,
+ double k4 = 0.0)
+ : cx_(cx), cy_(cy), cnx_(0), cny_(0),
+ coefficients_({ k0, k1, k2, k3, k4 })
+ {
+ }
+
+ double sampleAtNormalizedPixelPos(double x, double y) const
+ {
+ double dx = x - cnx_;
+ double dy = y - cny_;
+ double r = sqrt(dx * dx + dy * dy);
+ double res = 1.0;
+ for (unsigned int i = 0; i < coefficients_.size(); i++) {
+ res += coefficients_[i] * std::pow(r, (i + 1) * 2);
+ }
+ return res;
+ }
+
+ double getM() const
+ {
+ double cpx = imageSize_.width * cx_;
+ double cpy = imageSize_.height * cy_;
+ double mx = std::max(cpx, std::fabs(imageSize_.width - cpx));
+ double my = std::max(cpy, std::fabs(imageSize_.height - cpy));
+
+ return sqrt(mx * mx + my * my);
+ }
+
+ void setReferenceImageSize(const Size &size)
+ {
+ assert(!size.isNull());
+ imageSize_ = size;
+
+ /* Calculate normalized centers */
+ double m = getM();
+ cnx_ = (size.width * cx_) / m;
+ cny_ = (size.height * cy_) / m;
+ }
+
+private:
+ double cx_;
+ double cy_;
+ double cnx_;
+ double cny_;
+ std::array<double, 5> coefficients_;
+
+ Size imageSize_;
+};
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+
+template<>
+struct YamlObject::Getter<ipa::LscPolynomial> {
+ std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const
+ {
+ std::optional<double> cx = obj["cx"].get<double>();
+ std::optional<double> cy = obj["cy"].get<double>();
+ std::optional<double> k0 = obj["k0"].get<double>();
+ std::optional<double> k1 = obj["k1"].get<double>();
+ std::optional<double> k2 = obj["k2"].get<double>();
+ std::optional<double> k3 = obj["k3"].get<double>();
+ std::optional<double> k4 = obj["k4"].get<double>();
+
+ if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) {
+ LOG(LscPolynomial, Error)
+ << "Polynomial is missing a parameter";
+ return std::nullopt;
+ }
+
+ return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4);
+ }
+};
+
+#endif
+
+} /* namespace libcamera */
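With the Getter specialization above, an IPA can parse a polynomial directly
from its tuning file. A sketch, assuming a parsed YamlObject named tuningData
and a hypothetical "shading" key whose value is a mapping with cx, cy and
k0 through k4 entries:

std::optional<libcamera::ipa::LscPolynomial> poly =
	tuningData["shading"].get<libcamera::ipa::LscPolynomial>();
if (!poly)
	return -EINVAL;	/* missing or incomplete parameters */

poly->setReferenceImageSize(libcamera::Size(4056, 3040));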
diff --git a/src/ipa/libipa/lux.cpp b/src/ipa/libipa/lux.cpp
new file mode 100644
index 00000000..bae8198f
--- /dev/null
+++ b/src/ipa/libipa/lux.cpp
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+#include "lux.h"
+
+#include <algorithm>
+#include <chrono>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "histogram.h"
+
+/**
+ * \file lux.h
+ * \brief Helper class that implements lux estimation
+ *
+ * Estimating the lux level of an image is a common operation that can for
+ * instance be used to adjust the target Y value in AGC or for Bayesian AWB
+ * estimation.
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(Lux)
+
+namespace ipa {
+
+/**
+ * \class Lux
+ * \brief Class that implements lux estimation
+ *
+ * IPAs that wish to use lux estimation should create a Lux algorithm module
+ * that lightly wraps this module by providing the platform-specific luminance
+ * histogram. The Lux entry in the tuning file must then precede the algorithms
+ * that depend on the estimated lux value.
+ */
+
+/**
+ * \var Lux::binSize_
+ * \brief The maximum count of each bin
+ */
+
+/**
+ * \var Lux::referenceExposureTime_
+ * \brief The exposure time of the reference image, in microseconds
+ */
+
+/**
+ * \var Lux::referenceAnalogueGain_
+ * \brief The analogue gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceDigitalGain_
+ * \brief The digital gain of the reference image
+ */
+
+/**
+ * \var Lux::referenceY_
+ * \brief The measured luminance of the reference image, out of the bin size
+ *
+ * \sa binSize_
+ */
+
+/**
+ * \var Lux::referenceLux_
+ * \brief The estimated lux level of the reference image
+ */
+
+/**
+ * \brief Construct the Lux helper module
+ * \param[in] binSize The maximum count of each bin
+ */
+Lux::Lux(unsigned int binSize)
+ : binSize_(binSize)
+{
+}
+
+/**
+ * \brief Parse tuning data
+ * \param[in] tuningData The YamlObject representing the tuning data
+ *
+ * This function parses YAML tuning data for the common Lux module. It requires
+ * reference exposure time, analogue gain, digital gain, and lux values.
+ *
+ * \code{.unparsed}
+ * algorithms:
+ * - Lux:
+ * referenceExposureTime: 10000
+ * referenceAnalogueGain: 4.0
+ * referenceDigitalGain: 1.0
+ * referenceY: 12000
+ * referenceLux: 1000
+ * \endcode
+ *
+ * \return 0 on success or a negative error code
+ */
+int Lux::parseTuningData(const YamlObject &tuningData)
+{
+ auto value = tuningData["referenceExposureTime"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceExposureTime'";
+ return -EINVAL;
+ }
+ referenceExposureTime_ = *value * 1.0us;
+
+ value = tuningData["referenceAnalogueGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceAnalogueGain'";
+ return -EINVAL;
+ }
+ referenceAnalogueGain_ = *value;
+
+ value = tuningData["referenceDigitalGain"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceDigitalGain'";
+ return -EINVAL;
+ }
+ referenceDigitalGain_ = *value;
+
+ value = tuningData["referenceY"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceY'";
+ return -EINVAL;
+ }
+ referenceY_ = *value;
+
+ value = tuningData["referenceLux"].get<double>();
+ if (!value) {
+ LOG(Lux, Error) << "Missing tuning parameter: "
+ << "'referenceLux'";
+ return -EINVAL;
+ }
+ referenceLux_ = *value;
+
+ return 0;
+}
+
+/**
+ * \brief Estimate lux given runtime values
+ * \param[in] exposureTime Exposure time applied to the frame
+ * \param[in] aGain Analogue gain applied to the frame
+ * \param[in] dGain Digital gain applied to the frame
+ * \param[in] yHist Histogram from the ISP statistics
+ *
+ * Estimate the lux given the exposure time, gain, and histogram.
+ *
+ * \return Estimated lux value
+ */
+double Lux::estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const
+{
+ double currentY = yHist.interQuantileMean(0, 1);
+ double exposureTimeRatio = referenceExposureTime_ / exposureTime;
+ double aGainRatio = referenceAnalogueGain_ / aGain;
+ double dGainRatio = referenceDigitalGain_ / dGain;
+ double yRatio = currentY * (binSize_ / yHist.bins()) / referenceY_;
+
+ double estimatedLux = exposureTimeRatio * aGainRatio * dGainRatio *
+ yRatio * referenceLux_;
+
+ LOG(Lux, Debug) << "Estimated lux " << estimatedLux;
+ return estimatedLux;
+}
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
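To make the ratio arithmetic in estimateLux() concrete, here is a worked
example, a sketch using the reference values from the tuning snippet above;
tuningData and yHist are assumed to exist, the per-frame numbers and the
bin size are invented:

/*
 * Reference: 10000 us exposure, 4.0 analogue gain, 1.0 digital gain,
 * referenceY 12000, referenceLux 1000.
 *
 * Current frame: 20000 us exposure, 2.0 analogue gain, 1.0 digital gain,
 * and a histogram whose mean, scaled to the bin size, equals 12000
 * (yRatio = 1.0). Then:
 *
 *   exposureTimeRatio = 10000 / 20000 = 0.5
 *   aGainRatio        = 4.0 / 2.0     = 2.0
 *   dGainRatio        = 1.0 / 1.0     = 1.0
 *   estimatedLux      = 0.5 * 2.0 * 1.0 * 1.0 * 1000 = 1000
 */

#include "lux.h"

using namespace std::literals::chrono_literals;

libcamera::ipa::Lux lux(65536);	/* assumed bin size */

int ret = lux.parseTuningData(tuningData["Lux"]);
if (ret)
	return ret;

double estimate = lux.estimateLux(20000us, 2.0, 1.0, yHist);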
diff --git a/src/ipa/libipa/lux.h b/src/ipa/libipa/lux.h
new file mode 100644
index 00000000..93ca6479
--- /dev/null
+++ b/src/ipa/libipa/lux.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Helper class that implements lux estimation
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+class YamlObject;
+
+namespace ipa {
+
+class Histogram;
+
+class Lux
+{
+public:
+ Lux(unsigned int binSize);
+
+ int parseTuningData(const YamlObject &tuningData);
+ double estimateLux(utils::Duration exposureTime,
+ double aGain, double dGain,
+ const Histogram &yHist) const;
+
+private:
+ unsigned int binSize_;
+ utils::Duration referenceExposureTime_;
+ double referenceAnalogueGain_;
+ double referenceDigitalGain_;
+ double referenceY_;
+ double referenceLux_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build
index 6f3cd486..f2b2f4be 100644
--- a/src/ipa/libipa/meson.build
+++ b/src/ipa/libipa/meson.build
@@ -1,13 +1,45 @@
+# SPDX-License-Identifier: CC0-1.0
+
libipa_headers = files([
- 'ipa_interface_wrapper.h',
+ 'agc_mean_luminance.h',
+ 'algorithm.h',
+ 'camera_sensor_helper.h',
+ 'colours.h',
+ 'exposure_mode_helper.h',
+ 'fc_queue.h',
+ 'fixedpoint.h',
+ 'histogram.h',
+ 'interpolator.h',
+ 'lsc_polynomial.h',
+ 'lux.h',
+ 'module.h',
+ 'pwl.h',
+ 'vector.h',
])
libipa_sources = files([
- 'ipa_interface_wrapper.cpp',
+ 'agc_mean_luminance.cpp',
+ 'algorithm.cpp',
+ 'camera_sensor_helper.cpp',
+ 'colours.cpp',
+ 'exposure_mode_helper.cpp',
+ 'fc_queue.cpp',
+ 'fixedpoint.cpp',
+ 'histogram.cpp',
+ 'interpolator.cpp',
+ 'lsc_polynomial.cpp',
+ 'lux.cpp',
+ 'module.cpp',
+ 'pwl.cpp',
+ 'vector.cpp',
])
libipa_includes = include_directories('..')
-libipa = static_library('ipa', libipa_sources,
+libipa = static_library('ipa', [libipa_sources, libipa_headers],
include_directories : ipa_includes,
- dependencies : libcamera_dep)
+ dependencies : libcamera_private)
+
+libipa_dep = declare_dependency(sources : libipa_headers,
+ include_directories : libipa_includes,
+ link_with : libipa)
diff --git a/src/ipa/libipa/module.cpp b/src/ipa/libipa/module.cpp
new file mode 100644
index 00000000..64ca9141
--- /dev/null
+++ b/src/ipa/libipa/module.cpp
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * IPA Module
+ */
+
+#include "module.h"
+
+/**
+ * \file module.h
+ * \brief IPA Module common interface
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAModuleAlgo)
+
+/**
+ * \brief The IPA (Image Processing Algorithm) namespace
+ *
+ * The IPA namespace groups all types specific to IPA modules. It serves as the
+ * top-level namespace for the IPA library libipa, and also contains
+ * module-specific namespaces for IPA modules.
+ */
+namespace ipa {
+
+/**
+ * \class Module
+ * \brief The base class for all IPA modules
+ * \tparam Context The type of the shared IPA context
+ * \tparam FrameContext The type of the frame context
+ * \tparam Config The type of the IPA configuration data
+ * \tparam Params The type of the ISP specific parameters
+ * \tparam Stats The type of the IPA statistics and ISP results
+ *
+ * The Module class template defines a standard internal interface between IPA
+ * modules and libipa.
+ *
+ * While IPA modules are platform-specific, many of their internal functions are
+ * conceptually similar, even if they take different types of platform-specific
+ * parameters. For instance, IPA modules could share code that instantiates,
+ * initializes and runs algorithms if it wasn't for the fact that the format
+ * of ISP parameters or statistics passed to the related functions is
+ * device-dependent.
+ *
+ * To enable a shared implementation of those common tasks in libipa, the Module
+ * class template defines a standard internal interface between IPA modules and
+ * libipa. The template parameters specify the types of module-dependent data.
+ * IPA modules shall create a specialization of the Module class template in
+ * their namespace, and use it to specialize other classes of libipa, such as
+ * the Algorithm class.
+ */
+
+/**
+ * \typedef Module::Context
+ * \brief The type of the shared IPA context
+ */
+
+/**
+ * \typedef Module::FrameContext
+ * \brief The type of the frame context
+ */
+
+/**
+ * \typedef Module::Config
+ * \brief The type of the IPA configuration data
+ */
+
+/**
+ * \typedef Module::Params
+ * \brief The type of the ISP specific parameters
+ */
+
+/**
+ * \typedef Module::Stats
+ * \brief The type of the IPA statistics and ISP results
+ */
+
+/**
+ * \fn Module::algorithms()
+ * \brief Retrieve the list of instantiated algorithms
+ * \return The list of instantiated algorithms
+ */
+
+/**
+ * \fn Module::createAlgorithms()
+ * \brief Create algorithms from YAML configuration data
+ * \param[in] context The IPA context
+ * \param[in] algorithms Algorithms configuration data as a parsed YamlObject
+ *
+ * This function iterates over the list of \a algorithms parsed from the YAML
+ * configuration file, and instantiates and initializes the corresponding
+ * algorithms. The configuration data is expected to be correct; any error
+ * causes the function to fail and return immediately.
+ *
+ * \return 0 on success, or a negative error code on failure
+ */
+
+/**
+ * \fn Module::registerAlgorithm()
+ * \brief Add an algorithm factory class to the list of available algorithms
+ * \param[in] factory Factory to use to construct the algorithm
+ *
+ * This function registers an algorithm factory. It is meant to be called by the
+ * AlgorithmFactory constructor only.
+ */
+
+/**
+ * \fn Module::createAlgorithm(const std::string &name)
+ * \brief Create an instance of an Algorithm by name
+ * \param[in] name The algorithm name
+ *
+ * This function is the entry point to algorithm instantiation for the IPA
+ * module. It creates and returns an instance of an algorithm identified by its
+ * \a name. If no such algorithm exists, the function returns nullptr.
+ *
+ * To make an algorithm available to the IPA module, it shall be registered with
+ * the REGISTER_IPA_ALGORITHM() macro.
+ *
+ * \return A new instance of the Algorithm subclass corresponding to the \a name
+ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
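Tying the pieces together, a platform IPA would typically parse its tuning
file once and then run the instantiated algorithms per frame. A sketch,
assuming a Module specialization instance named module, a parsed tuning file
in tuningData, and that the Algorithm interface from algorithm.h exposes a
per-frame prepare() hook:

int ret = module->createAlgorithms(context, tuningData["algorithms"]);
if (ret)
	return ret;

/* Later, for each frame: */
for (const auto &algo : module->algorithms())
	algo->prepare(context, frame, frameContext, params);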
diff --git a/src/ipa/libipa/module.h b/src/ipa/libipa/module.h
new file mode 100644
index 00000000..0fb51916
--- /dev/null
+++ b/src/ipa/libipa/module.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * IPA module
+ */
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPAModuleAlgo)
+
+namespace ipa {
+
+template<typename _Context, typename _FrameContext, typename _Config,
+ typename _Params, typename _Stats>
+class Module : public Loggable
+{
+public:
+ using Context = _Context;
+ using FrameContext = _FrameContext;
+ using Config = _Config;
+ using Params = _Params;
+ using Stats = _Stats;
+
+ virtual ~Module() {}
+
+ const std::list<std::unique_ptr<Algorithm<Module>>> &algorithms() const
+ {
+ return algorithms_;
+ }
+
+ int createAlgorithms(Context &context, const YamlObject &algorithms)
+ {
+ const auto &list = algorithms.asList();
+
+ for (const auto &[i, algo] : utils::enumerate(list)) {
+ if (!algo.isDictionary()) {
+ LOG(IPAModuleAlgo, Error)
+ << "Invalid YAML syntax for algorithm " << i;
+ algorithms_.clear();
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithm(context, algo);
+ if (ret) {
+ algorithms_.clear();
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ static void registerAlgorithm(AlgorithmFactoryBase<Module> *factory)
+ {
+ factories().push_back(factory);
+ }
+
+private:
+ int createAlgorithm(Context &context, const YamlObject &data)
+ {
+ const auto &[name, algoData] = *data.asDict().begin();
+ std::unique_ptr<Algorithm<Module>> algo = createAlgorithm(name);
+ if (!algo) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' not found";
+ return -EINVAL;
+ }
+
+ int ret = algo->init(context, algoData);
+ if (ret) {
+ LOG(IPAModuleAlgo, Error)
+ << "Algorithm '" << name << "' failed to initialize";
+ return ret;
+ }
+
+ LOG(IPAModuleAlgo, Debug)
+ << "Instantiated algorithm '" << name << "'";
+
+ algorithms_.push_back(std::move(algo));
+ return 0;
+ }
+
+ static std::unique_ptr<Algorithm<Module>> createAlgorithm(const std::string &name)
+ {
+ for (const AlgorithmFactoryBase<Module> *factory : factories()) {
+ if (factory->name() == name)
+ return factory->create();
+ }
+
+ return nullptr;
+ }
+
+ static std::vector<AlgorithmFactoryBase<Module> *> &factories()
+ {
+ /*
+ * The static factories vector is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on
+ * link order.
+ */
+ static std::vector<AlgorithmFactoryBase<Module> *> factories;
+ return factories;
+ }
+
+ std::list<std::unique_ptr<Algorithm<Module>>> algorithms_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
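As described in module.cpp, each IPA creates its own specialization of the
Module class template. A hypothetical sketch (all "foo" types are invented
placeholders standing in for platform-specific definitions):

namespace libcamera {
namespace ipa::foo {

struct IPAContext;		/* shared state */
struct IPAFrameContext;		/* per-frame state */
struct IPAConfigInfo;		/* configure()-time information */
struct foo_params;		/* ISP parameter buffer layout */
struct foo_stats;		/* ISP statistics buffer layout */

using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
			   foo_params, foo_stats>;

} /* namespace ipa::foo */
} /* namespace libcamera */

Algorithms for the platform then derive from Algorithm<Module> and register
themselves with the REGISTER_IPA_ALGORITHM() macro so that createAlgorithms()
can find them by name.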
diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp
new file mode 100644
index 00000000..88fe2022
--- /dev/null
+++ b/src/ipa/libipa/pwl.cpp
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Piecewise linear functions
+ */
+
+#include "pwl.h"
+
+#include <cmath>
+#include <sstream>
+
+/**
+ * \file pwl.h
+ * \brief Piecewise linear functions
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+/**
+ * \class Pwl
+ * \brief Describe a univariate piecewise linear function in two-dimensional
+ * real space
+ *
+ * A piecewise linear function is a univariate function that maps reals to
+ * reals, and it is composed of multiple straight-line segments.
+ *
+ * While a mathematical piecewise linear function would usually be defined by
+ * a list of linear functions and the domain intervals over which they apply,
+ * this Pwl class is instead defined by a list of points at which these line
+ * segments intersect. These intersecting points are known as knots.
+ *
+ * https://en.wikipedia.org/wiki/Piecewise_linear_function
+ *
+ * A consequence of the Pwl class being defined by knots instead of linear
+ * functions is that the values of the piecewise linear function past the ends
+ * of the function are constants as opposed to linear functions. In a
+ * mathematical piecewise linear function that is defined by multiple linear
+ * functions, the ends of the function are also linear functions and hence grow
+ * to infinity (or negative infinity). However, since this Pwl class is defined
+ * by knots, the y-value of the leftmost and rightmost knots will hold for all
+ * x values to negative infinity and positive infinity, respectively.
+ */
+
+/**
+ * \typedef Pwl::Point
+ * \brief Describe a point in two-dimensional real space
+ */
+
+/**
+ * \class Pwl::Interval
+ * \brief Describe an interval in one-dimensional real space
+ */
+
+/**
+ * \fn Pwl::Interval::Interval(double _start, double _end)
+ * \brief Construct an interval
+ * \param[in] _start Start of the interval
+ * \param[in] _end End of the interval
+ */
+
+/**
+ * \fn Pwl::Interval::contains
+ * \brief Check if a given value falls within the interval
+ * \param[in] value Value to check
+ * \return True if the value falls within the interval, including its bounds,
+ * or false otherwise
+ */
+
+/**
+ * \fn Pwl::Interval::clamp
+ * \brief Clamp a value such that it is within the interval
+ * \param[in] value Value to clamp
+ * \return The clamped value
+ */
+
+/**
+ * \fn Pwl::Interval::length
+ * \brief Compute the length of the interval
+ * \return The length of the interval
+ */
+
+/**
+ * \var Pwl::Interval::start
+ * \brief Start of the interval
+ */
+
+/**
+ * \var Pwl::Interval::end
+ * \brief End of the interval
+ */
+
+/**
+ * \brief Construct an empty piecewise linear function
+ */
+Pwl::Pwl()
+{
+}
+
+/**
+ * \brief Construct a piecewise linear function from a list of 2D points
+ * \param[in] points Vector of points from which to construct the piecewise
+ * linear function
+ *
+ * \a points must be in ascending order of x-value.
+ */
+Pwl::Pwl(const std::vector<Point> &points)
+ : points_(points)
+{
+}
+
+/**
+ * \copydoc Pwl::Pwl(const std::vector<Point> &points)
+ *
+ * The contents of the \a points vector are moved to the newly constructed Pwl
+ * instance.
+ */
+Pwl::Pwl(std::vector<Point> &&points)
+ : points_(std::move(points))
+{
+}
+
+/**
+ * \brief Append a point to the end of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be greater than the x-coordinate of the last
+ * (= greatest) point already in the piecewise linear function.
+ */
+void Pwl::append(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.back().x() + eps < x)
+ points_.push_back(Point({ x, y }));
+}
+
+/**
+ * \brief Prepend a point to the beginning of the piecewise linear function
+ * \param[in] x x-coordinate of the point to add to the piecewise linear function
+ * \param[in] y y-coordinate of the point to add to the piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The point's x-coordinate must be less than the x-coordinate of the first
+ * (= smallest) point already in the piecewise linear function.
+ */
+void Pwl::prepend(double x, double y, const double eps)
+{
+ if (points_.empty() || points_.front().x() - eps > x)
+ points_.insert(points_.begin(), Point({ x, y }));
+}
+
+/**
+ * \fn Pwl::empty() const
+ * \brief Check if the piecewise linear function is empty
+ * \return True if there are no points in the function, false otherwise
+ */
+
+/**
+ * \fn Pwl::size() const
+ * \brief Retrieve the number of points in the piecewise linear function
+ * \return The number of points in the piecewise linear function
+ */
+
+/**
+ * \brief Get the domain of the piecewise linear function
+ * \return An interval representing the domain
+ */
+Pwl::Interval Pwl::domain() const
+{
+ return Interval(points_[0].x(), points_[points_.size() - 1].x());
+}
+
+/**
+ * \brief Get the range of the piecewise linear function
+ * \return An interval representing the range
+ */
+Pwl::Interval Pwl::range() const
+{
+ double lo = points_[0].y(), hi = lo;
+ for (auto &p : points_)
+ lo = std::min(lo, p.y()), hi = std::max(hi, p.y());
+ return Interval(lo, hi);
+}
+
+/**
+ * \brief Evaluate the piecewise linear function
+ * \param[in] x The x value to input into the function
+ * \param[inout] span Initial guess for span
+ * \param[in] updateSpan Set to true to update span
+ *
+ * Evaluate Pwl, optionally supplying an initial guess for the
+ * "span". The "span" may be optionally be updated. If you want to know
+ * the "span" value but don't have an initial guess you can set it to
+ * -1.
+ *
+ * \return The result of evaluating the piecewise linear function at position \a x
+ */
+double Pwl::eval(double x, int *span, bool updateSpan) const
+{
+ int index = findSpan(x, span && *span != -1
+ ? *span
+ : points_.size() / 2 - 1);
+ if (span && updateSpan)
+ *span = index;
+ return points_[index].y() +
+ (x - points_[index].x()) * (points_[index + 1].y() - points_[index].y()) /
+ (points_[index + 1].x() - points_[index].x());
+}
+
+int Pwl::findSpan(double x, int span) const
+{
+ /*
+ * Pwls are generally small, so linear search may well be faster than
+ * binary, though this could be revisited if large Pwls start turning up.
+ */
+ int lastSpan = points_.size() - 2;
+ /*
+ * Some algorithms may call us with span pointing directly at the last
+ * control point.
+ */
+ span = std::max(0, std::min(lastSpan, span));
+ while (span < lastSpan && x >= points_[span + 1].x())
+ span++;
+ while (span && x < points_[span].x())
+ span--;
+ return span;
+}
+
+/**
+ * \brief Compute the inverse function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The output includes whether the resulting inverse function is a proper
+ * (true) inverse, or only a best effort (e.g. input was non-monotonic).
+ *
+ * \return A pair of the inverse piecewise linear function, and whether or not
+ * the result is a proper/true inverse
+ */
+std::pair<Pwl, bool> Pwl::inverse(const double eps) const
+{
+ bool appended = false, prepended = false, neither = false;
+ Pwl inverse;
+
+ for (Point const &p : points_) {
+ if (inverse.empty()) {
+ inverse.append(p.y(), p.x(), eps);
+ } else if (std::abs(inverse.points_.back().x() - p.y()) <= eps ||
+ std::abs(inverse.points_.front().x() - p.y()) <= eps) {
+ /* do nothing */;
+ } else if (p.y() > inverse.points_.back().x()) {
+ inverse.append(p.y(), p.x(), eps);
+ appended = true;
+ } else if (p.y() < inverse.points_.front().x()) {
+ inverse.prepend(p.y(), p.x(), eps);
+ prepended = true;
+ } else {
+ neither = true;
+ }
+ }
+
+ /*
+ * This is not a proper inverse if we found ourselves putting points
+ * onto both ends of the inverse, or if there were points that couldn't
+ * go on either.
+ */
+ bool trueInverse = !(neither || (appended && prepended));
+
+ return { inverse, trueInverse };
+}
+
+/**
+ * \brief Compose two piecewise linear functions together
+ * \param[in] other The "other" piecewise linear function
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * The "this" function is done first, and "other" after.
+ *
+ * \return The composed piecewise linear function
+ */
+Pwl Pwl::compose(Pwl const &other, const double eps) const
+{
+ double thisX = points_[0].x(), thisY = points_[0].y();
+ int thisSpan = 0, otherSpan = other.findSpan(thisY, 0);
+ Pwl result({ Point({ thisX, other.eval(thisY, &otherSpan, false) }) });
+
+ while (thisSpan != (int)points_.size() - 1) {
+ double dx = points_[thisSpan + 1].x() - points_[thisSpan].x(),
+ dy = points_[thisSpan + 1].y() - points_[thisSpan].y();
+ if (std::abs(dy) > eps &&
+ otherSpan + 1 < (int)other.points_.size() &&
+ points_[thisSpan + 1].y() >= other.points_[otherSpan + 1].x() + eps) {
+ /*
+ * The next control point in result will be where this
+ * function's y reaches the next span in other.
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan + 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[++otherSpan].x();
+ } else if (std::abs(dy) > eps && otherSpan > 0 &&
+ points_[thisSpan + 1].y() <=
+ other.points_[otherSpan - 1].x() - eps) {
+ /*
+ * The next control point in result will be where this
+ * function's y reaches the previous span in other.
+ */
+ thisX = points_[thisSpan].x() +
+ (other.points_[otherSpan - 1].x() -
+ points_[thisSpan].y()) *
+ dx / dy;
+ thisY = other.points_[--otherSpan].x();
+ } else {
+ /* We stay in the same span in other */
+ thisSpan++;
+ thisX = points_[thisSpan].x(),
+ thisY = points_[thisSpan].y();
+ }
+ result.append(thisX, other.eval(thisY, &otherSpan, false),
+ eps);
+ }
+ return result;
+}
+
+/**
+ * \brief Apply function to (x, y) values at every control point
+ * \param[in] f Function to be applied
+ */
+void Pwl::map(std::function<void(double x, double y)> f) const
+{
+ for (auto &pt : points_)
+ f(pt.x(), pt.y());
+}
+
+/**
+ * \brief Apply function to (x, y0, y1) values wherever either Pwl has a
+ * control point.
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ *
+ * This applies the function \a f to every parameter (x, y0, y1), where x is
+ * the combined list of x-values from \a pwl0 and \a pwl1, y0 is the y-value
+ * for the given x in \a pwl0, and y1 is the y-value for the same x in \a pwl1.
+ */
+void Pwl::map2(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<void(double x, double y0, double y1)> f)
+{
+ int span0 = 0, span1 = 0;
+ double x = std::min(pwl0.points_[0].x(), pwl1.points_[0].x());
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+
+ while (span0 < (int)pwl0.points_.size() - 1 ||
+ span1 < (int)pwl1.points_.size() - 1) {
+ if (span0 == (int)pwl0.points_.size() - 1)
+ x = pwl1.points_[++span1].x();
+ else if (span1 == (int)pwl1.points_.size() - 1)
+ x = pwl0.points_[++span0].x();
+ else if (pwl0.points_[span0 + 1].x() > pwl1.points_[span1 + 1].x())
+ x = pwl1.points_[++span1].x();
+ else
+ x = pwl0.points_[++span0].x();
+ f(x, pwl0.eval(x, &span0, false), pwl1.eval(x, &span1, false));
+ }
+}
+
+/**
+ * \brief Combine two Pwls
+ * \param[in] pwl0 First piecewise linear function
+ * \param[in] pwl1 Second piecewise linear function
+ * \param[in] f Function to be applied
+ * \param[in] eps Epsilon for the minimum x distance between points (optional)
+ *
+ * Create a new Pwl where the y values are given by running \a f wherever
+ * either pwl has a knot.
+ *
+ * \return The combined pwl
+ */
+Pwl Pwl::combine(Pwl const &pwl0, Pwl const &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ const double eps)
+{
+ Pwl result;
+ map2(pwl0, pwl1, [&](double x, double y0, double y1) {
+ result.append(x, f(x, y0, y1), eps);
+ });
+ return result;
+}
+
+/**
+ * \brief Multiply the piecewise linear function
+ * \param[in] d Scalar multiplier to multiply the function by
+ * \return This function, after it has been multiplied by \a d
+ */
+Pwl &Pwl::operator*=(double d)
+{
+ for (auto &pt : points_)
+ pt[1] *= d;
+ return *this;
+}
+
+/**
+ * \brief Assemble and return a string describing the piecewise linear function
+ * \return A string describing the piecewise linear function
+ */
+std::string Pwl::toString() const
+{
+ std::stringstream ss;
+ ss << "Pwl { ";
+ for (auto &p : points_)
+ ss << "(" << p.x() << ", " << p.y() << ") ";
+ ss << "}";
+ return ss.str();
+}
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values with an even number of
+ * elements. They are parsed in pairs into x and y points in the piecewise
+ * linear function, and added in order. x must be monotonically increasing.
+ */
+template<>
+std::optional<ipa::Pwl>
+YamlObject::Getter<ipa::Pwl>::get(const YamlObject &obj) const
+{
+ if (!obj.size() || obj.size() % 2)
+ return std::nullopt;
+
+ ipa::Pwl pwl;
+
+ const auto &list = obj.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto x = it->get<double>();
+ if (!x)
+ return std::nullopt;
+ auto y = (++it)->get<double>();
+ if (!y)
+ return std::nullopt;
+
+ pwl.append(*x, *y);
+ }
+
+ if (pwl.size() != obj.size() / 2)
+ return std::nullopt;
+
+ return pwl;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
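A short usage sketch for Pwl (not part of the patch):

#include "pwl.h"

using libcamera::ipa::Pwl;

static void pwlExample()
{
	Pwl curve({ Pwl::Point({ 0.0, 0.0 }),
		    Pwl::Point({ 0.5, 0.8 }),
		    Pwl::Point({ 1.0, 1.0 }) });

	double y = curve.eval(0.25);	/* 0.4, on the first segment */

	/* proper == true, as the curve is monotonically increasing. */
	auto [inverted, proper] = curve.inverse();
	double x = inverted.eval(y);	/* back to 0.25 */

	static_cast<void>(x);
	static_cast<void>(proper);
}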
diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h
new file mode 100644
index 00000000..d4ec9f4f
--- /dev/null
+++ b/src/ipa/libipa/pwl.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Piecewise linear functions interface
+ */
+#pragma once
+
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "vector.h"
+
+namespace libcamera {
+
+namespace ipa {
+
+class Pwl
+{
+public:
+ using Point = Vector<double, 2>;
+
+ struct Interval {
+ Interval(double _start, double _end)
+ : start(_start), end(_end) {}
+
+ bool contains(double value)
+ {
+ return value >= start && value <= end;
+ }
+
+ double clamp(double value)
+ {
+ return std::clamp(value, start, end);
+ }
+
+ double length() const { return end - start; }
+
+ double start, end;
+ };
+
+ Pwl();
+ Pwl(const std::vector<Point> &points);
+ Pwl(std::vector<Point> &&points);
+
+ void append(double x, double y, double eps = 1e-6);
+
+ bool empty() const { return points_.empty(); }
+ size_t size() const { return points_.size(); }
+
+ Interval domain() const;
+ Interval range() const;
+
+ double eval(double x, int *span = nullptr,
+ bool updateSpan = true) const;
+
+ std::pair<Pwl, bool> inverse(double eps = 1e-6) const;
+ Pwl compose(const Pwl &other, double eps = 1e-6) const;
+
+ void map(std::function<void(double x, double y)> f) const;
+
+ static Pwl
+ combine(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<double(double x, double y0, double y1)> f,
+ double eps = 1e-6);
+
+ Pwl &operator*=(double d);
+
+ std::string toString() const;
+
+private:
+ static void map2(const Pwl &pwl0, const Pwl &pwl1,
+ std::function<void(double x, double y0, double y1)> f);
+ void prepend(double x, double y, double eps = 1e-6);
+ int findSpan(double x, int span) const;
+
+ std::vector<Point> points_;
+};
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
diff --git a/src/ipa/libipa/vector.cpp b/src/ipa/libipa/vector.cpp
new file mode 100644
index 00000000..8019f8cf
--- /dev/null
+++ b/src/ipa/libipa/vector.cpp
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+
+#include "vector.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file vector.h
+ * \brief Vector class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Vector)
+
+namespace ipa {
+
+/**
+ * \class Vector
+ * \brief Vector class
+ * \tparam T Type of numerical values to be stored in the vector
+ * \tparam Rows Number of dimensions of the vector (= number of elements)
+ */
+
+/**
+ * \fn Vector::Vector()
+ * \brief Construct an uninitialized vector
+ */
+
+/**
+ * \fn Vector::Vector(T scalar)
+ * \brief Construct a vector filled with a \a scalar value
+ * \param[in] scalar The scalar value
+ */
+
+/**
+ * \fn Vector::Vector(const std::array<T, Rows> &data)
+ * \brief Construct vector from supplied data
+ * \param data Data from which to construct a vector
+ *
+ * The size of \a data must be equal to the dimension size Rows of the vector.
+ */
+
+/**
+ * \fn T Vector::operator[](size_t i) const
+ * \brief Index to an element in the vector
+ * \param i Index of element to retrieve
+ * \return Element at index \a i from the vector
+ */
+
+/**
+ * \fn T &Vector::operator[](size_t i)
+ * \copydoc Vector::operator[](size_t i) const
+ */
+
+/**
+ * \fn Vector::operator-() const
+ * \brief Negate a Vector by negating all of its coordinates
+ * \return The negated vector
+ */
+
+/**
+ * \fn Vector::operator+(Vector const &other) const
+ * \brief Calculate the sum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise sum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator+(T scalar) const
+ * \brief Calculate the sum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise sum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator-(Vector const &other) const
+ * \brief Calculate the difference of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise subtraction of \a other from this vector
+ */
+
+/**
+ * \fn Vector::operator-(T scalar) const
+ * \brief Calculate the difference of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise subtraction of \a scalar from this vector
+ */
+
+/**
+ * \fn Vector::operator*(const Vector &other) const
+ * \brief Calculate the product of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise product of this vector and \a other
+ */
+
+/**
+ * \fn Vector::operator*(T scalar) const
+ * \brief Calculate the product of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise product of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::operator/(const Vector &other) const
+ * \brief Calculate the quotient of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise division of this vector by \a other
+ */
+
+/**
+ * \fn Vector::operator/(T scalar) const
+ * \brief Calculate the quotient of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise division of this vector by \a scalar
+ */
+
+/**
+ * \fn Vector::operator+=(Vector const &other)
+ * \brief Add \a other element-wise to this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator+=(T scalar)
+ * \brief Add \a scalar element-wise to this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(Vector const &other)
+ * \brief Subtract \a other element-wise from this vector
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator-=(T scalar)
+ * \brief Subtract \a scalar element-wise from this vector
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(const Vector &other)
+ * \brief Multiply this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator*=(T scalar)
+ * \brief Multiply this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(const Vector &other)
+ * \brief Divide this vector by \a other element-wise
+ * \param[in] other The other vector
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::operator/=(T scalar)
+ * \brief Divide this vector by \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return This vector
+ */
+
+/**
+ * \fn Vector::min(const Vector &other) const
+ * \brief Calculate the minimum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise minimum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::min(T scalar) const
+ * \brief Calculate the minimum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise minimum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::max(const Vector &other) const
+ * \brief Calculate the maximum of this vector and \a other element-wise
+ * \param[in] other The other vector
+ * \return The element-wise maximum of this vector and \a other
+ */
+
+/**
+ * \fn Vector::max(T scalar) const
+ * \brief Calculate the maximum of this vector and \a scalar element-wise
+ * \param[in] scalar The scalar
+ * \return The element-wise maximum of this vector and \a scalar
+ */
+
+/**
+ * \fn Vector::dot(const Vector<T, Rows> &other) const
+ * \brief Compute the dot product
+ * \param[in] other The other vector
+ * \return The dot product of the two vectors
+ */
+
+/**
+ * \fn constexpr T &Vector::x()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::y()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::z()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::x() const
+ * \copydoc Vector::x()
+ */
+
+/**
+ * \fn constexpr const T &Vector::y() const
+ * \copydoc Vector::y()
+ */
+
+/**
+ * \fn constexpr const T &Vector::z() const
+ * \copydoc Vector::z()
+ */
+
+/**
+ * \fn constexpr T &Vector::r()
+ * \brief Convenience function to access the first element of the vector
+ * \return The first element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::g()
+ * \brief Convenience function to access the second element of the vector
+ * \return The second element of the vector
+ */
+
+/**
+ * \fn constexpr T &Vector::b()
+ * \brief Convenience function to access the third element of the vector
+ * \return The third element of the vector
+ */
+
+/**
+ * \fn constexpr const T &Vector::r() const
+ * \copydoc Vector::r()
+ */
+
+/**
+ * \fn constexpr const T &Vector::g() const
+ * \copydoc Vector::g()
+ */
+
+/**
+ * \fn constexpr const T &Vector::b() const
+ * \copydoc Vector::b()
+ */
+
+/**
+ * \fn Vector::length2()
+ * \brief Get the squared length of the vector
+ * \return The squared length of the vector
+ */
+
+/**
+ * \fn Vector::length()
+ * \brief Get the length of the vector
+ * \return The length of the vector
+ */
+
+/**
+ * \fn Vector::sum() const
+ * \brief Calculate the sum of all the vector elements
+ * \tparam R The type of the sum
+ *
+ * The type R of the sum defaults to the type T of the elements, but can be set
+ * explicitly to use a different type in case the type T would risk
+ * overflowing.
+ *
+ * \return The sum of all the vector elements
+ */
+
+/**
+ * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+ * \brief Multiply a matrix by a vector
+ * \tparam T Numerical type of the contents of the matrix and vector
+ * \tparam Rows The number of rows in the matrix
+ * \tparam Cols The number of columns in the matrix (= rows in the vector)
+ * \param m The matrix
+ * \param v The vector
+ * \return Product of matrix \a m and vector \a v
+ */
+
+/**
+ * \typedef RGB
+ * \brief A Vector of 3 elements representing an RGB pixel value
+ */
+
+/**
+ * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for equality
+ * \return True if the two vectors are equal, false otherwise
+ */
+
+/**
+ * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+ * \brief Compare vectors for inequality
+ * \return True if the two vectors are not equal, false otherwise
+ */
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Vector, Error)
+ << "Wrong number of values in YAML vector: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+} /* namespace libcamera */
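A short usage sketch for Vector (not part of the patch):

#include "vector.h"

using libcamera::ipa::Vector;

static void vectorExample()
{
	Vector<double, 3> a({ 1.0, 2.0, 3.0 });
	Vector<double, 3> ones(1.0);	/* every element set to the scalar */

	Vector<double, 3> sum = a + ones;	/* { 2.0, 3.0, 4.0 } */
	double dot = a.dot(ones);		/* 1 + 2 + 3 = 6 */
	double len = a.length();		/* sqrt(14) */

	a *= 2.0;				/* { 2.0, 4.0, 6.0 } */

	static_cast<void>(sum);
	static_cast<void>(dot);
	static_cast<void>(len);
}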
diff --git a/src/ipa/libipa/vector.h b/src/ipa/libipa/vector.h
new file mode 100644
index 00000000..fe33c9d6
--- /dev/null
+++ b/src/ipa/libipa/vector.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Vector and related operations
+ */
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <functional>
+#include <numeric>
+#include <optional>
+#include <ostream>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "libcamera/internal/matrix.h"
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Vector)
+
+namespace ipa {
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows,
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+#else
+template<typename T, unsigned int Rows>
+#endif /* __DOXYGEN__ */
+class Vector
+{
+public:
+ constexpr Vector() = default;
+
+ constexpr explicit Vector(T scalar)
+ {
+ data_.fill(scalar);
+ }
+
+ constexpr Vector(const std::array<T, Rows> &data)
+ {
+ for (unsigned int i = 0; i < Rows; i++)
+ data_[i] = data[i];
+ }
+
+ const T &operator[](size_t i) const
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ T &operator[](size_t i)
+ {
+ ASSERT(i < data_.size());
+ return data_[i];
+ }
+
+ constexpr Vector<T, Rows> operator-() const
+ {
+ Vector<T, Rows> ret;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret[i] = -data_[i];
+ return ret;
+ }
+
+ constexpr Vector operator+(const Vector &other) const
+ {
+ return apply(*this, other, std::plus<>{});
+ }
+
+ constexpr Vector operator+(T scalar) const
+ {
+ return apply(*this, scalar, std::plus<>{});
+ }
+
+ constexpr Vector operator-(const Vector &other) const
+ {
+ return apply(*this, other, std::minus<>{});
+ }
+
+ constexpr Vector operator-(T scalar) const
+ {
+ return apply(*this, scalar, std::minus<>{});
+ }
+
+ constexpr Vector operator*(const Vector &other) const
+ {
+ return apply(*this, other, std::multiplies<>{});
+ }
+
+ constexpr Vector operator*(T scalar) const
+ {
+ return apply(*this, scalar, std::multiplies<>{});
+ }
+
+ constexpr Vector operator/(const Vector &other) const
+ {
+ return apply(*this, other, std::divides<>{});
+ }
+
+ constexpr Vector operator/(T scalar) const
+ {
+ return apply(*this, scalar, std::divides<>{});
+ }
+
+ Vector &operator+=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator+=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a + b; });
+ }
+
+ Vector &operator-=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator-=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a - b; });
+ }
+
+ Vector &operator*=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator*=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a * b; });
+ }
+
+ Vector &operator/=(const Vector &other)
+ {
+ return apply(other, [](T a, T b) { return a / b; });
+ }
+
+ Vector &operator/=(T scalar)
+ {
+ return apply(scalar, [](T a, T b) { return a / b; });
+ }
+
+ constexpr Vector min(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector min(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) { return std::min(a, b); });
+ }
+
+ constexpr Vector max(const Vector &other) const
+ {
+ return apply(*this, other, [](T a, T b) { return std::max(a, b); });
+ }
+
+ constexpr Vector max(T scalar) const
+ {
+ return apply(*this, scalar, [](T a, T b) -> T { return std::max(a, b); });
+ }
+
+ constexpr T dot(const Vector<T, Rows> &other) const
+ {
+ T ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * other[i];
+ return ret;
+ }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &x() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &y() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &z() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &x() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &y() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &z() { return data_[2]; }
+
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &r() const { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &g() const { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr const T &b() const { return data_[2]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
+#endif /* __DOXYGEN__ */
+ constexpr T &r() { return data_[0]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
+#endif /* __DOXYGEN__ */
+ constexpr T &g() { return data_[1]; }
+#ifndef __DOXYGEN__
+ template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
+#endif /* __DOXYGEN__ */
+ constexpr T &b() { return data_[2]; }
+
+ constexpr double length2() const
+ {
+ double ret = 0;
+ for (unsigned int i = 0; i < Rows; i++)
+ ret += data_[i] * data_[i];
+ return ret;
+ }
+
+ constexpr double length() const
+ {
+ return std::sqrt(length2());
+ }
+
+ template<typename R = T>
+ constexpr R sum() const
+ {
+ return std::accumulate(data_.begin(), data_.end(), R{});
+ }
+
+private:
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, const Vector &rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ rhs.data_.begin(), result.data_.begin(),
+ op);
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ static constexpr Vector apply(const Vector &lhs, T rhs, BinaryOp op)
+ {
+ Vector result;
+ std::transform(lhs.data_.begin(), lhs.data_.end(),
+ result.data_.begin(),
+ [&op, rhs](T v) { return op(v, rhs); });
+
+ return result;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(const Vector &other, BinaryOp op)
+ {
+ auto itOther = other.data_.begin();
+ std::for_each(data_.begin(), data_.end(),
+ [&op, &itOther](T &v) { v = op(v, *itOther++); });
+
+ return *this;
+ }
+
+ template<class BinaryOp>
+ Vector &apply(T scalar, BinaryOp op)
+ {
+ std::for_each(data_.begin(), data_.end(),
+ [&op, scalar](T &v) { v = op(v, scalar); });
+
+ return *this;
+ }
+
+ std::array<T, Rows> data_;
+};
+
+template<typename T>
+using RGB = Vector<T, 3>;
+
+template<typename T, unsigned int Rows, unsigned int Cols>
+Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v)
+{
+ Vector<T, Rows> result;
+
+ for (unsigned int i = 0; i < Rows; i++) {
+ T sum = 0;
+ for (unsigned int j = 0; j < Cols; j++)
+ sum += m[i][j] * v[j];
+ result[i] = sum;
+ }
+
+ return result;
+}
+
+template<typename T, unsigned int Rows>
+bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ for (unsigned int i = 0; i < Rows; i++) {
+ if (lhs[i] != rhs[i])
+ return false;
+ }
+
+ return true;
+}
+
+template<typename T, unsigned int Rows>
+bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
+{
+ return !(lhs == rhs);
+}
+
+#ifndef __DOXYGEN__
+bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
+#endif /* __DOXYGEN__ */
+
+} /* namespace ipa */
+
+#ifndef __DOXYGEN__
+template<typename T, unsigned int Rows>
+std::ostream &operator<<(std::ostream &out, const ipa::Vector<T, Rows> &v)
+{
+ out << "Vector { ";
+ for (unsigned int i = 0; i < Rows; i++) {
+ out << v[i];
+ out << ((i + 1 < Rows) ? ", " : " ");
+ }
+ out << " }";
+
+ return out;
+}
+
+template<typename T, unsigned int Rows>
+struct YamlObject::Getter<ipa::Vector<T, Rows>> {
+ std::optional<ipa::Vector<T, Rows>> get(const YamlObject &obj) const
+ {
+ if (!ipa::vectorValidateYaml(obj, Rows))
+ return std::nullopt;
+
+ ipa::Vector<T, Rows> vector;
+
+ unsigned int i = 0;
+ for (const YamlObject &entry : obj.asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ vector[i++] = *value;
+ }
+
+ return vector;
+ }
+};
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
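The Getter specialization above lets tuning code read fixed-size vectors
directly. A sketch, assuming a parsed YamlObject named tuningData and a
hypothetical "offset" key holding exactly three numbers:

std::optional<libcamera::ipa::Vector<double, 3>> offset =
	tuningData["offset"].get<libcamera::ipa::Vector<double, 3>>();
if (!offset)
	return -EINVAL;	/* wrong size or non-numeric entries */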
diff --git a/src/ipa/mali-c55/algorithms/agc.cpp b/src/ipa/mali-c55/algorithms/agc.cpp
new file mode 100644
index 00000000..70667db3
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/agc.cpp
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * agc.cpp - AGC/AEC mean-based control algorithm
+ */
+
+#include "agc.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libipa/colours.h"
+#include "libipa/fixedpoint.h"
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Agc)
+
+/*
+ * Number of histogram bins. This is only true for the specific configuration we
+ * set to the ISP; 4 separate histograms of 256 bins each. If that configuration
+ * ever changes then this constant will need updating.
+ */
+static constexpr unsigned int kNumHistogramBins = 256;
+
+/*
+ * The Mali-C55 ISP has a digital gain block which allows setting gain in Q5.8
+ * format, a range of 0.0 to (very nearly) 32.0. We clamp from 1.0 to the actual
+ * max value which is 8191 * 2^-8.
+ */
+static constexpr double kMinDigitalGain = 1.0;
+static constexpr double kMaxDigitalGain = 31.99609375;
+
+uint32_t AgcStatistics::decodeBinValue(uint16_t binVal)
+{
+ int exponent = (binVal & 0xf000) >> 12;
+ int mantissa = binVal & 0xfff;
+
+ if (!exponent)
+ return mantissa * 2;
+ else
+ return (mantissa + 4096) * std::pow(2, exponent);
+}
+
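+/*
+ * Worked example (values invented): binVal = 0x2345 has exponent 2 and
+ * mantissa 0x345 = 837, decoding to (837 + 4096) * 2^2 = 19732; with a
+ * zero exponent, e.g. binVal = 0x0345, the result is 837 * 2 = 1674.
+ */
+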
+/*
+ * We configure the ISP to give us 4 histograms of 256 bins each, with
+ * a single histogram per colour channel (R/Gr/Gb/B). The memory space
+ * containing the data is a single block containing all 4 histograms
+ * with the position of each colour's histogram within it dependent on
+ * the bayer pattern of the data input to the ISP.
+ *
+ * NOTE: The validity of this function depends on the parameters we have
+ * configured. With different skip/offset x, y values not all of the
+ * colour channels would be populated, and they may not be in the same
+ * planes as calculated here.
+ */
+int AgcStatistics::setBayerOrderIndices(BayerFormat::Order bayerOrder)
+{
+ switch (bayerOrder) {
+ case BayerFormat::Order::RGGB:
+ rIndex_ = 0;
+ grIndex_ = 1;
+ gbIndex_ = 2;
+ bIndex_ = 3;
+ break;
+ case BayerFormat::Order::GRBG:
+ grIndex_ = 0;
+ rIndex_ = 1;
+ bIndex_ = 2;
+ gbIndex_ = 3;
+ break;
+ case BayerFormat::Order::GBRG:
+ gbIndex_ = 0;
+ bIndex_ = 1;
+ rIndex_ = 2;
+ grIndex_ = 3;
+ break;
+ case BayerFormat::Order::BGGR:
+ bIndex_ = 0;
+ gbIndex_ = 1;
+ grIndex_ = 2;
+ rIndex_ = 3;
+ break;
+ default:
+ LOG(MaliC55Agc, Error)
+ << "Invalid bayer format " << bayerOrder;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void AgcStatistics::parseStatistics(const mali_c55_stats_buffer *stats)
+{
+ uint32_t r[kNumHistogramBins], g[kNumHistogramBins];
+ uint32_t b[kNumHistogramBins], y[kNumHistogramBins];
+
+ /*
+ * We need to decode the bin values for each histogram from their 16-bit
+ * compressed values to a 32-bit value. We also take the average of the
+ * Gr/Gb values into a single green histogram.
+ */
+ for (unsigned int i = 0; i < kNumHistogramBins; i++) {
+ r[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * rIndex_)]);
+ g[i] = (decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * grIndex_)]) +
+ decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * gbIndex_)])) / 2;
+ b[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * bIndex_)]);
+
+ y[i] = rec601LuminanceFromRGB({ { static_cast<double>(r[i]),
+ static_cast<double>(g[i]),
+ static_cast<double>(b[i]) } });
+ }
+
+ rHist = Histogram(Span<uint32_t>(r, kNumHistogramBins));
+ gHist = Histogram(Span<uint32_t>(g, kNumHistogramBins));
+ bHist = Histogram(Span<uint32_t>(b, kNumHistogramBins));
+ yHist = Histogram(Span<uint32_t>(y, kNumHistogramBins));
+}
+
+Agc::Agc()
+ : AgcMeanLuminance()
+{
+}
+
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
+{
+ int ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true);
+ context.ctrlMap[&controls::DigitalGain] = ControlInfo(
+ static_cast<float>(kMinDigitalGain),
+ static_cast<float>(kMaxDigitalGain),
+ static_cast<float>(kMinDigitalGain)
+ );
+ context.ctrlMap.merge(controls());
+
+ return 0;
+}
+
+int Agc::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ int ret = statistics_.setBayerOrderIndices(context.configuration.sensor.bayerOrder);
+ if (ret)
+ return ret;
+
+ /*
+ * Defaults; we use whatever the sensor's default exposure is and the
+ * minimum analogue gain. AEGC is _active_ by default.
+ */
+ context.activeState.agc.autoEnabled = true;
+ context.activeState.agc.automatic.sensorGain = context.configuration.agc.minAnalogueGain;
+ context.activeState.agc.automatic.exposure = context.configuration.agc.defaultExposure;
+ context.activeState.agc.automatic.ispGain = kMinDigitalGain;
+ context.activeState.agc.manual.sensorGain = context.configuration.agc.minAnalogueGain;
+ context.activeState.agc.manual.exposure = context.configuration.agc.defaultExposure;
+ context.activeState.agc.manual.ispGain = kMinDigitalGain;
+ context.activeState.agc.constraintMode = constraintModes().begin()->first;
+ context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first;
+
+ /* \todo Run this again when FrameDurationLimits is passed in */
+ setLimits(context.configuration.agc.minShutterSpeed,
+ context.configuration.agc.maxShutterSpeed,
+ context.configuration.agc.minAnalogueGain,
+ context.configuration.agc.maxAnalogueGain);
+
+ resetFrameCount();
+
+ return 0;
+}
+
+void Agc::queueRequest(IPAContext &context, const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &agc = context.activeState.agc;
+
+ const auto &constraintMode = controls.get(controls::AeConstraintMode);
+ agc.constraintMode = constraintMode.value_or(agc.constraintMode);
+
+ const auto &exposureMode = controls.get(controls::AeExposureMode);
+ agc.exposureMode = exposureMode.value_or(agc.exposureMode);
+
+ const auto &agcEnable = controls.get(controls::AeEnable);
+ if (agcEnable && *agcEnable != agc.autoEnabled) {
+ agc.autoEnabled = *agcEnable;
+
+ LOG(MaliC55Agc, Info)
+ << (agc.autoEnabled ? "Enabling" : "Disabling")
+ << " AGC";
+ }
+
+ /*
+	 * If automatic exposure and gain are enabled we have no further work
+ * to do here...
+ */
+ if (agc.autoEnabled)
+ return;
+
+ /*
+ * ...otherwise we need to look for exposure and gain controls and use
+ * those to set the activeState.
+ */
+ const auto &exposure = controls.get(controls::ExposureTime);
+ if (exposure) {
+ agc.manual.exposure = *exposure * 1.0us / context.configuration.sensor.lineDuration;
+
+ LOG(MaliC55Agc, Debug)
+ << "Exposure set to " << agc.manual.exposure
+ << " on request sequence " << frame;
+ }
+
+ const auto &analogueGain = controls.get(controls::AnalogueGain);
+ if (analogueGain) {
+ agc.manual.sensorGain = *analogueGain;
+
+ LOG(MaliC55Agc, Debug)
+ << "Analogue gain set to " << agc.manual.sensorGain
+ << " on request sequence " << frame;
+ }
+
+ const auto &digitalGain = controls.get(controls::DigitalGain);
+ if (digitalGain) {
+ agc.manual.ispGain = *digitalGain;
+
+ LOG(MaliC55Agc, Debug)
+ << "Digital gain set to " << agc.manual.ispGain
+ << " on request sequence " << frame;
+ }
+}
+
+size_t Agc::fillGainParamBlock(IPAContext &context, IPAFrameContext &frameContext,
+ mali_c55_params_block block)
+{
+ IPAActiveState &activeState = context.activeState;
+ double gain;
+
+ if (activeState.agc.autoEnabled)
+ gain = activeState.agc.automatic.ispGain;
+ else
+ gain = activeState.agc.manual.ispGain;
+
+ block.header->type = MALI_C55_PARAM_BLOCK_DIGITAL_GAIN;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_digital_gain);
+
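+	/*
+	 * The gain is programmed in Q5.8 fixed point, so for example a gain
+	 * of 2.0 is written to the ISP as 2.0 * 256 = 512.
+	 */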
+ block.digital_gain->gain = floatingToFixedPoint<5, 8, uint16_t, double>(gain);
+ frameContext.agc.ispGain = gain;
+
+ return block.header->size;
+}
+
+size_t Agc::fillParamsBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type)
+{
+ block.header->type = type;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_aexp_hist);
+
+ /* Collect every 3rd pixel horizontally */
+ block.aexp_hist->skip_x = 1;
+	/* Start from the first column */
+ block.aexp_hist->offset_x = 0;
+ /* Collect every pixel vertically */
+ block.aexp_hist->skip_y = 0;
+ /* Start from the first row */
+ block.aexp_hist->offset_y = 0;
+ /* 1x scaling (i.e. none) */
+ block.aexp_hist->scale_bottom = 0;
+ block.aexp_hist->scale_top = 0;
+ /* Collect all Bayer planes into 4 separate histograms */
+ block.aexp_hist->plane_mode = 1;
+ /* Tap the data immediately after the digital gain block */
+ block.aexp_hist->tap_point = MALI_C55_AEXP_HIST_TAP_FS;
+
+ return block.header->size;
+}
+
+size_t Agc::fillWeightsArrayBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type)
+{
+ block.header->type = type;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_aexp_weights);
+
+ /* We use every zone - a 15x15 grid */
+ block.aexp_weights->nodes_used_horiz = 15;
+ block.aexp_weights->nodes_used_vert = 15;
+
+ /*
+ * We uniformly weight the zones to 1 - this results in the collected
+ * histograms containing a true pixel count, which we can then use to
+ * approximate colour channel averages for the image.
+ */
+ Span<uint8_t> weights{
+ block.aexp_weights->zone_weights,
+ MALI_C55_MAX_ZONES
+ };
+ std::fill(weights.begin(), weights.end(), 1);
+
+ return block.header->size;
+}
+
+void Agc::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, mali_c55_params_buffer *params)
+{
+ mali_c55_params_block block;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillGainParamBlock(context, frameContext, block);
+
+ if (frame > 0)
+ return;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillParamsBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillWeightsArrayBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillParamsBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST);
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillWeightsArrayBuffer(block,
+ MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS);
+}
+
+double Agc::estimateLuminance(const double gain) const
+{
+ double rAvg = statistics_.rHist.interQuantileMean(0, 1) * gain;
+ double gAvg = statistics_.gHist.interQuantileMean(0, 1) * gain;
+ double bAvg = statistics_.bHist.interQuantileMean(0, 1) * gain;
+ double yAvg = rec601LuminanceFromRGB({ { rAvg, gAvg, bAvg } });
+
+ return yAvg / kNumHistogramBins;
+}
+
+void Agc::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+		  ControlList &metadata)
+{
+ IPASessionConfiguration &configuration = context.configuration;
+ IPAActiveState &activeState = context.activeState;
+
+ if (!stats) {
+ LOG(MaliC55Agc, Error) << "No statistics buffer passed to Agc";
+ return;
+ }
+
+ statistics_.parseStatistics(stats);
+ context.activeState.agc.temperatureK = estimateCCT({ { statistics_.rHist.interQuantileMean(0, 1),
+ statistics_.gHist.interQuantileMean(0, 1),
+ statistics_.bHist.interQuantileMean(0, 1) } });
+
+ /*
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
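+	 *
+	 * For example, 1000 lines of exposure at a 10 us line duration with a
+	 * total gain of 4.0 gives an effective exposure value of 40 ms.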
+ */
+ uint32_t exposure = frameContext.agc.exposure;
+ double analogueGain = frameContext.agc.sensorGain;
+ double digitalGain = frameContext.agc.ispGain;
+ double totalGain = analogueGain * digitalGain;
+ utils::Duration currentShutter = exposure * configuration.sensor.lineDuration;
+ utils::Duration effectiveExposureValue = currentShutter * totalGain;
+
+ utils::Duration shutterTime;
+ double aGain, dGain;
+ std::tie(shutterTime, aGain, dGain) =
+ calculateNewEv(activeState.agc.constraintMode,
+ activeState.agc.exposureMode, statistics_.yHist,
+ effectiveExposureValue);
+
+ dGain = std::clamp(dGain, kMinDigitalGain, kMaxDigitalGain);
+
+ LOG(MaliC55Agc, Debug)
+ << "Divided up shutter, analogue gain and digital gain are "
+ << shutterTime << ", " << aGain << " and " << dGain;
+
+ activeState.agc.automatic.exposure = shutterTime / configuration.sensor.lineDuration;
+ activeState.agc.automatic.sensorGain = aGain;
+ activeState.agc.automatic.ispGain = dGain;
+
+ metadata.set(controls::ExposureTime, currentShutter.get<std::micro>());
+ metadata.set(controls::AnalogueGain, frameContext.agc.sensorGain);
+ metadata.set(controls::DigitalGain, frameContext.agc.ispGain);
+ metadata.set(controls::ColourTemperature, context.activeState.agc.temperatureK);
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/agc.h b/src/ipa/mali-c55/algorithms/agc.h
new file mode 100644
index 00000000..c5c574e5
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/agc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * agc.h - Mali C55 AGC/AEC mean-based control algorithm
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "libipa/agc_mean_luminance.h"
+#include "libipa/histogram.h"
+
+#include "algorithm.h"
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class AgcStatistics
+{
+public:
+ AgcStatistics()
+ {
+ }
+
+ int setBayerOrderIndices(BayerFormat::Order bayerOrder);
+ uint32_t decodeBinValue(uint16_t binVal);
+ void parseStatistics(const mali_c55_stats_buffer *stats);
+
+ Histogram rHist;
+ Histogram gHist;
+ Histogram bHist;
+ Histogram yHist;
+private:
+ unsigned int rIndex_;
+ unsigned int grIndex_;
+ unsigned int gbIndex_;
+ unsigned int bIndex_;
+};
+
+class Agc : public Algorithm, public AgcMeanLuminance
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ double estimateLuminance(const double gain) const override;
+ size_t fillGainParamBlock(IPAContext &context,
+ IPAFrameContext &frameContext,
+ mali_c55_params_block block);
+ size_t fillParamsBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type);
+ size_t fillWeightsArrayBuffer(mali_c55_params_block block,
+ enum mali_c55_param_block_type type);
+
+ AgcStatistics statistics_;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/algorithm.h b/src/ipa/mali-c55/algorithms/algorithm.h
new file mode 100644
index 00000000..36a3bff0
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/algorithm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * algorithm.h - Mali-C55 control algorithm interface
+ */
+
+#pragma once
+
+#include <linux/mali-c55-config.h>
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+class Algorithm : public libcamera::ipa::Algorithm<Module>
+{
+};
+
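+/*
+ * ISP parameters are filled into the parameters buffer as a sequence of
+ * variable-sized blocks. This union allows a single pointer to be viewed as
+ * the generic block header, as one of the block-specific layouts, or as a
+ * raw byte pointer for advancing through the buffer.
+ */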
+union mali_c55_params_block {
+ struct mali_c55_params_block_header *header;
+ struct mali_c55_params_sensor_off_preshading *sensor_offs;
+ struct mali_c55_params_aexp_hist *aexp_hist;
+ struct mali_c55_params_aexp_weights *aexp_weights;
+ struct mali_c55_params_digital_gain *digital_gain;
+ struct mali_c55_params_awb_gains *awb_gains;
+ struct mali_c55_params_awb_config *awb_config;
+ struct mali_c55_params_mesh_shading_config *shading_config;
+ struct mali_c55_params_mesh_shading_selection *shading_selection;
+ __u8 *data;
+};
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/awb.cpp b/src/ipa/mali-c55/algorithms/awb.cpp
new file mode 100644
index 00000000..050b191b
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/awb.cpp
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * awb.cpp - Mali C55 grey world auto white balance algorithm
+ */
+
+#include "awb.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/fixedpoint.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Awb)
+
+/* Number of frames at which we should run AWB at full speed */
+static constexpr uint32_t kNumStartupFrames = 4;
+
+Awb::Awb()
+{
+}
+
+int Awb::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ /*
+ * Initially we have no idea what the colour balance will be like, so
+ * for the first frame we will make no assumptions and leave the R/B
+ * channels unmodified.
+ */
+ context.activeState.awb.rGain = 1.0;
+ context.activeState.awb.bGain = 1.0;
+
+ return 0;
+}
+
+size_t Awb::fillGainsParamBlock(mali_c55_params_block block, IPAContext &context,
+ IPAFrameContext &frameContext)
+{
+ block.header->type = MALI_C55_PARAM_BLOCK_AWB_GAINS;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_awb_gains);
+
+ double rGain = context.activeState.awb.rGain;
+ double bGain = context.activeState.awb.bGain;
+
+ /*
+ * The gains here map as follows:
+ * gain00 = R
+ * gain01 = Gr
+ * gain10 = Gb
+ * gain11 = B
+ *
+ * This holds true regardless of the bayer order of the input data, as
+ * the mapping is done internally in the ISP.
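+	 *
+	 * The gains are written in Q4.8 fixed point, so the unity gain used
+	 * for the green channels is stored as 1.0 * 256 = 256.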
+ */
+ block.awb_gains->gain00 = floatingToFixedPoint<4, 8, uint16_t, double>(rGain);
+ block.awb_gains->gain01 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0);
+ block.awb_gains->gain10 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0);
+ block.awb_gains->gain11 = floatingToFixedPoint<4, 8, uint16_t, double>(bGain);
+
+ frameContext.awb.rGain = rGain;
+ frameContext.awb.bGain = bGain;
+
+ return sizeof(struct mali_c55_params_awb_gains);
+}
+
+size_t Awb::fillConfigParamBlock(mali_c55_params_block block)
+{
+ block.header->type = MALI_C55_PARAM_BLOCK_AWB_CONFIG;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_awb_config);
+
+ /* Tap the stats after the purple fringe block */
+ block.awb_config->tap_point = MALI_C55_AWB_STATS_TAP_PF;
+
+ /* Get R/G and B/G ratios as statistics */
+ block.awb_config->stats_mode = MALI_C55_AWB_MODE_RGBG;
+
+ /* Default white level */
+ block.awb_config->white_level = 1023;
+
+ /* Default black level */
+ block.awb_config->black_level = 0;
+
+ /*
+	 * By default pixels are included whose colour ratios are bounded in a
+ * region (on a cr ratio x cb ratio graph) defined by four points:
+ * (0.25, 0.25)
+ * (0.25, 1.99609375)
+ * (1.99609375, 1.99609375)
+ * (1.99609375, 0.25)
+ *
+ * The ratios themselves are stored in Q4.8 format.
+ *
+ * \todo should these perhaps be tunable?
+ */
+ block.awb_config->cr_max = 511;
+ block.awb_config->cr_min = 64;
+ block.awb_config->cb_max = 511;
+ block.awb_config->cb_min = 64;
+
+ /* We use the full 15x15 zoning scheme */
+ block.awb_config->nodes_used_horiz = 15;
+ block.awb_config->nodes_used_vert = 15;
+
+ /*
+ * We set the trimming boundaries equivalent to the main boundaries. In
+ * other words; no trimming.
+ */
+ block.awb_config->cr_high = 511;
+ block.awb_config->cr_low = 64;
+ block.awb_config->cb_high = 511;
+ block.awb_config->cb_low = 64;
+
+ return sizeof(struct mali_c55_params_awb_config);
+}
+
+void Awb::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, mali_c55_params_buffer *params)
+{
+ mali_c55_params_block block;
+ block.data = &params->data[params->total_size];
+
+ params->total_size += fillGainsParamBlock(block, context, frameContext);
+
+ if (frame > 0)
+ return;
+
+ block.data = &params->data[params->total_size];
+ params->total_size += fillConfigParamBlock(block);
+}
+
+void Awb::process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, const mali_c55_stats_buffer *stats,
+		  ControlList &metadata)
+{
+ const struct mali_c55_awb_average_ratios *awb_ratios = stats->awb_ratios;
+
+ /*
+ * The ISP produces average R:G and B:G ratios for zones. We take the
+ * average of all the zones with data and simply invert them to provide
+ * gain figures that we can apply to approximate a grey world.
+ */
+ unsigned int counted_zones = 0;
+ double rgSum = 0, bgSum = 0;
+
+ for (unsigned int i = 0; i < 225; i++) {
+ if (!awb_ratios[i].num_pixels)
+ continue;
+
+ /*
+ * The statistics are in Q4.8 format, so we convert to double
+ * here.
+ */
+ rgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_rg_gr);
+ bgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_bg_br);
+ counted_zones++;
+ }
+
+ /*
+ * Sometimes the first frame's statistics have no valid pixels, in which
+ * case we'll just assume a grey world until they say otherwise.
+ */
+ double rgAvg, bgAvg;
+ if (!counted_zones) {
+ rgAvg = 1.0;
+ bgAvg = 1.0;
+ } else {
+ rgAvg = rgSum / counted_zones;
+ bgAvg = bgSum / counted_zones;
+ }
+
+ /*
+ * The statistics are generated _after_ white balancing is performed in
+ * the ISP. To get the true ratio we therefore have to adjust the stats
+ * figure by the gains that were applied when the statistics for this
+ * frame were generated.
+ */
+ double rRatio = rgAvg / frameContext.awb.rGain;
+ double bRatio = bgAvg / frameContext.awb.bGain;
+
+ /*
+ * And then we can simply invert the ratio to find the gain we should
+ * apply.
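+	 *
+	 * For example, an average R/G ratio of 1.25 measured while an rGain of
+	 * 1.0 was applied yields a new rGain of 1 / 1.25 = 0.8.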
+ */
+ double rGain = 1 / rRatio;
+ double bGain = 1 / bRatio;
+
+ /*
+ * Running at full speed, this algorithm results in oscillations in the
+ * colour balance. To remove those we dampen the speed at which it makes
+ * changes in gain, unless we're in the startup phase in which case we
+ * want to fix the miscolouring as quickly as possible.
+ */
+ double speed = frame < kNumStartupFrames ? 1.0 : 0.2;
+ rGain = speed * rGain + context.activeState.awb.rGain * (1.0 - speed);
+ bGain = speed * bGain + context.activeState.awb.bGain * (1.0 - speed);
+
+ context.activeState.awb.rGain = rGain;
+ context.activeState.awb.bGain = bGain;
+
+ metadata.set(controls::ColourGains, {
+ static_cast<float>(frameContext.awb.rGain),
+ static_cast<float>(frameContext.awb.bGain),
+ });
+
+ LOG(MaliC55Awb, Debug) << "For frame number " << frame << ": "
+ << "Average R/G Ratio: " << rgAvg
+ << ", Average B/G Ratio: " << bgAvg
+ << "\nrGain applied to this frame: " << frameContext.awb.rGain
+ << ", bGain applied to this frame: " << frameContext.awb.bGain
+ << "\nrGain to apply: " << context.activeState.awb.rGain
+ << ", bGain to apply: " << context.activeState.awb.bGain;
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/awb.h b/src/ipa/mali-c55/algorithms/awb.h
new file mode 100644
index 00000000..800c2e83
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/awb.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * awb.h - Mali C55 grey world auto white balance algorithm
+ */
+
+#pragma once
+
+#include "algorithm.h"
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class Awb : public Algorithm
+{
+public:
+ Awb();
+ ~Awb() = default;
+
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ size_t fillGainsParamBlock(mali_c55_params_block block,
+ IPAContext &context,
+ IPAFrameContext &frameContext);
+ size_t fillConfigParamBlock(mali_c55_params_block block);
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/blc.cpp b/src/ipa/mali-c55/algorithms/blc.cpp
new file mode 100644
index 00000000..2a54c86a
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/blc.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Mali-C55 sensor offset (black level) correction
+ */
+
+#include "blc.h"
+
+#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+/**
+ * \file blc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+/**
+ * \class BlackLevelCorrection
+ * \brief MaliC55 Black Level Correction control
+ */
+
+LOG_DEFINE_CATEGORY(MaliC55Blc)
+
+BlackLevelCorrection::BlackLevelCorrection()
+ : tuningParameters_(false)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ offset00 = tuningData["offset00"].get<uint32_t>(0);
+ offset01 = tuningData["offset01"].get<uint32_t>(0);
+ offset10 = tuningData["offset10"].get<uint32_t>(0);
+ offset11 = tuningData["offset11"].get<uint32_t>(0);
+
+ if (offset00 > kMaxOffset || offset01 > kMaxOffset ||
+ offset10 > kMaxOffset || offset11 > kMaxOffset) {
+ LOG(MaliC55Blc, Error) << "Invalid black level offsets";
+ return -EINVAL;
+ }
+
+ tuningParameters_ = true;
+
+ LOG(MaliC55Blc, Debug)
+ << "Black levels: 00 " << offset00 << ", 01 " << offset01
+ << ", 10 " << offset10 << ", 11 " << offset11;
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int BlackLevelCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ /*
+ * If no Black Levels were passed in through tuning data then we could
+	 * If no black levels were passed in through tuning data, fall back to
+	 * the value from the CameraSensorHelper, if one is available.
+ if (context.configuration.sensor.blackLevel &&
+ !(offset00 + offset01 + offset10 + offset11)) {
+ offset00 = context.configuration.sensor.blackLevel;
+ offset01 = context.configuration.sensor.blackLevel;
+ offset10 = context.configuration.sensor.blackLevel;
+ offset11 = context.configuration.sensor.blackLevel;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params)
+{
+ mali_c55_params_block block;
+ block.data = &params->data[params->total_size];
+
+ if (frame > 0)
+ return;
+
+ if (!tuningParameters_)
+ return;
+
+ block.header->type = MALI_C55_PARAM_BLOCK_SENSOR_OFFS;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(mali_c55_params_sensor_off_preshading);
+
+ block.sensor_offs->chan00 = offset00;
+ block.sensor_offs->chan01 = offset01;
+ block.sensor_offs->chan10 = offset10;
+ block.sensor_offs->chan11 = offset11;
+
+ params->total_size += block.header->size;
+}
+
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const mali_c55_stats_buffer *stats,
+ ControlList &metadata)
+{
+ /*
+ * Black Level Offsets in tuning data need to be 20-bit, whereas the
+ * metadata expects values from a 16-bit range. Right-shift to remove
+ * the 4 least significant bits.
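+	 *
+	 * For example, the imx415 tuning offset of 51200 is reported as
+	 * 51200 >> 4 = 3200.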
+ *
+ * The black levels should be reported in the order R, Gr, Gb, B. We
+ * ignore that here given we're using matching values so far, but it
+ * would be safer to check the sensor's bayer order.
+ *
+ * \todo Account for bayer order.
+ */
+ metadata.set(controls::SensorBlackLevels, {
+ static_cast<int32_t>(offset00 >> 4),
+ static_cast<int32_t>(offset01 >> 4),
+ static_cast<int32_t>(offset10 >> 4),
+ static_cast<int32_t>(offset11 >> 4),
+ });
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/blc.h b/src/ipa/mali-c55/algorithms/blc.h
new file mode 100644
index 00000000..9696e8e9
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/blc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * Mali-C55 sensor offset (black level) correction
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class BlackLevelCorrection : public Algorithm
+{
+public:
+ BlackLevelCorrection();
+ ~BlackLevelCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const mali_c55_stats_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ static constexpr uint32_t kMaxOffset = 0xfffff;
+
+ bool tuningParameters_;
+ uint32_t offset00;
+ uint32_t offset01;
+ uint32_t offset10;
+ uint32_t offset11;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/lsc.cpp b/src/ipa/mali-c55/algorithms/lsc.cpp
new file mode 100644
index 00000000..c5afc04d
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/lsc.cpp
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * lsc.cpp - Mali-C55 Lens shading correction algorithm
+ */
+
+#include "lsc.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+LOG_DEFINE_CATEGORY(MaliC55Lsc)
+
+int Lsc::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ if (!tuningData.contains("meshScale")) {
+ LOG(MaliC55Lsc, Error) << "meshScale missing from tuningData";
+ return -EINVAL;
+ }
+
+ meshScale_ = tuningData["meshScale"].get<uint32_t>(0);
+
+ const YamlObject &yamlSets = tuningData["sets"];
+ if (!yamlSets.isList()) {
+ LOG(MaliC55Lsc, Error) << "LSC tables missing or invalid";
+ return -EINVAL;
+ }
+
+ size_t tableSize = 0;
+ const auto &sets = yamlSets.asList();
+ for (const auto &yamlSet : sets) {
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (!ct) {
+ LOG(MaliC55Lsc, Error) << "Invalid colour temperature";
+ return -EINVAL;
+ }
+
+ if (std::count(colourTemperatures_.begin(),
+ colourTemperatures_.end(), ct)) {
+ LOG(MaliC55Lsc, Error)
+ << "Multiple sets found for colour temperature";
+ return -EINVAL;
+ }
+
+ std::vector<uint8_t> rTable =
+ yamlSet["r"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ std::vector<uint8_t> gTable =
+ yamlSet["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ std::vector<uint8_t> bTable =
+ yamlSet["b"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+
+ /*
+		 * Some validation is needed; only 16x16 and 32x32 tables of
+ * coefficients are acceptable, and all tables across all of the
+ * sets must be the same size. The first time we encounter a
+ * table we check that it is an acceptable size and if so make
+ * sure all other tables are of equal size.
+ */
+ if (!tableSize) {
+ if (rTable.size() != 256 && rTable.size() != 1024) {
+ LOG(MaliC55Lsc, Error)
+ << "Invalid table size for colour temperature " << ct;
+ return -EINVAL;
+ }
+ tableSize = rTable.size();
+ }
+
+ if (rTable.size() != tableSize ||
+ gTable.size() != tableSize ||
+ bTable.size() != tableSize) {
+ LOG(MaliC55Lsc, Error)
+ << "Invalid or mismatched table size for colour temperature " << ct;
+ return -EINVAL;
+ }
+
+ if (colourTemperatures_.size() >= 3) {
+ LOG(MaliC55Lsc, Error)
+ << "A maximum of 3 colour temperatures are supported";
+ return -EINVAL;
+ }
+
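+		/*
+		 * Each 32-bit mesh node packs one 8-bit coefficient per set:
+		 * the first set occupies bits 0-7, the second bits 8-15 and
+		 * the third bits 16-23.
+		 */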
+ for (unsigned int i = 0; i < tableSize; i++) {
+ mesh_[kRedOffset + i] |=
+ (rTable[i] << (colourTemperatures_.size() * 8));
+ mesh_[kGreenOffset + i] |=
+ (gTable[i] << (colourTemperatures_.size() * 8));
+ mesh_[kBlueOffset + i] |=
+ (bTable[i] << (colourTemperatures_.size() * 8));
+ }
+
+ colourTemperatures_.push_back(ct);
+ }
+
+ /*
+	 * The mesh has either 16x16 or 32x32 nodes; we tell the driver which it
+ * is based on the number of values in the tuning data's table.
+ */
+ if (tableSize == 256)
+ meshSize_ = 15;
+ else
+ meshSize_ = 31;
+
+ return 0;
+}
+
+size_t Lsc::fillConfigParamsBlock(mali_c55_params_block block) const
+{
+ block.header->type = MALI_C55_PARAM_MESH_SHADING_CONFIG;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_mesh_shading_config);
+
+ block.shading_config->mesh_show = false;
+ block.shading_config->mesh_scale = meshScale_;
+ block.shading_config->mesh_page_r = 0;
+ block.shading_config->mesh_page_g = 1;
+ block.shading_config->mesh_page_b = 2;
+ block.shading_config->mesh_width = meshSize_;
+ block.shading_config->mesh_height = meshSize_;
+
+ std::copy(mesh_.begin(), mesh_.end(), block.shading_config->mesh);
+
+ return block.header->size;
+}
+
+size_t Lsc::fillSelectionParamsBlock(mali_c55_params_block block, uint8_t bank,
+ uint8_t alpha) const
+{
+ block.header->type = MALI_C55_PARAM_MESH_SHADING_SELECTION;
+ block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE;
+ block.header->size = sizeof(struct mali_c55_params_mesh_shading_selection);
+
+ block.shading_selection->mesh_alpha_bank_r = bank;
+ block.shading_selection->mesh_alpha_bank_g = bank;
+ block.shading_selection->mesh_alpha_bank_b = bank;
+ block.shading_selection->mesh_alpha_r = alpha;
+ block.shading_selection->mesh_alpha_g = alpha;
+ block.shading_selection->mesh_alpha_b = alpha;
+ block.shading_selection->mesh_strength = 0x1000; /* Otherwise known as 1.0 */
+
+ return block.header->size;
+}
+
+std::tuple<uint8_t, uint8_t> Lsc::findBankAndAlpha(uint32_t ct) const
+{
+ unsigned int i;
+
+ ct = std::clamp<uint32_t>(ct, colourTemperatures_.front(),
+ colourTemperatures_.back());
+
+ for (i = 0; i < colourTemperatures_.size() - 1; i++) {
+ if (ct >= colourTemperatures_[i] &&
+ ct <= colourTemperatures_[i + 1])
+ break;
+ }
+
+ /*
+	 * With the clamping, the loop is guaranteed to break with
+	 * i <= colourTemperatures_.size() - 2, so i + 1 is always a valid
+	 * index.
+ */
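+	/*
+	 * For example, with tables at 2500K and 5500K, a colour temperature of
+	 * 4000K selects bank 0 with alpha = (255 * 1500) / 3000 = 127.
+	 */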
+ uint8_t alpha = (255 * (ct - colourTemperatures_[i])) /
+ (colourTemperatures_[i + 1] - colourTemperatures_[i]);
+
+ return { i, alpha };
+}
+
+void Lsc::prepare(IPAContext &context, const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params)
+{
+ /*
+	 * For each frame we assess the colour temperature of the **last** frame
+	 * and select an appropriately blended table of coefficients based on
+	 * that estimate. As a shortcut, if we have only a single table the
+	 * handling is simpler: on the first frame we just select that table,
+	 * and on subsequent frames we do nothing at all, as the configuration
+	 * will never change.
+ */
+ uint32_t temperatureK = context.activeState.agc.temperatureK;
+ uint8_t bank, alpha;
+
+ if (colourTemperatures_.size() == 1) {
+ if (frame > 0)
+ return;
+
+ bank = 0;
+ alpha = 0;
+ } else {
+ std::tie(bank, alpha) = findBankAndAlpha(temperatureK);
+ }
+
+ mali_c55_params_block block;
+ block.data = &params->data[params->total_size];
+
+ params->total_size += fillSelectionParamsBlock(block, bank, alpha);
+
+ if (frame > 0)
+ return;
+
+ /*
+ * If this is the first frame, we need to load the parsed coefficient
+ * tables from tuning data to the ISP.
+ */
+ block.data = &params->data[params->total_size];
+ params->total_size += fillConfigParamsBlock(block);
+}
+
+REGISTER_IPA_ALGORITHM(Lsc, "Lsc")
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/lsc.h b/src/ipa/mali-c55/algorithms/lsc.h
new file mode 100644
index 00000000..e613277a
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/lsc.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board Oy
+ *
+ * lsc.h - Mali-C55 Lens shading correction algorithm
+ */
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55::algorithms {
+
+class Lsc : public Algorithm
+{
+public:
+ Lsc() = default;
+ ~Lsc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ mali_c55_params_buffer *params) override;
+private:
+ static constexpr unsigned int kRedOffset = 0;
+ static constexpr unsigned int kGreenOffset = 1024;
+ static constexpr unsigned int kBlueOffset = 2048;
+
+ size_t fillConfigParamsBlock(mali_c55_params_block block) const;
+ size_t fillSelectionParamsBlock(mali_c55_params_block block,
+ uint8_t bank, uint8_t alpha) const;
+ std::tuple<uint8_t, uint8_t> findBankAndAlpha(uint32_t ct) const;
+
+ std::vector<uint32_t> mesh_ = std::vector<uint32_t>(3072);
+ std::vector<uint32_t> colourTemperatures_;
+ uint32_t meshScale_;
+ uint32_t meshSize_;
+};
+
+} /* namespace ipa::mali_c55::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/mali-c55/algorithms/meson.build b/src/ipa/mali-c55/algorithms/meson.build
new file mode 100644
index 00000000..1665da07
--- /dev/null
+++ b/src/ipa/mali-c55/algorithms/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+mali_c55_ipa_algorithms = files([
+ 'agc.cpp',
+ 'awb.cpp',
+ 'blc.cpp',
+ 'lsc.cpp',
+])
diff --git a/src/ipa/mali-c55/data/imx415.yaml b/src/ipa/mali-c55/data/imx415.yaml
new file mode 100644
index 00000000..126b427a
--- /dev/null
+++ b/src/ipa/mali-c55/data/imx415.yaml
@@ -0,0 +1,325 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ offset00: 51200
+ offset01: 51200
+ offset10: 51200
+ offset11: 51200
+ - Lsc:
+ meshScale: 4 # 1.0 - 2.0 Gain
+ sets:
+ - ct: 2500
+ r: [
+ 21, 20, 19, 17, 15, 14, 12, 11, 9, 9, 9, 9, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 13, 13, 10, 10, 13, 16, 17, 18, 21, 22,
+ 21, 20, 18, 16, 14, 13, 12, 11, 10, 9, 9, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 12, 15, 17, 18, 21, 21,
+ 20, 19, 17, 16, 14, 13, 12, 11, 10, 9, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 8, 8, 8, 11, 15, 17, 18, 21, 21,
+ 19, 19, 17, 15, 14, 13, 12, 11, 10, 8, 8, 7, 7, 7, 6, 6, 7, 7, 8, 8, 8, 8, 8, 7, 7, 8, 10, 14, 17, 18, 20, 22,
+ 19, 18, 17, 15, 14, 13, 11, 11, 9, 8, 8, 7, 7, 6, 5, 5, 5, 5, 6, 7, 8, 7, 7, 6, 7, 7, 10, 12, 16, 18, 20, 22,
+ 18, 18, 16, 15, 14, 12, 11, 10, 9, 8, 6, 6, 5, 5, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 8, 12, 16, 18, 19, 20,
+ 18, 18, 16, 14, 13, 12, 11, 9, 9, 7, 6, 5, 5, 5, 4, 4, 4, 5, 5, 4, 4, 5, 5, 5, 5, 6, 8, 11, 15, 18, 18, 19,
+ 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 7, 9, 14, 17, 18, 18,
+ 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 8, 12, 17, 18, 18,
+ 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 3, 3, 4, 4, 4, 5, 6, 8, 12, 16, 19, 19,
+ 17, 16, 15, 13, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 5, 6, 9, 12, 16, 19, 20,
+ 17, 15, 15, 13, 12, 11, 10, 8, 6, 6, 5, 4, 3, 3, 3, 2, 3, 3, 4, 4, 3, 3, 2, 3, 4, 5, 6, 9, 11, 16, 19, 20,
+ 17, 15, 15, 14, 11, 11, 10, 8, 6, 5, 5, 4, 3, 3, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 4, 5, 6, 8, 11, 16, 18, 19,
+ 16, 16, 15, 13, 11, 11, 10, 7, 6, 5, 4, 4, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 11, 14, 17, 18,
+ 16, 16, 14, 13, 11, 10, 9, 7, 6, 5, 4, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 10, 14, 17, 18,
+ 16, 15, 14, 13, 13, 10, 9, 7, 6, 4, 4, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 8, 11, 17, 18, 19,
+ 16, 15, 14, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 6, 8, 12, 17, 19, 20,
+ 17, 15, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 6, 8, 13, 16, 19, 21,
+ 17, 16, 15, 13, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 7, 8, 13, 16, 19, 20,
+ 17, 16, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20,
+ 18, 16, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 8, 9, 13, 17, 20, 20,
+ 18, 16, 16, 15, 14, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20,
+ 18, 17, 16, 15, 14, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 9, 12, 15, 19, 20, 20,
+ 18, 18, 17, 16, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 7, 9, 12, 15, 17, 19, 20, 20,
+ 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 8, 7, 6, 5, 4, 2, 1, 2, 3, 4, 5, 5, 6, 6, 8, 10, 13, 16, 18, 20, 20, 21,
+ 19, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 6, 7, 10, 11, 14, 17, 19, 20, 21, 22,
+ 20, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 6, 6, 7, 10, 12, 14, 18, 20, 21, 22, 23,
+ 21, 20, 19, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 5, 4, 4, 5, 6, 6, 7, 8, 11, 13, 16, 19, 21, 22, 22, 22,
+ 22, 21, 20, 19, 18, 17, 16, 14, 13, 12, 12, 10, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 9, 12, 14, 18, 20, 21, 22, 22, 22,
+ 23, 22, 21, 20, 19, 17, 16, 15, 14, 14, 13, 12, 10, 9, 8, 7, 6, 5, 5, 6, 7, 8, 8, 10, 12, 15, 18, 20, 21, 22, 22, 22,
+ 24, 23, 22, 21, 20, 18, 17, 16, 15, 15, 14, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 11, 14, 17, 19, 20, 21, 21, 21, 21,
+ 24, 24, 23, 21, 20, 19, 17, 16, 15, 15, 15, 14, 14, 14, 11, 9, 6, 5, 5, 6, 8, 8, 10, 12, 15, 17, 20, 20, 21, 21, 21, 21,
+ ]
+ g: [
+ 19, 18, 17, 15, 13, 12, 10, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 9, 9, 11, 15, 15, 16, 19, 20,
+ 19, 18, 16, 15, 12, 12, 10, 10, 8, 7, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 7, 7, 11, 14, 15, 16, 19, 19,
+ 18, 17, 16, 14, 12, 12, 10, 10, 8, 7, 7, 6, 6, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 10, 14, 15, 17, 19, 19,
+ 17, 17, 16, 14, 12, 12, 10, 10, 8, 7, 6, 6, 6, 5, 5, 4, 5, 5, 6, 6, 6, 7, 7, 5, 6, 6, 9, 13, 15, 17, 18, 20,
+ 17, 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 6, 8, 11, 15, 17, 18, 20,
+ 17, 17, 15, 13, 12, 11, 9, 9, 8, 7, 5, 5, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 11, 15, 17, 18, 18,
+ 17, 16, 15, 13, 12, 11, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 9, 14, 17, 17, 18,
+ 17, 16, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 6, 8, 13, 16, 17, 17,
+ 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 12, 16, 17, 17,
+ 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 3, 3, 3, 4, 5, 7, 11, 15, 18, 18,
+ 16, 14, 13, 12, 11, 10, 9, 7, 7, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 4, 5, 8, 11, 15, 18, 19,
+ 16, 14, 13, 12, 11, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 4, 5, 8, 10, 15, 18, 19,
+ 16, 14, 14, 13, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 5, 7, 10, 15, 17, 18,
+ 16, 15, 14, 12, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 6, 10, 14, 17, 17,
+ 15, 15, 13, 12, 11, 10, 9, 7, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 7, 10, 14, 17, 18,
+ 15, 14, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 10, 17, 18, 18,
+ 15, 14, 14, 13, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 17, 19, 19,
+ 16, 14, 14, 13, 12, 12, 9, 7, 6, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4, 4, 5, 8, 12, 17, 19, 20,
+ 16, 15, 14, 13, 12, 12, 9, 7, 7, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 8, 12, 17, 19, 20,
+ 17, 15, 14, 13, 12, 12, 9, 7, 7, 5, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 8, 12, 17, 19, 20,
+ 18, 15, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20,
+ 18, 16, 15, 14, 13, 12, 9, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20,
+ 18, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 9, 12, 15, 19, 20, 20,
+ 18, 18, 16, 16, 14, 13, 10, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 6, 8, 11, 15, 17, 19, 20, 20,
+ 18, 18, 16, 16, 14, 13, 12, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 12, 15, 18, 20, 20, 20,
+ 18, 18, 17, 16, 15, 14, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 5, 6, 9, 11, 14, 16, 19, 20, 20, 21,
+ 19, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 5, 4, 4, 3, 3, 3, 5, 5, 6, 7, 10, 12, 14, 18, 20, 20, 21, 22,
+ 21, 20, 18, 18, 17, 16, 14, 12, 11, 11, 10, 10, 8, 7, 7, 5, 4, 4, 4, 5, 6, 6, 6, 7, 11, 13, 16, 19, 20, 21, 21, 22,
+ 22, 21, 20, 19, 18, 16, 15, 14, 12, 12, 12, 10, 9, 8, 7, 6, 5, 4, 5, 6, 6, 7, 7, 8, 12, 13, 17, 19, 21, 21, 21, 21,
+ 23, 22, 21, 20, 18, 17, 16, 16, 14, 14, 13, 12, 10, 10, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 21, 21, 21, 20,
+ 23, 22, 22, 21, 20, 18, 17, 16, 15, 15, 15, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 10, 13, 16, 19, 20, 20, 21, 21, 20,
+ 24, 23, 22, 21, 20, 19, 17, 17, 16, 16, 15, 15, 14, 14, 11, 9, 6, 6, 6, 6, 8, 8, 10, 12, 15, 17, 19, 20, 20, 21, 21, 20,
+ ]
+ b: [
+ 11, 9, 9, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 5, 5, 5, 4, 5, 4, 4, 4, 7, 7, 3, 3, 5, 8, 8, 9, 11, 11,
+ 11, 10, 8, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 4, 5, 4, 4, 4, 4, 4, 2, 2, 5, 8, 8, 9, 11, 11,
+ 10, 10, 7, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 3, 1, 1, 5, 7, 9, 9, 11, 11,
+ 10, 9, 8, 7, 6, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 3, 2, 1, 1, 4, 7, 9, 10, 12, 13,
+ 9, 9, 8, 7, 6, 5, 5, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 4, 3, 2, 1, 1, 1, 4, 6, 9, 10, 12, 13,
+ 9, 9, 9, 7, 6, 6, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 1, 1, 1, 2, 6, 9, 10, 11, 12,
+ 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 1, 2, 5, 9, 11, 11, 11,
+ 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 8, 11, 11, 11,
+ 9, 9, 8, 7, 7, 7, 6, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 7, 11, 11, 11,
+ 9, 9, 8, 7, 7, 7, 6, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 0, 0, 0, 0, 0, 1, 3, 7, 10, 12, 13,
+ 9, 9, 8, 8, 7, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 2, 2, 3, 2, 1, 0, 0, 0, 0, 0, 0, 1, 4, 7, 10, 13, 13,
+ 9, 8, 8, 8, 7, 7, 6, 5, 3, 3, 2, 1, 1, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 7, 11, 13, 13,
+ 9, 8, 8, 7, 7, 6, 6, 5, 3, 3, 2, 2, 2, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 6, 11, 13, 13,
+ 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 2, 1, 1, 1, 1, 2, 3, 3, 3, 1, 1, 0, 0, 1, 1, 2, 3, 6, 10, 12, 13,
+ 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 1, 1, 1, 2, 2, 4, 6, 10, 13, 13,
+ 9, 9, 8, 8, 8, 6, 6, 4, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 3, 3, 1, 1, 1, 1, 2, 2, 3, 4, 6, 13, 14, 14,
+ 9, 9, 8, 8, 8, 8, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 3, 5, 9, 13, 15, 15,
+ 10, 9, 9, 9, 10, 10, 6, 6, 5, 3, 2, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16,
+ 10, 10, 9, 9, 10, 10, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16,
+ 11, 10, 9, 9, 10, 10, 7, 6, 6, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 6, 9, 13, 15, 16,
+ 12, 10, 10, 10, 10, 10, 7, 6, 6, 4, 3, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5, 5, 6, 7, 10, 14, 15, 16,
+ 12, 11, 10, 10, 10, 10, 8, 7, 6, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 8, 11, 15, 16, 16,
+ 12, 12, 11, 11, 10, 10, 9, 8, 7, 6, 6, 4, 4, 3, 2, 2, 2, 2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 10, 13, 15, 16, 15,
+ 12, 12, 12, 12, 11, 10, 10, 8, 8, 7, 7, 6, 5, 5, 3, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 7, 10, 13, 14, 16, 16, 16,
+ 12, 12, 13, 12, 12, 11, 11, 9, 8, 8, 8, 7, 6, 6, 5, 4, 3, 3, 3, 5, 6, 6, 6, 6, 7, 9, 11, 14, 15, 17, 17, 16,
+ 13, 13, 13, 13, 12, 12, 11, 11, 10, 10, 9, 8, 7, 7, 6, 5, 5, 4, 4, 4, 5, 6, 6, 6, 9, 10, 12, 14, 16, 17, 17, 18,
+ 13, 13, 14, 13, 13, 13, 12, 12, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 6, 6, 6, 7, 9, 11, 12, 16, 17, 17, 17, 18,
+ 15, 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 5, 6, 7, 7, 8, 10, 12, 14, 17, 17, 18, 17, 17,
+ 16, 16, 16, 16, 15, 14, 14, 14, 13, 13, 12, 11, 11, 9, 8, 7, 7, 6, 6, 6, 7, 7, 8, 8, 10, 12, 16, 17, 18, 18, 17, 17,
+ 18, 17, 17, 16, 16, 15, 14, 14, 14, 14, 14, 13, 11, 11, 9, 8, 7, 7, 6, 6, 7, 7, 8, 9, 10, 13, 16, 17, 17, 18, 17, 16,
+ 18, 17, 17, 17, 16, 15, 15, 15, 15, 15, 15, 14, 14, 12, 10, 9, 7, 7, 6, 6, 7, 7, 8, 9, 11, 14, 16, 16, 17, 17, 16, 15,
+ 18, 18, 17, 17, 17, 16, 15, 15, 15, 16, 15, 15, 14, 14, 12, 9, 7, 6, 6, 6, 7, 7, 9, 10, 12, 14, 15, 16, 16, 16, 15, 15,
+ ]
+ - ct: 5500
+ r: [
+ 19, 18, 17, 16, 15, 13, 11, 10, 9, 9, 9, 8, 8, 8, 8, 8, 8, 9, 10, 10, 8, 8, 9, 9, 9, 11, 14, 15, 16, 16, 18, 18,
+ 18, 18, 17, 15, 14, 13, 11, 11, 9, 9, 8, 8, 7, 7, 7, 7, 7, 8, 9, 8, 8, 8, 9, 9, 8, 8, 11, 14, 16, 17, 17, 18,
+ 18, 17, 17, 15, 14, 13, 12, 11, 9, 9, 8, 7, 7, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 16, 17, 18, 18,
+ 17, 17, 16, 15, 14, 12, 11, 11, 9, 8, 7, 7, 6, 5, 5, 5, 5, 6, 8, 7, 8, 8, 8, 6, 6, 6, 9, 12, 16, 16, 18, 19,
+ 17, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 7, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 6, 6, 8, 11, 15, 16, 17, 19,
+ 18, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 6, 5, 5, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 10, 14, 16, 17, 18,
+ 18, 17, 16, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 4, 5, 5, 5, 5, 5, 6, 10, 13, 16, 16, 17,
+ 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 16, 16, 16,
+ 17, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 11, 16, 16, 16,
+ 17, 16, 15, 12, 12, 12, 10, 8, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 5, 7, 10, 16, 16, 16,
+ 16, 16, 14, 12, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 4, 3, 3, 2, 2, 2, 3, 3, 4, 4, 7, 10, 14, 16, 16,
+ 16, 15, 14, 13, 12, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 4, 3, 3, 3, 2, 2, 3, 3, 3, 5, 7, 10, 14, 15, 16,
+ 16, 15, 15, 13, 11, 11, 11, 9, 7, 5, 5, 4, 3, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 9, 14, 15, 16,
+ 16, 15, 15, 13, 11, 11, 11, 10, 7, 5, 4, 4, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 10, 13, 15, 17,
+ 15, 15, 14, 12, 11, 11, 11, 10, 7, 4, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 3, 3, 3, 5, 6, 9, 13, 16, 17,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 5, 7, 10, 15, 17, 17,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 16, 17, 18,
+ 16, 15, 15, 12, 12, 11, 10, 9, 6, 4, 4, 3, 1, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 7, 12, 15, 17, 18,
+ 16, 16, 15, 12, 12, 11, 10, 8, 6, 5, 4, 3, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 8, 12, 15, 17, 18,
+ 15, 15, 15, 13, 12, 12, 10, 8, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 7, 9, 12, 16, 17, 18,
+ 15, 15, 15, 13, 13, 12, 10, 8, 7, 5, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 7, 9, 13, 16, 18, 18,
+ 15, 15, 15, 14, 13, 12, 10, 8, 7, 6, 5, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 3, 5, 5, 6, 8, 10, 14, 17, 18, 19,
+ 16, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 1, 2, 3, 3, 5, 5, 6, 7, 9, 12, 15, 18, 18, 19,
+ 17, 16, 16, 16, 13, 12, 11, 10, 9, 8, 7, 7, 6, 5, 4, 2, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 16, 18, 19, 19,
+ 18, 18, 17, 16, 14, 13, 12, 10, 9, 8, 8, 7, 7, 5, 5, 3, 2, 2, 4, 4, 5, 5, 5, 5, 7, 10, 12, 16, 17, 19, 19, 20,
+ 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 9, 9, 7, 6, 5, 4, 3, 3, 4, 4, 5, 5, 5, 6, 7, 11, 15, 17, 18, 19, 19, 20,
+ 19, 18, 18, 17, 16, 14, 13, 11, 10, 10, 9, 9, 8, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 7, 9, 11, 15, 17, 19, 19, 20, 21,
+ 20, 19, 19, 19, 17, 16, 13, 12, 11, 11, 10, 9, 9, 7, 6, 5, 5, 4, 4, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 19, 20, 20,
+ 21, 20, 20, 19, 19, 16, 16, 13, 12, 12, 11, 11, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 8, 10, 13, 17, 19, 19, 20, 20, 20,
+ 22, 21, 20, 20, 19, 17, 16, 14, 13, 13, 14, 12, 11, 9, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 11, 15, 17, 19, 19, 20, 20, 20,
+ 22, 22, 21, 20, 19, 18, 16, 15, 14, 14, 15, 15, 13, 11, 9, 8, 7, 5, 5, 6, 7, 8, 9, 10, 13, 16, 18, 18, 19, 20, 19, 19,
+ 22, 22, 21, 20, 19, 19, 16, 16, 15, 15, 15, 15, 15, 13, 10, 9, 7, 5, 5, 6, 8, 8, 10, 11, 14, 16, 18, 18, 19, 19, 19, 19,
+ ]
+ g: [
+ 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 7, 8, 8, 6, 6, 7, 7, 7, 9, 12, 13, 13, 14, 14, 14,
+ 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 7, 7, 6, 6, 7, 7, 6, 6, 10, 12, 13, 14, 14, 14,
+ 16, 15, 15, 13, 13, 11, 10, 9, 8, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 5, 8, 12, 14, 15, 15, 16,
+ 15, 15, 14, 13, 12, 11, 10, 10, 8, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 5, 5, 5, 7, 11, 14, 15, 15, 17,
+ 16, 15, 14, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 4, 3, 3, 4, 4, 6, 6, 5, 5, 4, 4, 5, 6, 10, 14, 14, 16, 16,
+ 16, 15, 15, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 9, 13, 14, 15, 16,
+ 16, 15, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 15, 15,
+ 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 6, 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15,
+ 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 10, 14, 15, 14,
+ 16, 15, 14, 12, 11, 11, 9, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 6, 10, 15, 15, 15,
+ 15, 15, 13, 12, 11, 11, 10, 8, 7, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 6, 9, 13, 15, 15,
+ 15, 14, 13, 12, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 4, 6, 9, 13, 14, 15,
+ 15, 14, 14, 13, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15,
+ 15, 14, 14, 13, 11, 11, 11, 10, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15,
+ 15, 14, 13, 12, 11, 11, 10, 9, 7, 4, 4, 3, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 5, 8, 13, 15, 16,
+ 15, 15, 13, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 6, 8, 15, 16, 16,
+ 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 6, 11, 15, 16, 17,
+ 15, 15, 15, 12, 12, 11, 10, 10, 7, 4, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 6, 11, 15, 17, 17,
+ 15, 15, 15, 12, 12, 12, 10, 9, 7, 5, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 15, 17, 17,
+ 15, 15, 15, 13, 12, 12, 10, 9, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 6, 8, 12, 15, 17, 17,
+ 15, 15, 15, 13, 13, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 6, 9, 13, 16, 17, 18,
+ 15, 15, 15, 14, 13, 13, 10, 9, 8, 6, 6, 5, 3, 2, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 10, 14, 17, 18, 18,
+ 15, 16, 16, 15, 13, 13, 11, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 12, 15, 17, 18, 18,
+ 16, 16, 16, 16, 13, 13, 11, 10, 9, 8, 7, 7, 6, 6, 4, 2, 2, 2, 2, 3, 5, 5, 5, 5, 6, 8, 11, 14, 16, 18, 18, 18,
+ 17, 17, 17, 16, 14, 13, 13, 11, 10, 9, 8, 8, 7, 6, 5, 4, 2, 2, 4, 5, 5, 5, 5, 5, 7, 9, 12, 15, 17, 18, 18, 18,
+ 18, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 3, 4, 4, 5, 5, 5, 6, 7, 10, 15, 16, 18, 18, 18, 19,
+ 19, 18, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 4, 4, 6, 6, 6, 6, 8, 10, 15, 16, 18, 18, 18, 19,
+ 20, 19, 19, 19, 17, 16, 14, 13, 12, 11, 11, 10, 9, 7, 7, 6, 5, 4, 4, 5, 6, 6, 6, 7, 9, 11, 15, 17, 18, 18, 18, 18,
+ 22, 20, 20, 20, 19, 17, 16, 14, 13, 13, 12, 11, 10, 9, 7, 6, 6, 5, 5, 6, 7, 7, 7, 8, 9, 12, 16, 18, 18, 19, 18, 18,
+ 22, 22, 21, 20, 19, 18, 17, 16, 14, 14, 15, 13, 11, 10, 9, 8, 7, 6, 5, 6, 7, 8, 8, 9, 10, 14, 17, 18, 18, 19, 18, 17,
+ 22, 22, 22, 21, 20, 19, 17, 17, 16, 16, 16, 16, 14, 12, 10, 8, 7, 6, 6, 7, 8, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 17,
+ 22, 22, 22, 21, 20, 20, 18, 17, 16, 16, 17, 17, 16, 14, 11, 10, 8, 6, 6, 7, 8, 8, 10, 11, 14, 17, 18, 18, 18, 18, 18, 17,
+ ]
+ b: [
+ 13, 12, 12, 12, 11, 9, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 6, 7, 7, 6, 6, 5, 5, 5, 6, 9, 9, 9, 10, 9, 8,
+ 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 5, 5, 4, 4, 7, 9, 9, 10, 10, 9,
+ 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 3, 3, 6, 9, 10, 11, 10, 11,
+ 13, 13, 12, 11, 11, 10, 9, 9, 7, 7, 6, 5, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 4, 3, 3, 5, 8, 10, 11, 11, 12,
+ 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 5, 5, 5, 4, 3, 2, 3, 4, 8, 11, 11, 11, 12,
+ 13, 13, 13, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 12,
+ 14, 14, 13, 11, 11, 10, 9, 8, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11,
+ 14, 14, 13, 11, 11, 10, 9, 8, 7, 7, 6, 5, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 11, 11, 11,
+ 14, 14, 13, 12, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 11,
+ 14, 13, 13, 12, 12, 11, 10, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13,
+ 13, 13, 13, 12, 12, 12, 11, 9, 7, 6, 4, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13,
+ 14, 13, 13, 12, 12, 11, 11, 9, 7, 6, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 13, 13,
+ 14, 13, 13, 13, 12, 11, 11, 9, 7, 6, 5, 4, 2, 1, 1, 1, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 7, 12, 13, 13,
+ 14, 13, 13, 13, 12, 12, 12, 11, 7, 5, 5, 4, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 13,
+ 14, 14, 13, 13, 12, 12, 11, 11, 7, 5, 4, 3, 1, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 3, 3, 3, 4, 4, 7, 12, 13, 14,
+ 14, 14, 13, 13, 12, 12, 11, 11, 8, 5, 4, 3, 1, 1, 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 8, 13, 15, 15,
+ 14, 15, 14, 13, 13, 12, 11, 11, 8, 5, 4, 3, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 10, 15, 15, 16,
+ 15, 15, 15, 14, 13, 13, 12, 11, 8, 5, 4, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 6, 11, 15, 16, 16,
+ 15, 15, 15, 14, 14, 14, 12, 11, 8, 6, 5, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 7, 11, 15, 16, 16,
+ 15, 15, 15, 15, 14, 14, 12, 11, 9, 7, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 15, 16, 16,
+ 15, 16, 15, 15, 15, 14, 13, 11, 9, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 7, 9, 12, 16, 16, 16,
+ 15, 16, 16, 15, 15, 15, 13, 11, 10, 8, 7, 6, 4, 3, 2, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 8, 10, 14, 16, 16, 16,
+ 16, 16, 17, 16, 15, 15, 14, 12, 11, 9, 8, 8, 6, 5, 3, 2, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 9, 12, 15, 16, 16, 16,
+ 16, 17, 18, 17, 16, 15, 14, 13, 11, 10, 9, 9, 8, 6, 5, 3, 3, 3, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 16, 16, 16,
+ 17, 18, 18, 18, 17, 16, 16, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 5, 6, 6, 6, 6, 6, 8, 10, 12, 16, 17, 17, 17, 16,
+ 18, 18, 18, 18, 18, 17, 16, 14, 13, 12, 11, 11, 8, 8, 6, 6, 5, 4, 5, 5, 6, 6, 6, 7, 8, 11, 15, 16, 17, 17, 16, 16,
+ 18, 19, 19, 19, 19, 17, 17, 15, 14, 13, 12, 11, 11, 8, 7, 6, 6, 5, 5, 5, 7, 7, 7, 8, 9, 11, 15, 17, 17, 17, 16, 16,
+ 20, 20, 20, 20, 19, 19, 17, 17, 15, 14, 14, 12, 11, 9, 8, 7, 7, 6, 6, 6, 8, 8, 8, 8, 9, 12, 15, 18, 18, 16, 16, 16,
+ 22, 22, 22, 22, 21, 20, 19, 18, 17, 16, 15, 14, 12, 11, 10, 8, 8, 7, 7, 7, 8, 8, 8, 9, 10, 13, 17, 18, 18, 16, 16, 15,
+ 23, 22, 22, 22, 22, 21, 20, 20, 18, 18, 19, 16, 14, 13, 11, 10, 9, 8, 7, 7, 8, 9, 9, 10, 11, 15, 17, 18, 17, 17, 16, 14,
+ 23, 23, 23, 23, 23, 22, 21, 21, 20, 20, 20, 19, 18, 15, 12, 11, 10, 8, 8, 8, 9, 9, 10, 11, 13, 17, 17, 17, 17, 16, 15, 13,
+ 23, 23, 24, 24, 23, 23, 22, 21, 21, 21, 20, 20, 19, 17, 14, 12, 11, 9, 8, 9, 9, 10, 10, 12, 15, 17, 17, 17, 16, 16, 15, 13,
+ ]
+ - ct: 8500
+ r: [
+ 18, 17, 16, 15, 13, 12, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 12, 12, 8, 8, 10, 13, 14, 15, 17, 18,
+ 17, 17, 16, 14, 12, 11, 10, 10, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 14, 15, 17, 17,
+ 17, 16, 15, 13, 12, 11, 10, 10, 8, 8, 7, 7, 7, 6, 7, 7, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 9, 12, 14, 15, 17, 17,
+ 16, 16, 15, 13, 12, 11, 10, 10, 9, 7, 7, 7, 6, 6, 5, 5, 6, 6, 7, 7, 7, 7, 7, 5, 5, 5, 8, 11, 14, 15, 17, 19,
+ 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 5, 7, 10, 13, 15, 17, 19,
+ 15, 15, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 5, 5, 5, 4, 4, 4, 4, 4, 4, 5, 6, 9, 13, 15, 16, 17,
+ 15, 15, 13, 12, 11, 11, 9, 8, 8, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 5, 8, 13, 15, 15, 16,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 4, 3, 3, 3, 2, 3, 3, 3, 3, 4, 6, 10, 14, 15, 15,
+ 15, 13, 13, 11, 11, 10, 9, 8, 7, 6, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 10, 13, 16, 16,
+ 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 9, 13, 16, 17,
+ 14, 13, 12, 11, 10, 10, 9, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 3, 4, 6, 9, 13, 16, 17,
+ 14, 13, 12, 12, 10, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 13, 15, 17,
+ 14, 13, 12, 11, 10, 9, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 12, 15, 16,
+ 13, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 4, 6, 8, 12, 15, 16,
+ 13, 13, 12, 11, 11, 9, 8, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 3, 3, 4, 6, 9, 15, 16, 16,
+ 13, 13, 12, 12, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 10, 15, 17, 18,
+ 14, 13, 13, 12, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 11, 15, 17, 18,
+ 14, 13, 13, 12, 12, 11, 9, 7, 7, 4, 3, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 4, 4, 5, 7, 11, 15, 17, 18,
+ 15, 13, 13, 12, 12, 11, 8, 7, 6, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 7, 11, 15, 17, 18,
+ 15, 14, 13, 13, 12, 11, 8, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 3, 5, 5, 7, 8, 11, 15, 17, 18,
+ 15, 14, 14, 13, 12, 11, 9, 8, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 2, 3, 3, 3, 5, 5, 6, 8, 9, 13, 16, 18, 18,
+ 15, 14, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 17, 18, 18,
+ 16, 16, 15, 14, 13, 12, 10, 9, 8, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 10, 13, 15, 17, 18, 18,
+ 16, 16, 15, 14, 14, 12, 11, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 1, 2, 4, 4, 5, 5, 5, 7, 9, 11, 14, 16, 18, 18, 19,
+ 16, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 4, 5, 5, 6, 9, 10, 13, 15, 18, 18, 19, 19,
+ 17, 17, 16, 15, 15, 14, 12, 11, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 5, 6, 6, 9, 11, 13, 17, 18, 19, 19, 20,
+ 19, 18, 17, 17, 15, 15, 13, 12, 11, 11, 10, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 12, 15, 18, 19, 19, 19, 20,
+ 19, 19, 18, 17, 17, 16, 14, 13, 12, 12, 11, 10, 9, 8, 7, 6, 6, 5, 5, 6, 6, 6, 7, 8, 11, 12, 16, 18, 19, 19, 19, 19,
+ 20, 19, 19, 18, 17, 16, 15, 14, 13, 13, 12, 12, 10, 9, 8, 7, 6, 5, 5, 6, 6, 7, 8, 9, 11, 14, 17, 18, 19, 19, 19, 19,
+ 20, 20, 19, 18, 18, 17, 15, 15, 14, 14, 14, 13, 12, 11, 9, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 17, 18, 18, 19, 18, 18,
+ 21, 20, 20, 19, 18, 18, 15, 15, 14, 14, 14, 14, 13, 13, 11, 9, 6, 5, 5, 6, 7, 7, 9, 10, 13, 15, 18, 18, 18, 18, 18, 18,
+ ]
+ g: [
+ 16, 16, 15, 13, 12, 10, 9, 8, 7, 7, 7, 6, 6, 6, 7, 7, 7, 6, 6, 6, 6, 6, 11, 11, 6, 6, 9, 12, 12, 13, 15, 15,
+ 16, 15, 14, 13, 11, 10, 9, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 9, 12, 12, 13, 15, 15,
+ 15, 15, 14, 12, 11, 11, 9, 9, 8, 7, 6, 6, 6, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 4, 5, 8, 11, 13, 14, 15, 15,
+ 15, 15, 14, 12, 11, 10, 9, 9, 8, 6, 6, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 6, 6, 4, 4, 4, 7, 11, 13, 14, 15, 17,
+ 15, 15, 13, 12, 11, 10, 9, 9, 8, 6, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 4, 4, 3, 4, 6, 9, 13, 14, 15, 17,
+ 15, 14, 13, 12, 11, 10, 9, 8, 8, 6, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 14, 15,
+ 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 4, 7, 12, 14, 14, 14,
+ 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 14, 14, 14,
+ 14, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 5, 10, 14, 14, 14,
+ 15, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 5, 10, 13, 15, 16,
+ 14, 13, 12, 11, 11, 10, 9, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 2, 2, 2, 2, 3, 6, 9, 13, 16, 16,
+ 14, 13, 12, 11, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 4, 6, 8, 13, 16, 16,
+ 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 4, 5, 8, 13, 15, 16,
+ 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 5, 8, 12, 14, 15,
+ 14, 13, 12, 11, 10, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 3, 3, 5, 8, 12, 15, 15,
+ 14, 13, 12, 11, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 3, 4, 5, 8, 14, 16, 16,
+ 14, 13, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 10, 15, 16, 17,
+ 14, 13, 13, 13, 12, 12, 9, 7, 7, 4, 3, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 15, 17, 17,
+ 14, 14, 13, 12, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 7, 11, 15, 17, 17,
+ 15, 14, 13, 13, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 7, 11, 15, 17, 17,
+ 16, 14, 14, 13, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 6, 8, 12, 16, 17, 17,
+ 16, 15, 14, 14, 13, 12, 10, 9, 7, 6, 5, 4, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 8, 9, 13, 17, 17, 17,
+ 16, 15, 15, 14, 13, 12, 10, 10, 8, 7, 6, 5, 4, 3, 2, 0, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 8, 11, 14, 17, 17, 17,
+ 16, 16, 15, 15, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 2, 1, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 13, 16, 17, 17, 17,
+ 17, 17, 15, 15, 14, 13, 12, 11, 10, 9, 9, 7, 6, 6, 5, 3, 2, 2, 3, 4, 5, 5, 5, 6, 6, 9, 11, 14, 16, 18, 18, 18,
+ 17, 17, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 2, 3, 4, 5, 5, 5, 6, 9, 10, 13, 15, 18, 18, 18, 18,
+ 18, 17, 17, 16, 15, 15, 14, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 4, 5, 5, 6, 6, 9, 11, 13, 17, 18, 18, 18, 18,
+ 20, 19, 18, 18, 16, 16, 14, 13, 12, 11, 11, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 11, 15, 18, 18, 18, 18, 18,
+ 20, 20, 19, 18, 18, 17, 15, 14, 13, 13, 12, 11, 10, 9, 8, 6, 6, 5, 5, 6, 6, 7, 7, 8, 11, 12, 16, 18, 18, 18, 18, 18,
+ 22, 21, 20, 19, 18, 17, 17, 17, 15, 15, 14, 13, 11, 10, 8, 7, 6, 6, 6, 6, 7, 7, 8, 8, 11, 14, 17, 18, 18, 18, 18, 17,
+ 22, 22, 21, 20, 19, 18, 17, 17, 17, 16, 16, 15, 14, 12, 10, 8, 7, 6, 6, 7, 7, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 16,
+ 22, 22, 22, 21, 20, 19, 18, 17, 17, 17, 16, 16, 15, 15, 12, 10, 7, 6, 6, 7, 8, 8, 9, 11, 14, 16, 18, 18, 18, 18, 17, 16,
+ ]
+ b: [
+ 13, 13, 13, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 9, 9, 4, 4, 7, 9, 9, 9, 10, 10,
+ 13, 13, 12, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 7, 9, 9, 10, 10, 10,
+ 13, 13, 12, 11, 10, 10, 9, 9, 7, 7, 6, 6, 6, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 4, 3, 3, 6, 9, 10, 11, 11, 11,
+ 13, 13, 12, 11, 10, 10, 9, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5, 5, 3, 2, 3, 5, 9, 10, 11, 11, 13,
+ 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 3, 2, 2, 3, 5, 8, 11, 11, 12, 13,
+ 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 11,
+ 13, 13, 12, 11, 11, 10, 9, 8, 8, 7, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11,
+ 13, 13, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 5, 10, 12, 12, 11,
+ 13, 13, 13, 11, 11, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 12, 12,
+ 14, 13, 13, 12, 11, 11, 10, 8, 7, 6, 4, 4, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 13, 14,
+ 13, 13, 12, 12, 11, 11, 10, 8, 7, 6, 4, 3, 2, 2, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 2, 2, 2, 4, 8, 12, 14, 14,
+ 13, 13, 12, 12, 11, 11, 11, 8, 7, 6, 5, 3, 2, 2, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 14, 14,
+ 13, 13, 12, 12, 12, 11, 11, 8, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 8, 12, 14, 14,
+ 13, 13, 13, 12, 12, 11, 11, 8, 6, 6, 5, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 14,
+ 13, 13, 13, 12, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 14, 14,
+ 14, 14, 13, 13, 13, 11, 10, 8, 7, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 8, 14, 16, 16,
+ 14, 14, 13, 13, 13, 12, 11, 9, 7, 5, 4, 2, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 6, 10, 15, 16, 16,
+ 15, 14, 14, 14, 15, 15, 11, 9, 8, 5, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7, 11, 15, 16, 17,
+ 15, 15, 14, 14, 15, 15, 12, 10, 9, 6, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 6, 7, 11, 15, 16, 16,
+ 15, 15, 15, 15, 15, 15, 12, 10, 9, 6, 5, 2, 1, 0, 0, 0, 1, 1, 1, 3, 3, 3, 4, 4, 5, 6, 6, 7, 11, 15, 16, 17,
+ 16, 16, 15, 16, 15, 15, 12, 11, 10, 7, 5, 3, 2, 1, 0, 0, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 9, 12, 16, 16, 16,
+ 16, 16, 16, 16, 16, 15, 13, 12, 10, 9, 6, 5, 3, 2, 1, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 9, 14, 16, 17, 16,
+ 16, 16, 16, 16, 16, 15, 14, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 9, 12, 15, 16, 16, 16,
+ 17, 17, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 6, 5, 4, 2, 2, 2, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 17, 16, 16,
+ 17, 17, 18, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 7, 5, 4, 2, 3, 4, 6, 6, 6, 6, 7, 8, 10, 12, 15, 17, 17, 17, 16,
+ 18, 18, 18, 18, 18, 17, 16, 15, 14, 13, 12, 11, 9, 8, 6, 5, 4, 4, 4, 5, 6, 6, 7, 7, 10, 11, 14, 16, 17, 17, 17, 17,
+ 18, 18, 19, 19, 19, 18, 17, 16, 15, 14, 14, 11, 10, 9, 7, 6, 6, 5, 5, 5, 6, 7, 7, 7, 10, 12, 14, 17, 17, 17, 17, 17,
+ 20, 20, 20, 20, 20, 19, 18, 17, 16, 15, 14, 13, 11, 10, 9, 7, 6, 6, 6, 6, 7, 7, 7, 8, 11, 12, 15, 18, 18, 17, 17, 16,
+ 22, 21, 21, 21, 21, 21, 20, 19, 17, 16, 16, 14, 13, 11, 10, 8, 7, 7, 7, 7, 8, 8, 8, 9, 11, 13, 17, 18, 18, 17, 16, 15,
+ 23, 22, 22, 22, 22, 21, 21, 20, 19, 19, 18, 16, 14, 13, 11, 9, 8, 8, 8, 8, 8, 8, 9, 10, 12, 15, 18, 18, 18, 17, 16, 14,
+ 23, 24, 24, 23, 23, 22, 22, 21, 21, 20, 20, 19, 18, 15, 13, 11, 9, 8, 8, 8, 9, 9, 10, 11, 13, 16, 18, 18, 17, 17, 15, 14,
+ 24, 24, 24, 24, 24, 23, 22, 22, 22, 22, 20, 20, 19, 18, 15, 13, 10, 9, 9, 9, 9, 10, 11, 12, 15, 17, 18, 18, 17, 16, 15, 13,
+ ]
+...
diff --git a/src/ipa/mali-c55/data/meson.build b/src/ipa/mali-c55/data/meson.build
new file mode 100644
index 00000000..8a5fdd36
--- /dev/null
+++ b/src/ipa/mali-c55/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'imx415.yaml',
+ 'uncalibrated.yaml'
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'mali-c55')
diff --git a/src/ipa/mali-c55/data/uncalibrated.yaml b/src/ipa/mali-c55/data/uncalibrated.yaml
new file mode 100644
index 00000000..6dcc0295
--- /dev/null
+++ b/src/ipa/mali-c55/data/uncalibrated.yaml
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+...
diff --git a/src/ipa/mali-c55/ipa_context.cpp b/src/ipa/mali-c55/ipa_context.cpp
new file mode 100644
index 00000000..99f76ecd
--- /dev/null
+++ b/src/ipa/mali-c55/ipa_context.cpp
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * ipa_context.cpp - MaliC55 IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::mali_c55 {
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief Active state for algorithms
+ *
+ * The active state contains all algorithm-specific data that needs to be
+ * maintained by algorithms across frames. Unlike the session configuration,
+ * the active state is mutable and constantly updated by algorithms. The active
+ * state is accessible through the IPAContext structure.
+ *
+ * The active state stores two distinct categories of information:
+ *
+ * - The consolidated value of all algorithm controls. Requests passed to
+ * the queueRequest() function store values for controls that the
+ * application wants to modify for that particular frame, and the
+ * queueRequest() function updates the active state with those values.
+ * The active state thus contains a consolidated view of the value of all
+ * controls handled by the algorithm.
+ *
+ * - The value of parameters computed by the algorithm when running in auto
+ * mode. Algorithms running in auto mode compute new parameters every
+ * time statistics buffers are received (either synchronously, or
+ * possibly in a background thread). The latest computed value of those
+ * parameters is stored in the active state in the process() function.
+ *
+ * Each of the members in the active state belongs to a specific algorithm. A
+ * member may be read by any algorithm, but shall only be written by its owner.
+ */
+
+/**
+ * \struct IPAFrameContext
+ * \brief Per-frame context for algorithms
+ *
+ * The frame context stores two distinct categories of information:
+ *
+ * - The value of the controls to be applied to the frame. These values are
+ * typically set in the queueRequest() function, from the consolidated
+ * control values stored in the active state. The frame context thus stores
+ * values for all controls related to the algorithm, not limited to the
+ * controls specified in the corresponding request, but consolidated from all
+ * requests that have been queued so far.
+ *
+ * For controls that can be set manually or computed by an algorithm
+ * (depending on the algorithm operation mode), such as for instance the
+ * colour gains for the AWB algorithm, the control value will be stored in
+ * the frame context in the queueRequest() function only when operating in
+ * manual mode. When operating in auto mode, the values are computed by the
+ * algorithm in process(), stored in the active state, and copied to the
+ * frame context in prepare(), just before being stored in the ISP parameters
+ * buffer.
+ *
+ * The queueRequest() function can also store ancillary data in the frame
+ * context, such as flags to indicate if (and what) control values have
+ * changed compared to the previous request.
+ *
+ * - Status information computed by the algorithm for a frame. For instance,
+ * the colour temperature estimated by the AWB algorithm from ISP statistics
+ * calculated on a frame is stored in the frame context for that frame in
+ * the process() function.
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::activeState
+ * \brief The IPA active state, storing the latest state for all algorithms
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of per-frame contexts
+ */
+
+} /* namespace libcamera::ipa::mali_c55 */
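As an illustration of the flow described in the comments above, a hypothetical algorithm could route a gain control through these structures as sketched below. `ExampleAlgo` and its hooks are invented for illustration only, but the context fields match ipa_context.h that follows:

    void ExampleAlgo::queueRequest(IPAContext &ctx,
                                   [[maybe_unused]] const uint32_t frame,
                                   IPAFrameContext &frameCtx,
                                   const ControlList &controls)
    {
            /* Consolidate the per-request value into the active state. */
            const auto &gain = controls.get(controls::AnalogueGain);
            if (gain)
                    ctx.activeState.agc.manual.sensorGain = *gain;

            /* In manual mode the frame context is filled here... */
            if (!ctx.activeState.agc.autoEnabled)
                    frameCtx.agc.sensorGain = ctx.activeState.agc.manual.sensorGain;
    }

    void ExampleAlgo::prepare(IPAContext &ctx,
                              [[maybe_unused]] const uint32_t frame,
                              IPAFrameContext &frameCtx,
                              [[maybe_unused]] mali_c55_params_buffer *params)
    {
            /*
             * ...while in auto mode the latest value computed by process()
             * is copied from the active state just before the ISP
             * parameters buffer is filled.
             */
            if (ctx.activeState.agc.autoEnabled)
                    frameCtx.agc.sensorGain = ctx.activeState.agc.automatic.sensorGain;
    }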
diff --git a/src/ipa/mali-c55/ipa_context.h b/src/ipa/mali-c55/ipa_context.h
new file mode 100644
index 00000000..5e3e2fbd
--- /dev/null
+++ b/src/ipa/mali-c55/ipa_context.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * ipa_context.h - Mali-C55 IPA Context
+ */
+
+#pragma once
+
+#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include <libipa/fc_queue.h>
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+struct IPASessionConfiguration {
+ struct {
+ utils::Duration minShutterSpeed;
+ utils::Duration maxShutterSpeed;
+ uint32_t defaultExposure;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+ } agc;
+
+ struct {
+ BayerFormat::Order bayerOrder;
+ utils::Duration lineDuration;
+ uint32_t blackLevel;
+ } sensor;
+};
+
+struct IPAActiveState {
+ struct {
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } automatic;
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } manual;
+ bool autoEnabled;
+ uint32_t constraintMode;
+ uint32_t exposureMode;
+ uint32_t temperatureK;
+ } agc;
+
+ struct {
+ double rGain;
+ double bGain;
+ } awb;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ uint32_t exposure;
+ double sensorGain;
+ double ispGain;
+ } agc;
+
+ struct {
+ double rGain;
+ double bGain;
+ } awb;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
+};
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
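The frameContexts member above is a fixed-size ring buffer (FCQueue) of IPAFrameContext entries indexed by frame number. A minimal usage sketch, mirroring how mali-c55.cpp below drives it:

    IPAContext context(16); /* ring buffer depth */

    /* On request queueing, allocate the per-frame slot. */
    IPAFrameContext &frameContext = context.frameContexts.alloc(frame);

    /* When filling parameters or processing statistics for the same
     * frame, retrieve that slot again. */
    IPAFrameContext &sameContext = context.frameContexts.get(frame);

    /* On stop() or reconfiguration, drop all per-frame state. */
    context.frameContexts.clear();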
diff --git a/src/ipa/mali-c55/mali-c55.cpp b/src/ipa/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..c6941a95
--- /dev/null
+++ b/src/ipa/mali-c55/mali-c55.cpp
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy
+ *
+ * mali-c55.cpp - Mali-C55 ISP image processing algorithms
+ */
+
+#include <map>
+#include <string.h>
+#include <vector>
+
+#include <linux/mali-c55-config.h>
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithms/algorithm.h"
+#include "libipa/camera_sensor_helper.h"
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPAMaliC55)
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::mali_c55 {
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+class IPAMaliC55 : public IPAMaliC55Interface, public Module
+{
+public:
+ IPAMaliC55();
+
+ int init(const IPASettings &settings, const IPAConfigInfo &ipaConfig,
+ ControlInfoMap *ipaControls) override;
+ int start() override;
+ void stop() override;
+ int configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder,
+ ControlInfoMap *ipaControls) override;
+ void mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly) override;
+ void unmapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void queueRequest(const uint32_t request, const ControlList &controls) override;
+ void fillParams(unsigned int request, uint32_t bufferId) override;
+ void processStats(unsigned int request, unsigned int bufferId,
+ const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateSessionConfiguration(const IPACameraSensorInfo &info,
+ const ControlInfoMap &sensorControls,
+ BayerFormat::Order bayerOrder);
+ void updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls);
+ void setControls();
+
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
+
+ ControlInfoMap sensorControls_;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+
+IPAMaliC55::IPAMaliC55()
+ : context_(kMaxFrameContexts)
+{
+}
+
+std::string IPAMaliC55::logPrefix() const
+{
+ return "mali-c55";
+}
+
+int IPAMaliC55::init(const IPASettings &settings, const IPAConfigInfo &ipaConfig,
+ ControlInfoMap *ipaControls)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPAMaliC55, Error)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ return -ENODEV;
+ }
+
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPAMaliC55, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->contains("algorithms")) {
+ LOG(IPAMaliC55, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ updateControls(ipaConfig.sensorInfo, ipaConfig.sensorControls, ipaControls);
+
+ return 0;
+}
+
+void IPAMaliC55::setControls()
+{
+ IPAActiveState &activeState = context_.activeState;
+ uint32_t exposure;
+ uint32_t gain;
+
+ if (activeState.agc.autoEnabled) {
+ exposure = activeState.agc.automatic.exposure;
+ gain = camHelper_->gainCode(activeState.agc.automatic.sensorGain);
+ } else {
+ exposure = activeState.agc.manual.exposure;
+ gain = camHelper_->gainCode(activeState.agc.manual.sensorGain);
+ }
+
+ ControlList ctrls(sensorControls_);
+ ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
+
+ setSensorControls.emit(ctrls);
+}
+
+int IPAMaliC55::start()
+{
+ return 0;
+}
+
+void IPAMaliC55::stop()
+{
+ context_.frameContexts.clear();
+}
+
+void IPAMaliC55::updateSessionConfiguration(const IPACameraSensorInfo &info,
+ const ControlInfoMap &sensorControls,
+ BayerFormat::Order bayerOrder)
+{
+ context_.configuration.sensor.bayerOrder = bayerOrder;
+
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>();
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>();
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>();
+
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ int32_t minGain = v4l2Gain.min().get<int32_t>();
+ int32_t maxGain = v4l2Gain.max().get<int32_t>();
+
+ /*
+ * When the AGC computes the new exposure values for a frame, it needs
+ * to know the limits for shutter speed and analogue gain.
+ * As it depends on the sensor, update it with the controls.
+ *
+ * \todo take VBLANK into account for maximum shutter speed
+ */
+ context_.configuration.sensor.lineDuration = info.minLineLength * 1.0s / info.pixelRate;
+ context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.agc.defaultExposure = defExposure;
+ context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain);
+ context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain);
+
+ if (camHelper_->blackLevel().has_value()) {
+ /*
+ * The black level from CameraSensorHelper is a 16-bit value.
+ * The Mali-C55 ISP expects 20-bit settings, so we shift it to
+ * the appropriate width
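+ * (for instance, a black level of 4096 on the 16-bit scale becomes
+ * 4096 << 4 = 65536 on the 20-bit scale).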
+ */
+ context_.configuration.sensor.blackLevel =
+ camHelper_->blackLevel().value() << 4;
+ }
+}
+
+void IPAMaliC55::updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
+{
+ ControlInfoMap::Map ctrlMap;
+
+ /*
+ * Compute the frame duration limits.
+ *
+ * The frame length is computed assuming a fixed line length combined
+ * with the vertical frame sizes.
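+ *
+ * For instance, a line length of 2200 pixels, a frame height of 1125
+ * lines and a pixel rate of 74.25 MPix/s yield
+ * 2200 * 1125 / 74.25 = 33333us per frame, i.e. 30 fps.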
+ */
+ const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+ uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+ uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+ const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ std::array<uint32_t, 3> frameHeights{
+ v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+ };
+
+ std::array<int64_t, 3> frameDurations;
+ for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+ uint64_t frameSize = lineLength * frameHeights[i];
+ frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+ }
+
+ ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+ frameDurations[1],
+ frameDurations[2]);
+
+ /*
+ * Compute exposure time limits from the V4L2_CID_EXPOSURE control
+ * limits and the line duration, expressed in microseconds to match
+ * the units of the ExposureTime control.
+ */
+ double lineDuration = sensorInfo.minLineLength / (sensorInfo.pixelRate / 1e6);
+
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+ ctrlMap[&controls::ExposureTime] = ControlInfo(minExposure, maxExposure, defExposure);
+
+ /* Compute the analogue gain limits. */
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ ctrlMap[&controls::AnalogueGain] = ControlInfo(minGain, maxGain, defGain);
+
+ /*
+ * Merge in any controls that we support either statically or from the
+ * algorithms.
+ */
+ ctrlMap.merge(context_.ctrlMap);
+
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
+}
+
+int IPAMaliC55::configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder,
+ ControlInfoMap *ipaControls)
+{
+ sensorControls_ = ipaConfig.sensorControls;
+
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
+
+ const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
+
+ updateSessionConfiguration(info, ipaConfig.sensorControls,
+ static_cast<BayerFormat::Order>(bayerOrder));
+ updateControls(info, ipaConfig.sensorControls, ipaControls);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ int ret = algo->configure(context_, info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void IPAMaliC55::mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(
+ buffer.id,
+ MappedFrameBuffer(
+ &fb,
+ readOnly ? MappedFrameBuffer::MapFlag::Read
+ : MappedFrameBuffer::MapFlag::ReadWrite));
+ }
+}
+
+void IPAMaliC55::unmapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ auto it = buffers_.find(buffer.id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(it);
+ }
+}
+
+void IPAMaliC55::queueRequest(const uint32_t request, const ControlList &controls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(request);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ algo->queueRequest(context_, request, frameContext, controls);
+ }
+}
+
+void IPAMaliC55::fillParams(unsigned int request, uint32_t bufferId)
+{
+ struct mali_c55_params_buffer *params;
+ IPAFrameContext &frameContext = context_.frameContexts.get(request);
+
+ params = reinterpret_cast<mali_c55_params_buffer *>(
+ buffers_.at(bufferId).planes()[0].data());
+ memset(params, 0, sizeof(mali_c55_params_buffer));
+
+ params->version = MALI_C55_PARAM_BUFFER_V1;
+
+ for (auto const &algo : algorithms()) {
+ algo->prepare(context_, request, frameContext, params);
+
+ ASSERT(params->total_size <= MALI_C55_PARAMS_MAX_SIZE);
+ }
+
+ paramsComputed.emit(request);
+}
+
+void IPAMaliC55::processStats(unsigned int request, unsigned int bufferId,
+ const ControlList &sensorControls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(request);
+ const mali_c55_stats_buffer *stats = nullptr;
+
+ stats = reinterpret_cast<mali_c55_stats_buffer *>(
+ buffers_.at(bufferId).planes()[0].data());
+
+ frameContext.agc.exposure =
+ sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ frameContext.agc.sensorGain =
+ camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+
+ ControlList metadata(controls::controls);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ algo->process(context_, request, frameContext, stats, metadata);
+ }
+
+ setControls();
+
+ statsProcessed.emit(request, metadata);
+}
+
+} /* namespace ipa::mali_c55 */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 1,
+ "mali-c55",
+ "mali-c55",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::mali_c55::IPAMaliC55();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
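The per-frame interaction with the pipeline handler follows a fixed order. A rough sketch of the calling sequence seen from the pipeline handler side (the `ipa_` proxy and the buffer identifiers are illustrative names, not the actual pipeline handler code):

    uint32_t frame = sequenceNumber++;

    /* Allocates the frame context and lets algorithms consume controls. */
    ipa_->queueRequest(frame, request->controls());

    /* Fills the mali_c55_params_buffer and emits paramsComputed(frame). */
    ipa_->fillParams(frame, paramsBufferId);

    /* ... the ISP runs with those parameters and produces statistics ... */

    /* Runs the algorithms on the statistics, pushes updated sensor
     * controls via setSensorControls and emits statsProcessed(). */
    ipa_->processStats(frame, statsBufferId, appliedSensorControls);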
diff --git a/src/ipa/mali-c55/meson.build b/src/ipa/mali-c55/meson.build
new file mode 100644
index 00000000..864d90ec
--- /dev/null
+++ b/src/ipa/mali-c55/meson.build
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_mali_c55'
+
+mali_c55_ipa_sources = files([
+ 'ipa_context.cpp',
+ 'mali-c55.cpp'
+])
+
+mali_c55_ipa_sources += mali_c55_ipa_algorithms
+
+mod = shared_module(ipa_name,
+ mali_c55_ipa_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes, libipa_includes],
+ dependencies : libcamera_private,
+ link_with : libipa,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/mali-c55/module.h b/src/ipa/mali-c55/module.h
new file mode 100644
index 00000000..1d85ec1f
--- /dev/null
+++ b/src/ipa/mali-c55/module.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * module.h - Mali-C55 IPA Module
+ */
+
+#pragma once
+
+#include <linux/mali-c55-config.h>
+
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::mali_c55 {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo,
+ mali_c55_params_buffer, mali_c55_stats_buffer>;
+
+} /* namespace ipa::mali_c55 */
+
+} /* namespace libcamera */
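The alias above fixes the types used by libipa's Algorithm interface for this module. A skeletal mali-c55 algorithm would therefore override hooks with signatures along these lines (`ExampleBlc` is a hypothetical name used only to show the resulting prototypes):

    class ExampleBlc : public Algorithm
    {
    public:
            int configure(IPAContext &context,
                          const IPACameraSensorInfo &configInfo) override;
            void prepare(IPAContext &context, const uint32_t frame,
                         IPAFrameContext &frameContext,
                         mali_c55_params_buffer *params) override;
            void process(IPAContext &context, const uint32_t frame,
                         IPAFrameContext &frameContext,
                         const mali_c55_stats_buffer *stats,
                         ControlList &metadata) override;
    };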
diff --git a/src/ipa/meson.build b/src/ipa/meson.build
index 73278a60..0ad4631d 100644
--- a/src/ipa/meson.build
+++ b/src/ipa/meson.build
@@ -1,19 +1,80 @@
-ipa_install_dir = join_paths(get_option('libdir'), 'libcamera')
+# SPDX-License-Identifier: CC0-1.0
ipa_includes = [
libcamera_includes,
- libcamera_internal_includes,
]
+ipa_install_dir = libcamera_libdir
+ipa_data_dir = libcamera_datadir / 'ipa'
+ipa_sysconf_dir = libcamera_sysconfdir / 'ipa'
+
+config_h.set('IPA_CONFIG_DIR',
+ '"' + get_option('prefix') / ipa_sysconf_dir +
+ ':' + get_option('prefix') / ipa_data_dir + '"')
+
config_h.set('IPA_MODULE_DIR',
- '"' + join_paths(get_option('prefix'), ipa_install_dir) + '"')
+ '"' + get_option('prefix') / ipa_install_dir + '"')
+
+summary({
+ 'IPA_CONFIG_DIR' : config_h.get('IPA_CONFIG_DIR'),
+ 'IPA_MODULE_DIR' : config_h.get('IPA_MODULE_DIR'),
+ }, section : 'Paths')
subdir('libipa')
-ipas = ['rkisp1', 'vimc']
+ipa_sign = files('ipa-sign.sh')
+
+ipa_names = []
+
+ipa_modules = get_option('ipas')
+
+# Tests require the vimc IPA, similarly to the vimc pipeline handler, for
+# their execution. Include it automatically when tests are enabled.
+if get_option('test') and 'vimc' not in ipa_modules
+ message('Enabling vimc IPA to support tests')
+ ipa_modules += ['vimc']
+endif
+
+enabled_ipa_modules = []
+enabled_ipa_names = []
-foreach pipeline : get_option('pipelines')
- if ipas.contains(pipeline)
- subdir(pipeline)
+subdirs = []
+foreach pipeline : pipelines
+ # The current implementation expects the IPA module name to match the
+ # pipeline name.
+ # \todo Make the IPA naming scheme more flexible.
+ if not ipa_modules.contains(pipeline)
+ continue
endif
+ enabled_ipa_names += pipeline
+
+ # Allow multi-level directory structuring for the IPAs if needed.
+ pipeline = pipeline.split('/')[0]
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
+ subdir(pipeline)
+
+ # Don't reuse the pipeline variable below, the subdirectory may have
+ # overwritten it.
endforeach
+
+# The ipa-sign-install.sh script, which uses the enabled_ipa_modules variable,
+# will itself prepend MESON_INSTALL_DESTDIR_PREFIX to each IPA module name;
+# therefore we must not include the prefix string here.
+foreach ipa_name : ipa_names
+ enabled_ipa_modules += ipa_install_dir / ipa_name + '.so'
+endforeach
+
+if ipa_sign_module
+ # Regenerate the signatures for all IPA modules. We can't simply install the
+ # .sign files, as meson strips the DT_RPATH and DT_RUNPATH from binaries at
+ # install time, which invalidates the signatures.
+ meson.add_install_script('ipa-sign-install.sh',
+ ipa_priv_key.full_path(),
+ enabled_ipa_modules,
+ install_tag : 'runtime')
+endif
diff --git a/src/ipa/rkisp1/algorithms/agc.cpp b/src/ipa/rkisp1/algorithms/agc.cpp
new file mode 100644
index 00000000..40e5a8f4
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/agc.cpp
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * AGC/AEC mean-based control algorithm
+ */
+
+#include "agc.h"
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/histogram.h"
+
+/**
+ * \file agc.h
+ */
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Agc
+ * \brief A mean-based auto-exposure algorithm
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Agc)
+
+int Agc::parseMeteringModes(IPAContext &context, const YamlObject &tuningData)
+{
+ if (!tuningData.isDictionary())
+ LOG(RkISP1Agc, Warning)
+ << "'AeMeteringMode' parameter not found in tuning file";
+
+ for (const auto &[key, value] : tuningData.asDict()) {
+ if (controls::AeMeteringModeNameValueMap.find(key) ==
+ controls::AeMeteringModeNameValueMap.end()) {
+ LOG(RkISP1Agc, Warning)
+ << "Skipping unknown metering mode '" << key << "'";
+ continue;
+ }
+
+ std::vector<uint8_t> weights =
+ value.getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (weights.size() != context.hw->numHistogramWeights) {
+ LOG(RkISP1Agc, Warning)
+ << "Failed to read metering mode'" << key << "'";
+ continue;
+ }
+
+ meteringModes_[controls::AeMeteringModeNameValueMap.at(key)] = weights;
+ }
+
+ if (meteringModes_.empty()) {
+ LOG(RkISP1Agc, Warning)
+ << "No metering modes read from tuning file; defaulting to matrix";
+ int32_t meteringModeId = controls::AeMeteringModeNameValueMap.at("MeteringMatrix");
+ std::vector<uint8_t> weights(context.hw->numHistogramWeights, 1);
+
+ meteringModes_[meteringModeId] = weights;
+ }
+
+ std::vector<ControlValue> meteringModes;
+ std::vector<int> meteringModeKeys = utils::map_keys(meteringModes_);
+ std::transform(meteringModeKeys.begin(), meteringModeKeys.end(),
+ std::back_inserter(meteringModes),
+ [](int x) { return ControlValue(x); });
+ context.ctrlMap[&controls::AeMeteringMode] = ControlInfo(meteringModes);
+
+ return 0;
+}
+
+uint8_t Agc::computeHistogramPredivider(const Size &size,
+ enum rkisp1_cif_isp_histogram_mode mode)
+{
+ /*
+ * The maximum number of pixels that could potentially be in one bin is
+ * if all the pixels of the image are in it, multiplied by 3 for the
+ * three color channels. The counter for each bin is 16 bits wide, so
+ * `factor` thus contains the number of times we'd wrap around. This is
+ * obviously the number of pixels that we need to skip to make sure
+ * that we don't wrap around, but we compute the square root of it
+ * instead, as the skip that we need to program is for both the x and y
+ * directions.
+ *
+ * Even though it looks like dividing into a counter of 65536 would
+ * overflow by 1, this is apparently fine according to the hardware
+ * documentation, and this successfully gets the expected documented
+ * predivider size for cases where:
+ * (width / predivider) * (height / predivider) * 3 == 65536.
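+ *
+ * For example, a 1920x1080 window measured in RGB-combined mode gives
+ * factor = 1920 * 1080 * 3 / 65536, or about 94.9, whose square root
+ * (about 9.74) is rounded up to a predivider of 10.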
+ *
+ * There's a bit of extra rounding math to make sure the rounding goes
+ * the correct direction so that the square of the step is big enough
+ * to encompass the `factor` number of pixels that we need to skip.
+ *
+ * \todo Take into account weights. That is, if the weights are low
+ * enough we can potentially reduce the predivider to increase
+ * precision. This needs some investigation however, as this hardware
+ * behavior is undocumented and is only an educated guess.
+ */
+ int count = mode == RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED ? 3 : 1;
+ double factor = size.width * size.height * count / 65536.0;
+ double root = std::sqrt(factor);
+ uint8_t predivider = static_cast<uint8_t>(std::ceil(root));
+
+ return std::clamp<uint8_t>(predivider, 3, 127);
+}
+
+Agc::Agc()
+{
+ supportsRaw_ = true;
+}
+
+/**
+ * \brief Initialise the AGC algorithm from tuning files
+ * \param[in] context The shared IPA context
+ * \param[in] tuningData The YamlObject containing Agc tuning data
+ *
+ * This function calls the base class' tuningData parsers to discover which
+ * control values are supported.
+ *
+ * \return 0 on success or errors from the base class
+ */
+int Agc::init(IPAContext &context, const YamlObject &tuningData)
+{
+ int ret;
+
+ ret = parseTuningData(tuningData);
+ if (ret)
+ return ret;
+
+ const YamlObject &yamlMeteringModes = tuningData["AeMeteringMode"];
+ ret = parseMeteringModes(context, yamlMeteringModes);
+ if (ret)
+ return ret;
+
+ context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true);
+ context.ctrlMap.merge(controls());
+
+ return 0;
+}
+
+/**
+ * \brief Configure the AGC given a configInfo
+ * \param[in] context The shared IPA context
+ * \param[in] configInfo The IPA configuration data
+ *
+ * \return 0
+ */
+int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo)
+{
+ /* Configure the default exposure and gain. */
+ context.activeState.agc.automatic.gain = context.configuration.sensor.minAnalogueGain;
+ context.activeState.agc.automatic.exposure =
+ 10ms / context.configuration.sensor.lineDuration;
+ context.activeState.agc.manual.gain = context.activeState.agc.automatic.gain;
+ context.activeState.agc.manual.exposure = context.activeState.agc.automatic.exposure;
+ context.activeState.agc.autoEnabled = !context.configuration.raw;
+
+ context.activeState.agc.constraintMode =
+ static_cast<controls::AeConstraintModeEnum>(constraintModes().begin()->first);
+ context.activeState.agc.exposureMode =
+ static_cast<controls::AeExposureModeEnum>(exposureModeHelpers().begin()->first);
+ context.activeState.agc.meteringMode =
+ static_cast<controls::AeMeteringModeEnum>(meteringModes_.begin()->first);
+
+ /*
+ * \todo This should probably come from FrameDurationLimits instead,
+ * except it's computed in the IPA and not here so we'd have to
+ * recompute it.
+ */
+ context.activeState.agc.maxFrameDuration = context.configuration.sensor.maxExposureTime;
+
+ /*
+ * Define the measurement window for AGC as a centered rectangle
+ * covering 3/4 of the image width and height.
+ */
+ context.configuration.agc.measureWindow.h_offs = configInfo.outputSize.width / 8;
+ context.configuration.agc.measureWindow.v_offs = configInfo.outputSize.height / 8;
+ context.configuration.agc.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
+ context.configuration.agc.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;
+
+ setLimits(context.configuration.sensor.minExposureTime,
+ context.configuration.sensor.maxExposureTime,
+ context.configuration.sensor.minAnalogueGain,
+ context.configuration.sensor.maxAnalogueGain);
+
+ resetFrameCount();
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Agc::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &agc = context.activeState.agc;
+
+ if (!context.configuration.raw) {
+ const auto &agcEnable = controls.get(controls::AeEnable);
+ if (agcEnable && *agcEnable != agc.autoEnabled) {
+ agc.autoEnabled = *agcEnable;
+
+ LOG(RkISP1Agc, Debug)
+ << (agc.autoEnabled ? "Enabling" : "Disabling")
+ << " AGC";
+ }
+ }
+
+ const auto &exposure = controls.get(controls::ExposureTime);
+ if (exposure && !agc.autoEnabled) {
+ agc.manual.exposure = *exposure * 1.0us
+ / context.configuration.sensor.lineDuration;
+
+ LOG(RkISP1Agc, Debug)
+ << "Set exposure to " << agc.manual.exposure;
+ }
+
+ const auto &gain = controls.get(controls::AnalogueGain);
+ if (gain && !agc.autoEnabled) {
+ agc.manual.gain = *gain;
+
+ LOG(RkISP1Agc, Debug) << "Set gain to " << agc.manual.gain;
+ }
+
+ frameContext.agc.autoEnabled = agc.autoEnabled;
+
+ if (!frameContext.agc.autoEnabled) {
+ frameContext.agc.exposure = agc.manual.exposure;
+ frameContext.agc.gain = agc.manual.gain;
+ }
+
+ const auto &meteringMode = controls.get(controls::AeMeteringMode);
+ if (meteringMode) {
+ frameContext.agc.updateMetering = agc.meteringMode != *meteringMode;
+ agc.meteringMode =
+ static_cast<controls::AeMeteringModeEnum>(*meteringMode);
+ }
+ frameContext.agc.meteringMode = agc.meteringMode;
+
+ const auto &exposureMode = controls.get(controls::AeExposureMode);
+ if (exposureMode)
+ agc.exposureMode =
+ static_cast<controls::AeExposureModeEnum>(*exposureMode);
+ frameContext.agc.exposureMode = agc.exposureMode;
+
+ const auto &constraintMode = controls.get(controls::AeConstraintMode);
+ if (constraintMode)
+ agc.constraintMode =
+ static_cast<controls::AeConstraintModeEnum>(*constraintMode);
+ frameContext.agc.constraintMode = agc.constraintMode;
+
+ const auto &frameDurationLimits = controls.get(controls::FrameDurationLimits);
+ if (frameDurationLimits) {
+ utils::Duration maxFrameDuration =
+ std::chrono::microseconds((*frameDurationLimits).back());
+ agc.maxFrameDuration = maxFrameDuration;
+ }
+ frameContext.agc.maxFrameDuration = agc.maxFrameDuration;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Agc::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ if (frameContext.agc.autoEnabled) {
+ frameContext.agc.exposure = context.activeState.agc.automatic.exposure;
+ frameContext.agc.gain = context.activeState.agc.automatic.gain;
+ }
+
+ if (frame > 0 && !frameContext.agc.updateMetering)
+ return;
+
+ /*
+ * Configure the AEC measurements. Set the window, measure
+ * continuously, and estimate Y as (R + G + B) x (85/256).
+ */
+ auto aecConfig = params->block<BlockType::Aec>();
+ aecConfig.setEnabled(true);
+
+ aecConfig->meas_window = context.configuration.agc.measureWindow;
+ aecConfig->autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0;
+ aecConfig->mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1;
+
+ /*
+ * Configure the histogram measurement. Set the window, produce a
+ * luminance histogram, and set the weights and predivider.
+ */
+ auto hstConfig = params->block<BlockType::Hst>();
+ hstConfig.setEnabled(true);
+
+ hstConfig->meas_window = context.configuration.agc.measureWindow;
+ hstConfig->mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM;
+
+ Span<uint8_t> weights{
+ hstConfig->hist_weight,
+ context.hw->numHistogramWeights
+ };
+ std::vector<uint8_t> &modeWeights = meteringModes_.at(frameContext.agc.meteringMode);
+ std::copy(modeWeights.begin(), modeWeights.end(), weights.begin());
+
+ struct rkisp1_cif_isp_window window = hstConfig->meas_window;
+ Size windowSize = { window.h_size, window.v_size };
+ hstConfig->histogram_predivider =
+ computeHistogramPredivider(windowSize,
+ static_cast<rkisp1_cif_isp_histogram_mode>(hstConfig->mode));
+}
+
+void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ metadata.set(controls::AnalogueGain, frameContext.sensor.gain);
+ metadata.set(controls::ExposureTime, exposureTime.get<std::micro>());
+ metadata.set(controls::AeEnable, frameContext.agc.autoEnabled);
+
+ /* \todo Use VBlank value calculated from each frame exposure. */
+ uint32_t vTotal = context.configuration.sensor.size.height
+ + context.configuration.sensor.defVBlank;
+ utils::Duration frameDuration = context.configuration.sensor.lineDuration
+ * vTotal;
+ metadata.set(controls::FrameDuration, frameDuration.get<std::micro>());
+
+ metadata.set(controls::AeMeteringMode, frameContext.agc.meteringMode);
+ metadata.set(controls::AeExposureMode, frameContext.agc.exposureMode);
+ metadata.set(controls::AeConstraintMode, frameContext.agc.constraintMode);
+}
+
+/**
+ * \brief Estimate the relative luminance of the frame with a given gain
+ * \param[in] gain The gain to apply to the frame
+ *
+ * This function estimates the average relative luminance of the frame that
+ * would be output by the sensor if an additional \a gain was applied.
+ *
+ * The estimation is based on the AE statistics for the current frame. Y
+ * averages for all cells are first multiplied by the gain, and then saturated
+ * to approximate the sensor behaviour at high brightness values. The
+ * approximation is quite rough, as it doesn't take into account non-linearities
+ * when approaching saturation. In this case, saturating after the conversion to
+ * YUV doesn't take into account the fact that the R, G and B components
+ * contribute differently to the relative luminance.
+ *
+ * The values are normalized to the [0.0, 1.0] range, where 1.0 corresponds to a
+ * theoretical perfect reflector of 100% reference white.
+ *
+ * More detailed information can be found in:
+ * https://en.wikipedia.org/wiki/Relative_luminance
+ *
+ * \return The relative luminance
+ */
+double Agc::estimateLuminance(double gain) const
+{
+ double ySum = 0.0;
+
+ /* Sum the averages, saturated to 255. */
+ for (uint8_t expMean : expMeans_)
+ ySum += std::min(expMean * gain, 255.0);
+
+ /* \todo Weight with the AWB gains */
+
+ return ySum / expMeans_.size() / 255;
+}
+
+/**
+ * \brief Process RkISP1 statistics, and run AGC operations
+ * \param[in] context The shared IPA context
+ * \param[in] frame The frame context sequence number
+ * \param[in] frameContext The current frame context
+ * \param[in] stats The RKISP1 statistics and ISP results
+ * \param[out] metadata Metadata for the frame, to be filled by the algorithm
+ *
+ * Identify the current image brightness, and use that to estimate the optimal
+ * new exposure and gain for the scene.
+ */
+void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ if (!stats) {
+ fillMetadata(context, frameContext, metadata);
+ return;
+ }
+
+ if (!(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP)) {
+ fillMetadata(context, frameContext, metadata);
+ LOG(RkISP1Agc, Error) << "AUTOEXP data is missing in statistics";
+ return;
+ }
+
+ /*
+ * \todo Verify that the exposure and gain applied by the sensor for
+ * this frame match what has been requested. This isn't a hard
+ * requirement for stability of the AGC (the guarantee we need in
+ * automatic mode is a perfect match between the frame and the values
+ * we receive), but is important in manual mode.
+ */
+
+ const rkisp1_cif_isp_stat *params = &stats->params;
+
+ /* The lower 4 bits are fractional and meant to be discarded. */
+ Histogram hist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+ expMeans_ = { params->ae.exp_mean, context.hw->numAeCells };
+
+ utils::Duration maxExposureTime =
+ std::clamp(frameContext.agc.maxFrameDuration,
+ context.configuration.sensor.minExposureTime,
+ context.configuration.sensor.maxExposureTime);
+ setLimits(context.configuration.sensor.minExposureTime,
+ maxExposureTime,
+ context.configuration.sensor.minAnalogueGain,
+ context.configuration.sensor.maxAnalogueGain);
+
+ /*
+ * The Agc algorithm needs to know the effective exposure value that was
+ * applied to the sensor when the statistics were collected.
+ */
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double analogueGain = frameContext.sensor.gain;
+ utils::Duration effectiveExposureValue = exposureTime * analogueGain;
+
+ utils::Duration newExposureTime;
+ double aGain, dGain;
+ std::tie(newExposureTime, aGain, dGain) =
+ calculateNewEv(frameContext.agc.constraintMode,
+ frameContext.agc.exposureMode,
+ hist, effectiveExposureValue);
+
+ LOG(RkISP1Agc, Debug)
+ << "Divided up exposure time, analogue gain and digital gain are "
+ << newExposureTime << ", " << aGain << " and " << dGain;
+
+ IPAActiveState &activeState = context.activeState;
+ /* Update the estimated exposure and gain. */
+ activeState.agc.automatic.exposure = newExposureTime
+ / context.configuration.sensor.lineDuration;
+ activeState.agc.automatic.gain = aGain;
+
+ fillMetadata(context, frameContext, metadata);
+ expMeans_ = {};
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
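The metering modes registered in parseMeteringModes() surface as the public AeMeteringMode control, so an application can select a weight table per request. Roughly, on the application side (a sketch, error handling omitted):

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    /* Ask the AGC to meter with the matrix weights from the tuning file. */
    void useMatrixMetering(libcamera::Request *request)
    {
            using namespace libcamera;

            request->controls().set(controls::AeMeteringMode,
                                    controls::MeteringMatrix);
    }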
diff --git a/src/ipa/rkisp1/algorithms/agc.h b/src/ipa/rkisp1/algorithms/agc.h
new file mode 100644
index 00000000..aa86f2c5
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/agc.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 AGC/AEC mean-based control algorithm
+ */
+
+#pragma once
+
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/geometry.h>
+
+#include "libipa/agc_mean_luminance.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Agc : public Algorithm, public AgcMeanLuminance
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ int parseMeteringModes(IPAContext &context, const YamlObject &tuningData);
+ uint8_t computeHistogramPredivider(const Size &size,
+ enum rkisp1_cif_isp_histogram_mode mode);
+
+ void fillMetadata(IPAContext &context, IPAFrameContext &frameContext,
+ ControlList &metadata);
+ double estimateLuminance(double gain) const override;
+
+ Span<const uint8_t> expMeans_;
+
+ std::map<int32_t, std::vector<uint8_t>> meteringModes_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/algorithm.h b/src/ipa/rkisp1/algorithms/algorithm.h
new file mode 100644
index 00000000..715cfcd8
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/algorithm.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas On Board
+ *
+ * RkISP1 control algorithm interface
+ */
+
+#pragma once
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1 {
+
+class Algorithm : public libcamera::ipa::Algorithm<Module>
+{
+public:
+ Algorithm()
+ : disabled_(false), supportsRaw_(false)
+ {
+ }
+
+ bool disabled_;
+ bool supportsRaw_;
+};
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/awb.cpp b/src/ipa/rkisp1/algorithms/awb.cpp
new file mode 100644
index 00000000..cffaa06a
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/awb.cpp
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * AWB control algorithm
+ */
+
+#include "awb.h"
+
+#include <algorithm>
+#include <ios>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libipa/colours.h"
+
+/**
+ * \file awb.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Awb
+ * \brief A Grey world white balance correction algorithm
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Awb)
+
+constexpr int32_t kMinColourTemperature = 2500;
+constexpr int32_t kMaxColourTemperature = 10000;
+constexpr int32_t kDefaultColourTemperature = 5000;
+
+/* Minimum mean value below which AWB can't operate. */
+constexpr double kMeanMinThreshold = 2.0;
+
+Awb::Awb()
+ : rgbMode_(false)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Awb::init(IPAContext &context, const YamlObject &tuningData)
+{
+ auto &cmap = context.ctrlMap;
+ cmap[&controls::ColourTemperature] = ControlInfo(kMinColourTemperature,
+ kMaxColourTemperature,
+ kDefaultColourTemperature);
+
+ Interpolator<Vector<double, 2>> gainCurve;
+ int ret = gainCurve.readYaml(tuningData["colourGains"], "ct", "gains");
+ if (ret < 0)
+ LOG(RkISP1Awb, Warning)
+ << "Failed to parse 'colourGains' "
+ << "parameter from tuning file; "
+ << "manual colour temperature will not work properly";
+ else
+ colourGainCurve_ = gainCurve;
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int Awb::configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo)
+{
+ context.activeState.awb.gains.manual = RGB<double>{ 1.0 };
+ context.activeState.awb.gains.automatic = RGB<double>{ 1.0 };
+ context.activeState.awb.autoEnabled = true;
+ context.activeState.awb.temperatureK = kDefaultColourTemperature;
+
+ /*
+ * Define the measurement window for AWB as a centered rectangle
+ * covering 3/4 of the image width and height.
+ */
+ context.configuration.awb.measureWindow.h_offs = configInfo.outputSize.width / 8;
+ context.configuration.awb.measureWindow.v_offs = configInfo.outputSize.height / 8;
+ context.configuration.awb.measureWindow.h_size = 3 * configInfo.outputSize.width / 4;
+ context.configuration.awb.measureWindow.v_size = 3 * configInfo.outputSize.height / 4;
+
+ context.configuration.awb.enabled = true;
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Awb::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &awb = context.activeState.awb;
+
+ const auto &awbEnable = controls.get(controls::AwbEnable);
+ if (awbEnable && *awbEnable != awb.autoEnabled) {
+ awb.autoEnabled = *awbEnable;
+
+ LOG(RkISP1Awb, Debug)
+ << (*awbEnable ? "Enabling" : "Disabling") << " AWB";
+ }
+
+ frameContext.awb.autoEnabled = awb.autoEnabled;
+
+ if (awb.autoEnabled)
+ return;
+
+ const auto &colourGains = controls.get(controls::ColourGains);
+ const auto &colourTemperature = controls.get(controls::ColourTemperature);
+ bool update = false;
+ if (colourGains) {
+ awb.gains.manual.r() = (*colourGains)[0];
+ awb.gains.manual.b() = (*colourGains)[1];
+ /*
+ * \todo: Colour temperature reported in metadata is now
+ * incorrect, as we can't deduce the temperature from the gains.
+ * This will be fixed with the bayes AWB algorithm.
+ */
+ update = true;
+ } else if (colourTemperature && colourGainCurve_) {
+ const auto &gains = colourGainCurve_->getInterpolated(*colourTemperature);
+ awb.gains.manual.r() = gains[0];
+ awb.gains.manual.b() = gains[1];
+ awb.temperatureK = *colourTemperature;
+ update = true;
+ }
+
+ if (update)
+ LOG(RkISP1Awb, Debug)
+ << "Set colour gains to " << awb.gains.manual;
+
+ frameContext.awb.gains = awb.gains.manual;
+ frameContext.awb.temperatureK = awb.temperatureK;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Awb::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ /*
+ * This is the latest point at which the active state can be read, so
+ * these are the most up-to-date automatic values available.
+ */
+ if (frameContext.awb.autoEnabled) {
+ frameContext.awb.gains = context.activeState.awb.gains.automatic;
+ frameContext.awb.temperatureK = context.activeState.awb.temperatureK;
+ }
+
+ auto gainConfig = params->block<BlockType::AwbGain>();
+ gainConfig.setEnabled(true);
+
+ gainConfig->gain_green_b = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff);
+ gainConfig->gain_blue = std::clamp<int>(256 * frameContext.awb.gains.b(), 0, 0x3ff);
+ gainConfig->gain_red = std::clamp<int>(256 * frameContext.awb.gains.r(), 0, 0x3ff);
+ gainConfig->gain_green_r = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff);
+
+ /* If we have already set the AWB measurement parameters, return. */
+ if (frame > 0)
+ return;
+
+ auto awbConfig = params->block<BlockType::Awb>();
+ awbConfig.setEnabled(true);
+
+ /* Configure the measure window for AWB. */
+ awbConfig->awb_wnd = context.configuration.awb.measureWindow;
+
+ /* Number of frames to use to estimate the means (0 means 1 frame). */
+ awbConfig->frames = 0;
+
+ /* Select RGB or YCbCr means measurement. */
+ if (rgbMode_) {
+ awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB;
+
+ /*
+		 * For RGB-based measurements, pixels are selected with maximum
+		 * red, green and blue thresholds that are set in the awb_ref_cr,
+		 * min_y and awb_ref_cb fields respectively. The other values are
+		 * not used; set them to 0.
+ */
+ awbConfig->awb_ref_cr = 250;
+ awbConfig->min_y = 250;
+ awbConfig->awb_ref_cb = 250;
+
+ awbConfig->max_y = 0;
+ awbConfig->min_c = 0;
+ awbConfig->max_csum = 0;
+ } else {
+ awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR;
+
+ /* Set the reference Cr and Cb (AWB target) to white. */
+ awbConfig->awb_ref_cb = 128;
+ awbConfig->awb_ref_cr = 128;
+
+ /*
+ * Filter out pixels based on luminance and chrominance values.
+ * The acceptable luma values are specified as a [16, 250]
+ * range, while the acceptable chroma values are specified with
+ * a minimum of 16 and a maximum Cb+Cr sum of 250.
+ */
+ awbConfig->min_y = 16;
+ awbConfig->max_y = 250;
+ awbConfig->min_c = 16;
+ awbConfig->max_csum = 250;
+ }
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Awb::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+	IPAActiveState &activeState = context.activeState;
+	RGB<double> rgbMeans;
+
+	metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled);
+	metadata.set(controls::ColourGains, {
+		static_cast<float>(frameContext.awb.gains.r()),
+		static_cast<float>(frameContext.awb.gains.b())
+	});
+	metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK);
+
+	if (!stats || !(stats->meas_type & RKISP1_CIF_ISP_STAT_AWB)) {
+		LOG(RkISP1Awb, Error) << "AWB data is missing in statistics";
+		return;
+	}
+
+	/* Only dereference the statistics after the validity check. */
+	const rkisp1_cif_isp_stat *params = &stats->params;
+	const rkisp1_cif_isp_awb_stat *awb = &params->awb;
+
+ if (rgbMode_) {
+ rgbMeans = {{
+ static_cast<double>(awb->awb_mean[0].mean_y_or_g),
+ static_cast<double>(awb->awb_mean[0].mean_cr_or_r),
+ static_cast<double>(awb->awb_mean[0].mean_cb_or_b)
+ }};
+ } else {
+ /* Get the YCbCr mean values */
+ Vector<double, 3> yuvMeans({
+ static_cast<double>(awb->awb_mean[0].mean_y_or_g),
+ static_cast<double>(awb->awb_mean[0].mean_cb_or_b),
+ static_cast<double>(awb->awb_mean[0].mean_cr_or_r)
+ });
+
+ /*
+ * Convert from YCbCr to RGB. The hardware uses the following
+ * formulas:
+ *
+ * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B
+ * Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B
+ * Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B
+ *
+ * This seems to be based on limited range BT.601 with Q1.6
+ * precision.
+ *
+ * The inverse matrix is:
+ *
+		 * [[1.1636, -0.0623,  1.6008]
+		 *  [1.1636, -0.4045, -0.7949]
+		 *  [1.1636,  1.9912, -0.0250]]
+ */
+ static const Matrix<double, 3, 3> yuv2rgbMatrix({
+ 1.1636, -0.0623, 1.6008,
+ 1.1636, -0.4045, -0.7949,
+ 1.1636, 1.9912, -0.0250
+ });
+ static const Vector<double, 3> yuv2rgbOffset({
+ 16, 128, 128
+ });
+
+ rgbMeans = yuv2rgbMatrix * (yuvMeans - yuv2rgbOffset);
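+		/*
+		 * Sanity check example: an achromatic input (Y, Cb, Cr) =
+		 * (126, 128, 128) maps to R = G = B = 1.1636 * 110 = 128.
+		 */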
+
+ /*
+ * Due to hardware rounding errors in the YCbCr means, the
+ * calculated RGB means may be negative. This would lead to
+ * negative gains, messing up calculation. Prevent this by
+ * clamping the means to positive values.
+ */
+ rgbMeans = rgbMeans.max(0.0);
+ }
+
+	/*
+	 * The ISP computes the AWB means after applying the colour gains.
+	 * Divide by the gains that were used to get the raw means from the
+	 * sensor.
+	 */
+ rgbMeans /= frameContext.awb.gains;
+
+ /*
+ * If the means are too small we don't have enough information to
+ * meaningfully calculate gains. Freeze the algorithm in that case.
+ */
+ if (rgbMeans.r() < kMeanMinThreshold && rgbMeans.g() < kMeanMinThreshold &&
+ rgbMeans.b() < kMeanMinThreshold)
+ return;
+
+ activeState.awb.temperatureK = estimateCCT(rgbMeans);
+
+ /*
+ * Estimate the red and blue gains to apply in a grey world. The green
+ * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the
+ * divisor to a minimum value of 1.0.
+ */
+ RGB<double> gains({
+ rgbMeans.g() / std::max(rgbMeans.r(), 1.0),
+ 1.0,
+ rgbMeans.g() / std::max(rgbMeans.b(), 1.0)
+ });
+
+ /*
+ * Clamp the gain values to the hardware, which expresses gains as Q2.8
+ * unsigned integer values. Set the minimum just above zero to avoid
+ * divisions by zero when computing the raw means in subsequent
+ * iterations.
+ */
+ gains = gains.max(1.0 / 256).min(1023.0 / 256);
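+	/*
+	 * E.g. a gain of 1.5 maps to 256 * 1.5 = 384 in Q2.8; the
+	 * representable range is [1/256, 1023/256], roughly [0.004, 3.996].
+	 */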
+
+ /* Filter the values to avoid oscillations. */
+ double speed = 0.2;
+ gains = gains * speed + activeState.awb.gains.automatic * (1 - speed);
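+	/*
+	 * With speed 0.2, roughly 90% of a step change is applied within
+	 * about ten frames (0.8^10 is close to 0.11).
+	 */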
+
+ activeState.awb.gains.automatic = gains;
+
+ LOG(RkISP1Awb, Debug)
+ << std::showpoint
+ << "Means " << rgbMeans << ", gains "
+ << activeState.awb.gains.automatic << ", temp "
+ << activeState.awb.temperatureK << "K";
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/awb.h b/src/ipa/rkisp1/algorithms/awb.h
new file mode 100644
index 00000000..e4248048
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/awb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * AWB control algorithm
+ */
+
+#pragma once
+
+#include <optional>
+
+#include "libipa/interpolator.h"
+#include "libipa/vector.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Awb : public Algorithm
+{
+public:
+ Awb();
+ ~Awb() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ std::optional<Interpolator<Vector<double, 2>>> colourGainCurve_;
+ bool rgbMode_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp
new file mode 100644
index 00000000..98cb7145
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/blc.cpp
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Black Level Correction control
+ */
+
+#include "blc.h"
+
+#include <linux/videodev2.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+/**
+ * \file blc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class BlackLevelCorrection
+ * \brief RkISP1 Black Level Correction control
+ *
+ * The pixels output by the camera normally include a black level, because
+ * sensors do not always report a signal level of '0' for black. Pixels at or
+ * below this level should be considered black. To achieve that, the RkISP BLC
+ * algorithm subtracts a configurable offset from all pixels.
+ *
+ * The black level can be measured at runtime from an optical dark region of the
+ * camera sensor, or measured during the camera tuning process. The first option
+ * isn't currently supported.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Blc)
+
+BlackLevelCorrection::BlackLevelCorrection()
+{
+ /*
+ * This is a bit of a hack. In raw mode no black level correction
+ * happens. This flag is used to ensure the metadata gets populated with
+ * the black level which is needed to capture proper raw images for
+ * tuning.
+ */
+ supportsRaw_ = true;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int BlackLevelCorrection::init(IPAContext &context, const YamlObject &tuningData)
+{
+ std::optional<int16_t> levelRed = tuningData["R"].get<int16_t>();
+ std::optional<int16_t> levelGreenR = tuningData["Gr"].get<int16_t>();
+ std::optional<int16_t> levelGreenB = tuningData["Gb"].get<int16_t>();
+ std::optional<int16_t> levelBlue = tuningData["B"].get<int16_t>();
+ bool tuningHasLevels = levelRed && levelGreenR && levelGreenB && levelBlue;
+
+ auto blackLevel = context.camHelper->blackLevel();
+ if (!blackLevel) {
+ /*
+ * Not all camera sensor helpers have been updated with black
+ * levels. Print a warning and fall back to the levels from the
+ * tuning data to preserve backward compatibility. This should
+ * be removed once all helpers provide the data.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "No black levels provided by camera sensor helper"
+ << ", please fix";
+
+ blackLevelRed_ = levelRed.value_or(4096);
+ blackLevelGreenR_ = levelGreenR.value_or(4096);
+ blackLevelGreenB_ = levelGreenB.value_or(4096);
+ blackLevelBlue_ = levelBlue.value_or(4096);
+ } else if (tuningHasLevels) {
+ /*
+ * If black levels are provided in the tuning file, use them to
+ * avoid breaking existing camera tuning. This is deprecated and
+ * will be removed.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "Deprecated: black levels overwritten by tuning file";
+
+ blackLevelRed_ = *levelRed;
+ blackLevelGreenR_ = *levelGreenR;
+ blackLevelGreenB_ = *levelGreenB;
+ blackLevelBlue_ = *levelBlue;
+ } else {
+ blackLevelRed_ = *blackLevel;
+ blackLevelGreenR_ = *blackLevel;
+ blackLevelGreenB_ = *blackLevel;
+ blackLevelBlue_ = *blackLevel;
+ }
+
+ LOG(RkISP1Blc, Debug)
+ << "Black levels: red " << blackLevelRed_
+ << ", green (red) " << blackLevelGreenR_
+ << ", green (blue) " << blackLevelGreenB_
+ << ", blue " << blackLevelBlue_;
+
+ return 0;
+}
+
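+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */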
+int BlackLevelCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ /*
+ * BLC on ISP versions that include the companding block requires usage
+ * of the extensible parameters format.
+ */
+ supported_ = context.configuration.paramFormat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS ||
+ !context.hw->compand;
+
+ if (!supported_)
+ LOG(RkISP1Blc, Warning)
+ << "BLC in companding block requires extensible parameters";
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void BlackLevelCorrection::prepare(IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ if (context.configuration.raw)
+ return;
+
+ if (frame > 0)
+ return;
+
+ if (!supported_)
+ return;
+
+ if (context.hw->compand) {
+ auto config = params->block<BlockType::CompandBls>();
+ config.setEnabled(true);
+
+ /*
+ * Scale up to the 20-bit black levels used by the companding
+ * block.
+ */
+ config->r = blackLevelRed_ << 4;
+ config->gr = blackLevelGreenR_ << 4;
+ config->gb = blackLevelGreenB_ << 4;
+ config->b = blackLevelBlue_ << 4;
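+
+		/*
+		 * E.g. a black level of 4096 in the 16-bit range becomes
+		 * 65536 here, and 256 in the 12-bit BLS case below.
+		 */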
+ } else {
+ auto config = params->block<BlockType::Bls>();
+ config.setEnabled(true);
+
+ config->enable_auto = 0;
+
+ /* Scale down to the 12-bit black levels used by the BLS block. */
+ config->fixed_val.r = blackLevelRed_ >> 4;
+ config->fixed_val.gr = blackLevelGreenR_ >> 4;
+ config->fixed_val.gb = blackLevelGreenB_ >> 4;
+ config->fixed_val.b = blackLevelBlue_ >> 4;
+ }
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelRed_),
+ static_cast<int32_t>(blackLevelGreenR_),
+ static_cast<int32_t>(blackLevelGreenB_),
+ static_cast<int32_t>(blackLevelBlue_) });
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h
new file mode 100644
index 00000000..f797ae44
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/blc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Black Level Correction control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class BlackLevelCorrection : public Algorithm
+{
+public:
+ BlackLevelCorrection();
+ ~BlackLevelCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ bool supported_;
+
+ int16_t blackLevelRed_;
+ int16_t blackLevelGreenR_;
+ int16_t blackLevelGreenB_;
+ int16_t blackLevelBlue_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/ccm.cpp b/src/ipa/rkisp1/algorithms/ccm.cpp
new file mode 100644
index 00000000..e2b5cf4d
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/ccm.cpp
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Color Correction Matrix control algorithm
+ */
+
+#include "ccm.h"
+
+#include <map>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/fixedpoint.h"
+#include "libipa/interpolator.h"
+
+/**
+ * \file ccm.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Ccm
+ * \brief A color correction matrix algorithm
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Ccm)
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Ccm::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ int ret = ccm_.readYaml(tuningData["ccms"], "ct", "ccm");
+ if (ret < 0) {
+ LOG(RkISP1Ccm, Warning)
+ << "Failed to parse 'ccm' "
+ << "parameter from tuning file; falling back to unit matrix";
+ ccm_.setData({ { 0, Matrix<float, 3, 3>::identity() } });
+ }
+
+ ret = offsets_.readYaml(tuningData["ccms"], "ct", "offsets");
+ if (ret < 0) {
+ LOG(RkISP1Ccm, Warning)
+ << "Failed to parse 'offsets' "
+ << "parameter from tuning file; falling back to zero offsets";
+
+ offsets_.setData({ { 0, Matrix<int16_t, 3, 1>({ 0, 0, 0 }) } });
+ }
+
+ return 0;
+}
+
+void Ccm::setParameters(struct rkisp1_cif_isp_ctk_config &config,
+ const Matrix<float, 3, 3> &matrix,
+ const Matrix<int16_t, 3, 1> &offsets)
+{
+ /*
+ * 4 bit integer and 7 bit fractional, ranging from -8 (0x400) to
+ * +7.992 (0x3ff)
+ */
+ for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned int j = 0; j < 3; j++)
+ config.coeff[i][j] =
+ floatingToFixedPoint<4, 7, uint16_t, double>(matrix[i][j]);
+ }
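+	/*
+	 * In this Q4.7 representation, a coefficient of 1.0 encodes as 128
+	 * (0x080) and -1.0 as 0x780 in two's complement.
+	 */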
+
+ for (unsigned int i = 0; i < 3; i++)
+ config.ct_offset[i] = offsets[i][0] & 0xfff;
+
+ LOG(RkISP1Ccm, Debug) << "Setting matrix " << matrix;
+ LOG(RkISP1Ccm, Debug) << "Setting offsets " << offsets;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Ccm::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ uint32_t ct = context.activeState.awb.temperatureK;
+
+ /*
+ * \todo The colour temperature will likely be noisy, add filtering to
+ * avoid updating the CCM matrix all the time.
+ */
+ if (frame > 0 && ct == ct_) {
+ frameContext.ccm.ccm = context.activeState.ccm.ccm;
+ return;
+ }
+
+ ct_ = ct;
+ Matrix<float, 3, 3> ccm = ccm_.getInterpolated(ct);
+ Matrix<int16_t, 3, 1> offsets = offsets_.getInterpolated(ct);
+
+ context.activeState.ccm.ccm = ccm;
+ frameContext.ccm.ccm = ccm;
+
+ auto config = params->block<BlockType::Ctk>();
+ config.setEnabled(true);
+ setParameters(*config, ccm, offsets);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Ccm::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ float m[9];
+ for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned int j = 0; j < 3; j++)
+ m[i * 3 + j] = frameContext.ccm.ccm[i][j];
+ }
+ metadata.set(controls::ColourCorrectionMatrix, m);
+}
+
+REGISTER_IPA_ALGORITHM(Ccm, "Ccm")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/ccm.h b/src/ipa/rkisp1/algorithms/ccm.h
new file mode 100644
index 00000000..a5d9a9a4
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/ccm.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Color Correction Matrix control algorithm
+ */
+
+#pragma once
+
+#include <linux/rkisp1-config.h>
+
+#include "libcamera/internal/matrix.h"
+
+#include "libipa/interpolator.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Ccm : public Algorithm
+{
+public:
+ Ccm() {}
+ ~Ccm() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ void parseYaml(const YamlObject &tuningData);
+ void setParameters(struct rkisp1_cif_isp_ctk_config &config,
+ const Matrix<float, 3, 3> &matrix,
+ const Matrix<int16_t, 3, 1> &offsets);
+
+ unsigned int ct_;
+ Interpolator<Matrix<float, 3, 3>> ccm_;
+ Interpolator<Matrix<int16_t, 3, 1>> offsets_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/cproc.cpp b/src/ipa/rkisp1/algorithms/cproc.cpp
new file mode 100644
index 00000000..d1fff699
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/cproc.cpp
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Color Processing control
+ */
+
+#include "cproc.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file cproc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class ColorProcessing
+ * \brief RkISP1 Color Processing control
+ *
+ * The ColorProcessing algorithm is responsible for applying brightness,
+ * contrast and saturation corrections. The values are directly provided
+ * through requests by the corresponding controls.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1CProc)
+
+namespace {
+
+constexpr float kDefaultBrightness = 0.0f;
+constexpr float kDefaultContrast = 1.0f;
+constexpr float kDefaultSaturation = 1.0f;
+
+int convertBrightness(const float v)
+{
+ return std::clamp<int>(std::lround(v * 128), -128, 127);
+}
+
+int convertContrastOrSaturation(const float v)
+{
+ return std::clamp<int>(std::lround(v * 128), 0, 255);
+}
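+
+/*
+ * For example, convertBrightness(0.5) returns 64 and
+ * convertContrastOrSaturation(1.5) returns 192, as the hardware uses a
+ * fixed-point scale of 128.
+ */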
+
+} /* namespace */
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int ColorProcessing::init(IPAContext &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+{
+ auto &cmap = context.ctrlMap;
+
+ cmap[&controls::Brightness] = ControlInfo(-1.0f, 0.993f, kDefaultBrightness);
+ cmap[&controls::Contrast] = ControlInfo(0.0f, 1.993f, kDefaultContrast);
+ cmap[&controls::Saturation] = ControlInfo(0.0f, 1.993f, kDefaultSaturation);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int ColorProcessing::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ auto &cproc = context.activeState.cproc;
+
+ cproc.brightness = convertBrightness(kDefaultBrightness);
+ cproc.contrast = convertContrastOrSaturation(kDefaultContrast);
+ cproc.saturation = convertContrastOrSaturation(kDefaultSaturation);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void ColorProcessing::queueRequest(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &cproc = context.activeState.cproc;
+ bool update = false;
+
+ if (frame == 0)
+ update = true;
+
+ const auto &brightness = controls.get(controls::Brightness);
+ if (brightness) {
+ int value = convertBrightness(*brightness);
+ if (cproc.brightness != value) {
+ cproc.brightness = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set brightness to " << value;
+ }
+
+ const auto &contrast = controls.get(controls::Contrast);
+ if (contrast) {
+ int value = convertContrastOrSaturation(*contrast);
+ if (cproc.contrast != value) {
+ cproc.contrast = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set contrast to " << value;
+ }
+
+ const auto saturation = controls.get(controls::Saturation);
+ if (saturation) {
+ int value = convertContrastOrSaturation(*saturation);
+ if (cproc.saturation != value) {
+ cproc.saturation = value;
+ update = true;
+ }
+
+ LOG(RkISP1CProc, Debug) << "Set saturation to " << value;
+ }
+
+ frameContext.cproc.brightness = cproc.brightness;
+ frameContext.cproc.contrast = cproc.contrast;
+ frameContext.cproc.saturation = cproc.saturation;
+ frameContext.cproc.update = update;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void ColorProcessing::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ /* Check if the algorithm configuration has been updated. */
+ if (!frameContext.cproc.update)
+ return;
+
+ auto config = params->block<BlockType::Cproc>();
+ config.setEnabled(true);
+ config->brightness = frameContext.cproc.brightness;
+ config->contrast = frameContext.cproc.contrast;
+ config->sat = frameContext.cproc.saturation;
+}
+
+REGISTER_IPA_ALGORITHM(ColorProcessing, "ColorProcessing")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/cproc.h b/src/ipa/rkisp1/algorithms/cproc.h
new file mode 100644
index 00000000..fd38fd17
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/cproc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Color Processing control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class ColorProcessing : public Algorithm
+{
+public:
+ ColorProcessing() = default;
+ ~ColorProcessing() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpcc.cpp b/src/ipa/rkisp1/algorithms/dpcc.cpp
new file mode 100644
index 00000000..78946281
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpcc.cpp
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Defect Pixel Cluster Correction control
+ */
+
+#include "dpcc.h"
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file dpcc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class DefectPixelClusterCorrection
+ * \brief RkISP1 Defect Pixel Cluster Correction control
+ *
+ * Depending on the sensor quality, some pixels can be defective and then
+ * appear significantly brighter or darker than the other pixels.
+ *
+ * The Defect Pixel Cluster Correction algorithm is responsible for minimizing
+ * the impact of the defective pixels. This can be done with algorithms applied
+ * at run time (on-the-fly method) or with a table of defective pixels. Only
+ * the first method is supported for the moment.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Dpcc)
+
+DefectPixelClusterCorrection::DefectPixelClusterCorrection()
+ : config_({})
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int DefectPixelClusterCorrection::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ config_.mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
+ config_.output_mode = RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER
+ | RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER;
+
+ config_.set_use = tuningData["fixed-set"].get<bool>(false)
+ ? RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET : 0;
+
+ /* Get all defined sets to apply (up to 3). */
+ const YamlObject &setsObject = tuningData["sets"];
+ if (!setsObject.isList()) {
+ LOG(RkISP1Dpcc, Error)
+ << "'sets' parameter not found in tuning file";
+ return -EINVAL;
+ }
+
+ if (setsObject.size() > RKISP1_CIF_ISP_DPCC_METHODS_MAX) {
+ LOG(RkISP1Dpcc, Error)
+ << "'sets' size in tuning file (" << setsObject.size()
+ << ") exceeds the maximum hardware capacity (3)";
+ return -EINVAL;
+ }
+
+ for (std::size_t i = 0; i < setsObject.size(); ++i) {
+ struct rkisp1_cif_isp_dpcc_methods_config &method = config_.methods[i];
+ const YamlObject &set = setsObject[i];
+ uint16_t value;
+
+ /* Enable set if described in YAML tuning file. */
+ config_.set_use |= 1 << i;
+
+ /* PG Method */
+ const YamlObject &pgObject = set["pg-factor"];
+
+ if (pgObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE;
+
+ value = pgObject["green"].get<uint16_t>(0);
+ method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_G(value);
+ }
+
+ if (pgObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE;
+
+ value = pgObject["red-blue"].get<uint16_t>(0);
+ method.pg_fac |= RKISP1_CIF_ISP_DPCC_PG_FAC_RB(value);
+ }
+
+ /* RO Method */
+ const YamlObject &roObject = set["ro-limits"];
+
+ if (roObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE;
+
+ value = roObject["green"].get<uint16_t>(0);
+ config_.ro_limits |=
+ RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(i, value);
+ }
+
+ if (roObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE;
+
+ value = roObject["red-blue"].get<uint16_t>(0);
+ config_.ro_limits |=
+ RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(i, value);
+ }
+
+ /* RG Method */
+ const YamlObject &rgObject = set["rg-factor"];
+ method.rg_fac = 0;
+
+ if (rgObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE;
+
+ value = rgObject["green"].get<uint16_t>(0);
+ method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_G(value);
+ }
+
+ if (rgObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE;
+
+ value = rgObject["red-blue"].get<uint16_t>(0);
+ method.rg_fac |= RKISP1_CIF_ISP_DPCC_RG_FAC_RB(value);
+ }
+
+ /* RND Method */
+ const YamlObject &rndOffsetsObject = set["rnd-offsets"];
+
+ if (rndOffsetsObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
+
+ value = rndOffsetsObject["green"].get<uint16_t>(0);
+ config_.rnd_offs |=
+ RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(i, value);
+ }
+
+ if (rndOffsetsObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
+
+ value = rndOffsetsObject["red-blue"].get<uint16_t>(0);
+ config_.rnd_offs |=
+ RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(i, value);
+ }
+
+ const YamlObject &rndThresholdObject = set["rnd-threshold"];
+ method.rnd_thresh = 0;
+
+ if (rndThresholdObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE;
+
+ value = rndThresholdObject["green"].get<uint16_t>(0);
+ method.rnd_thresh |=
+ RKISP1_CIF_ISP_DPCC_RND_THRESH_G(value);
+ }
+
+ if (rndThresholdObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE;
+
+ value = rndThresholdObject["red-blue"].get<uint16_t>(0);
+ method.rnd_thresh |=
+ RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(value);
+ }
+
+ /* LC Method */
+ const YamlObject &lcThresholdObject = set["line-threshold"];
+ method.line_thresh = 0;
+
+ if (lcThresholdObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
+
+ value = lcThresholdObject["green"].get<uint16_t>(0);
+ method.line_thresh |=
+ RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(value);
+ }
+
+ if (lcThresholdObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
+
+ value = lcThresholdObject["red-blue"].get<uint16_t>(0);
+ method.line_thresh |=
+ RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(value);
+ }
+
+ const YamlObject &lcTMadFactorObject = set["line-mad-factor"];
+ method.line_mad_fac = 0;
+
+ if (lcTMadFactorObject.contains("green")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE;
+
+ value = lcTMadFactorObject["green"].get<uint16_t>(0);
+ method.line_mad_fac |=
+ RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(value);
+ }
+
+ if (lcTMadFactorObject.contains("red-blue")) {
+ method.method |=
+ RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE;
+
+ value = lcTMadFactorObject["red-blue"].get<uint16_t>(0);
+ method.line_mad_fac |=
+ RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(value);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void DefectPixelClusterCorrection::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ if (frame > 0)
+ return;
+
+ auto config = params->block<BlockType::Dpcc>();
+ config.setEnabled(true);
+ *config = config_;
+}
+
+REGISTER_IPA_ALGORITHM(DefectPixelClusterCorrection, "DefectPixelClusterCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpcc.h b/src/ipa/rkisp1/algorithms/dpcc.h
new file mode 100644
index 00000000..b77766c3
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpcc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Defect Pixel Cluster Correction control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class DefectPixelClusterCorrection : public Algorithm
+{
+public:
+ DefectPixelClusterCorrection();
+ ~DefectPixelClusterCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+
+private:
+ rkisp1_cif_isp_dpcc_config config_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpf.cpp b/src/ipa/rkisp1/algorithms/dpf.cpp
new file mode 100644
index 00000000..cb6095da
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpf.cpp
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Denoise Pre-Filter control
+ */
+
+#include "dpf.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file dpf.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Dpf
+ * \brief RkISP1 Denoise Pre-Filter control
+ *
+ * The denoise pre-filter algorithm is a bilateral filter which combines a
+ * range filter and a domain filter. The denoise pre-filter is applied before
+ * demosaicing.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Dpf)
+
+Dpf::Dpf()
+ : config_({}), strengthConfig_({})
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Dpf::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ std::vector<uint8_t> values;
+
+ /*
+ * The domain kernel is configured with a 9x9 kernel for the green
+ * pixels, and a 13x9 or 9x9 kernel for red and blue pixels.
+ */
+ const YamlObject &dFObject = tuningData["DomainFilter"];
+
+ /*
+ * For the green component, we have the 9x9 kernel specified
+ * as 6 coefficients:
+ * Y
+ * ^
+ * 4 | 6 5 4 5 6
+ * 3 | 5 3 3 5
+ * 2 | 5 3 2 3 5
+ * 1 | 3 1 1 3
+ * 0 - 4 2 0 2 4
+ * -1 | 3 1 1 3
+ * -2 | 5 3 2 3 5
+ * -3 | 5 3 3 5
+ * -4 | 6 5 4 5 6
+ * +---------|--------> X
+ * -4....-1 0 1 2 3 4
+ */
+ values = dFObject["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS) {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'DomainFilter:g': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ << " elements, got " << values.size();
+ return -EINVAL;
+ }
+
+ std::copy_n(values.begin(), values.size(),
+ std::begin(config_.g_flt.spatial_coeff));
+
+ config_.g_flt.gr_enable = true;
+ config_.g_flt.gb_enable = true;
+
+ /*
+ * For the red and blue components, we have the 13x9 kernel specified
+ * as 6 coefficients:
+ *
+ * Y
+ * ^
+ * 4 | 6 5 4 3 4 5 6
+ * |
+ * 2 | 5 4 2 1 2 4 5
+ * |
+ * 0 - 5 3 1 0 1 3 5
+ * |
+ * -2 | 5 4 2 1 2 4 5
+ * |
+ * -4 | 6 5 4 3 4 5 6
+ * +-------------|------------> X
+ * -6 -4 -2 0 2 4 6
+ *
+ * For a 9x9 kernel, columns -6 and 6 are dropped, so coefficient
+ * number 6 is not used.
+ */
+ values = dFObject["rb"].getList<uint8_t>().value_or(std::vector<uint8_t>{});
+ if (values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS &&
+ values.size() != RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1) {
+ LOG(RkISP1Dpf, Error)
+ << "Invalid 'DomainFilter:rb': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS - 1
+ << " or " << RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ << " elements, got " << values.size();
+ return -EINVAL;
+ }
+
+ config_.rb_flt.fltsize = values.size() == RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS
+ ? RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9
+ : RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9;
+
+ std::copy_n(values.begin(), values.size(),
+ std::begin(config_.rb_flt.spatial_coeff));
+
+ config_.rb_flt.r_enable = true;
+ config_.rb_flt.b_enable = true;
+
+ /*
+ * The range kernel is configured with a noise level lookup table (NLL)
+ * which stores a piecewise linear function that characterizes the
+ * sensor noise profile as a noise level function curve (NLF).
+ */
+ const YamlObject &rFObject = tuningData["NoiseLevelFunction"];
+
+ std::vector<uint16_t> nllValues;
+ nllValues = rFObject["coeff"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (nllValues.size() != RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS) {
+ LOG(RkISP1Dpf, Error)
+			<< "Invalid 'NoiseLevelFunction:coeff': expected "
+ << RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS
+ << " elements, got " << nllValues.size();
+ return -EINVAL;
+ }
+
+ std::copy_n(nllValues.begin(), nllValues.size(),
+ std::begin(config_.nll.coeff));
+
+ std::string scaleMode = rFObject["scale-mode"].get<std::string>("");
+ if (scaleMode == "linear") {
+ config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LINEAR;
+ } else if (scaleMode == "logarithmic") {
+ config_.nll.scale_mode = RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC;
+ } else {
+ LOG(RkISP1Dpf, Error)
+			<< "Invalid 'NoiseLevelFunction:scale-mode': expected "
+ << "'linear' or 'logarithmic' value, got "
+ << scaleMode;
+ return -EINVAL;
+ }
+
+ const YamlObject &fSObject = tuningData["FilterStrength"];
+
+ strengthConfig_.r = fSObject["r"].get<uint16_t>(64);
+ strengthConfig_.g = fSObject["g"].get<uint16_t>(64);
+ strengthConfig_.b = fSObject["b"].get<uint16_t>(64);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Dpf::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &dpf = context.activeState.dpf;
+ bool update = false;
+
+ const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
+ if (denoise) {
+ LOG(RkISP1Dpf, Debug) << "Set denoise to " << *denoise;
+
+ switch (*denoise) {
+ case controls::draft::NoiseReductionModeOff:
+ if (dpf.denoise) {
+ dpf.denoise = false;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeMinimal:
+ case controls::draft::NoiseReductionModeHighQuality:
+ case controls::draft::NoiseReductionModeFast:
+ if (!dpf.denoise) {
+ dpf.denoise = true;
+ update = true;
+ }
+ break;
+ default:
+ LOG(RkISP1Dpf, Error)
+ << "Unsupported denoise value "
+ << *denoise;
+ break;
+ }
+ }
+
+ frameContext.dpf.denoise = dpf.denoise;
+ frameContext.dpf.update = update;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Dpf::prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ if (!frameContext.dpf.update && frame > 0)
+ return;
+
+ auto config = params->block<BlockType::Dpf>();
+ config.setEnabled(frameContext.dpf.denoise);
+
+ if (frameContext.dpf.denoise) {
+ *config = config_;
+
+ const auto &awb = context.configuration.awb;
+ const auto &lsc = context.configuration.lsc;
+
+ auto &mode = config->gain.mode;
+
+ /*
+ * The DPF needs to take into account the total amount of
+ * digital gain, which comes from the AWB and LSC modules. The
+ * DPF hardware can be programmed with a digital gain value
+ * manually, but can also use the gains supplied by the AWB and
+ * LSC modules automatically when they are enabled. Use that
+ * mode of operation as it simplifies control of the DPF.
+ */
+ if (awb.enabled && lsc.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS;
+ else if (awb.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS;
+ else if (lsc.enabled)
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS;
+ else
+ mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED;
+ }
+
+ if (frame == 0) {
+ auto strengthConfig = params->block<BlockType::DpfStrength>();
+ strengthConfig.setEnabled(true);
+ *strengthConfig = strengthConfig_;
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Dpf, "Dpf")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/dpf.h b/src/ipa/rkisp1/algorithms/dpf.h
new file mode 100644
index 00000000..2dd8cd36
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/dpf.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Denoise Pre-Filter control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Dpf : public Algorithm
+{
+public:
+ Dpf();
+ ~Dpf() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+
+private:
+ struct rkisp1_cif_isp_dpf_config config_;
+ struct rkisp1_cif_isp_dpf_strength_config strengthConfig_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/filter.cpp b/src/ipa/rkisp1/algorithms/filter.cpp
new file mode 100644
index 00000000..7598ef8a
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/filter.cpp
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Filter control
+ */
+
+#include "filter.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file filter.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Filter
+ * \brief RkISP1 Filter control
+ *
+ * Denoise and Sharpness filters will be applied by RkISP1 during the
+ * demosaicing step. The denoise filter is responsible for removing noise from
+ * the image, while the sharpness filter will enhance its acutance.
+ *
+ * \todo In the current version the denoise and sharpness controls are based on
+ * user controls. In a future version they should be controlled automatically by
+ * the algorithm.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Filter)
+
+static constexpr uint32_t kFiltLumWeightDefault = 0x00022040;
+static constexpr uint32_t kFiltModeDefault = 0x000004f2;
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void Filter::queueRequest(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ auto &filter = context.activeState.filter;
+ bool update = false;
+
+ const auto &sharpness = controls.get(controls::Sharpness);
+ if (sharpness) {
+ unsigned int value = std::round(std::clamp(*sharpness, 0.0f, 10.0f));
+
+ if (filter.sharpness != value) {
+ filter.sharpness = value;
+ update = true;
+ }
+
+ LOG(RkISP1Filter, Debug) << "Set sharpness to " << *sharpness;
+ }
+
+ const auto &denoise = controls.get(controls::draft::NoiseReductionMode);
+ if (denoise) {
+ LOG(RkISP1Filter, Debug) << "Set denoise to " << *denoise;
+
+ switch (*denoise) {
+ case controls::draft::NoiseReductionModeOff:
+ if (filter.denoise != 0) {
+ filter.denoise = 0;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeMinimal:
+ if (filter.denoise != 1) {
+ filter.denoise = 1;
+ update = true;
+ }
+ break;
+ case controls::draft::NoiseReductionModeHighQuality:
+ case controls::draft::NoiseReductionModeFast:
+ if (filter.denoise != 3) {
+ filter.denoise = 3;
+ update = true;
+ }
+ break;
+ default:
+ LOG(RkISP1Filter, Error)
+ << "Unsupported denoise value "
+ << *denoise;
+ break;
+ }
+ }
+
+ frameContext.filter.denoise = filter.denoise;
+ frameContext.filter.sharpness = filter.sharpness;
+ frameContext.filter.update = update;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void Filter::prepare([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext, RkISP1Params *params)
+{
+ /* Check if the algorithm configuration has been updated. */
+ if (!frameContext.filter.update)
+ return;
+
+ static constexpr uint16_t filt_fac_sh0[] = {
+ 0x04, 0x07, 0x0a, 0x0c, 0x10, 0x14, 0x1a, 0x1e, 0x24, 0x2a, 0x30
+ };
+
+ static constexpr uint16_t filt_fac_sh1[] = {
+ 0x04, 0x08, 0x0c, 0x10, 0x16, 0x1b, 0x20, 0x26, 0x2c, 0x30, 0x3f
+ };
+
+ static constexpr uint16_t filt_fac_mid[] = {
+ 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x13, 0x17, 0x1d, 0x22, 0x28
+ };
+
+ static constexpr uint16_t filt_fac_bl0[] = {
+ 0x02, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x10, 0x15, 0x1a, 0x24
+ };
+
+ static constexpr uint16_t filt_fac_bl1[] = {
+ 0x00, 0x00, 0x00, 0x02, 0x04, 0x04, 0x06, 0x08, 0x0d, 0x14, 0x20
+ };
+
+ static constexpr uint16_t filt_thresh_sh0[] = {
+ 0, 18, 26, 36, 41, 75, 90, 120, 170, 250, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_sh1[] = {
+ 0, 33, 44, 51, 67, 100, 120, 150, 200, 300, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_bl0[] = {
+ 0, 8, 13, 23, 26, 50, 60, 80, 140, 180, 1023
+ };
+
+ static constexpr uint16_t filt_thresh_bl1[] = {
+ 0, 2, 5, 10, 15, 20, 26, 51, 100, 150, 1023
+ };
+
+ static constexpr uint16_t stage1_select[] = {
+ 6, 6, 4, 4, 3, 3, 2, 2, 2, 1, 0
+ };
+
+ static constexpr uint16_t filt_chr_v_mode[] = {
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ };
+
+ static constexpr uint16_t filt_chr_h_mode[] = {
+ 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ };
+
+ uint8_t denoise = frameContext.filter.denoise;
+ uint8_t sharpness = frameContext.filter.sharpness;
+
+ auto config = params->block<BlockType::Flt>();
+ config.setEnabled(true);
+
+ config->fac_sh0 = filt_fac_sh0[sharpness];
+ config->fac_sh1 = filt_fac_sh1[sharpness];
+ config->fac_mid = filt_fac_mid[sharpness];
+ config->fac_bl0 = filt_fac_bl0[sharpness];
+ config->fac_bl1 = filt_fac_bl1[sharpness];
+
+ config->lum_weight = kFiltLumWeightDefault;
+ config->mode = kFiltModeDefault;
+ config->thresh_sh0 = filt_thresh_sh0[denoise];
+ config->thresh_sh1 = filt_thresh_sh1[denoise];
+ config->thresh_bl0 = filt_thresh_bl0[denoise];
+ config->thresh_bl1 = filt_thresh_bl1[denoise];
+ config->grn_stage1 = stage1_select[denoise];
+ config->chr_v_mode = filt_chr_v_mode[denoise];
+ config->chr_h_mode = filt_chr_h_mode[denoise];
+
+ /*
+ * Combined high denoising and high sharpening requires some
+ * adjustments to the configuration of the filters. A first stage
+ * filter with a lower strength must be selected, and the blur factors
+ * must be decreased.
+ */
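+	/*
+	 * For instance, denoise 10 with sharpness 8 selects a first stage
+	 * strength of 2, halves fac_bl0 and quarters fac_bl1.
+	 */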
+ if (denoise == 9) {
+ if (sharpness > 3)
+ config->grn_stage1 = 2;
+ } else if (denoise == 10) {
+ if (sharpness > 5)
+ config->grn_stage1 = 2;
+ else if (sharpness > 3)
+ config->grn_stage1 = 1;
+ }
+
+ if (denoise > 7) {
+ if (sharpness > 7) {
+ config->fac_bl0 /= 2;
+ config->fac_bl1 /= 4;
+ } else if (sharpness > 4) {
+ config->fac_bl0 = config->fac_bl0 * 3 / 4;
+ config->fac_bl1 /= 2;
+ }
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Filter, "Filter")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/filter.h b/src/ipa/rkisp1/algorithms/filter.h
new file mode 100644
index 00000000..8f858e57
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/filter.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Filter control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Filter : public Algorithm
+{
+public:
+ Filter() = default;
+ ~Filter() = default;
+
+ void queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/goc.cpp b/src/ipa/rkisp1/algorithms/goc.cpp
new file mode 100644
index 00000000..a9493678
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/goc.cpp
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Gamma out control
+ */
+#include "goc.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file goc.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class GammaOutCorrection
+ * \brief RkISP1 Gamma out correction
+ *
+ * This algorithm implements the gamma out curve for the RkISP1. It defaults to
+ * a gamma value of 2.2.
+ *
+ * As gamma is internally represented as a piecewise linear function with only
+ * 17 knots, the difference between gamma=2.2 and sRGB gamma is minimal.
+ * Therefore sRGB gamma was not implemented as a special case.
+ *
+ * Useful links:
+ * - https://www.cambridgeincolour.com/tutorials/gamma-correction.htm
+ * - https://en.wikipedia.org/wiki/SRGB
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Gamma)
+
+const float kDefaultGamma = 2.2f;
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int GammaOutCorrection::init(IPAContext &context, const YamlObject &tuningData)
+{
+ if (context.hw->numGammaOutSamples !=
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10) {
+ LOG(RkISP1Gamma, Error)
+ << "Gamma is not implemented for RkISP1 V12";
+ return -EINVAL;
+ }
+
+ defaultGamma_ = tuningData["gamma"].get<double>(kDefaultGamma);
+ context.ctrlMap[&controls::Gamma] = ControlInfo(0.1f, 10.0f, defaultGamma_);
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int GammaOutCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ context.activeState.goc.gamma = defaultGamma_;
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::queueRequest
+ */
+void GammaOutCorrection::queueRequest(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls)
+{
+ if (frame == 0)
+ frameContext.goc.update = true;
+
+ const auto &gamma = controls.get(controls::Gamma);
+ if (gamma) {
+ context.activeState.goc.gamma = *gamma;
+ frameContext.goc.update = true;
+ LOG(RkISP1Gamma, Debug) << "Set gamma to " << *gamma;
+ }
+
+ frameContext.goc.gamma = context.activeState.goc.gamma;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void GammaOutCorrection::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ ASSERT(context.hw->numGammaOutSamples ==
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10);
+
+ if (!frameContext.goc.update)
+ return;
+
+ /*
+	 * The logarithmic segment sizes as specified in the reference,
+	 * plus an additional 0 to make the loop easier.
+ */
+ static constexpr std::array<unsigned int, RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10> segments = {
+ 64, 64, 64, 64, 128, 128, 128, 128, 256,
+ 256, 256, 512, 512, 512, 512, 512, 0
+ };
+
+ auto config = params->block<BlockType::Goc>();
+ config.setEnabled(true);
+
+ __u16 *gamma_y = config->gamma_y;
+
+ unsigned x = 0;
+ for (const auto [i, size] : utils::enumerate(segments)) {
+ gamma_y[i] = std::pow(x / 4096.0, 1.0 / frameContext.goc.gamma) * 1023.0;
+ x += size;
+ }
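+	/*
+	 * With the default gamma of 2.2 the knots run from 0 at x = 0 through
+	 * roughly 154 at x = 64 up to 1023 at x = 4096.
+	 */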
+
+ config->mode = RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void GammaOutCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::Gamma, frameContext.goc.gamma);
+}
+
+REGISTER_IPA_ALGORITHM(GammaOutCorrection, "GammaOutCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/goc.h b/src/ipa/rkisp1/algorithms/goc.h
new file mode 100644
index 00000000..bb2ddfc9
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/goc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Gamma out control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class GammaOutCorrection : public Algorithm
+{
+public:
+ GammaOutCorrection() = default;
+ ~GammaOutCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context,
+ const IPACameraSensorInfo &configInfo) override;
+ void queueRequest(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const ControlList &controls) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ float defaultGamma_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/gsl.cpp b/src/ipa/rkisp1/algorithms/gsl.cpp
new file mode 100644
index 00000000..9604c0ac
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/gsl.cpp
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Gamma Sensor Linearization control
+ */
+
+#include "gsl.h"
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file gsl.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class GammaSensorLinearization
+ * \brief RkISP1 Gamma Sensor Linearization control
+ *
+ * This algorithm linearizes the sensor output to compensate for the sensor
+ * non-linearities by applying piecewise linear functions to the red, green and
+ * blue channels.
+ *
+ * The curves are specified in the tuning data and defined using 17 points.
+ *
+ * - The X coordinates are expressed using 16 intervals, with the first point
+ * at X coordinate 0. Each interval is expressed as a 2-bit value DX (from
+ * GAMMA_DX_1 to GAMMA_DX_16), stored in the RKISP1_CIF_ISP_GAMMA_DX_LO and
+ * RKISP1_CIF_ISP_GAMMA_DX_HI registers. The real interval is equal to
+ * \f$2^{dx+4}\f$. X coordinates are shared between the red, green and blue
+ * curves.
+ *
+ * - The Y coordinates are specified as 17 values separately for the
+ * red, green and blue channels, with a 12-bit resolution. Each value must be
+ * in the [-2048, 2047] range compared to the previous value.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Gsl)
+
+static constexpr unsigned int kDegammaXIntervals = 16;
+
+GammaSensorLinearization::GammaSensorLinearization()
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int GammaSensorLinearization::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ std::vector<uint16_t> xIntervals =
+ tuningData["x-intervals"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (xIntervals.size() != kDegammaXIntervals) {
+ LOG(RkISP1Gsl, Error)
+			<< "Invalid 'x-intervals' coordinates: expected "
+ << kDegammaXIntervals << " elements, got "
+ << xIntervals.size();
+
+ return -EINVAL;
+ }
+
+ /* Compute gammaDx_ intervals from xIntervals values */
+ gammaDx_[0] = 0;
+ gammaDx_[1] = 0;
+ for (unsigned int i = 0; i < kDegammaXIntervals; ++i)
+ gammaDx_[i / 8] |= (xIntervals[i] & 0x07) << ((i % 8) * 4);
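+
+	/*
+	 * Eight DX values are packed per 32-bit register, four bits apart;
+	 * e.g. a DX of 4 selects an interval of 2^(4+4) = 256.
+	 */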
+
+ const YamlObject &yObject = tuningData["y"];
+ if (!yObject.isDictionary()) {
+ LOG(RkISP1Gsl, Error)
+ << "Issue while parsing 'y' in tuning file: "
+ << "entry must be a dictionary";
+ return -EINVAL;
+ }
+
+ curveYr_ = yObject["red"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYr_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:red' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYr_.size();
+ return -EINVAL;
+ }
+
+ curveYg_ = yObject["green"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYg_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:green' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYg_.size();
+ return -EINVAL;
+ }
+
+ curveYb_ = yObject["blue"].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (curveYb_.size() != RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE) {
+ LOG(RkISP1Gsl, Error)
+ << "Invalid 'y:blue' coordinates: expected "
+ << RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE
+ << " elements, got " << curveYb_.size();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void GammaSensorLinearization::prepare([[maybe_unused]] IPAContext &context,
+ const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ if (frame > 0)
+ return;
+
+ auto config = params->block<BlockType::Sdg>();
+ config.setEnabled(true);
+
+ config->xa_pnts.gamma_dx0 = gammaDx_[0];
+ config->xa_pnts.gamma_dx1 = gammaDx_[1];
+
+ std::copy(curveYr_.begin(), curveYr_.end(), config->curve_r.gamma_y);
+ std::copy(curveYg_.begin(), curveYg_.end(), config->curve_g.gamma_y);
+ std::copy(curveYb_.begin(), curveYb_.end(), config->curve_b.gamma_y);
+}
+
+REGISTER_IPA_ALGORITHM(GammaSensorLinearization, "GammaSensorLinearization")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/gsl.h b/src/ipa/rkisp1/algorithms/gsl.h
new file mode 100644
index 00000000..91cf6efa
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/gsl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Gamma Sensor Linearization control
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class GammaSensorLinearization : public Algorithm
+{
+public:
+ GammaSensorLinearization();
+ ~GammaSensorLinearization() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+
+private:
+ uint32_t gammaDx_[2];
+ std::vector<uint16_t> curveYr_;
+ std::vector<uint16_t> curveYg_;
+ std::vector<uint16_t> curveYb_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/lsc.cpp b/src/ipa/rkisp1/algorithms/lsc.cpp
new file mode 100644
index 00000000..e47aa2f0
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lsc.cpp
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Lens Shading Correction control
+ */
+
+#include "lsc.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/lsc_polynomial.h"
+#include "linux/rkisp1-config.h"
+
+/**
+ * \file lsc.h
+ */
+
+namespace libcamera {
+
+namespace ipa {
+
+constexpr int kColourTemperatureChangeThreshold = 10;
+
+template<typename T>
+void interpolateVector(const std::vector<T> &a, const std::vector<T> &b,
+ std::vector<T> &dest, double lambda)
+{
+ assert(a.size() == b.size());
+ dest.resize(a.size());
+ for (size_t i = 0; i < a.size(); i++) {
+ dest[i] = a[i] * (1.0 - lambda) + b[i] * lambda;
+ }
+}
+
+template<>
+void Interpolator<rkisp1::algorithms::LensShadingCorrection::Components>::
+ interpolate(const rkisp1::algorithms::LensShadingCorrection::Components &a,
+ const rkisp1::algorithms::LensShadingCorrection::Components &b,
+ rkisp1::algorithms::LensShadingCorrection::Components &dest,
+ double lambda)
+{
+ interpolateVector(a.r, b.r, dest.r, lambda);
+ interpolateVector(a.gr, b.gr, dest.gr, lambda);
+ interpolateVector(a.gb, b.gb, dest.gb, lambda);
+ interpolateVector(a.b, b.b, dest.b, lambda);
+}
+
+} /* namespace ipa */
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class LensShadingCorrection
+ * \brief RkISP1 Lens Shading Correction control
+ *
+ * Due to the optical characteristics of the lens, the light intensity received
+ * by the sensor is not uniform.
+ *
+ * The Lens Shading Correction algorithm applies multipliers to all pixels
+ * to compensate for the lens shading effect. The coefficients are
+ * specified in a downscaled table in the YAML tuning file.
+ */
+
+LOG_DEFINE_CATEGORY(RkISP1Lsc)
+
+class LscPolynomialLoader
+{
+public:
+ LscPolynomialLoader(const Size &sensorSize,
+ const Rectangle &cropRectangle,
+ const std::vector<double> &xSizes,
+ const std::vector<double> &ySizes)
+ : sensorSize_(sensorSize),
+ cropRectangle_(cropRectangle),
+ xSizes_(xSizes),
+ ySizes_(ySizes)
+ {
+ }
+
+ int parseLscData(const YamlObject &yamlSets,
+ std::map<unsigned int, LensShadingCorrection::Components> &lscData)
+ {
+ const auto &sets = yamlSets.asList();
+ for (const auto &yamlSet : sets) {
+ std::optional<LscPolynomial> pr, pgr, pgb, pb;
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (lscData.count(ct)) {
+ LOG(RkISP1Lsc, Error)
+ << "Multiple sets found for "
+					<< "colour temperature " << ct;
+ return -EINVAL;
+ }
+
+ LensShadingCorrection::Components &set = lscData[ct];
+ pr = yamlSet["r"].get<LscPolynomial>();
+ pgr = yamlSet["gr"].get<LscPolynomial>();
+ pgb = yamlSet["gb"].get<LscPolynomial>();
+ pb = yamlSet["b"].get<LscPolynomial>();
+
+			if (!(pr && pgr && pgb && pb)) {
+ LOG(RkISP1Lsc, Error)
+ << "Failed to parse polynomial for "
+ << "colour temperature " << ct;
+ return -EINVAL;
+ }
+
+ set.ct = ct;
+ pr->setReferenceImageSize(sensorSize_);
+ pgr->setReferenceImageSize(sensorSize_);
+ pgb->setReferenceImageSize(sensorSize_);
+ pb->setReferenceImageSize(sensorSize_);
+ set.r = samplePolynomial(*pr);
+ set.gr = samplePolynomial(*pgr);
+ set.gb = samplePolynomial(*pgb);
+ set.b = samplePolynomial(*pb);
+ }
+
+ if (lscData.empty()) {
+ LOG(RkISP1Lsc, Error) << "Failed to load any sets";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+private:
+ /*
+	 * The LSC grid has custom spacing defined on half the range (see
+	 * parseSizes() for details). For easier handling, this function
+	 * converts the sizes vector to positions and mirrors them. E.g.:
+ *
+ * input: | 0.2 | 0.3 |
+ * output: 0.0 0.2 0.5 0.8 1.0
+ */
+ std::vector<double> sizesListToPositions(const std::vector<double> &sizes)
+ {
+ const int half = sizes.size();
+ std::vector<double> res(half * 2 + 1);
+ double x = 0.0;
+
+ res[half] = 0.5;
+ for (int i = 1; i <= half; i++) {
+ x += sizes[half - i];
+ res[half - i] = 0.5 - x;
+ res[half + i] = 0.5 + x;
+ }
+
+ return res;
+ }
+
+ std::vector<uint16_t> samplePolynomial(const LscPolynomial &poly)
+ {
+ constexpr int k = RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
+
+ double m = poly.getM();
+ double x0 = cropRectangle_.x / m;
+ double y0 = cropRectangle_.y / m;
+ double w = cropRectangle_.width / m;
+ double h = cropRectangle_.height / m;
+ std::vector<uint16_t> res;
+
+ assert(xSizes_.size() * 2 + 1 == k);
+ assert(ySizes_.size() * 2 + 1 == k);
+
+ res.reserve(k * k);
+
+ std::vector<double> xPos(sizesListToPositions(xSizes_));
+ std::vector<double> yPos(sizesListToPositions(ySizes_));
+
+ for (int y = 0; y < k; y++) {
+ for (int x = 0; x < k; x++) {
+ double xp = x0 + xPos[x] * w;
+ double yp = y0 + yPos[y] * h;
+ /*
+ * The hardware uses 2.10 fixed point format and
+ * limits the legal values to [1..3.999]. Scale
+ * and clamp the sampled value accordingly.
+ */
+ int v = static_cast<int>(
+ poly.sampleAtNormalizedPixelPos(xp, yp) *
+ 1024);
+ v = std::min(std::max(v, 1024), 4095);
+ res.push_back(v);
+ }
+ }
+ return res;
+ }
+
+ Size sensorSize_;
+ Rectangle cropRectangle_;
+ const std::vector<double> &xSizes_;
+ const std::vector<double> &ySizes_;
+};
+
+class LscTableLoader
+{
+public:
+ int parseLscData(const YamlObject &yamlSets,
+ std::map<unsigned int, LensShadingCorrection::Components> &lscData)
+ {
+ const auto &sets = yamlSets.asList();
+
+ for (const auto &yamlSet : sets) {
+ uint32_t ct = yamlSet["ct"].get<uint32_t>(0);
+
+ if (lscData.count(ct)) {
+ LOG(RkISP1Lsc, Error)
+					<< "Multiple sets found for colour temperature "
+ << ct;
+ return -EINVAL;
+ }
+
+ LensShadingCorrection::Components &set = lscData[ct];
+
+ set.ct = ct;
+ set.r = parseTable(yamlSet, "r");
+ set.gr = parseTable(yamlSet, "gr");
+ set.gb = parseTable(yamlSet, "gb");
+ set.b = parseTable(yamlSet, "b");
+
+ if (set.r.empty() || set.gr.empty() ||
+ set.gb.empty() || set.b.empty()) {
+ LOG(RkISP1Lsc, Error)
+					<< "Set for colour temperature " << ct
+ << " is missing tables";
+ return -EINVAL;
+ }
+ }
+
+ if (lscData.empty()) {
+ LOG(RkISP1Lsc, Error) << "Failed to load any sets";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+private:
+ std::vector<uint16_t> parseTable(const YamlObject &tuningData,
+ const char *prop)
+ {
+ static constexpr unsigned int kLscNumSamples =
+ RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX;
+
+ std::vector<uint16_t> table =
+ tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{});
+ if (table.size() != kLscNumSamples) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: expected "
+ << kLscNumSamples
+ << " elements, got " << table.size();
+ return {};
+ }
+
+ return table;
+ }
+};
+
+static std::vector<double> parseSizes(const YamlObject &tuningData,
+ const char *prop)
+{
+ std::vector<double> sizes =
+ tuningData[prop].getList<double>().value_or(std::vector<double>{});
+ if (sizes.size() != RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: expected "
+ << RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE
+ << " elements, got " << sizes.size();
+ return {};
+ }
+
+ /*
+ * The sum of all elements must be 0.5 to satisfy hardware constraints.
+ * Validate it here, allowing a 1% tolerance as rounding errors may
+ * prevent an exact match (further adjustments will be performed in
+ * LensShadingCorrection::prepare()).
+ */
+ double sum = std::accumulate(sizes.begin(), sizes.end(), 0.0);
+ if (sum < 0.495 || sum > 0.505) {
+ LOG(RkISP1Lsc, Error)
+ << "Invalid '" << prop << "' values: sum of the elements"
+ << " should be 0.5, got " << sum;
+ return {};
+ }
+
+ return sizes;
+}
+
+LensShadingCorrection::LensShadingCorrection()
+ : lastAppliedCt_(0), lastAppliedQuantizedCt_(0)
+{
+	sets_.setQuantization(kColourTemperatureChangeThreshold);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int LensShadingCorrection::init(IPAContext &context,
+ const YamlObject &tuningData)
+{
+ xSize_ = parseSizes(tuningData, "x-size");
+ ySize_ = parseSizes(tuningData, "y-size");
+
+ if (xSize_.empty() || ySize_.empty())
+ return -EINVAL;
+
+ /* Get all defined sets to apply. */
+ const YamlObject &yamlSets = tuningData["sets"];
+ if (!yamlSets.isList()) {
+ LOG(RkISP1Lsc, Error)
+ << "'sets' parameter not found in tuning file";
+ return -EINVAL;
+ }
+
+ std::map<unsigned int, Components> lscData;
+ int res = 0;
+ std::string type = tuningData["type"].get<std::string>("table");
+ if (type == "table") {
+ LOG(RkISP1Lsc, Debug) << "Loading tabular LSC data.";
+ auto loader = LscTableLoader();
+ res = loader.parseLscData(yamlSets, lscData);
+ } else if (type == "polynomial") {
+ LOG(RkISP1Lsc, Debug) << "Loading polynomial LSC data.";
+ auto loader = LscPolynomialLoader(context.sensorInfo.activeAreaSize,
+ context.sensorInfo.analogCrop,
+ xSize_,
+ ySize_);
+ res = loader.parseLscData(yamlSets, lscData);
+ } else {
+ LOG(RkISP1Lsc, Error) << "Unsupported LSC data type '"
+ << type << "'";
+ res = -EINVAL;
+ }
+
+ if (res)
+ return res;
+
+ sets_.setData(std::move(lscData));
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::configure
+ */
+int LensShadingCorrection::configure(IPAContext &context,
+ [[maybe_unused]] const IPACameraSensorInfo &configInfo)
+{
+ const Size &size = context.configuration.sensor.size;
+ Size totalSize{};
+
+ for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE; ++i) {
+ xSizes_[i] = xSize_[i] * size.width;
+ ySizes_[i] = ySize_[i] * size.height;
+
+		/*
+		 * To prevent unexpected ISP behavior, the sums of the
+		 * x_size_tbl and y_size_tbl entries shall equal size.width/2
+		 * and size.height/2 respectively. Enforce this by computing
+		 * the last entry of each table, avoiding rounding errors.
+		 */
+ if (i == RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE - 1) {
+ xSizes_[i] = size.width / 2 - totalSize.width;
+ ySizes_[i] = size.height / 2 - totalSize.height;
+ }
+
+ totalSize.width += xSizes_[i];
+ totalSize.height += ySizes_[i];
+
+		xGrad_[i] = std::round(32768.0 / xSizes_[i]);
+		yGrad_[i] = std::round(32768.0 / ySizes_[i]);
+ }
+
+ context.configuration.lsc.enabled = true;
+ return 0;
+}
+
+void LensShadingCorrection::setParameters(rkisp1_cif_isp_lsc_config &config)
+{
+ memcpy(config.x_grad_tbl, xGrad_, sizeof(config.x_grad_tbl));
+ memcpy(config.y_grad_tbl, yGrad_, sizeof(config.y_grad_tbl));
+ memcpy(config.x_size_tbl, xSizes_, sizeof(config.x_size_tbl));
+ memcpy(config.y_size_tbl, ySizes_, sizeof(config.y_size_tbl));
+}
+
+void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config,
+ const Components &set)
+{
+ std::copy(set.r.begin(), set.r.end(), &config.r_data_tbl[0][0]);
+ std::copy(set.gr.begin(), set.gr.end(), &config.gr_data_tbl[0][0]);
+ std::copy(set.gb.begin(), set.gb.end(), &config.gb_data_tbl[0][0]);
+ std::copy(set.b.begin(), set.b.end(), &config.b_data_tbl[0][0]);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::prepare
+ */
+void LensShadingCorrection::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ RkISP1Params *params)
+{
+ uint32_t ct = context.activeState.awb.temperatureK;
+	if (std::abs(static_cast<int>(ct) - static_cast<int>(lastAppliedCt_)) <
+	    kColourTemperatureChangeThreshold)
+ return;
+ unsigned int quantizedCt;
+ const Components &set = sets_.getInterpolated(ct, &quantizedCt);
+ if (lastAppliedQuantizedCt_ == quantizedCt)
+ return;
+
+ auto config = params->block<BlockType::Lsc>();
+ config.setEnabled(true);
+ setParameters(*config);
+ copyTable(*config, set);
+
+ lastAppliedCt_ = ct;
+ lastAppliedQuantizedCt_ = quantizedCt;
+
+ LOG(RkISP1Lsc, Debug)
+ << "ct is " << ct << ", quantized to "
+ << quantizedCt;
+}
+
+REGISTER_IPA_ALGORITHM(LensShadingCorrection, "LensShadingCorrection")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
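To make the Interpolator specialization above more concrete, here is a minimal self-contained sketch of the component-wise blend it performs, using hypothetical single-entry tables tuned at 2856K and 6504K (the names and values are illustrative, not from a real tuning file):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

/* Blend two equally sized tables: dest = a * (1 - lambda) + b * lambda. */
static std::vector<uint16_t> blend(const std::vector<uint16_t> &a,
				   const std::vector<uint16_t> &b,
				   double lambda)
{
	assert(a.size() == b.size());

	std::vector<uint16_t> dest(a.size());
	for (size_t i = 0; i < a.size(); i++)
		dest[i] = a[i] * (1.0 - lambda) + b[i] * lambda;

	return dest;
}

int main()
{
	/* Hypothetical one-entry red gain tables for two colour temperatures. */
	std::vector<uint16_t> r2856 = { 1500 };
	std::vector<uint16_t> r6504 = { 1300 };

	/* Interpolating for 4680K: lambda = (4680 - 2856) / (6504 - 2856). */
	double lambda = (4680.0 - 2856.0) / (6504.0 - 2856.0);
	std::vector<uint16_t> r = blend(r2856, r6504, lambda);

	/* lambda = 0.50, r[0] = 1400 */
	printf("lambda = %.2f, r[0] = %u\n", lambda,
	       static_cast<unsigned int>(r[0]));

	return 0;
}
```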
diff --git a/src/ipa/rkisp1/algorithms/lsc.h b/src/ipa/rkisp1/algorithms/lsc.h
new file mode 100644
index 00000000..5a0824e3
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lsc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 Lens Shading Correction control
+ */
+
+#pragma once
+
+#include <map>
+
+#include "libipa/interpolator.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class LensShadingCorrection : public Algorithm
+{
+public:
+ LensShadingCorrection();
+ ~LensShadingCorrection() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override;
+ void prepare(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ RkISP1Params *params) override;
+
+ struct Components {
+ uint32_t ct;
+ std::vector<uint16_t> r;
+ std::vector<uint16_t> gr;
+ std::vector<uint16_t> gb;
+ std::vector<uint16_t> b;
+ };
+
+private:
+ void setParameters(rkisp1_cif_isp_lsc_config &config);
+	void copyTable(rkisp1_cif_isp_lsc_config &config, const Components &set);
+
+ ipa::Interpolator<Components> sets_;
+ std::vector<double> xSize_;
+ std::vector<double> ySize_;
+ uint16_t xGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t yGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t xSizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ uint16_t ySizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+
+ unsigned int lastAppliedCt_;
+ unsigned int lastAppliedQuantizedCt_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/lux.cpp b/src/ipa/rkisp1/algorithms/lux.cpp
new file mode 100644
index 00000000..b0f74963
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.cpp
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Lux control
+ */
+
+#include "lux.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/histogram.h"
+#include "libipa/lux.h"
+
+/**
+ * \file lux.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Lux
+ * \brief RkISP1 Lux control
+ *
+ * The Lux algorithm is responsible for estimating the lux level of the image.
+ * It doesn't take or generate any controls, but it provides a lux level for
+ * other algorithms (such as AGC) to use.
+ */
+
+/**
+ * \brief Construct an rkisp1 Lux algorithm module
+ *
+ * The Lux helper is initialized to 65535, as that is the maximum bin count
+ * on the rkisp1.
+ */
+Lux::Lux()
+ : lux_(65535)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Lux::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ return lux_.parseTuningData(tuningData);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Lux::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double gain = frameContext.sensor.gain;
+
+ /* \todo Deduplicate the histogram calculation from AGC */
+ const rkisp1_cif_isp_stat *params = &stats->params;
+ Histogram yHist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+
+ double lux = lux_.estimateLux(exposureTime, gain, 1.0, yHist);
+ frameContext.lux.lux = lux;
+ metadata.set(controls::Lux, lux);
+}
+
+REGISTER_IPA_ALGORITHM(Lux, "Lux")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
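The estimation itself lives in the libipa ipa::Lux helper, whose tuning data ties a calibration capture to a known illuminance. As a rough sketch of the general technique only (not a reproduction of the libipa implementation, and with purely hypothetical names and numbers), a reference frame captured at a known lux level can be rescaled by the exposure, gain and measured-brightness ratios of the current frame:

```cpp
#include <cstdio>

/*
 * Generic histogram-based lux estimation sketch: scale a calibrated
 * reference illuminance by how much brighter the current frame measures
 * per unit of exposure and gain. All parameters here are hypothetical.
 */
static double estimateLuxSketch(double exposureUs, double gain, double yMean,
				double refExposureUs, double refGain,
				double refYMean, double refLux)
{
	double exposureRatio = refExposureUs / exposureUs;
	double gainRatio = refGain / gain;
	double yRatio = yMean / refYMean;

	return refLux * exposureRatio * gainRatio * yRatio;
}

int main()
{
	/* Calibration: 10 ms exposure, gain 2.0, mean Y 120, at 400 lux. */
	/* Current frame: half the exposure, same gain, same brightness. */
	double lux = estimateLuxSketch(5000.0, 2.0, 120.0,
				       10000.0, 2.0, 120.0, 400.0);

	/* Same brightness from half the light gathered => twice the lux: 800. */
	printf("estimated lux: %.0f\n", lux);

	return 0;
}
```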
diff --git a/src/ipa/rkisp1/algorithms/lux.h b/src/ipa/rkisp1/algorithms/lux.h
new file mode 100644
index 00000000..8a90de55
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 Lux control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "libipa/lux.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Lux : public Algorithm
+{
+public:
+ Lux();
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ ipa::Lux lux_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/meson.build b/src/ipa/rkisp1/algorithms/meson.build
new file mode 100644
index 00000000..c66b0b70
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rkisp1_ipa_algorithms = files([
+ 'agc.cpp',
+ 'awb.cpp',
+ 'blc.cpp',
+ 'ccm.cpp',
+ 'cproc.cpp',
+ 'dpcc.cpp',
+ 'dpf.cpp',
+ 'filter.cpp',
+ 'goc.cpp',
+ 'gsl.cpp',
+ 'lsc.cpp',
+ 'lux.cpp',
+])
diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml
new file mode 100644
index 00000000..0d99cb52
--- /dev/null
+++ b/src/ipa/rkisp1/data/imx219.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ - ct: 5800
+ r: [
+ 1501, 1480, 1478, 1362, 1179, 1056, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1030, 1053, 1134, 1185, 1520, 1480, 1463, 1179, 1056, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1027, 1046, 1134, 1533, 1471, 1179, 1056, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1039, 1471,
+ 1314, 1068, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1025, 1314, 1150, 1028, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1150, 1050, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1076, 1026,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1050, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1050, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1050, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1086, 1037, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1057, 1182, 1071, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1057, 1161,
+ 1345, 1146, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1036, 1161, 1298, 1612, 1328, 1089, 1025, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1025, 1036, 1161, 1324, 1463, 1884, 1651, 1339, 1103, 1032,
+ 1025, 1024, 1024, 1024, 1024, 1025, 1038, 1101, 1204, 1324, 1463, 1497, 1933,
+ 1884, 1587, 1275, 1079, 1052, 1046, 1046, 1046, 1046, 1055, 1101, 1204, 1336,
+ 1487, 1493, 1476,
+ ]
+ gr: [
+ 1262, 1250, 1094, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1250, 1095, 1028, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1095, 1030, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1030,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1041, 1051, 1025, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1051, 1165, 1088,
+ 1051, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1051, 1165, 1261,
+ ]
+ gb: [
+ 1259, 1248, 1092, 1027, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1248, 1092, 1027, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1092, 1029, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1029,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1041, 1051, 1025, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1166, 1090,
+ 1051, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1052, 1166, 1266,
+ ]
+ b: [
+ 1380, 1378, 1377, 1247, 1080, 1025, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1030, 1406, 1378, 1284, 1092, 1027, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1406, 1338, 1129, 1029, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1338,
+ 1205, 1043, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1205, 1094, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1116, 1039, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1070, 1025,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1052, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1052, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1070, 1025, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1109, 1036, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1057,
+ 1175, 1082, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1057, 1176, 1293, 1172, 1036, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1054, 1185, 1334, 1438, 1294, 1099, 1025, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1054, 1185, 1334, 1334, 1462,
+ 1438, 1226, 1059, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1025, 1054, 1185,
+ 1326, 1334, 1334,
+ ]
+...
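As a quick sanity check against the 0.5-sum constraint that parseSizes() in lsc.cpp enforces on these tables, the uniform x-size/y-size entries above satisfy it exactly:

$$\sum_{i=1}^{8} 0.0625 = 8 \times 0.0625 = 0.5$$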
diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml
new file mode 100644
index 00000000..202af36a
--- /dev/null
+++ b/src/ipa/rkisp1/data/imx258.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #4208x3120_A_70 - A
+ - ct: 2856
+ resolution: 4208x3120
+ r: [1483, 1423, 1410, 1414, 1417, 1384, 1356, 1348, 1349, 1348, 1393, 1392, 1409, 1444, 1460, 1475, 1568, 1462, 1409, 1398, 1391, 1361, 1343, 1328, 1312, 1316, 1325, 1328, 1372, 1395, 1427, 1410, 1440, 1525, 1441, 1366, 1373, 1364, 1338, 1312, 1287, 1270, 1262, 1267, 1305, 1339, 1380, 1402, 1425, 1424, 1510, 1423, 1376, 1375, 1353, 1309, 1253, 1220, 1201, 1192, 1203, 1243, 1286, 1338, 1375, 1427, 1438, 1499, 1405, 1353, 1354, 1331, 1269, 1207, 1169, 1140, 1137, 1145, 1186, 1246, 1309, 1373, 1399, 1438, 1512, 1391, 1349, 1351, 1306, 1236, 1174, 1121, 1089, 1083, 1098, 1139, 1202, 1276, 1349, 1384, 1428, 1494, 1401, 1337, 1336, 1277, 1211, 1138, 1082, 1057, 1053, 1067, 1110, 1166, 1253, 1331, 1375, 1417, 1485, 1401, 1341, 1316, 1269, 1184, 1115, 1063, 1037, 1029, 1042, 1082, 1144, 1234, 1322, 1368, 1405, 1480, 1387, 1329, 1305, 1257, 1179, 1104, 1049, 1028, 1024, 1037, 1078, 1144, 1231, 1312, 1363, 1404, 1456, 1401, 1341, 1313, 1254, 1177, 1104, 1053, 1041, 1026, 1042, 1082, 1149, 1229, 1322, 1372, 1397, 1457, 1397, 1344, 1312, 1271, 1191, 1122, 1070, 1052, 1044, 1061, 1097, 1166, 1245, 1334, 1382, 1405, 1476, 1400, 1342, 1333, 1293, 1213, 1146, 1099, 1073, 1061, 1081, 1134, 1202, 1273, 1332, 1380, 1411, 1484, 1414, 1350, 1344, 1301, 1251, 1181, 1133, 1109, 1100, 1118, 1164, 1218, 1299, 1338, 1373, 1408, 1459, 1397, 1360, 1342, 1339, 1293, 1231, 1181, 1149, 1155, 1161, 1202, 1256, 1315, 1364, 1383, 1396, 1479, 1382, 1342, 1358, 1346, 1314, 1284, 1231, 1210, 1198, 1224, 1251, 1303, 1338, 1361, 1381, 1394, 1455, 1386, 1338, 1342, 1341, 1326, 1296, 1274, 1254, 1249, 1262, 1280, 1319, 1357, 1367, 1373, 1379, 1462, 1426, 1340, 1356, 1354, 1330, 1344, 1291, 1275, 1255, 1272, 1298, 1333, 1374, 1390, 1393, 1418, 1580, ]
+ gr: [1274, 1203, 1200, 1184, 1165, 1167, 1155, 1160, 1155, 1158, 1164, 1181, 1196, 1223, 1219, 1220, 1369, 1233, 1172, 1161, 1158, 1146, 1149, 1142, 1129, 1133, 1137, 1144, 1155, 1173, 1189, 1204, 1205, 1268, 1215, 1172, 1148, 1137, 1135, 1124, 1123, 1114, 1110, 1116, 1131, 1149, 1161, 1175, 1191, 1220, 1263, 1185, 1153, 1140, 1137, 1119, 1106, 1094, 1088, 1086, 1099, 1107, 1125, 1152, 1154, 1187, 1209, 1255, 1195, 1141, 1133, 1133, 1112, 1083, 1081, 1066, 1057, 1067, 1088, 1103, 1134, 1154, 1172, 1199, 1255, 1186, 1136, 1127, 1121, 1094, 1077, 1055, 1044, 1040, 1048, 1067, 1086, 1121, 1146, 1155, 1185, 1258, 1177, 1127, 1117, 1104, 1082, 1063, 1044, 1038, 1027, 1036, 1057, 1070, 1101, 1138, 1151, 1177, 1245, 1184, 1116, 1119, 1098, 1070, 1045, 1037, 1030, 1027, 1026, 1045, 1062, 1099, 1132, 1149, 1179, 1238, 1172, 1120, 1113, 1100, 1070, 1042, 1029, 1027, 1029, 1027, 1042, 1066, 1088, 1126, 1149, 1174, 1223, 1162, 1118, 1117, 1093, 1065, 1039, 1030, 1028, 1022, 1028, 1045, 1060, 1101, 1134, 1146, 1165, 1246, 1172, 1116, 1119, 1102, 1075, 1046, 1029, 1032, 1030, 1038, 1049, 1073, 1097, 1132, 1146, 1168, 1231, 1178, 1118, 1123, 1111, 1083, 1062, 1041, 1038, 1033, 1041, 1054, 1074, 1109, 1135, 1144, 1175, 1244, 1193, 1136, 1123, 1118, 1100, 1070, 1045, 1036, 1044, 1047, 1067, 1090, 1116, 1135, 1158, 1174, 1232, 1198, 1142, 1127, 1130, 1107, 1085, 1068, 1060, 1057, 1069, 1079, 1102, 1115, 1124, 1154, 1178, 1241, 1192, 1136, 1125, 1113, 1116, 1096, 1081, 1075, 1075, 1088, 1097, 1116, 1124, 1135, 1155, 1177, 1232, 1183, 1142, 1119, 1113, 1099, 1101, 1088, 1084, 1085, 1089, 1103, 1109, 1122, 1133, 1147, 1175, 1258, 1238, 1162, 1161, 1143, 1124, 1131, 1108, 1111, 1107, 1115, 1116, 1138, 1137, 1150, 1163, 1186, 1381, ]
+ gb: [1277, 1217, 1179, 1179, 1163, 1158, 1151, 1150, 1149, 1143, 1151, 1172, 1184, 1207, 1216, 1246, 1375, 1242, 1194, 1166, 1151, 1144, 1145, 1135, 1130, 1129, 1132, 1137, 1154, 1166, 1189, 1207, 1210, 1290, 1229, 1177, 1153, 1144, 1140, 1135, 1124, 1110, 1104, 1115, 1126, 1148, 1162, 1171, 1199, 1220, 1268, 1226, 1163, 1152, 1138, 1130, 1111, 1091, 1088, 1086, 1089, 1097, 1126, 1147, 1164, 1187, 1206, 1273, 1212, 1151, 1141, 1132, 1117, 1093, 1075, 1060, 1059, 1062, 1088, 1108, 1133, 1162, 1168, 1204, 1278, 1207, 1141, 1130, 1126, 1095, 1075, 1063, 1046, 1044, 1054, 1069, 1084, 1120, 1153, 1167, 1195, 1269, 1200, 1141, 1126, 1113, 1092, 1063, 1045, 1033, 1036, 1038, 1055, 1080, 1117, 1139, 1165, 1182, 1262, 1195, 1130, 1128, 1115, 1079, 1052, 1041, 1031, 1024, 1028, 1046, 1072, 1110, 1141, 1160, 1175, 1258, 1189, 1136, 1124, 1105, 1077, 1049, 1029, 1021, 1029, 1033, 1040, 1074, 1108, 1143, 1152, 1173, 1237, 1200, 1130, 1126, 1109, 1080, 1050, 1030, 1031, 1027, 1031, 1043, 1069, 1099, 1141, 1152, 1168, 1249, 1203, 1132, 1124, 1113, 1082, 1058, 1032, 1030, 1024, 1033, 1050, 1083, 1109, 1151, 1156, 1178, 1253, 1204, 1130, 1128, 1112, 1088, 1060, 1045, 1030, 1027, 1036, 1058, 1082, 1120, 1145, 1160, 1176, 1246, 1195, 1137, 1123, 1121, 1102, 1072, 1046, 1037, 1037, 1047, 1072, 1090, 1125, 1140, 1158, 1177, 1252, 1209, 1147, 1128, 1125, 1114, 1088, 1063, 1053, 1051, 1058, 1084, 1101, 1128, 1140, 1159, 1176, 1243, 1195, 1138, 1130, 1127, 1113, 1101, 1076, 1071, 1067, 1082, 1087, 1111, 1125, 1140, 1151, 1183, 1235, 1189, 1137, 1126, 1122, 1112, 1104, 1091, 1089, 1081, 1085, 1103, 1112, 1125, 1140, 1157, 1175, 1242, 1234, 1181, 1161, 1150, 1127, 1117, 1101, 1094, 1094, 1102, 1117, 1130, 1138, 1155, 1171, 1192, 1399, ]
+ b: [1309, 1209, 1169, 1157, 1149, 1136, 1116, 1117, 1126, 1128, 1127, 1141, 1143, 1182, 1196, 1209, 1398, 1231, 1176, 1140, 1123, 1119, 1113, 1111, 1122, 1105, 1117, 1116, 1135, 1130, 1135, 1171, 1169, 1271, 1251, 1154, 1132, 1118, 1104, 1109, 1103, 1094, 1088, 1104, 1093, 1120, 1130, 1135, 1151, 1180, 1267, 1219, 1136, 1111, 1125, 1106, 1107, 1082, 1074, 1077, 1074, 1101, 1112, 1117, 1136, 1139, 1173, 1256, 1205, 1125, 1108, 1118, 1110, 1091, 1081, 1065, 1068, 1065, 1086, 1087, 1105, 1123, 1119, 1156, 1249, 1195, 1106, 1112, 1101, 1085, 1068, 1064, 1053, 1043, 1048, 1068, 1073, 1095, 1117, 1118, 1123, 1251, 1193, 1101, 1091, 1097, 1081, 1052, 1043, 1045, 1041, 1045, 1052, 1065, 1100, 1112, 1112, 1123, 1200, 1180, 1096, 1103, 1083, 1069, 1053, 1045, 1035, 1034, 1035, 1045, 1062, 1087, 1108, 1113, 1113, 1228, 1176, 1093, 1095, 1080, 1062, 1055, 1035, 1033, 1028, 1037, 1039, 1064, 1080, 1115, 1121, 1120, 1202, 1174, 1086, 1087, 1078, 1064, 1049, 1037, 1027, 1022, 1031, 1045, 1058, 1087, 1113, 1108, 1113, 1207, 1200, 1095, 1102, 1092, 1072, 1052, 1043, 1033, 1024, 1033, 1043, 1069, 1095, 1112, 1128, 1123, 1220, 1215, 1101, 1091, 1096, 1080, 1059, 1051, 1040, 1031, 1040, 1064, 1064, 1095, 1111, 1112, 1141, 1222, 1198, 1119, 1108, 1097, 1080, 1059, 1050, 1043, 1034, 1043, 1063, 1073, 1100, 1107, 1114, 1131, 1212, 1197, 1136, 1094, 1109, 1096, 1078, 1054, 1052, 1051, 1060, 1063, 1078, 1101, 1109, 1116, 1142, 1256, 1212, 1112, 1098, 1097, 1094, 1084, 1074, 1061, 1051, 1057, 1064, 1080, 1089, 1102, 1115, 1136, 1227, 1185, 1118, 1081, 1059, 1072, 1068, 1057, 1049, 1048, 1054, 1066, 1058, 1067, 1096, 1109, 1143, 1223, 1291, 1173, 1131, 1113, 1087, 1077, 1090, 1081, 1090, 1086, 1090, 1092, 1103, 1144, 1149, 1216, 1387, ]
+ #4208x3120_D50_70 - D50
+ - ct: 5003
+ resolution: 4208x3120
+ r: [1240, 1212, 1218, 1191, 1191, 1171, 1136, 1144, 1113, 1148, 1182, 1166, 1210, 1211, 1213, 1240, 1336, 1236, 1193, 1176, 1158, 1147, 1126, 1107, 1122, 1107, 1107, 1110, 1146, 1176, 1194, 1195, 1219, 1259, 1210, 1157, 1156, 1153, 1123, 1115, 1094, 1074, 1078, 1081, 1098, 1130, 1163, 1170, 1179, 1220, 1284, 1228, 1146, 1159, 1132, 1101, 1074, 1059, 1053, 1044, 1060, 1072, 1102, 1131, 1156, 1186, 1227, 1272, 1219, 1176, 1150, 1124, 1091, 1043, 1036, 1025, 1025, 1031, 1042, 1076, 1095, 1155, 1188, 1209, 1296, 1206, 1161, 1128, 1101, 1065, 1032, 1019, 1018, 1027, 1018, 1034, 1057, 1102, 1139, 1161, 1211, 1274, 1184, 1133, 1119, 1097, 1042, 1018, 1020, 1027, 1034, 1030, 1032, 1042, 1075, 1119, 1164, 1199, 1270, 1205, 1124, 1114, 1086, 1033, 1015, 1023, 1039, 1039, 1033, 1026, 1041, 1074, 1111, 1142, 1206, 1278, 1193, 1118, 1098, 1084, 1023, 1003, 1016, 1047, 1059, 1038, 1025, 1046, 1063, 1124, 1148, 1190, 1238, 1191, 1124, 1107, 1069, 1027, 1009, 1012, 1036, 1045, 1036, 1020, 1024, 1058, 1118, 1158, 1183, 1262, 1213, 1121, 1112, 1076, 1030, 1012, 1003, 1019, 1028, 1013, 1020, 1036, 1078, 1123, 1155, 1176, 1228, 1221, 1135, 1117, 1105, 1055, 1020, 1005, 1007, 1007, 1004, 1017, 1048, 1088, 1131, 1169, 1183, 1280, 1209, 1141, 1125, 1105, 1074, 1025, 1012, 1008, 1000, 1011, 1024, 1050, 1113, 1128, 1154, 1199, 1290, 1217, 1142, 1134, 1120, 1101, 1054, 1028, 1014, 1006, 1017, 1040, 1078, 1105, 1136, 1164, 1188, 1250, 1195, 1130, 1148, 1120, 1108, 1083, 1053, 1041, 1032, 1061, 1067, 1097, 1127, 1136, 1152, 1181, 1227, 1166, 1145, 1140, 1141, 1119, 1092, 1075, 1072, 1052, 1065, 1089, 1107, 1147, 1154, 1158, 1183, 1230, 1136, 1147, 1150, 1168, 1139, 1113, 1098, 1055, 1048, 1072, 1079, 1129, 1147, 1173, 1188, 1181, 1283, ]
+ gr: [1246, 1183, 1160, 1143, 1145, 1138, 1113, 1111, 1117, 1116, 1132, 1145, 1167, 1167, 1196, 1197, 1335, 1205, 1152, 1123, 1122, 1123, 1103, 1107, 1102, 1097, 1102, 1099, 1128, 1141, 1157, 1152, 1184, 1242, 1204, 1141, 1112, 1106, 1102, 1093, 1096, 1085, 1076, 1085, 1094, 1107, 1123, 1146, 1162, 1178, 1218, 1169, 1130, 1114, 1100, 1096, 1083, 1072, 1059, 1065, 1070, 1087, 1096, 1116, 1134, 1155, 1174, 1238, 1159, 1126, 1105, 1102, 1083, 1062, 1060, 1049, 1047, 1054, 1063, 1084, 1111, 1131, 1140, 1164, 1243, 1167, 1114, 1105, 1088, 1067, 1047, 1034, 1034, 1028, 1042, 1042, 1059, 1096, 1114, 1135, 1170, 1200, 1156, 1101, 1098, 1089, 1068, 1048, 1027, 1034, 1029, 1032, 1047, 1043, 1088, 1111, 1130, 1160, 1201, 1143, 1100, 1086, 1087, 1051, 1034, 1029, 1028, 1030, 1019, 1033, 1044, 1087, 1109, 1124, 1155, 1211, 1148, 1098, 1088, 1077, 1058, 1037, 1026, 1025, 1034, 1033, 1031, 1054, 1074, 1107, 1134, 1159, 1211, 1150, 1090, 1084, 1074, 1056, 1029, 1020, 1028, 1025, 1027, 1031, 1044, 1080, 1109, 1126, 1152, 1208, 1131, 1101, 1088, 1073, 1048, 1035, 1030, 1026, 1024, 1034, 1038, 1053, 1083, 1104, 1124, 1160, 1206, 1147, 1103, 1082, 1082, 1060, 1035, 1026, 1023, 1018, 1031, 1044, 1058, 1096, 1114, 1128, 1153, 1208, 1170, 1112, 1098, 1088, 1070, 1049, 1027, 1027, 1023, 1031, 1046, 1071, 1085, 1106, 1129, 1150, 1228, 1164, 1111, 1101, 1089, 1078, 1058, 1040, 1030, 1032, 1037, 1060, 1073, 1102, 1097, 1125, 1156, 1223, 1181, 1115, 1097, 1093, 1083, 1072, 1056, 1047, 1041, 1057, 1071, 1079, 1081, 1102, 1124, 1141, 1195, 1170, 1109, 1091, 1089, 1061, 1074, 1049, 1054, 1052, 1057, 1067, 1076, 1097, 1106, 1121, 1141, 1211, 1173, 1129, 1108, 1099, 1093, 1092, 1076, 1063, 1057, 1065, 1090, 1107, 1117, 1140, 1123, 1175, 1343, ]
+ gb: [1238, 1183, 1160, 1160, 1134, 1134, 1124, 1108, 1131, 1127, 1124, 1145, 1172, 1188, 1201, 1217, 1349, 1216, 1160, 1128, 1120, 1117, 1110, 1108, 1105, 1102, 1111, 1114, 1125, 1144, 1160, 1162, 1192, 1260, 1212, 1141, 1127, 1118, 1101, 1104, 1103, 1086, 1077, 1086, 1105, 1116, 1126, 1147, 1167, 1191, 1242, 1191, 1130, 1126, 1103, 1093, 1082, 1074, 1070, 1064, 1064, 1079, 1099, 1113, 1132, 1156, 1185, 1247, 1175, 1117, 1114, 1109, 1081, 1067, 1061, 1047, 1044, 1051, 1066, 1083, 1108, 1134, 1141, 1180, 1248, 1187, 1108, 1106, 1095, 1076, 1052, 1044, 1036, 1034, 1042, 1052, 1070, 1105, 1124, 1140, 1161, 1228, 1171, 1091, 1095, 1088, 1069, 1041, 1035, 1034, 1034, 1037, 1048, 1062, 1090, 1120, 1129, 1165, 1223, 1158, 1108, 1093, 1080, 1052, 1030, 1034, 1027, 1030, 1028, 1034, 1054, 1083, 1112, 1133, 1141, 1208, 1158, 1099, 1091, 1075, 1047, 1031, 1017, 1021, 1035, 1027, 1033, 1054, 1088, 1110, 1120, 1146, 1211, 1171, 1099, 1093, 1079, 1056, 1029, 1021, 1030, 1025, 1031, 1037, 1047, 1077, 1116, 1122, 1132, 1203, 1179, 1093, 1087, 1076, 1053, 1038, 1028, 1024, 1024, 1024, 1040, 1058, 1082, 1108, 1114, 1144, 1198, 1167, 1091, 1091, 1087, 1059, 1047, 1029, 1016, 1021, 1036, 1045, 1066, 1093, 1113, 1116, 1144, 1205, 1159, 1113, 1099, 1091, 1069, 1047, 1029, 1029, 1024, 1037, 1054, 1072, 1088, 1109, 1125, 1150, 1200, 1186, 1114, 1097, 1098, 1087, 1065, 1035, 1033, 1043, 1042, 1054, 1076, 1089, 1111, 1126, 1130, 1214, 1153, 1106, 1100, 1090, 1086, 1082, 1057, 1059, 1053, 1059, 1066, 1077, 1088, 1113, 1117, 1144, 1203, 1147, 1107, 1110, 1090, 1088, 1072, 1070, 1060, 1062, 1058, 1074, 1087, 1096, 1109, 1126, 1150, 1216, 1170, 1145, 1128, 1108, 1088, 1110, 1085, 1070, 1064, 1078, 1077, 1101, 1107, 1136, 1148, 1163, 1345, ]
+ b: [1252, 1185, 1146, 1139, 1147, 1130, 1114, 1111, 1122, 1111, 1121, 1123, 1144, 1150, 1171, 1167, 1303, 1187, 1152, 1125, 1101, 1104, 1096, 1101, 1099, 1093, 1096, 1098, 1103, 1118, 1141, 1160, 1156, 1226, 1222, 1125, 1112, 1118, 1104, 1094, 1083, 1073, 1073, 1094, 1099, 1103, 1114, 1133, 1146, 1174, 1212, 1162, 1123, 1104, 1110, 1100, 1081, 1066, 1065, 1057, 1053, 1072, 1094, 1107, 1117, 1136, 1162, 1226, 1197, 1124, 1088, 1092, 1084, 1066, 1055, 1051, 1044, 1049, 1061, 1081, 1096, 1102, 1134, 1143, 1234, 1171, 1110, 1099, 1075, 1070, 1051, 1052, 1030, 1030, 1035, 1055, 1071, 1092, 1100, 1113, 1128, 1214, 1174, 1099, 1080, 1069, 1054, 1047, 1032, 1031, 1027, 1034, 1042, 1061, 1086, 1091, 1113, 1139, 1222, 1156, 1088, 1089, 1072, 1051, 1036, 1032, 1026, 1030, 1024, 1040, 1047, 1074, 1091, 1109, 1131, 1198, 1158, 1090, 1079, 1071, 1047, 1038, 1031, 1028, 1027, 1028, 1029, 1046, 1068, 1087, 1105, 1122, 1196, 1173, 1098, 1080, 1060, 1040, 1036, 1022, 1019, 1022, 1029, 1029, 1045, 1077, 1094, 1103, 1109, 1189, 1170, 1096, 1070, 1063, 1048, 1033, 1026, 1023, 1016, 1021, 1037, 1053, 1068, 1098, 1107, 1128, 1195, 1166, 1099, 1086, 1066, 1061, 1040, 1022, 1022, 1028, 1027, 1041, 1057, 1086, 1094, 1103, 1124, 1188, 1202, 1113, 1081, 1083, 1071, 1040, 1025, 1024, 1025, 1019, 1055, 1055, 1081, 1099, 1112, 1128, 1202, 1171, 1108, 1083, 1084, 1078, 1051, 1043, 1020, 1037, 1037, 1049, 1072, 1069, 1100, 1107, 1115, 1176, 1180, 1106, 1094, 1077, 1068, 1053, 1050, 1035, 1041, 1038, 1062, 1068, 1068, 1084, 1098, 1125, 1184, 1164, 1104, 1077, 1057, 1064, 1049, 1039, 1041, 1036, 1041, 1042, 1058, 1064, 1087, 1099, 1111, 1173, 1209, 1137, 1099, 1083, 1076, 1072, 1077, 1065, 1066, 1065, 1061, 1081, 1096, 1135, 1126, 1150, 1333, ]
+ #4208x3120_D65_70 - D65
+ - ct: 6504
+ resolution: 4208x3120
+ r: [1359, 1336, 1313, 1273, 1274, 1250, 1250, 1218, 1222, 1223, 1240, 1266, 1308, 1327, 1333, 1336, 1456, 1359, 1286, 1256, 1249, 1235, 1235, 1216, 1219, 1187, 1205, 1216, 1240, 1267, 1277, 1303, 1311, 1420, 1326, 1254, 1250, 1239, 1212, 1207, 1191, 1181, 1176, 1181, 1187, 1226, 1241, 1281, 1295, 1326, 1391, 1304, 1253, 1234, 1234, 1209, 1174, 1156, 1147, 1131, 1139, 1168, 1196, 1227, 1265, 1282, 1293, 1385, 1302, 1242, 1224, 1216, 1171, 1140, 1112, 1098, 1087, 1098, 1124, 1177, 1206, 1245, 1266, 1310, 1389, 1327, 1227, 1231, 1195, 1156, 1116, 1094, 1070, 1067, 1073, 1101, 1151, 1190, 1223, 1251, 1281, 1402, 1285, 1229, 1203, 1184, 1135, 1093, 1063, 1047, 1041, 1050, 1083, 1119, 1176, 1211, 1248, 1288, 1388, 1269, 1210, 1215, 1173, 1118, 1078, 1046, 1028, 1025, 1037, 1059, 1103, 1170, 1213, 1230, 1268, 1355, 1295, 1208, 1203, 1171, 1124, 1070, 1041, 1024, 1027, 1030, 1057, 1094, 1168, 1206, 1252, 1270, 1364, 1293, 1196, 1187, 1156, 1110, 1075, 1039, 1022, 1022, 1028, 1065, 1096, 1166, 1213, 1245, 1273, 1349, 1291, 1213, 1203, 1162, 1131, 1079, 1053, 1038, 1029, 1044, 1080, 1119, 1176, 1225, 1243, 1271, 1354, 1284, 1222, 1202, 1186, 1136, 1097, 1063, 1054, 1041, 1054, 1083, 1131, 1186, 1232, 1256, 1276, 1360, 1290, 1237, 1210, 1207, 1166, 1116, 1076, 1066, 1070, 1080, 1109, 1152, 1188, 1230, 1240, 1293, 1341, 1304, 1231, 1229, 1210, 1177, 1153, 1128, 1097, 1105, 1108, 1140, 1170, 1213, 1224, 1260, 1282, 1357, 1299, 1237, 1218, 1218, 1202, 1171, 1144, 1135, 1131, 1143, 1161, 1189, 1221, 1233, 1261, 1271, 1346, 1262, 1216, 1229, 1218, 1191, 1187, 1162, 1161, 1148, 1153, 1180, 1201, 1220, 1234, 1251, 1250, 1352, 1294, 1234, 1242, 1240, 1246, 1200, 1178, 1172, 1137, 1154, 1187, 1214, 1252, 1251, 1247, 1296, 1456, ]
+ gr: [1240, 1187, 1158, 1152, 1144, 1129, 1130, 1118, 1115, 1113, 1119, 1141, 1156, 1172, 1180, 1199, 1330, 1223, 1153, 1127, 1123, 1115, 1104, 1104, 1095, 1100, 1107, 1110, 1121, 1137, 1156, 1169, 1179, 1261, 1205, 1138, 1122, 1108, 1101, 1104, 1098, 1088, 1083, 1090, 1106, 1119, 1125, 1144, 1163, 1186, 1236, 1170, 1122, 1112, 1101, 1091, 1089, 1076, 1068, 1061, 1072, 1084, 1101, 1118, 1134, 1156, 1179, 1243, 1162, 1120, 1105, 1105, 1088, 1067, 1061, 1050, 1050, 1057, 1070, 1088, 1112, 1127, 1145, 1166, 1232, 1163, 1108, 1111, 1099, 1079, 1054, 1046, 1041, 1030, 1040, 1053, 1074, 1098, 1120, 1140, 1170, 1226, 1158, 1105, 1094, 1099, 1064, 1048, 1034, 1036, 1028, 1029, 1049, 1055, 1089, 1116, 1135, 1166, 1218, 1142, 1107, 1094, 1092, 1061, 1041, 1030, 1024, 1025, 1028, 1036, 1053, 1087, 1110, 1128, 1153, 1223, 1142, 1098, 1092, 1084, 1056, 1036, 1025, 1024, 1027, 1024, 1038, 1055, 1082, 1108, 1132, 1153, 1203, 1155, 1098, 1094, 1080, 1056, 1034, 1023, 1025, 1022, 1025, 1036, 1053, 1078, 1112, 1126, 1144, 1212, 1163, 1096, 1092, 1083, 1059, 1039, 1027, 1023, 1028, 1026, 1044, 1056, 1091, 1114, 1130, 1149, 1204, 1152, 1103, 1090, 1089, 1065, 1045, 1031, 1028, 1025, 1035, 1048, 1064, 1092, 1116, 1131, 1157, 1203, 1162, 1100, 1098, 1093, 1076, 1049, 1033, 1030, 1030, 1040, 1050, 1067, 1094, 1103, 1127, 1154, 1221, 1162, 1112, 1099, 1095, 1079, 1064, 1042, 1033, 1034, 1048, 1061, 1077, 1091, 1108, 1126, 1148, 1213, 1154, 1112, 1106, 1095, 1081, 1065, 1056, 1052, 1050, 1059, 1071, 1082, 1091, 1102, 1129, 1149, 1211, 1157, 1106, 1092, 1081, 1066, 1072, 1064, 1048, 1056, 1061, 1066, 1076, 1091, 1107, 1122, 1145, 1207, 1204, 1127, 1117, 1106, 1098, 1081, 1073, 1068, 1062, 1068, 1081, 1107, 1102, 1127, 1148, 1170, 1353, ]
+ gb: [1240, 1177, 1157, 1143, 1129, 1130, 1118, 1112, 1123, 1123, 1123, 1137, 1159, 1181, 1197, 1206, 1354, 1217, 1153, 1130, 1124, 1109, 1114, 1105, 1108, 1116, 1110, 1114, 1131, 1145, 1145, 1163, 1183, 1249, 1197, 1134, 1124, 1107, 1115, 1104, 1100, 1085, 1091, 1097, 1102, 1110, 1133, 1145, 1155, 1190, 1227, 1191, 1125, 1107, 1105, 1093, 1084, 1072, 1066, 1071, 1072, 1081, 1106, 1124, 1129, 1153, 1178, 1238, 1193, 1108, 1104, 1098, 1085, 1072, 1059, 1052, 1048, 1059, 1075, 1089, 1105, 1126, 1146, 1162, 1233, 1166, 1098, 1099, 1091, 1078, 1053, 1043, 1036, 1035, 1045, 1058, 1070, 1100, 1113, 1128, 1156, 1230, 1173, 1100, 1087, 1087, 1064, 1046, 1037, 1031, 1031, 1034, 1047, 1063, 1092, 1107, 1112, 1153, 1228, 1169, 1089, 1089, 1079, 1057, 1043, 1030, 1030, 1027, 1027, 1035, 1057, 1087, 1111, 1125, 1136, 1218, 1166, 1097, 1087, 1079, 1056, 1035, 1022, 1021, 1027, 1022, 1035, 1053, 1083, 1109, 1118, 1138, 1198, 1151, 1100, 1087, 1077, 1057, 1034, 1023, 1024, 1027, 1025, 1036, 1051, 1083, 1109, 1116, 1129, 1215, 1159, 1096, 1091, 1079, 1053, 1037, 1026, 1021, 1020, 1020, 1039, 1063, 1086, 1113, 1116, 1134, 1214, 1158, 1096, 1091, 1087, 1065, 1043, 1034, 1025, 1020, 1028, 1046, 1059, 1088, 1109, 1119, 1130, 1202, 1168, 1101, 1091, 1084, 1074, 1050, 1029, 1028, 1026, 1035, 1055, 1072, 1099, 1105, 1121, 1138, 1204, 1160, 1104, 1093, 1094, 1079, 1067, 1043, 1036, 1036, 1048, 1057, 1081, 1089, 1107, 1118, 1140, 1222, 1158, 1101, 1096, 1090, 1082, 1076, 1059, 1052, 1053, 1063, 1071, 1086, 1094, 1103, 1119, 1134, 1206, 1150, 1105, 1098, 1093, 1082, 1077, 1067, 1063, 1065, 1069, 1081, 1081, 1088, 1108, 1123, 1138, 1211, 1198, 1133, 1114, 1117, 1097, 1093, 1076, 1073, 1067, 1077, 1076, 1089, 1101, 1119, 1154, 1163, 1346, ]
+ b: [1241, 1188, 1165, 1151, 1131, 1127, 1134, 1115, 1122, 1127, 1131, 1136, 1154, 1165, 1173, 1161, 1319, 1210, 1153, 1138, 1120, 1111, 1114, 1118, 1124, 1108, 1118, 1121, 1123, 1132, 1151, 1161, 1150, 1244, 1224, 1149, 1118, 1108, 1107, 1107, 1103, 1098, 1091, 1103, 1103, 1121, 1124, 1135, 1167, 1177, 1224, 1195, 1130, 1099, 1108, 1101, 1083, 1081, 1078, 1074, 1084, 1086, 1097, 1115, 1128, 1145, 1181, 1211, 1191, 1111, 1109, 1098, 1087, 1081, 1071, 1059, 1053, 1064, 1078, 1091, 1109, 1127, 1139, 1167, 1226, 1192, 1111, 1097, 1098, 1072, 1064, 1050, 1042, 1040, 1046, 1053, 1077, 1099, 1113, 1130, 1152, 1215, 1179, 1106, 1093, 1084, 1070, 1055, 1039, 1037, 1034, 1033, 1046, 1067, 1088, 1112, 1120, 1150, 1220, 1178, 1092, 1097, 1085, 1066, 1049, 1033, 1032, 1026, 1028, 1038, 1058, 1081, 1112, 1120, 1137, 1208, 1170, 1103, 1096, 1082, 1063, 1038, 1035, 1025, 1026, 1027, 1035, 1060, 1075, 1109, 1122, 1133, 1214, 1175, 1095, 1097, 1074, 1061, 1039, 1029, 1028, 1022, 1025, 1033, 1049, 1083, 1107, 1117, 1125, 1212, 1179, 1097, 1091, 1076, 1062, 1045, 1030, 1031, 1027, 1031, 1039, 1055, 1082, 1109, 1114, 1144, 1204, 1178, 1102, 1080, 1087, 1060, 1052, 1027, 1028, 1025, 1028, 1043, 1067, 1093, 1113, 1121, 1123, 1189, 1191, 1117, 1100, 1092, 1079, 1058, 1037, 1037, 1020, 1037, 1058, 1065, 1092, 1101, 1115, 1140, 1194, 1173, 1120, 1096, 1085, 1085, 1065, 1048, 1039, 1036, 1046, 1053, 1076, 1096, 1099, 1114, 1140, 1195, 1180, 1105, 1090, 1079, 1073, 1066, 1056, 1049, 1043, 1057, 1061, 1077, 1081, 1090, 1115, 1131, 1180, 1154, 1095, 1084, 1061, 1055, 1056, 1045, 1043, 1039, 1041, 1051, 1067, 1077, 1092, 1108, 1122, 1197, 1210, 1139, 1117, 1112, 1088, 1097, 1084, 1073, 1074, 1065, 1079, 1091, 1103, 1131, 1144, 1154, 1356, ]
+ #4208x3120_D75_70 - D75
+ - ct: 7504
+ resolution: 4208x3120
+ r: [2718, 2443, 2251, 2101, 1949, 1828, 1725, 1659, 1637, 1656, 1692, 1787, 1913, 2038, 2175, 2358, 2612, 2566, 2301, 2129, 1946, 1798, 1654, 1562, 1501, 1474, 1484, 1541, 1628, 1753, 1900, 2056, 2216, 2458, 2439, 2204, 2002, 1839, 1664, 1534, 1419, 1372, 1340, 1357, 1403, 1489, 1621, 1784, 1950, 2114, 2358, 2344, 2108, 1932, 1723, 1559, 1413, 1321, 1258, 1239, 1246, 1293, 1388, 1512, 1675, 1846, 2036, 2269, 2294, 2047, 1842, 1635, 1464, 1328, 1231, 1178, 1144, 1167, 1208, 1298, 1419, 1582, 1769, 1962, 2198, 2234, 1977, 1769, 1556, 1393, 1262, 1164, 1108, 1086, 1096, 1146, 1232, 1350, 1513, 1700, 1913, 2137, 2206, 1942, 1733, 1515, 1345, 1216, 1120, 1066, 1045, 1060, 1099, 1182, 1316, 1462, 1656, 1868, 2131, 2182, 1922, 1685, 1495, 1315, 1188, 1092, 1045, 1025, 1037, 1080, 1160, 1283, 1442, 1624, 1853, 2102, 2193, 1910, 1702, 1477, 1310, 1179, 1087, 1034, 1024, 1029, 1069, 1163, 1278, 1441, 1624, 1846, 2081, 2191, 1936, 1698, 1495, 1325, 1192, 1100, 1052, 1033, 1042, 1082, 1166, 1291, 1448, 1634, 1852, 2118, 2209, 1957, 1732, 1534, 1357, 1223, 1125, 1078, 1062, 1066, 1113, 1204, 1324, 1486, 1665, 1895, 2127, 2267, 2018, 1789, 1577, 1407, 1280, 1181, 1124, 1105, 1113, 1166, 1252, 1388, 1539, 1724, 1936, 2180, 2319, 2074, 1867, 1659, 1491, 1354, 1248, 1192, 1175, 1191, 1236, 1333, 1441, 1618, 1798, 2005, 2249, 2399, 2148, 1955, 1752, 1578, 1442, 1351, 1293, 1272, 1286, 1334, 1418, 1547, 1709, 1872, 2085, 2297, 2497, 2217, 2069, 1857, 1694, 1560, 1458, 1403, 1384, 1400, 1443, 1537, 1670, 1815, 1991, 2157, 2412, 2594, 2341, 2147, 2004, 1827, 1693, 1600, 1537, 1521, 1524, 1576, 1665, 1788, 1941, 2083, 2257, 2529, 2745, 2483, 2315, 2146, 2006, 1868, 1779, 1701, 1679, 1704, 1744, 1845, 1954, 2087, 2219, 2407, 2701, ]
+ gr: [2344, 2089, 1940, 1831, 1739, 1672, 1602, 1564, 1546, 1553, 1585, 1636, 1713, 1798, 1899, 2031, 2234, 2182, 1973, 1842, 1732, 1637, 1548, 1485, 1448, 1422, 1438, 1466, 1527, 1594, 1695, 1784, 1902, 2122, 2082, 1884, 1773, 1653, 1549, 1465, 1398, 1351, 1329, 1338, 1376, 1435, 1516, 1611, 1725, 1828, 2008, 1997, 1821, 1706, 1585, 1480, 1382, 1319, 1261, 1244, 1253, 1291, 1352, 1439, 1540, 1647, 1772, 1932, 1947, 1773, 1655, 1522, 1409, 1310, 1239, 1184, 1161, 1174, 1213, 1284, 1368, 1480, 1601, 1717, 1882, 1904, 1739, 1605, 1470, 1360, 1257, 1173, 1124, 1094, 1111, 1149, 1221, 1320, 1433, 1550, 1678, 1844, 1878, 1711, 1571, 1443, 1317, 1213, 1126, 1077, 1057, 1066, 1105, 1180, 1279, 1400, 1515, 1652, 1819, 1862, 1687, 1556, 1420, 1299, 1183, 1102, 1048, 1029, 1041, 1081, 1155, 1258, 1374, 1495, 1634, 1800, 1856, 1692, 1556, 1415, 1289, 1176, 1095, 1044, 1024, 1033, 1073, 1145, 1247, 1370, 1492, 1626, 1800, 1869, 1697, 1555, 1419, 1303, 1190, 1104, 1054, 1040, 1045, 1085, 1154, 1260, 1373, 1511, 1632, 1804, 1887, 1717, 1571, 1440, 1323, 1216, 1128, 1077, 1066, 1069, 1109, 1182, 1284, 1398, 1520, 1656, 1831, 1910, 1751, 1607, 1480, 1360, 1261, 1173, 1123, 1100, 1114, 1154, 1226, 1326, 1444, 1555, 1689, 1856, 1962, 1793, 1656, 1522, 1416, 1315, 1237, 1180, 1166, 1176, 1214, 1288, 1375, 1486, 1603, 1722, 1910, 2020, 1845, 1710, 1586, 1477, 1387, 1307, 1266, 1241, 1257, 1292, 1347, 1446, 1548, 1657, 1785, 1964, 2118, 1888, 1794, 1658, 1552, 1462, 1394, 1349, 1332, 1342, 1378, 1436, 1525, 1617, 1736, 1848, 2048, 2195, 1989, 1855, 1742, 1633, 1555, 1487, 1437, 1427, 1429, 1471, 1521, 1603, 1699, 1804, 1921, 2149, 2334, 2103, 1971, 1863, 1757, 1666, 1598, 1565, 1537, 1554, 1579, 1640, 1716, 1810, 1923, 2044, 2308, ]
+ gb: [2383, 2122, 1974, 1866, 1767, 1684, 1620, 1581, 1559, 1575, 1592, 1654, 1726, 1816, 1917, 2071, 2294, 2242, 2002, 1872, 1752, 1650, 1564, 1499, 1455, 1438, 1442, 1485, 1537, 1614, 1715, 1814, 1935, 2155, 2114, 1929, 1797, 1674, 1568, 1477, 1406, 1358, 1340, 1348, 1386, 1447, 1534, 1631, 1754, 1861, 2057, 2044, 1859, 1737, 1606, 1493, 1396, 1322, 1270, 1247, 1259, 1305, 1370, 1455, 1566, 1679, 1808, 1979, 1981, 1812, 1674, 1549, 1424, 1325, 1246, 1191, 1168, 1179, 1222, 1294, 1383, 1498, 1623, 1748, 1932, 1939, 1777, 1626, 1500, 1376, 1265, 1179, 1128, 1104, 1119, 1160, 1235, 1331, 1447, 1577, 1708, 1885, 1922, 1735, 1602, 1464, 1333, 1226, 1134, 1083, 1061, 1071, 1113, 1191, 1296, 1412, 1543, 1677, 1849, 1885, 1723, 1574, 1437, 1310, 1191, 1105, 1055, 1035, 1048, 1088, 1164, 1272, 1388, 1516, 1660, 1847, 1891, 1714, 1568, 1431, 1300, 1185, 1099, 1047, 1024, 1038, 1075, 1155, 1259, 1386, 1512, 1649, 1832, 1901, 1722, 1575, 1434, 1309, 1196, 1109, 1054, 1041, 1047, 1087, 1162, 1267, 1385, 1526, 1650, 1833, 1912, 1740, 1588, 1456, 1329, 1220, 1133, 1080, 1065, 1072, 1113, 1189, 1289, 1410, 1538, 1672, 1862, 1949, 1767, 1632, 1487, 1367, 1261, 1175, 1123, 1100, 1114, 1158, 1224, 1331, 1450, 1571, 1705, 1880, 1990, 1811, 1670, 1531, 1420, 1315, 1227, 1180, 1158, 1172, 1212, 1285, 1375, 1490, 1611, 1744, 1925, 2033, 1864, 1715, 1588, 1477, 1377, 1307, 1253, 1232, 1248, 1285, 1344, 1439, 1545, 1661, 1797, 1971, 2126, 1898, 1798, 1658, 1548, 1449, 1381, 1338, 1315, 1329, 1366, 1428, 1512, 1617, 1730, 1853, 2058, 2203, 1998, 1856, 1734, 1624, 1539, 1467, 1424, 1409, 1409, 1448, 1505, 1584, 1689, 1796, 1923, 2148, 2342, 2110, 1959, 1848, 1740, 1635, 1572, 1533, 1519, 1527, 1561, 1610, 1693, 1786, 1900, 2039, 2306, ]
+ b: [2199, 1976, 1828, 1725, 1640, 1549, 1510, 1473, 1457, 1462, 1485, 1529, 1603, 1690, 1796, 1922, 2111, 2048, 1861, 1735, 1618, 1532, 1462, 1400, 1360, 1346, 1355, 1384, 1433, 1501, 1589, 1680, 1793, 1982, 1975, 1801, 1672, 1564, 1465, 1387, 1326, 1294, 1272, 1284, 1310, 1363, 1440, 1518, 1627, 1730, 1888, 1903, 1736, 1617, 1500, 1405, 1325, 1260, 1219, 1198, 1208, 1239, 1296, 1365, 1465, 1557, 1664, 1833, 1837, 1684, 1556, 1449, 1345, 1261, 1200, 1151, 1132, 1137, 1175, 1238, 1307, 1402, 1517, 1627, 1775, 1806, 1650, 1518, 1407, 1306, 1216, 1144, 1099, 1078, 1092, 1120, 1185, 1270, 1360, 1472, 1596, 1740, 1778, 1621, 1499, 1381, 1270, 1180, 1110, 1066, 1046, 1057, 1087, 1150, 1236, 1335, 1447, 1560, 1703, 1764, 1612, 1479, 1367, 1255, 1158, 1089, 1045, 1031, 1038, 1071, 1128, 1218, 1312, 1430, 1544, 1702, 1773, 1604, 1480, 1359, 1252, 1148, 1082, 1041, 1024, 1036, 1061, 1124, 1210, 1314, 1432, 1542, 1693, 1782, 1617, 1485, 1366, 1253, 1162, 1092, 1046, 1038, 1043, 1068, 1130, 1215, 1322, 1431, 1549, 1700, 1786, 1634, 1499, 1378, 1276, 1184, 1108, 1067, 1060, 1062, 1094, 1153, 1235, 1346, 1450, 1556, 1722, 1813, 1667, 1535, 1411, 1306, 1220, 1148, 1103, 1089, 1091, 1132, 1189, 1277, 1372, 1474, 1593, 1740, 1852, 1712, 1569, 1449, 1354, 1263, 1195, 1156, 1137, 1149, 1180, 1239, 1319, 1413, 1516, 1627, 1798, 1910, 1741, 1617, 1509, 1403, 1324, 1267, 1221, 1205, 1213, 1244, 1296, 1377, 1459, 1565, 1679, 1826, 1984, 1788, 1696, 1556, 1473, 1386, 1333, 1296, 1280, 1282, 1316, 1361, 1442, 1519, 1624, 1732, 1905, 2059, 1881, 1746, 1642, 1533, 1467, 1400, 1370, 1354, 1357, 1389, 1438, 1500, 1587, 1688, 1800, 1995, 2190, 1971, 1845, 1743, 1643, 1562, 1515, 1468, 1453, 1454, 1501, 1532, 1608, 1692, 1782, 1904, 2117, ]
+ #4208x3120_F11_TL84_70 - F11_TL84
+ - ct: 4000
+ resolution: 4208x3120
+ r: [1286, 1278, 1265, 1240, 1240, 1217, 1199, 1205, 1185, 1191, 1213, 1243, 1251, 1276, 1282, 1297, 1358, 1273, 1227, 1225, 1219, 1199, 1190, 1164, 1151, 1137, 1151, 1174, 1213, 1238, 1237, 1261, 1274, 1331, 1273, 1220, 1214, 1199, 1174, 1154, 1126, 1115, 1105, 1106, 1132, 1183, 1215, 1238, 1260, 1277, 1310, 1254, 1204, 1204, 1193, 1151, 1097, 1081, 1066, 1057, 1066, 1094, 1133, 1183, 1228, 1240, 1275, 1341, 1239, 1196, 1193, 1167, 1112, 1071, 1046, 1035, 1034, 1045, 1056, 1097, 1153, 1210, 1232, 1257, 1313, 1240, 1187, 1195, 1142, 1080, 1048, 1031, 1023, 1025, 1026, 1034, 1065, 1115, 1186, 1223, 1254, 1322, 1241, 1178, 1166, 1121, 1060, 1031, 1014, 1029, 1039, 1026, 1032, 1057, 1101, 1162, 1210, 1247, 1295, 1224, 1178, 1157, 1104, 1049, 1021, 1015, 1036, 1044, 1036, 1024, 1049, 1097, 1144, 1206, 1235, 1312, 1215, 1170, 1153, 1098, 1046, 1020, 1017, 1043, 1046, 1036, 1028, 1039, 1086, 1144, 1202, 1234, 1280, 1224, 1178, 1148, 1093, 1049, 1010, 1011, 1032, 1038, 1030, 1024, 1042, 1094, 1153, 1213, 1231, 1294, 1237, 1185, 1157, 1104, 1050, 1017, 1005, 1029, 1030, 1022, 1027, 1048, 1098, 1172, 1213, 1243, 1300, 1244, 1173, 1168, 1122, 1073, 1021, 1011, 1004, 1007, 1015, 1029, 1062, 1115, 1176, 1219, 1227, 1304, 1243, 1192, 1182, 1148, 1093, 1048, 1014, 1004, 1007, 1019, 1039, 1068, 1132, 1187, 1214, 1237, 1290, 1233, 1197, 1186, 1170, 1130, 1068, 1043, 1021, 1024, 1035, 1063, 1100, 1148, 1200, 1218, 1239, 1280, 1225, 1193, 1182, 1178, 1152, 1113, 1082, 1057, 1055, 1069, 1098, 1133, 1184, 1199, 1214, 1224, 1291, 1224, 1180, 1184, 1176, 1165, 1145, 1105, 1093, 1081, 1091, 1128, 1167, 1185, 1197, 1202, 1207, 1268, 1216, 1185, 1208, 1194, 1182, 1156, 1131, 1104, 1097, 1110, 1150, 1176, 1214, 1220, 1219, 1234, 1375, ]
+ gr: [1267, 1211, 1186, 1180, 1181, 1169, 1162, 1152, 1144, 1152, 1159, 1184, 1192, 1196, 1221, 1236, 1372, 1236, 1175, 1159, 1149, 1143, 1142, 1134, 1123, 1120, 1130, 1134, 1154, 1170, 1190, 1202, 1212, 1256, 1214, 1170, 1139, 1139, 1125, 1116, 1120, 1100, 1097, 1106, 1111, 1131, 1160, 1173, 1191, 1203, 1266, 1206, 1150, 1137, 1128, 1111, 1095, 1087, 1073, 1069, 1077, 1097, 1116, 1137, 1160, 1182, 1204, 1252, 1187, 1142, 1137, 1122, 1098, 1068, 1065, 1046, 1052, 1054, 1069, 1093, 1121, 1147, 1174, 1200, 1253, 1176, 1136, 1125, 1111, 1080, 1061, 1044, 1042, 1032, 1041, 1055, 1072, 1106, 1139, 1157, 1186, 1246, 1182, 1120, 1109, 1092, 1067, 1042, 1037, 1033, 1028, 1031, 1043, 1058, 1094, 1130, 1156, 1179, 1240, 1162, 1120, 1110, 1088, 1054, 1032, 1030, 1027, 1027, 1025, 1035, 1050, 1091, 1121, 1149, 1186, 1226, 1152, 1122, 1108, 1092, 1054, 1031, 1024, 1026, 1029, 1021, 1037, 1055, 1085, 1113, 1144, 1178, 1217, 1168, 1113, 1102, 1084, 1053, 1032, 1025, 1024, 1027, 1027, 1032, 1048, 1083, 1123, 1142, 1168, 1226, 1163, 1116, 1111, 1086, 1060, 1033, 1023, 1023, 1025, 1028, 1035, 1062, 1090, 1124, 1140, 1164, 1216, 1179, 1124, 1107, 1100, 1072, 1043, 1024, 1024, 1020, 1029, 1044, 1067, 1106, 1128, 1143, 1163, 1219, 1179, 1127, 1117, 1105, 1086, 1053, 1034, 1029, 1029, 1034, 1054, 1076, 1102, 1125, 1157, 1179, 1231, 1165, 1137, 1120, 1112, 1100, 1069, 1051, 1038, 1038, 1052, 1068, 1097, 1109, 1132, 1146, 1166, 1233, 1187, 1128, 1122, 1111, 1107, 1083, 1073, 1057, 1060, 1076, 1083, 1105, 1114, 1134, 1139, 1170, 1243, 1174, 1126, 1115, 1111, 1097, 1093, 1072, 1073, 1067, 1077, 1095, 1104, 1120, 1139, 1135, 1169, 1256, 1232, 1141, 1148, 1125, 1122, 1123, 1104, 1096, 1093, 1094, 1117, 1137, 1146, 1153, 1158, 1160, 1389, ]
+ gb: [1264, 1211, 1190, 1175, 1162, 1153, 1144, 1142, 1132, 1132, 1149, 1168, 1193, 1211, 1221, 1230, 1377, 1240, 1176, 1162, 1152, 1140, 1139, 1131, 1120, 1120, 1122, 1142, 1155, 1163, 1191, 1203, 1210, 1274, 1240, 1171, 1153, 1142, 1131, 1118, 1104, 1091, 1099, 1099, 1111, 1133, 1156, 1172, 1192, 1213, 1273, 1222, 1157, 1140, 1134, 1117, 1092, 1075, 1069, 1067, 1080, 1091, 1115, 1136, 1167, 1180, 1211, 1272, 1226, 1153, 1134, 1124, 1102, 1079, 1063, 1048, 1050, 1055, 1072, 1097, 1123, 1158, 1180, 1201, 1273, 1199, 1142, 1131, 1117, 1088, 1059, 1042, 1035, 1034, 1037, 1057, 1078, 1116, 1145, 1161, 1193, 1256, 1211, 1141, 1116, 1106, 1074, 1049, 1035, 1031, 1033, 1033, 1045, 1073, 1104, 1136, 1153, 1188, 1250, 1196, 1128, 1114, 1100, 1060, 1039, 1030, 1034, 1032, 1030, 1030, 1057, 1094, 1125, 1155, 1169, 1257, 1204, 1126, 1114, 1100, 1063, 1037, 1022, 1024, 1032, 1034, 1036, 1060, 1094, 1125, 1148, 1172, 1242, 1188, 1123, 1116, 1093, 1060, 1035, 1025, 1024, 1027, 1027, 1034, 1057, 1090, 1134, 1146, 1172, 1239, 1192, 1122, 1119, 1095, 1069, 1040, 1021, 1026, 1016, 1030, 1038, 1065, 1094, 1136, 1148, 1173, 1244, 1202, 1132, 1117, 1104, 1068, 1043, 1034, 1020, 1019, 1025, 1042, 1072, 1102, 1136, 1152, 1167, 1237, 1191, 1136, 1120, 1108, 1087, 1053, 1034, 1025, 1020, 1032, 1050, 1073, 1110, 1130, 1148, 1182, 1238, 1201, 1133, 1117, 1120, 1100, 1071, 1049, 1038, 1032, 1048, 1064, 1090, 1117, 1134, 1152, 1170, 1237, 1188, 1128, 1128, 1115, 1106, 1090, 1067, 1058, 1058, 1066, 1082, 1107, 1115, 1135, 1148, 1171, 1250, 1187, 1138, 1126, 1119, 1108, 1095, 1078, 1075, 1066, 1079, 1090, 1099, 1121, 1143, 1149, 1165, 1237, 1229, 1158, 1157, 1139, 1119, 1118, 1101, 1078, 1084, 1091, 1103, 1125, 1130, 1149, 1173, 1184, 1398, ]
+ b: [1291, 1208, 1168, 1145, 1132, 1140, 1122, 1134, 1138, 1129, 1131, 1140, 1161, 1197, 1196, 1179, 1329, 1235, 1176, 1150, 1125, 1118, 1113, 1115, 1113, 1108, 1113, 1115, 1131, 1136, 1149, 1181, 1176, 1255, 1237, 1147, 1129, 1116, 1119, 1106, 1104, 1091, 1086, 1099, 1104, 1119, 1137, 1134, 1164, 1179, 1231, 1204, 1137, 1111, 1113, 1103, 1096, 1079, 1070, 1070, 1074, 1090, 1104, 1120, 1126, 1149, 1183, 1234, 1208, 1123, 1112, 1118, 1097, 1075, 1066, 1055, 1051, 1059, 1066, 1090, 1114, 1127, 1135, 1157, 1226, 1197, 1110, 1109, 1095, 1083, 1055, 1047, 1044, 1040, 1044, 1051, 1063, 1095, 1112, 1132, 1148, 1232, 1198, 1107, 1098, 1081, 1063, 1051, 1043, 1036, 1033, 1033, 1043, 1061, 1082, 1109, 1116, 1144, 1209, 1161, 1095, 1096, 1091, 1054, 1042, 1039, 1035, 1035, 1022, 1042, 1053, 1080, 1107, 1122, 1132, 1216, 1169, 1097, 1094, 1081, 1048, 1041, 1024, 1034, 1034, 1031, 1034, 1058, 1074, 1105, 1124, 1124, 1218, 1188, 1095, 1092, 1079, 1054, 1042, 1032, 1035, 1022, 1025, 1035, 1053, 1080, 1107, 1118, 1132, 1228, 1181, 1093, 1094, 1077, 1059, 1043, 1030, 1030, 1023, 1033, 1036, 1058, 1090, 1109, 1111, 1135, 1209, 1191, 1105, 1096, 1087, 1060, 1044, 1034, 1034, 1020, 1034, 1037, 1063, 1087, 1112, 1123, 1138, 1226, 1203, 1118, 1090, 1097, 1081, 1052, 1041, 1027, 1030, 1034, 1048, 1067, 1093, 1110, 1121, 1142, 1220, 1210, 1127, 1102, 1091, 1087, 1061, 1052, 1024, 1044, 1041, 1056, 1076, 1091, 1113, 1125, 1152, 1216, 1194, 1107, 1106, 1077, 1085, 1074, 1060, 1048, 1041, 1048, 1060, 1082, 1085, 1085, 1125, 1132, 1218, 1190, 1112, 1074, 1071, 1066, 1067, 1050, 1045, 1045, 1045, 1061, 1075, 1070, 1088, 1106, 1128, 1222, 1234, 1145, 1131, 1120, 1099, 1095, 1079, 1078, 1073, 1078, 1083, 1086, 1108, 1125, 1141, 1156, 1386, ]
+ #4208x3120_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 4208x3120
+ r: [1140, 1119, 1106, 1105, 1086, 1079, 1072, 1070, 1070, 1079, 1084, 1102, 1114, 1131, 1157, 1152, 1232, 1131, 1103, 1088, 1084, 1071, 1074, 1077, 1066, 1064, 1063, 1080, 1094, 1101, 1112, 1113, 1134, 1194, 1143, 1073, 1077, 1078, 1069, 1067, 1058, 1060, 1046, 1048, 1067, 1085, 1095, 1101, 1127, 1144, 1169, 1132, 1072, 1074, 1078, 1055, 1045, 1037, 1033, 1039, 1036, 1045, 1068, 1085, 1098, 1122, 1115, 1183, 1106, 1064, 1069, 1068, 1049, 1026, 1030, 1019, 1025, 1026, 1038, 1051, 1070, 1100, 1102, 1120, 1174, 1103, 1043, 1052, 1055, 1024, 1023, 1017, 1019, 1025, 1024, 1032, 1037, 1063, 1085, 1094, 1110, 1195, 1095, 1047, 1062, 1041, 1025, 1017, 1011, 1031, 1027, 1023, 1023, 1030, 1050, 1071, 1084, 1110, 1190, 1073, 1034, 1056, 1042, 1015, 1010, 1016, 1032, 1027, 1024, 1024, 1036, 1039, 1074, 1087, 1109, 1168, 1079, 1042, 1055, 1032, 1019, 1007, 1013, 1026, 1027, 1026, 1021, 1032, 1044, 1082, 1093, 1098, 1158, 1091, 1046, 1053, 1028, 1020, 1007, 1011, 1026, 1022, 1019, 1021, 1020, 1045, 1071, 1084, 1096, 1159, 1114, 1047, 1047, 1030, 1017, 997, 1008, 1016, 1019, 1021, 1016, 1028, 1053, 1080, 1094, 1103, 1157, 1088, 1049, 1052, 1040, 1024, 1003, 1001, 1004, 1010, 1006, 1019, 1037, 1057, 1085, 1084, 1099, 1161, 1106, 1057, 1063, 1056, 1032, 1010, 993, 998, 999, 1006, 1016, 1031, 1052, 1071, 1089, 1106, 1174, 1112, 1055, 1054, 1062, 1043, 1022, 1002, 1004, 1008, 1007, 1015, 1045, 1064, 1085, 1087, 1097, 1157, 1102, 1059, 1064, 1059, 1054, 1035, 1018, 1002, 1005, 1012, 1035, 1052, 1057, 1068, 1071, 1098, 1156, 1098, 1045, 1044, 1042, 1046, 1041, 1024, 1009, 1004, 1017, 1035, 1062, 1062, 1064, 1064, 1088, 1140, 1088, 1043, 1070, 1066, 1041, 1047, 1026, 1014, 1009, 1022, 1032, 1060, 1073, 1077, 1087, 1107, 1237, ]
+ gr: [1219, 1156, 1145, 1130, 1128, 1112, 1116, 1104, 1112, 1106, 1118, 1128, 1154, 1165, 1161, 1170, 1306, 1183, 1124, 1113, 1099, 1100, 1099, 1091, 1084, 1095, 1090, 1099, 1116, 1126, 1140, 1142, 1158, 1213, 1174, 1112, 1103, 1094, 1084, 1087, 1090, 1075, 1075, 1077, 1088, 1101, 1119, 1133, 1149, 1162, 1193, 1149, 1106, 1091, 1086, 1076, 1071, 1066, 1057, 1064, 1064, 1074, 1082, 1109, 1117, 1140, 1151, 1204, 1155, 1094, 1089, 1088, 1075, 1059, 1052, 1046, 1043, 1048, 1061, 1074, 1101, 1113, 1123, 1154, 1198, 1137, 1093, 1082, 1078, 1059, 1048, 1041, 1033, 1030, 1038, 1048, 1059, 1078, 1109, 1116, 1143, 1198, 1119, 1082, 1074, 1071, 1051, 1040, 1036, 1032, 1031, 1031, 1042, 1047, 1077, 1097, 1112, 1133, 1185, 1126, 1082, 1077, 1058, 1039, 1029, 1025, 1024, 1024, 1022, 1033, 1044, 1068, 1095, 1099, 1131, 1187, 1123, 1078, 1071, 1060, 1043, 1028, 1025, 1027, 1027, 1021, 1033, 1045, 1066, 1087, 1105, 1121, 1173, 1121, 1070, 1067, 1058, 1039, 1024, 1020, 1024, 1024, 1022, 1030, 1043, 1064, 1093, 1099, 1121, 1182, 1112, 1076, 1072, 1065, 1044, 1029, 1021, 1023, 1021, 1026, 1032, 1047, 1066, 1091, 1105, 1131, 1180, 1132, 1076, 1066, 1067, 1052, 1031, 1021, 1021, 1020, 1028, 1039, 1044, 1076, 1098, 1107, 1127, 1179, 1124, 1087, 1076, 1076, 1064, 1036, 1018, 1018, 1020, 1028, 1041, 1056, 1085, 1086, 1106, 1128, 1187, 1126, 1099, 1082, 1072, 1065, 1043, 1031, 1024, 1029, 1034, 1052, 1065, 1074, 1094, 1111, 1127, 1181, 1128, 1086, 1076, 1073, 1072, 1058, 1050, 1046, 1039, 1048, 1059, 1074, 1070, 1096, 1112, 1124, 1174, 1140, 1078, 1077, 1067, 1057, 1055, 1043, 1040, 1042, 1042, 1054, 1069, 1075, 1088, 1099, 1112, 1189, 1182, 1099, 1096, 1093, 1082, 1080, 1072, 1055, 1059, 1061, 1076, 1095, 1090, 1112, 1113, 1140, 1321, ]
+ gb: [1236, 1163, 1136, 1120, 1113, 1111, 1109, 1101, 1104, 1099, 1102, 1140, 1141, 1158, 1170, 1194, 1332, 1195, 1138, 1114, 1109, 1097, 1098, 1092, 1089, 1085, 1089, 1098, 1117, 1125, 1141, 1155, 1156, 1232, 1186, 1125, 1108, 1095, 1099, 1081, 1078, 1075, 1073, 1073, 1083, 1097, 1118, 1128, 1148, 1166, 1218, 1171, 1107, 1099, 1091, 1086, 1069, 1059, 1051, 1049, 1064, 1071, 1088, 1110, 1118, 1137, 1162, 1225, 1171, 1099, 1092, 1085, 1069, 1057, 1051, 1041, 1036, 1050, 1055, 1077, 1092, 1118, 1133, 1151, 1227, 1158, 1099, 1090, 1086, 1061, 1043, 1039, 1028, 1036, 1039, 1048, 1060, 1091, 1110, 1117, 1147, 1216, 1152, 1086, 1082, 1073, 1054, 1040, 1026, 1028, 1029, 1032, 1040, 1051, 1076, 1104, 1115, 1139, 1222, 1141, 1088, 1078, 1073, 1048, 1034, 1026, 1025, 1025, 1022, 1033, 1051, 1077, 1104, 1115, 1129, 1202, 1154, 1081, 1080, 1069, 1050, 1029, 1023, 1022, 1029, 1027, 1031, 1050, 1070, 1098, 1107, 1127, 1188, 1146, 1090, 1078, 1065, 1044, 1029, 1015, 1022, 1024, 1025, 1035, 1053, 1071, 1104, 1102, 1136, 1207, 1152, 1083, 1078, 1073, 1042, 1027, 1024, 1024, 1016, 1024, 1037, 1056, 1076, 1106, 1111, 1130, 1197, 1146, 1086, 1076, 1074, 1046, 1031, 1023, 1018, 1021, 1026, 1043, 1051, 1081, 1102, 1111, 1126, 1191, 1134, 1090, 1084, 1079, 1067, 1038, 1019, 1018, 1021, 1033, 1041, 1055, 1081, 1099, 1107, 1131, 1199, 1147, 1091, 1082, 1083, 1072, 1050, 1031, 1024, 1027, 1032, 1053, 1063, 1082, 1099, 1107, 1130, 1191, 1139, 1087, 1078, 1077, 1073, 1058, 1048, 1037, 1037, 1046, 1062, 1073, 1079, 1099, 1099, 1130, 1177, 1147, 1082, 1087, 1074, 1061, 1062, 1052, 1042, 1036, 1045, 1063, 1068, 1079, 1094, 1103, 1120, 1189, 1176, 1105, 1102, 1092, 1081, 1073, 1064, 1053, 1053, 1066, 1067, 1084, 1087, 1103, 1134, 1146, 1336, ]
+ b: [1203, 1195, 1154, 1123, 1104, 1106, 1116, 1099, 1099, 1099, 1102, 1106, 1123, 1155, 1149, 1168, 1283, 1196, 1141, 1119, 1102, 1098, 1088, 1088, 1095, 1086, 1095, 1097, 1101, 1117, 1121, 1156, 1135, 1209, 1211, 1127, 1102, 1082, 1089, 1088, 1072, 1075, 1083, 1083, 1085, 1106, 1107, 1120, 1142, 1149, 1224, 1163, 1121, 1087, 1078, 1085, 1077, 1062, 1065, 1056, 1057, 1082, 1093, 1094, 1096, 1111, 1147, 1193, 1179, 1105, 1083, 1088, 1070, 1074, 1060, 1048, 1055, 1044, 1068, 1082, 1091, 1097, 1102, 1141, 1209, 1178, 1091, 1076, 1077, 1063, 1060, 1043, 1043, 1035, 1046, 1059, 1064, 1084, 1103, 1107, 1125, 1196, 1156, 1088, 1068, 1070, 1057, 1043, 1046, 1041, 1038, 1038, 1046, 1059, 1073, 1083, 1086, 1111, 1178, 1146, 1067, 1083, 1068, 1044, 1042, 1033, 1044, 1033, 1026, 1037, 1045, 1067, 1089, 1092, 1108, 1203, 1148, 1082, 1072, 1066, 1050, 1044, 1035, 1035, 1031, 1028, 1035, 1055, 1069, 1082, 1094, 1101, 1188, 1163, 1067, 1074, 1056, 1040, 1034, 1037, 1026, 1022, 1033, 1037, 1049, 1067, 1084, 1092, 1103, 1185, 1156, 1074, 1073, 1066, 1042, 1036, 1028, 1031, 1030, 1034, 1042, 1051, 1073, 1091, 1090, 1102, 1196, 1172, 1086, 1071, 1077, 1055, 1041, 1036, 1025, 1024, 1028, 1032, 1053, 1076, 1094, 1089, 1101, 1178, 1179, 1095, 1079, 1075, 1070, 1043, 1026, 1022, 1022, 1029, 1045, 1054, 1078, 1075, 1092, 1120, 1179, 1193, 1091, 1074, 1061, 1064, 1056, 1043, 1034, 1026, 1027, 1039, 1060, 1081, 1070, 1078, 1115, 1205, 1172, 1096, 1069, 1060, 1071, 1055, 1044, 1035, 1027, 1043, 1048, 1063, 1054, 1065, 1083, 1122, 1186, 1158, 1088, 1060, 1043, 1037, 1037, 1031, 1033, 1025, 1029, 1035, 1041, 1041, 1060, 1084, 1114, 1202, 1217, 1122, 1101, 1079, 1058, 1061, 1049, 1056, 1051, 1036, 1062, 1061, 1076, 1094, 1116, 1139, 1331, ]
+
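Each set above pairs a correlated colour temperature (ct) and a capture resolution with four 17x17 gain tables, one per Bayer channel (r, gr, gb, b), where a value of 1024 apparently encodes unity gain at the optical centre. One plausible way a consumer selects and blends these tables — matching the active sensor mode, then interpolating linearly between the two sets whose ct values bracket the AWB estimate — is sketched below; the helper name and the linear blend are illustrative assumptions, not the IPA's actual code.

# Illustrative sketch: choose LSC tables by resolution, blend by colour
# temperature. 'sets' is the parsed YAML list above; 1024 ~= gain 1.0.
def lsc_for(sets, resolution, ct):
    candidates = sorted((s for s in sets if s["resolution"] == resolution),
                        key=lambda s: s["ct"])
    if not candidates:
        raise ValueError(f"no LSC calibration for {resolution}")
    channels = ("r", "gr", "gb", "b")
    # Clamp outside the calibrated range, blend linearly inside it.
    if ct <= candidates[0]["ct"]:
        return {ch: candidates[0][ch] for ch in channels}
    if ct >= candidates[-1]["ct"]:
        return {ch: candidates[-1][ch] for ch in channels}
    for lo, hi in zip(candidates, candidates[1:]):
        if lo["ct"] <= ct <= hi["ct"]:
            f = (ct - lo["ct"]) / (hi["ct"] - lo["ct"])
            return {ch: [round(a + (b - a) * f)
                         for a, b in zip(lo[ch], hi[ch])]
                    for ch in channels}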
diff --git a/src/ipa/rkisp1/data/meson.build b/src/ipa/rkisp1/data/meson.build
new file mode 100644
index 00000000..1e3522b2
--- /dev/null
+++ b/src/ipa/rkisp1/data/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'imx219.yaml',
+ 'imx258.yaml',
+ 'ov2685.yaml',
+ 'ov4689.yaml',
+ 'ov5640.yaml',
+ 'ov5695.yaml',
+ 'ov8858.yaml',
+ 'uncalibrated.yaml',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'rkisp1',
+ install_tag : 'runtime')
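This rule installs the tuning files under ipa_data_dir / 'rkisp1' (typically /usr/share/libcamera/ipa/rkisp1), tagged 'runtime' so they ship with the runtime package rather than the development one. The IPA is then expected to load the file named after the sensor and fall back to uncalibrated.yaml when no per-sensor calibration exists; that lookup convention is an assumption here, sketched minimally:

# Minimal sketch of the assumed lookup: <data_dir>/<sensor>.yaml, with
# uncalibrated.yaml as the fallback when no calibration is installed.
from pathlib import Path

def tuning_file(sensor, data_dir="/usr/share/libcamera/ipa/rkisp1"):
    path = Path(data_dir) / f"{sensor}.yaml"
    return path if path.exists() else Path(data_dir) / "uncalibrated.yaml"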
diff --git a/src/ipa/rkisp1/data/ov2685.yaml b/src/ipa/rkisp1/data/ov2685.yaml
new file mode 100644
index 00000000..fdfc98d3
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov2685.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #800x600_A_70 - A
+ - ct: 2856
+ resolution: 800x600
+ r: [2451, 2258, 2111, 2039, 1982, 1925, 1860, 1818, 1802, 1815, 1859, 1936, 1997, 2056, 2129, 2298, 2486, 2351, 2157, 2066, 1991, 1912, 1809, 1720, 1677, 1653, 1671, 1739, 1843, 1932, 2009, 2071, 2182, 2392, 2253, 2105, 2018, 1929, 1802, 1670, 1566, 1503, 1475, 1508, 1590, 1705, 1848, 1947, 2026, 2118, 2281, 2174, 2065, 1975, 1854, 1687, 1529, 1412, 1345, 1327, 1358, 1445, 1572, 1733, 1870, 1992, 2075, 2202, 2125, 2033, 1929, 1765, 1574, 1407, 1286, 1220, 1204, 1237, 1318, 1447, 1632, 1801, 1951, 2048, 2142, 2092, 2010, 1877, 1688, 1471, 1304, 1187, 1127, 1118, 1149, 1221, 1348, 1533, 1738, 1918, 2021, 2105, 2088, 1982, 1836, 1628, 1398, 1239, 1128, 1073, 1060, 1086, 1163, 1280, 1466, 1688, 1886, 2001, 2092, 2067, 1965, 1809, 1584, 1358, 1200, 1094, 1044, 1030, 1056, 1123, 1240, 1424, 1649, 1860, 1989, 2082, 2057, 1960, 1795, 1569, 1345, 1187, 1083, 1034, 1024, 1046, 1111, 1229, 1408, 1637, 1850, 1989, 2085, 2053, 1967, 1802, 1578, 1358, 1199, 1095, 1046, 1031, 1058, 1122, 1245, 1423, 1651, 1867, 1989, 2084, 2059, 1970, 1823, 1615, 1399, 1235, 1129, 1074, 1061, 1090, 1161, 1281, 1461, 1689, 1878, 2006, 2096, 2086, 1989, 1866, 1670, 1471, 1302, 1188, 1134, 1117, 1150, 1223, 1352, 1537, 1745, 1909, 2028, 2114, 2101, 2006, 1916, 1749, 1567, 1399, 1278, 1218, 1206, 1237, 1317, 1456, 1633, 1813, 1954, 2053, 2142, 2171, 2023, 1954, 1843, 1680, 1526, 1403, 1339, 1323, 1357, 1440, 1575, 1733, 1885, 1996, 2069, 2212, 2231, 2074, 1990, 1916, 1792, 1656, 1554, 1489, 1473, 1513, 1588, 1702, 1840, 1946, 2011, 2124, 2283, 2343, 2146, 2036, 1973, 1890, 1789, 1700, 1653, 1645, 1678, 1733, 1828, 1922, 1978, 2065, 2181, 2405, 2420, 2246, 2092, 2015, 1954, 1885, 1816, 1776, 1777, 1791, 1847, 1904, 1941, 2016, 2105, 2284, 2463, ]
+ gr: [1790, 1645, 1522, 1469, 1433, 1419, 1390, 1381, 1374, 1381, 1401, 1428, 1460, 1494, 1552, 1693, 1839, 1687, 1555, 1471, 1433, 1408, 1362, 1335, 1319, 1308, 1318, 1344, 1393, 1430, 1456, 1497, 1591, 1752, 1612, 1503, 1447, 1417, 1365, 1315, 1276, 1248, 1237, 1252, 1290, 1339, 1404, 1435, 1469, 1539, 1661, 1547, 1470, 1424, 1389, 1321, 1260, 1205, 1173, 1165, 1181, 1221, 1286, 1358, 1409, 1452, 1503, 1603, 1504, 1451, 1411, 1358, 1276, 1198, 1148, 1114, 1110, 1124, 1164, 1228, 1320, 1388, 1435, 1479, 1552, 1475, 1437, 1392, 1325, 1231, 1153, 1094, 1069, 1068, 1084, 1119, 1182, 1278, 1365, 1429, 1469, 1529, 1464, 1430, 1375, 1301, 1196, 1118, 1067, 1043, 1039, 1051, 1089, 1150, 1245, 1342, 1417, 1453, 1512, 1461, 1418, 1369, 1281, 1177, 1099, 1051, 1028, 1029, 1037, 1069, 1129, 1224, 1328, 1404, 1449, 1503, 1455, 1422, 1366, 1276, 1170, 1094, 1046, 1026, 1024, 1033, 1063, 1125, 1216, 1322, 1400, 1448, 1508, 1459, 1426, 1368, 1280, 1179, 1102, 1051, 1030, 1029, 1039, 1071, 1132, 1222, 1327, 1406, 1448, 1502, 1473, 1433, 1380, 1302, 1201, 1125, 1069, 1046, 1043, 1055, 1091, 1153, 1245, 1343, 1412, 1461, 1523, 1488, 1445, 1397, 1328, 1242, 1157, 1104, 1079, 1073, 1088, 1127, 1193, 1284, 1373, 1424, 1473, 1543, 1521, 1461, 1424, 1361, 1289, 1210, 1152, 1124, 1118, 1134, 1174, 1242, 1330, 1396, 1439, 1494, 1572, 1573, 1475, 1434, 1397, 1336, 1270, 1213, 1182, 1176, 1194, 1239, 1301, 1366, 1420, 1464, 1510, 1624, 1628, 1510, 1449, 1424, 1378, 1326, 1281, 1252, 1243, 1264, 1304, 1352, 1406, 1443, 1456, 1554, 1692, 1727, 1578, 1482, 1448, 1415, 1374, 1337, 1318, 1317, 1338, 1356, 1398, 1429, 1443, 1501, 1603, 1783, 1776, 1643, 1510, 1448, 1415, 1387, 1353, 1344, 1343, 1348, 1368, 1396, 1407, 1442, 1515, 1674, 1832, ]
+ gb: [1805, 1650, 1529, 1468, 1430, 1412, 1378, 1371, 1363, 1371, 1393, 1430, 1465, 1501, 1567, 1713, 1864, 1700, 1564, 1476, 1434, 1404, 1359, 1323, 1306, 1294, 1306, 1338, 1388, 1432, 1462, 1509, 1605, 1780, 1627, 1520, 1457, 1423, 1370, 1311, 1267, 1238, 1226, 1245, 1286, 1344, 1414, 1448, 1489, 1563, 1697, 1568, 1487, 1436, 1398, 1325, 1257, 1200, 1163, 1156, 1175, 1221, 1291, 1372, 1427, 1476, 1528, 1636, 1527, 1474, 1431, 1371, 1285, 1201, 1144, 1109, 1104, 1121, 1165, 1239, 1335, 1411, 1461, 1509, 1588, 1498, 1463, 1413, 1343, 1242, 1159, 1094, 1066, 1064, 1083, 1124, 1195, 1299, 1391, 1455, 1499, 1561, 1492, 1454, 1401, 1319, 1209, 1124, 1068, 1042, 1039, 1053, 1096, 1164, 1268, 1370, 1446, 1486, 1547, 1486, 1446, 1392, 1302, 1190, 1108, 1053, 1028, 1029, 1040, 1078, 1146, 1245, 1355, 1437, 1600, 1546, 1600, 1449, 1389, 1294, 1184, 1101, 1047, 1024, 1024, 1035, 1073, 1136, 1240, 1348, 1431, 1483, 1537, 1485, 1450, 1390, 1298, 1188, 1109, 1051, 1030, 1026, 1038, 1077, 1143, 1243, 1354, 1436, 1482, 1547, 1494, 1454, 1400, 1317, 1211, 1125, 1067, 1041, 1038, 1053, 1094, 1165, 1264, 1368, 1440, 1489, 1557, 1513, 1464, 1414, 1340, 1245, 1156, 1097, 1071, 1063, 1081, 1126, 1197, 1298, 1394, 1446, 1502, 1573, 1541, 1477, 1438, 1370, 1292, 1204, 1142, 1111, 1106, 1121, 1169, 1245, 1338, 1411, 1462, 1519, 1599, 1590, 1485, 1447, 1403, 1334, 1263, 1199, 1164, 1158, 1179, 1230, 1299, 1373, 1433, 1477, 1528, 1649, 1643, 1520, 1454, 1426, 1375, 1315, 1266, 1235, 1224, 1247, 1291, 1345, 1408, 1449, 1468, 1572, 1711, 1738, 1579, 1482, 1443, 1406, 1359, 1318, 1294, 1294, 1312, 1338, 1385, 1427, 1441, 1507, 1614, 1799, 1786, 1653, 1516, 1452, 1414, 1383, 1348, 1331, 1328, 1336, 1362, 1391, 1408, 1448, 1529, 1684, 1858, ]
+ b: [1807, 1633, 1496, 1427, 1395, 1372, 1357, 1340, 1339, 1335, 1356, 1382, 1410, 1454, 1541, 1690, 1860, 1657, 1503, 1411, 1364, 1342, 1312, 1286, 1274, 1262, 1270, 1287, 1326, 1355, 1387, 1447, 1550, 1726, 1556, 1438, 1374, 1340, 1305, 1267, 1236, 1213, 1199, 1211, 1246, 1280, 1324, 1355, 1397, 1475, 1620, 1473, 1407, 1350, 1317, 1270, 1223, 1173, 1144, 1135, 1151, 1185, 1237, 1292, 1326, 1368, 1422, 1544, 1430, 1375, 1331, 1293, 1238, 1166, 1120, 1096, 1091, 1104, 1133, 1188, 1261, 1310, 1351, 1388, 1487, 1383, 1362, 1316, 1269, 1194, 1128, 1076, 1054, 1057, 1070, 1101, 1146, 1229, 1294, 1329, 1368, 1459, 1368, 1347, 1301, 1250, 1162, 1099, 1057, 1039, 1035, 1041, 1076, 1119, 1199, 1271, 1321, 1349, 1440, 1360, 1338, 1299, 1234, 1145, 1086, 1042, 1029, 1026, 1034, 1059, 1104, 1176, 1260, 1307, 1344, 1439, 1347, 1342, 1293, 1226, 1139, 1077, 1040, 1024, 1025, 1030, 1051, 1099, 1170, 1249, 1301, 1335, 1432, 1346, 1342, 1295, 1227, 1145, 1083, 1040, 1025, 1024, 1031, 1059, 1096, 1170, 1247, 1297, 1338, 1436, 1362, 1344, 1299, 1245, 1161, 1095, 1055, 1034, 1031, 1041, 1069, 1115, 1185, 1252, 1299, 1347, 1453, 1378, 1353, 1311, 1261, 1191, 1117, 1077, 1058, 1045, 1063, 1092, 1141, 1210, 1274, 1302, 1358, 1461, 1405, 1364, 1329, 1281, 1229, 1159, 1106, 1084, 1080, 1093, 1124, 1180, 1244, 1285, 1317, 1380, 1496, 1467, 1379, 1343, 1304, 1260, 1208, 1154, 1127, 1117, 1138, 1172, 1225, 1266, 1297, 1340, 1397, 1556, 1532, 1428, 1354, 1325, 1290, 1248, 1211, 1181, 1178, 1197, 1227, 1261, 1293, 1321, 1342, 1450, 1624, 1634, 1502, 1394, 1347, 1316, 1283, 1251, 1239, 1241, 1254, 1266, 1297, 1312, 1328, 1396, 1509, 1739, 1685, 1572, 1426, 1351, 1313, 1285, 1257, 1254, 1249, 1259, 1266, 1287, 1292, 1336, 1429, 1593, 1816, ]
+ #800x600_D65_70 - D65
+ - ct: 6504
+ resolution: 800x600
+ r: [2310, 2164, 1991, 1936, 1850, 1817, 1755, 1703, 1707, 1707, 1757, 1836, 1862, 1962, 2029, 2221, 2360, 2246, 2047, 1960, 1865, 1809, 1707, 1633, 1600, 1571, 1595, 1646, 1733, 1829, 1886, 1973, 2107, 2297, 2150, 1988, 1897, 1818, 1703, 1592, 1504, 1453, 1424, 1452, 1527, 1625, 1753, 1828, 1929, 2014, 2213, 2056, 1960, 1846, 1757, 1608, 1475, 1376, 1315, 1297, 1330, 1399, 1512, 1645, 1782, 1879, 1981, 2117, 2007, 1925, 1817, 1678, 1513, 1371, 1268, 1205, 1188, 1221, 800, 1406, 1563, 1712, 1840, 1954, 2039, 1988, 1883, 1780, 1612, 1425, 1282, 1180, 1125, 1111, 1140, 1208, 1324, 1484, 1660, 1821, 1914, 2015, 1973, 1864, 1740, 1553, 1366, 1220, 1124, 1069, 1057, 1083, 1154, 1264, 1423, 1615, 1794, 1891, 2000, 1955, 1842, 1717, 1524, 1332, 1187, 1094, 1042, 1028, 1053, 1117, 1229, 1387, 1582, 1767, 1877, 1991, 1942, 1849, 1704, 1509, 1320, 1177, 1081, 1031, 1024, 1042, 1108, 1216, 1376, 1569, 1767, 1877, 1998, 1946, 1853, 1710, 1515, 1335, 1186, 1092, 1041, 1030, 1055, 1118, 1233, 1390, 1584, 1773, 1885, 1985, 1958, 1852, 1737, 1550, 1370, 1224, 1125, 1073, 1058, 1089, 1155, 1265, 1419, 1614, 1788, 1894, 2007, 1973, 1875, 1768, 1604, 1426, 1282, 1181, 1128, 1112, 1145, 1214, 1330, 1491, 1667, 1810, 1926, 2015, 1995, 1902, 1815, 1667, 1513, 1371, 1262, 1207, 1194, 1224, 1299, 1418, 1569, 1723, 1848, 1961, 2038, 2051, 1925, 1837, 1758, 1606, 1473, 1373, 1313, 1302, 1335, 1405, 1521, 1650, 1793, 1893, 1977, 2116, 2136, 1971, 1882, 1815, 1703, 1587, 1492, 1445, 1432, 1461, 1529, 1624, 1754, 1841, 1907, 2032, 2215, 2244, 2038, 1200, 1860, 1800, 1696, 1625, 1583, 1577, 1610, 1653, 1734, 1822, 1865, 1980, 2109, 2298, 2286, 2159, 1971, 1909, 1828, 1794, 1703, 1686, 1686, 1689, 1740, 1810, 1830, 1925, 1999, 2201, 2357, ]
+ gr: [1785, 1800, 1516, 1458, 1422, 1403, 1374, 1363, 1359, 1363, 1385, 1417, 1447, 1486, 1547, 1693, 1834, 1675, 1547, 1462, 1418, 1393, 1346, 1319, 1304, 1289, 1302, 1330, 1382, 1417, 1451, 1492, 1592, 1743, 1607, 1498, 1437, 1404, 1353, 1301, 1264, 1238, 1226, 1240, 1281, 1325, 1398, 1426, 1468, 1541, 1668, 1547, 1466, 1413, 1382, 1311, 1251, 1202, 1168, 1161, 1176, 1218, 1275, 1351, 1408, 1449, 1498, 1606, 1499, 1447, 1404, 1349, 1269, 1199, 1147, 1113, 1106, 1123, 1163, 1225, 1313, 1384, 1435, 1485, 1551, 1467, 1437, 1388, 1318, 1228, 1154, 1099, 1070, 1066, 1081, 1120, 1185, 1278, 1362, 1430, 1468, 1530, 1460, 1422, 1370, 1293, 1199, 1121, 1068, 1044, 1035, 1052, 1090, 1155, 1244, 1344, 1420, 1457, 1507, 1460, 1416, 1363, 1278, 1179, 1105, 1054, 1028, 1028, 1036, 1073, 1134, 1230, 1323, 1413, 1452, 1509, 1454, 1421, 1361, 1272, 1174, 1097, 1046, 1025, 1024, 1033, 1068, 1130, 1222, 1320, 1408, 1450, 1503, 1456, 1423, 1366, 1275, 1184, 1105, 1053, 1030, 1027, 1040, 1073, 1136, 1228, 1324, 1411, 1457, 1508, 1472, 1429, 1376, 1294, 1205, 1126, 1072, 1046, 1044, 1058, 1095, 1159, 1246, 1345, 1419, 1464, 1530, 1481, 1443, 1396, 1322, 1239, 1161, 1104, 1078, 1070, 1088, 1128, 1196, 1283, 1371, 1428, 1600, 1551, 1521, 1457, 1421, 1355, 1282, 1209, 1152, 1125, 1116, 1134, 1176, 1243, 1324, 1398, 1446, 1497, 1581, 1571, 1471, 1430, 1392, 1328, 1262, 1210, 1179, 1172, 1191, 1236, 1295, 1363, 1424, 1465, 1511, 1636, 1636, 1509, 1448, 1415, 1368, 1316, 1271, 1243, 1234, 1258, 800, 1340, 1407, 1439, 1459, 1561, 1699, 1720, 1577, 1479, 1444, 1408, 1362, 1325, 1304, 1305, 1325, 1348, 1394, 1426, 1439, 1503, 1609, 1788, 1770, 1642, 1502, 1444, 1400, 1384, 1338, 1334, 1329, 1339, 1357, 1389, 1396, 1443, 1514, 1670, 1822, ]
+ gb: [1791, 1649, 1516, 1459, 1422, 1404, 1373, 1360, 1353, 1358, 1386, 1424, 1451, 1492, 1563, 1710, 1854, 1687, 1553, 1463, 1420, 1393, 1347, 1313, 800, 1284, 1295, 1324, 1376, 1417, 1455, 1493, 1609, 1768, 1617, 1511, 1444, 1409, 1359, 1299, 1260, 1234, 1219, 1237, 1276, 1328, 1403, 1431, 1479, 1557, 1696, 1555, 1477, 1422, 1388, 1311, 1250, 1200, 1165, 1158, 1174, 1217, 1281, 1358, 1416, 1463, 1520, 1629, 1520, 1458, 1415, 1355, 1272, 1203, 1144, 1111, 1105, 1122, 1165, 1231, 1322, 1394, 1447, 1497, 1577, 1481, 1452, 1399, 1330, 1234, 1160, 1101, 1070, 1065, 1082, 1124, 1192, 1288, 1373, 1443, 1485, 1556, 1476, 1437, 1384, 1304, 1207, 1124, 1070, 1045, 1039, 1055, 1092, 1163, 1256, 1357, 1429, 1475, 1539, 1470, 1430, 1373, 1288, 1186, 1108, 1056, 1029, 1027, 1040, 1078, 1142, 1240, 1336, 1424, 1469, 1529, 1465, 1433, 1370, 1281, 1179, 1102, 1049, 1025, 1024, 1035, 1070, 1134, 1230, 1332, 1420, 1464, 1536, 1469, 1434, 1372, 1283, 1186, 1108, 1055, 1029, 1027, 1037, 1076, 1145, 1236, 1337, 1421, 1468, 1535, 1478, 1438, 1382, 1303, 1210, 1128, 1070, 1044, 1040, 1056, 1096, 1164, 1255, 1355, 1427, 1478, 1551, 1489, 1454, 1401, 1329, 1239, 1160, 1102, 1075, 1067, 1084, 1128, 1196, 1288, 1380, 1435, 1492, 1573, 1528, 1464, 1426, 1358, 1283, 1206, 1146, 1116, 1110, 1129, 1172, 1242, 1327, 1402, 1451, 1508, 1597, 1574, 1476, 1433, 1395, 1326, 1254, 1202, 1170, 1165, 1182, 1230, 1292, 1361, 1425, 1471, 1526, 1657, 1638, 1512, 1449, 1418, 1366, 1308, 1259, 1230, 1223, 1246, 1285, 1334, 1402, 1439, 1465, 1574, 1712, 1723, 1575, 1474, 1440, 1400, 1353, 1312, 1289, 1287, 1305, 1332, 1381, 1417, 1440, 1504, 1616, 1806, 1780, 1652, 1506, 1448, 1403, 1380, 1340, 1327, 1325, 1335, 1350, 1390, 1402, 1448, 1532, 1693, 1848, ]
+ b: [1834, 1686, 1532, 1462, 1420, 1404, 1369, 1360, 1354, 1357, 1375, 1415, 1442, 1496, 1568, 1741, 1872, 1706, 1543, 1441, 1391, 1366, 1321, 1295, 1281, 1270, 1276, 1305, 1345, 1389, 1418, 1477, 1588, 1752, 1594, 1473, 1400, 1363, 1317, 1269, 1238, 1216, 1206, 1214, 1250, 800, 1353, 1389, 1434, 1503, 1664, 1514, 1437, 1372, 1334, 1278, 1228, 1180, 1151, 1143, 1159, 1196, 1246, 1313, 1359, 1405, 1453, 1587, 1465, 1401, 1351, 1308, 1236, 1177, 1127, 1101, 1093, 1109, 1141, 1200, 1274, 1335, 1384, 1427, 1522, 1423, 1386, 1335, 1275, 1199, 1133, 1087, 1063, 1059, 1069, 1104, 1159, 1240, 1316, 1369, 1402, 1493, 1407, 1375, 1318, 1256, 1172, 1107, 1060, 1041, 1035, 1048, 1077, 1135, 1211, 1291, 1354, 1391, 1478, 1390, 1365, 1313, 1239, 1153, 1089, 1047, 1029, 1028, 1033, 1065, 1116, 1193, 1278, 1342, 1382, 1475, 1384, 1364, 1308, 1231, 1146, 1082, 1040, 1025, 1024, 1030, 1057, 1110, 1183, 1269, 1337, 1379, 1475, 1384, 1372, 1309, 1233, 1152, 1086, 1046, 1024, 1024, 1032, 1061, 1113, 1187, 1268, 1337, 1379, 1479, 1395, 1370, 1317, 1249, 1171, 1102, 1058, 1035, 1029, 1047, 1073, 1130, 1200, 1278, 1341, 1388, 1491, 1420, 1383, 1336, 1265, 1195, 1129, 1078, 1059, 1053, 1065, 1102, 1155, 1227, 1301, 1348, 1405, 1505, 1452, 1396, 1356, 1295, 1234, 1166, 1116, 1092, 1084, 1103, 1139, 1195, 1262, 1321, 1364, 1420, 1547, 1517, 1414, 1375, 1324, 1269, 1214, 1165, 1138, 1132, 1148, 1188, 1239, 1291, 1336, 1387, 1446, 1604, 1587, 1471, 1383, 1354, 1309, 1257, 1216, 1192, 1187, 1209, 1241, 1277, 1330, 1366, 1384, 1498, 1682, 1689, 1543, 1427, 1381, 1344, 1303, 1265, 1250, 1251, 1266, 1284, 1326, 1353, 1369, 1447, 1566, 1790, 1754, 1632, 1469, 1391, 1353, 1317, 1292, 1282, 1278, 1294, 1306, 1321, 1347, 1382, 1477, 1650, 1854, ]
+ #800x600_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 800x600
+ r: [2065, 1886, 1745, 1661, 1619, 1574, 1532, 1504, 1498, 1499, 1533, 1586, 1628, 1689, 1770, 1942, 2140, 1978, 1796, 1688, 1627, 1565, 1501, 1446, 1424, 1407, 1419, 1460, 1525, 1583, 1642, 1712, 1829, 2032, 1880, 1732, 1643, 1579, 1499, 1418, 1356, 1319, 1300, 1320, 1372, 1443, 1536, 1598, 1661, 1763, 1923, 1812, 1689, 1608, 1535, 1429, 1335, 1267, 1223, 1210, 1234, 1284, 1362, 1461, 1547, 1634, 1715, 1848, 1755, 1664, 1579, 1600, 1362, 1262, 1188, 1145, 1132, 1156, 1211, 1289, 1403, 1504, 1604, 1688, 1791, 1726, 1635, 1548, 1433, 1298, 1199, 1126, 1084, 1080, 1101, 1147, 1226, 1340, 1468, 1586, 1659, 1752, 1707, 1624, 1522, 1393, 1256, 1155, 1085, 1054, 1043, 1059, 1111, 1187, 1302, 1435, 1566, 1645, 1732, 1695, 1605, 1508, 1367, 1230, 1132, 1066, 1034, 1028, 1042, 1084, 1160, 1275, 1418, 1549, 1634, 1722, 1681, 1604, 1498, 1360, 1222, 1121, 1058, 1027, 1024, 1034, 1075, 1151, 1264, 1407, 1543, 1633, 1723, 1691, 1609, 1498, 1361, 1231, 1130, 1064, 1037, 1027, 1043, 1083, 1162, 1275, 1413, 1545, 1638, 1714, 1692, 1612, 1515, 1385, 1258, 1153, 1087, 1051, 1045, 1064, 1109, 1185, 1295, 1437, 1560, 1645, 1741, 1712, 1627, 1538, 1417, 1298, 1199, 1124, 1087, 1075, 1101, 1146, 1231, 1342, 1472, 1574, 1665, 1754, 1743, 1637, 1572, 1466, 1357, 1253, 1181, 1142, 1131, 1154, 1207, 1295, 1401, 1515, 1601, 1687, 1789, 1807, 1661, 1597, 1525, 1425, 1328, 1257, 1215, 1208, 1230, 1282, 1363, 1459, 1555, 1800, 1714, 1857, 1871, 1711, 1631, 1573, 1491, 1407, 1343, 1307, 1298, 1323, 1368, 1440, 1528, 1601, 1649, 1767, 1932, 1982, 1788, 1675, 1617, 1559, 1489, 1433, 1406, 1405, 1425, 1457, 1516, 1581, 1623, 1713, 1836, 2044, 2041, 1885, 1730, 1646, 1589, 1547, 1498, 1476, 1474, 1488, 1518, 1569, 1594, 1656, 1757, 1921, 2111, ]
+ gr: [1765, 1633, 1502, 1441, 1411, 1389, 1365, 1356, 1350, 1358, 1375, 1408, 1434, 1476, 1534, 1678, 1820, 1671, 1535, 1450, 1410, 1381, 1341, 1311, 1297, 1288, 1295, 1323, 1368, 1407, 1437, 1600, 1580, 1736, 1595, 1488, 1424, 1388, 1342, 1293, 1255, 1230, 1219, 1235, 1270, 1319, 1384, 1413, 1452, 1524, 1657, 1534, 1452, 1399, 1367, 1300, 1238, 1194, 1162, 1155, 1171, 1209, 1267, 1336, 1393, 1435, 1486, 1591, 1491, 1429, 1389, 1335, 1255, 1189, 1139, 1108, 1104, 1118, 1156, 1218, 1302, 1369, 1422, 1470, 1540, 1456, 1416, 1370, 1305, 1216, 1146, 1093, 1068, 1064, 1078, 1116, 1176, 1268, 1345, 1415, 1451, 1510, 1445, 1409, 1352, 1280, 1185, 1113, 1065, 1041, 1039, 1051, 1085, 1147, 1235, 1330, 1402, 1440, 1499, 1444, 1399, 1349, 1261, 1171, 1096, 1050, 1029, 1030, 1037, 1070, 1127, 1217, 1314, 1395, 1437, 1490, 1437, 1401, 1346, 1256, 1161, 1091, 1043, 1026, 1024, 1034, 1064, 1123, 1210, 1308, 1390, 1436, 1490, 1441, 1409, 1346, 1262, 1170, 1097, 1049, 1030, 1029, 1040, 1069, 1129, 1216, 1315, 1393, 1439, 1490, 1458, 1413, 1357, 1280, 1194, 1118, 1065, 1044, 1043, 1055, 1088, 1151, 1235, 1331, 1404, 1448, 1513, 1475, 1426, 1378, 1304, 1225, 1149, 1098, 1074, 1067, 1083, 1122, 1187, 1268, 1356, 1411, 1465, 1530, 1505, 1439, 1402, 1339, 1268, 1197, 1144, 1119, 1110, 1129, 1167, 1232, 1313, 1383, 1428, 1481, 1563, 1564, 1455, 1415, 1373, 1313, 1249, 1203, 1173, 1167, 1184, 1227, 1284, 1349, 1404, 1449, 1499, 1617, 1620, 1493, 1428, 1402, 1354, 1303, 1261, 1236, 1228, 1250, 1285, 1333, 1389, 1428, 1444, 1544, 1684, 1710, 1568, 1462, 1428, 1394, 1354, 1315, 800, 1298, 1317, 1337, 1381, 1411, 1428, 1491, 1594, 1774, 1755, 1632, 1496, 1430, 1395, 1370, 1330, 1328, 1322, 1331, 1348, 1378, 1392, 1426, 1503, 1657, 1810, ]
+ gb: [1773, 1627, 1500, 1438, 1403, 1382, 1352, 1341, 1336, 1344, 1365, 1404, 1435, 1476, 1545, 1692, 1839, 1672, 1540, 1450, 1406, 1376, 1332, 1298, 1282, 1274, 1284, 1312, 1363, 1405, 1440, 1483, 1594, 1751, 1608, 1494, 1426, 1391, 1341, 1284, 1247, 1219, 1207, 1224, 1263, 1318, 1388, 1423, 1460, 1542, 1678, 1545, 1463, 1407, 1368, 1298, 1235, 1188, 1153, 1148, 1163, 1207, 1268, 1345, 1402, 1450, 1506, 1613, 1499, 1442, 1399, 1342, 1259, 1187, 1135, 1103, 1096, 1116, 1157, 1222, 1310, 1382, 1436, 1489, 1564, 1475, 1434, 1382, 1315, 1221, 1145, 1093, 1065, 1061, 1076, 1115, 1182, 1278, 1364, 1431, 1474, 1541, 1461, 1425, 1368, 1290, 1193, 1118, 1064, 1041, 1037, 1050, 1090, 1154, 1246, 1346, 1420, 1466, 1525, 1463, 1416, 1363, 1273, 1178, 1097, 1051, 1030, 1029, 1039, 1073, 1136, 1232, 1332, 1414, 1460, 1519, 1452, 1420, 1357, 1268, 1172, 1094, 1045, 1026, 1024, 1034, 1067, 1131, 1223, 1324, 1409, 1458, 1521, 1460, 1420, 1359, 1271, 1175, 1099, 1048, 1029, 1027, 1038, 1072, 1136, 1227, 1330, 1412, 1458, 1524, 1467, 1424, 1368, 1289, 1197, 1117, 1063, 1040, 1038, 1053, 1089, 1156, 1246, 1345, 1415, 1470, 1538, 1486, 1437, 1384, 1309, 1224, 1146, 1091, 1067, 1063, 1077, 1118, 1187, 1278, 1367, 1425, 1600, 1553, 1519, 1445, 1408, 1342, 1266, 1192, 1136, 1106, 1102, 1119, 1161, 1230, 1316, 1389, 1438, 1495, 1583, 1567, 1460, 1420, 1374, 1310, 1241, 1189, 1158, 1152, 1173, 1214, 1278, 1348, 1410, 1456, 1511, 1634, 1624, 1498, 1427, 1400, 1346, 1294, 1244, 1219, 1210, 1232, 1271, 1321, 1384, 1430, 1448, 1557, 1697, 1719, 1560, 1458, 1421, 1381, 1338, 1298, 1274, 1275, 1292, 1318, 1365, 1404, 1424, 1489, 1601, 1785, 1751, 1637, 1497, 1429, 1389, 1361, 1323, 1311, 1309, 1318, 1339, 1374, 1388, 1429, 1513, 1674, 1829, ]
+ b: [1800, 1643, 1486, 1416, 1376, 1354, 1329, 1318, 1309, 1310, 1331, 1359, 1390, 1444, 1533, 1708, 1846, 1664, 1510, 1400, 1351, 1324, 1286, 1260, 1246, 1235, 1244, 1266, 1306, 1341, 1373, 1441, 1556, 1734, 1557, 1441, 1360, 1322, 1282, 1242, 1211, 1188, 1180, 1186, 1220, 1258, 1309, 1346, 1391, 1475, 1626, 1484, 1400, 1331, 1300, 1247, 1202, 1163, 1135, 1127, 1143, 1170, 1215, 1274, 1315, 1365, 1417, 1555, 1422, 1368, 1316, 1270, 1209, 1158, 1117, 1088, 1084, 1094, 1130, 1174, 1240, 800, 1343, 1389, 1497, 1383, 1351, 1299, 1247, 1177, 1122, 1081, 1057, 1051, 1067, 1094, 1142, 1209, 1274, 1329, 1362, 1461, 1367, 1333, 1284, 1224, 1153, 1098, 1056, 1040, 1035, 1042, 1070, 1118, 1186, 1255, 1314, 1349, 1441, 1355, 1327, 1275, 1209, 1137, 1082, 1044, 1029, 1026, 1034, 1056, 1100, 1166, 1241, 1302, 1341, 1439, 1343, 1325, 1270, 1201, 1130, 1075, 1037, 1024, 1026, 1030, 1050, 1094, 1160, 1231, 1295, 1334, 1434, 1347, 1330, 1274, 1203, 1135, 1079, 1040, 1026, 1024, 1031, 1054, 1097, 1161, 1231, 1292, 1338, 1433, 1358, 1330, 1280, 1219, 1152, 1093, 1051, 1032, 1030, 1043, 1067, 1115, 1173, 1237, 1298, 1348, 1447, 1382, 1342, 1298, 1236, 1174, 1115, 1071, 1051, 1044, 1060, 1088, 1138, 1197, 1259, 1301, 1365, 1464, 1410, 1360, 1314, 1259, 1205, 1149, 1104, 1079, 1075, 1090, 1123, 1171, 1227, 1277, 1315, 1387, 1508, 1476, 1376, 1330, 1287, 1238, 1188, 1144, 1122, 1115, 1132, 1165, 1206, 1249, 1294, 1344, 1402, 1567, 1548, 1431, 1348, 1314, 1271, 1224, 1190, 1168, 1163, 1182, 1210, 1246, 1286, 1318, 1344, 1462, 1650, 1658, 1510, 1386, 1342, 1305, 1268, 1232, 1220, 1221, 1236, 1250, 1283, 1311, 1328, 1406, 1530, 1755, 1698, 1587, 1431, 1350, 1304, 1274, 1244, 1238, 1239, 1245, 1262, 1283, 1293, 1339, 1439, 1608, 1825, ]
+ #800x600_D50_70 - D50
+ - ct: 5003
+ resolution: 800x600
+ r: [2543, 2578, 2509, 2438, 2318, 2233, 2133, 2085, 2088, 2130, 2245, 2390, 2533, 2674, 2811, 2910, 2790, 2536, 2518, 2407, 2309, 2153, 2048, 1910, 1861, 1865, 1921, 2013, 2160, 2340, 2523, 2664, 2836, 2882, 2501, 2408, 2276, 2127, 1951, 1804, 1701, 1655, 1635, 1674, 1771, 1939, 2141, 2356, 2565, 2701, 2839, 2403, 2314, 2154, 1963, 1779, 1618, 1511, 1447, 1433, 1470, 1554, 1714, 1920, 2196, 2430, 2589, 2694, 2352, 2232, 2049, 1828, 1635, 1472, 1357, 1295, 1274, 1317, 1399, 1543, 1785, 2021, 2302, 2494, 2688, 2254, 2143, 1936, 1720, 1509, 1345, 1237, 1168, 1158, 1188, 1271, 1420, 1614, 1894, 2190, 2443, 2592, 2210, 2085, 1870, 1630, 1432, 1264, 1161, 1090, 1079, 1102, 1184, 1329, 1525, 1797, 2112, 2377, 2587, 2224, 2063, 1822, 1598, 1381, 1217, 1121, 1045, 1031, 1063, 1129, 1270, 1481, 1749, 2059, 2344, 2559, 2234, 2083, 1812, 1592, 1381, 1215, 1102, 1046, 1024, 1053, 1122, 1257, 1466, 1734, 2045, 2338, 2530, 2224, 2063, 1856, 1610, 1407, 1237, 1126, 1063, 1044, 1072, 1145, 1288, 1485, 1764, 2059, 2344, 2539, 2273, 2135, 1906, 1675, 1470, 1299, 1187, 1112, 1094, 1120, 1208, 1348, 1546, 1828, 2124, 2377, 2566, 2321, 2197, 1986, 1779, 1563, 1402, 1271, 1209, 1192, 1221, 1313, 1461, 1664, 1929, 2203, 2460, 2659, 2371, 2292, 2119, 1906, 1700, 1538, 1407, 1335, 1321, 1366, 1447, 1593, 1800, 2062, 2331, 2570, 2737, 2485, 2382, 2262, 2078, 1876, 1721, 1587, 1525, 1504, 1545, 1633, 1785, 1985, 2246, 2464, 2631, 2799, 2621, 2465, 2387, 2243, 2063, 1912, 1801, 1734, 1705, 1755, 1848, 2005, 2213, 2417, 2584, 2773, 2900, 2757, 2632, 2519, 2419, 2283, 2160, 2044, 1976, 1979, 2024, 2107, 2272, 2430, 2578, 2731, 2921, 2984, 2724, 2762, 2663, 2570, 2413, 2331, 2245, 2227, 2242, 2278, 2369, 2486, 2647, 2763, 2864, 3041, 2860, ]
+ gr: [2123, 2151, 2065, 2008, 1917, 1836, 1766, 1738, 1740, 1752, 1817, 1882, 1943, 2023, 2110, 2206, 2123, 2143, 2093, 2006, 1915, 1810, 1724, 1632, 1597, 1588, 1608, 1665, 1733, 1827, 1928, 2014, 2122, 2189, 2104, 2052, 1936, 1805, 1686, 1575, 1502, 1464, 1446, 1461, 1512, 1597, 1705, 1827, 1949, 2027, 2124, 2066, 1962, 1856, 1704, 1563, 1450, 1376, 1323, 1310, 1323, 1371, 1466, 1570, 1714, 1868, 1954, 2066, 1997, 1917, 1771, 1622, 1466, 1351, 1258, 1217, 1199, 1211, 1265, 1351, 1469, 1622, 1781, 1891, 1989, 1958, 1863, 1700, 1537, 1382, 1265, 1182, 1133, 1118, 1128, 1178, 1254, 1385, 1537, 1695, 1838, 1943, 1935, 1829, 1642, 1480, 1319, 1202, 1122, 1078, 1061, 1073, 1114, 1196, 1316, 1477, 1655, 1806, 1913, 1953, 1794, 1639, 1442, 1288, 1171, 1089, 1047, 1031, 1044, 1083, 1153, 1279, 1436, 1623, 1783, 1924, 1940, 1807, 1621, 1442, 1283, 1166, 1083, 1041, 1024, 1034, 1073, 1147, 1270, 1436, 1608, 1768, 1897, 1968, 1828, 1639, 1470, 1297, 1182, 1096, 1055, 1038, 1050, 1090, 1168, 1290, 1442, 1627, 1783, 1917, 1942, 1841, 1682, 1510, 1349, 1222, 1132, 1088, 1067, 1081, 1127, 1206, 1326, 1486, 1651, 1811, 1942, 2005, 1901, 1743, 1578, 1422, 1303, 1209, 1152, 1135, 1148, 1191, 1280, 1399, 1548, 1719, 1845, 1974, 2057, 1952, 1830, 1685, 1512, 1393, 1305, 1245, 1221, 1233, 1289, 1372, 1489, 1634, 1776, 1904, 2031, 2113, 2007, 1918, 1777, 1640, 1511, 1423, 1360, 1344, 1360, 1400, 1494, 1608, 1742, 1862, 1976, 2123, 2199, 2104, 2006, 1879, 1756, 1649, 1553, 1502, 1480, 1495, 1546, 1633, 1732, 1839, 1956, 2052, 2210, 2300, 2191, 2104, 2010, 1907, 1802, 1717, 1669, 1655, 1673, 1717, 1792, 1878, 1955, 2054, 2222, 2274, 2310, 2336, 2195, 2103, 2012, 1925, 1861, 1823, 1814, 1844, 1889, 1931, 2004, 2079, 2166, 2287, 2213, ]
+ gb: [2166, 2183, 2106, 2056, 1961, 1889, 1800, 1772, 1760, 1791, 1821, 1907, 1948, 2040, 2115, 2205, 2191, 2197, 2125, 2062, 1973, 1862, 1758, 1680, 1620, 1612, 1636, 1693, 1758, 1851, 1953, 2031, 2125, 2174, 2125, 2067, 1974, 1852, 1719, 1621, 1532, 1477, 1465, 1480, 1535, 1605, 1724, 1852, 1967, 2050, 2156, 2107, 2015, 1893, 1738, 1608, 1485, 1406, 1337, 1319, 1337, 1382, 1476, 1589, 1733, 1869, 1985, 2070, 2037, 1948, 1806, 1641, 1501, 1377, 1287, 1227, 1215, 1227, 1274, 1364, 1485, 1645, 1806, 1928, 2028, 1981, 1887, 1728, 1564, 1409, 1285, 1199, 1145, 1125, 1135, 1183, 1270, 1395, 1560, 1733, 1868, 1974, 1965, 1841, 1670, 1509, 1349, 1221, 1138, 1084, 1065, 1073, 1121, 1208, 1332, 1496, 1670, 1835, 1958, 1948, 1818, 1642, 1467, 1315, 1185, 1099, 1052, 1035, 1042, 1084, 1163, 1292, 1458, 1638, 1812, 1948, 1942, 1809, 1635, 1467, 1296, 1178, 1094, 1039, 1024, 1038, 1073, 1157, 1285, 1451, 1640, 1803, 1935, 1948, 1812, 1646, 1483, 1317, 1196, 1107, 1057, 1043, 1053, 1090, 1183, 1296, 1464, 1650, 1818, 1941, 1965, 1841, 1687, 1519, 1362, 1243, 1145, 1094, 1075, 1088, 1137, 1225, 1339, 1512, 1692, 1835, 1988, 1981, 1893, 1738, 1586, 1435, 1314, 1218, 1160, 1143, 1158, 1212, 1294, 1418, 1578, 1742, 1887, 2005, 2037, 1948, 1838, 1674, 1527, 1398, 1309, 1251, 1236, 1253, 1305, 1385, 1514, 1674, 1816, 1934, 2062, 2098, 2015, 1899, 1791, 1656, 1530, 1430, 1379, 1360, 1379, 1428, 1517, 1639, 1781, 1893, 2015, 2117, 2199, 2075, 1988, 1910, 1776, 1664, 1583, 1518, 1502, 1525, 1576, 1668, 1776, 1898, 1981, 2084, 2221, 2269, 2204, 2103, 2021, 1921, 1827, 1751, 1676, 1671, 1693, 1755, 1843, 1927, 2007, 2095, 2224, 2294, 2285, 2285, 2190, 2112, 2009, 1956, 1909, 1853, 1845, 1864, 1921, 1995, 2058, 2137, 2199, 2308, 2231, ]
+ b: [2007, 2014, 1951, 1922, 1856, 1794, 1746, 1720, 1718, 1747, 1818, 1865, 1956, 2026, 2146, 2219, 2251, 2020, 1954, 1914, 1840, 1745, 1673, 1626, 1592, 1586, 1613, 1674, 1732, 1851, 1938, 2030, 2131, 2207, 1927, 1878, 1807, 1732, 1628, 1548, 1486, 1461, 1440, 1465, 1519, 1601, 1715, 1846, 1943, 2018, 2141, 1863, 1826, 1730, 1633, 1515, 1436, 1369, 1326, 1318, 1337, 1399, 1479, 1598, 1729, 1865, 1962, 2051, 1840, 1751, 1653, 1541, 1426, 1333, 1265, 1217, 1214, 1223, 1281, 1373, 1493, 1641, 1794, 1908, 2015, 1803, 1695, 1587, 1462, 1347, 1245, 1173, 1139, 1122, 1139, 1197, 1288, 1404, 1555, 1712, 1845, 1987, 1781, 1659, 1544, 1402, 1284, 1186, 1117, 1075, 1065, 1088, 1131, 1214, 1342, 1504, 1667, 1808, 1945, 1753, 1639, 1509, 1376, 1253, 1152, 1083, 1045, 1040, 1051, 1094, 1177, 1307, 1464, 1630, 1782, 1939, 1752, 1626, 1510, 1370, 1248, 1141, 1076, 1037, 1024, 1043, 1087, 1163, 1299, 1452, 1631, 1789, 1927, 1761, 1639, 1509, 1384, 1259, 1157, 1088, 1049, 1036, 1061, 1103, 1190, 1321, 1469, 1648, 1806, 1939, 1772, 1673, 1550, 1423, 1304, 1194, 1124, 1088, 1073, 1094, 1143, 1231, 1353, 1508, 1673, 1816, 1955, 1794, 1709, 1599, 1495, 1373, 1269, 1191, 1149, 1129, 1159, 1210, 1298, 1429, 1571, 1726, 1854, 2010, 1840, 1759, 1679, 1567, 1448, 1358, 1284, 1234, 1228, 1249, 1306, 1392, 1507, 1647, 1794, 1917, 2076, 1929, 1835, 1760, 1670, 1565, 1470, 1388, 1351, 1335, 1362, 1423, 1511, 1609, 1743, 1865, 1983, 2145, 2028, 1898, 1841, 1761, 1670, 1590, 1519, 1483, 1475, 1505, 1563, 1640, 1749, 1862, 1943, 2078, 2218, 2109, 2014, 1944, 1883, 1812, 1745, 1674, 1630, 1635, 1665, 1717, 1801, 1884, 1967, 2064, 2188, 2295, 2157, 2126, 2020, 1952, 1891, 1833, 1781, 1761, 1773, 1803, 1857, 1943, 2005, 2026, 2159, 2268, 2251, ]
+
+...
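A note on the grid geometry shared by these files: x-size and y-size list the relative widths of the first eight sectors on each axis (8 x 0.0625 = 0.5, a uniform grid over half the image, with the second half mirrored), which yields 16 sectors and therefore 17 x 17 = 289 corner samples per channel table. A small sanity check along those lines, written as a sketch rather than a reimplementation of the IPA's parser:

# Sketch: check one LSC set against the grid declared by x-size/y-size.
# Eight mirrored sector sizes per axis => 16 sectors => 289 corner samples.
def check_lsc_set(x_size, y_size, lsc_set):
    for axis, sizes in (("x", x_size), ("y", y_size)):
        assert len(sizes) == 8, f"{axis}-size must list 8 sector sizes"
        assert abs(sum(sizes) - 0.5) < 1e-6, \
            f"{axis} sectors must span half the image"
    for ch in ("r", "gr", "gb", "b"):
        assert len(lsc_set[ch]) == 17 * 17, f"{ch}: expected 289 samples"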
diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml
new file mode 100644
index 00000000..60901296
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov4689.yaml
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+...
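Note the trailing colons: each entry under algorithms is a one-key mapping from the algorithm name to its parameter block, so "- Agc:" parses as the mapping Agc -> null. A file as small as this one therefore enables Agc, Awb and BlackLevelCorrection with whatever defaults those algorithms implement. A sketch of walking such a list with a generic YAML loader (PyYAML here is an assumption; libcamera uses its own parser):

# Sketch: iterate the 'algorithms' list. Each item is a single-key mapping
# of algorithm name -> parameters; '- Agc:' parses as {'Agc': None}.
import yaml  # PyYAML, assumed available

def enabled_algorithms(path):
    with open(path) as f:
        doc = yaml.safe_load(f)
    for entry in doc["algorithms"]:
        (name, params), = entry.items()
        yield name, params or {}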
diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml
new file mode 100644
index 00000000..4b21d412
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov5640.yaml
@@ -0,0 +1,250 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+ - ColorProcessing:
+ - GammaSensorLinearization:
+ x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]
+ y:
+ red: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ green: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ blue: [ 0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4095 ]
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ - ct: 3000
+ r: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ gr: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ gb: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ b: [
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ ]
+ - ct: 7000
+ r: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ gr: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ gb: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ b: [
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ ]
+ - DefectPixelClusterCorrection:
+ fixed-set: false
+ sets:
+ # PG (peak gradient), LC (line check), RO (rank order), RND (rank neighbour difference), RG (rank gradient)
+ - line-threshold:
+ green: 8
+ red-blue: 8
+ line-mad-factor:
+ green: 4
+ red-blue: 4
+ pg-factor:
+ green: 8
+ red-blue: 8
+ rnd-threshold:
+ green: 10
+ red-blue: 10
+ rg-factor:
+ green: 32
+ red-blue: 32
+ ro-limits:
+ green: 1
+ red-blue: 1
+ rnd-offsets:
+ green: 2
+ red-blue: 2
+ # PG, LC, RO
+ - line-threshold:
+ green: 24
+ red-blue: 32
+ line-mad-factor:
+ green: 16
+ red-blue: 24
+ pg-factor:
+ green: 6
+ red-blue: 8
+ ro-limits:
+ green: 2
+ red-blue: 2
+ # PG, LC, RO, RND, RG
+ - line-threshold:
+ green: 32
+ red-blue: 32
+ line-mad-factor:
+ green: 4
+ red-blue: 4
+ pg-factor:
+ green: 10
+ red-blue: 10
+ rnd-threshold:
+ green: 6
+ red-blue: 8
+ rg-factor:
+ green: 4
+ red-blue: 4
+ ro-limits:
+ green: 1
+ red-blue: 2
+ rnd-offsets:
+ green: 2
+ red-blue: 2
+ - Dpf:
+ DomainFilter:
+ g: [ 16, 16, 16, 16, 16, 16 ]
+ rb: [ 16, 16, 16, 16, 16, 16 ]
+ NoiseLevelFunction:
+ coeff: [
+ 1023, 1023, 1023, 1023, 1023, 1023, 1023, 1023,
+ 1023, 1023, 1023, 1023, 1023, 1023, 1023, 1023,
+ 1023
+ ]
+ scale-mode: "linear"
+ FilterStrength:
+ r: 64
+ g: 64
+ b: 64
+ - Filter:
+...
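The GammaSensorLinearization entry above describes the sensor degamma curve as 16 x-intervals plus 17 output breakpoints per colour; with uniform intervals and y values rising linearly from 0 to 4095, the curve is in effect an identity mapping for 12-bit data. How the intervals translate to register values is hardware-specific and not restated in this patch, but assuming they expand to proportional segment widths across the 12-bit input range, the curve can be evaluated as below:

# Sketch: evaluate a piecewise-linear linearization curve. 'intervals' are
# relative segment widths, 'y' the 17 breakpoints; scaling the intervals
# over the 12-bit range is an assumption made for illustration.
def linearize(code, intervals, y, in_max=4095):
    total = sum(intervals)
    xs = [0.0]
    for dx in intervals:
        xs.append(xs[-1] + dx * in_max / total)
    for (x0, x1), (y0, y1) in zip(zip(xs, xs[1:]), zip(y, y[1:])):
        if code <= x1:
            return y0 + (y1 - y0) * (code - x0) / (x1 - x0)
    return y[-1]

# With the tables above this is (approximately) the identity:
# linearize(c, [2] * 16, list(range(0, 4096, 256)) + [4095]) ~= c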
diff --git a/src/ipa/rkisp1/data/ov5695.yaml b/src/ipa/rkisp1/data/ov5695.yaml
new file mode 100644
index 00000000..2e39e3a5
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov5695.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #2592x1944_A_70 - A
+ - ct: 2856
+ resolution: 2592x1944
+ r: [2312, 2874, 2965, 2789, 2603, 2424, 2288, 2176, 2151, 2176, 2240, 2345, 2520, 2736, 2856, 2825, 2272, 2675, 3026, 2925, 2693, 2443, 2247, 2074, 1992, 1947, 1972, 2066, 2211, 2386, 2618, 2847, 2953, 2698, 2927, 3008, 2846, 2541, 2272, 2037, 1867, 1782, 1740, 1762, 1855, 1981, 2198, 2454, 2711, 2963, 2927, 2974, 2920, 2664, 2337, 2061, 1822, 1648, 1550, 1503, 1550, 1648, 1794, 1982, 2257, 2565, 2805, 2880, 2933, 2799, 2472, 2161, 1880, 1631, 1457, 1361, 1328, 1364, 1448, 1602, 1817, 2087, 2390, 2698, 2911, 2947, 2734, 2404, 2061, 1759, 1525, 1340, 1244, 1209, 1240, 1343, 1473, 1701, 1975, 2278, 2641, 2823, 2948, 2680, 2342, 1979, 1667, 1425, 1259, 1159, 1125, 1159, 1238, 1407, 1633, 1914, 2235, 2592, 2866, 2936, 2661, 2276, 1908, 1624, 1368, 1190, 1097, 1058, 1086, 1178, 1341, 1556, 1848, 2175, 2509, 2763, 2873, 2603, 2230, 1868, 1578, 1320, 1157, 1058, 1024, 1053, 1142, 1302, 1521, 1789, 2125, 2471, 2760, 2896, 2661, 2276, 1914, 1591, 1349, 1176, 1083, 1044, 1080, 1166, 1327, 1544, 1814, 2141, 2509, 2763, 2969, 2710, 2342, 1985, 1676, 1431, 1250, 1146, 1105, 1140, 1234, 1392, 1616, 1895, 2235, 2578, 2847, 3060, 2800, 2426, 2076, 1764, 1518, 1335, 1227, 1197, 1227, 1314, 1486, 1696, 1989, 2298, 2641, 2863, 2978, 2853, 2496, 2169, 1880, 1631, 1457, 1345, 1304, 1334, 1429, 1586, 1811, 2064, 2378, 2698, 2867, 3024, 2960, 2664, 2327, 2054, 1811, 1626, 1517, 1490, 1514, 1597, 1763, 1962, 2229, 2538, 2768, 2926, 3032, 3077, 2864, 2554, 2272, 2052, 1861, 1747, 1716, 1742, 1816, 1995, 2190, 2454, 2727, 2920, 2927, 2849, 3155, 3008, 2772, 2490, 2276, 2121, 2006, 1954, 1978, 2066, 2202, 2408, 2648, 2847, 2977, 2797, 2440, 3116, 3132, 2900, 2738, 2509, 2329, 2239, 2194, 2230, 2298, 2436, 2617, 2825, 2965, 2899, 2312, ]
+ gr: [1557, 1922, 2004, 1947, 1841, 1757, 1689, 1651, 1631, 1647, 1680, 1737, 1835, 1911, 1995, 1941, 1613, 1820, 2038, 1996, 1900, 1779, 1692, 1617, 1565, 1549, 1554, 1594, 1670, 1753, 1875, 1957, 2029, 1848, 2009, 2064, 1956, 1834, 1715, 1601, 1518, 1474, 1446, 1459, 1505, 1582, 1666, 1796, 1935, 2029, 2009, 2013, 2006, 1874, 1731, 1602, 1493, 1409, 1346, 1332, 1348, 1395, 1474, 1576, 1689, 1843, 1944, 2003, 1982, 1931, 1783, 1637, 1496, 1386, 1297, 1238, 1219, 1239, 1284, 1370, 1474, 1601, 1747, 1897, 2000, 1998, 1920, 1755, 1587, 1455, 1325, 1228, 1171, 1159, 1176, 1223, 1311, 1418, 1565, 1707, 1855, 1990, 2007, 1897, 1733, 1574, 1423, 1296, 1183, 1121, 1101, 1132, 1182, 1277, 1396, 1539, 1696, 1866, 1990, 2000, 1870, 1692, 1529, 1377, 1239, 1141, 1077, 1057, 1079, 1141, 1230, 1350, 1493, 1640, 1810, 1961, 1957, 1849, 1669, 1496, 1356, 1212, 1112, 1053, 1024, 1049, 1106, 1203, 1322, 1465, 1615, 1780, 1919, 1969, 1870, 1675, 1515, 1365, 1232, 1128, 1063, 1042, 1068, 1123, 1220, 1345, 1483, 1628, 1788, 1945, 2007, 1917, 1728, 1574, 1420, 1285, 1173, 1115, 1088, 1109, 1170, 1268, 1388, 1532, 1678, 1835, 1999, 2033, 1927, 1760, 1613, 1461, 1334, 1234, 1175, 1145, 1168, 1225, 1311, 1423, 1557, 1726, 1874, 2015, 2000, 1960, 1810, 1641, 1515, 1391, 1292, 1228, 1212, 1232, 1275, 1358, 1462, 1601, 1737, 1883, 1974, 2032, 2006, 1874, 1712, 1594, 1477, 1395, 1329, 1316, 1327, 1375, 1453, 1547, 1671, 1808, 1937, 1994, 2039, 2064, 1971, 1829, 1701, 1608, 1521, 1465, 1441, 1462, 1498, 1571, 1666, 1785, 1921, 2003, 2039, 1886, 2087, 2062, 1926, 1817, 1706, 1637, 1572, 1560, 1572, 1613, 1688, 1774, 1868, 1973, 2029, 1886, 1692, 2020, 2067, 2008, 1897, 1822, 1741, 1704, 1683, 1695, 1727, 1783, 1872, 1977, 2022, 1989, 1639, ]
+ gb: [1553, 1926, 1992, 1930, 1852, 1746, 1675, 1630, 1611, 1622, 1671, 1726, 1804, 1915, 1992, 1955, 1584, 1852, 2043, 2001, 1879, 1773, 1674, 1602, 1548, 1532, 1541, 1583, 1661, 1752, 1867, 1986, 2034, 1881, 1993, 2060, 1976, 1811, 1697, 1590, 1505, 1459, 1439, 1453, 1496, 1579, 1674, 1795, 1940, 2051, 2034, 2018, 2003, 1866, 1735, 1594, 1478, 1396, 1339, 1326, 1339, 1388, 1463, 1579, 1707, 1842, 1980, 2037, 2014, 1950, 1793, 1641, 1509, 1384, 1291, 1229, 1209, 1231, 1283, 1369, 1481, 1625, 1751, 1901, 2023, 2029, 1925, 1750, 1602, 1458, 1330, 1228, 1162, 1144, 1166, 1218, 1308, 1433, 1572, 1730, 1872, 2029, 2020, 1934, 1752, 1578, 1429, 1288, 1181, 1116, 1102, 1130, 1184, 1278, 1400, 1546, 1700, 1870, 2020, 2030, 1899, 1706, 1536, 1388, 1239, 1137, 1074, 1053, 1078, 1134, 1235, 1358, 1509, 1661, 1838, 1989, 1985, 1853, 1682, 1522, 1356, 1209, 1114, 1050, 1024, 1046, 1106, 1206, 1335, 1478, 1623, 1801, 1954, 2005, 1887, 1706, 1536, 1383, 1235, 1131, 1063, 1045, 1059, 1120, 1225, 1356, 1493, 1666, 1815, 1981, 2063, 1948, 1767, 1589, 1438, 1293, 1183, 1116, 1093, 1115, 1174, 1272, 1400, 1546, 1695, 1877, 2012, 2055, 1952, 1795, 1633, 1476, 1347, 1235, 1167, 1146, 1160, 1230, 1323, 1435, 1579, 1730, 1898, 2046, 2059, 1972, 1843, 1666, 1519, 1402, 1291, 1231, 1209, 1233, 1283, 1366, 1481, 1613, 1767, 1922, 2023, 2066, 2036, 1903, 1740, 1609, 1484, 1399, 1337, 1317, 1330, 1378, 1451, 1572, 1689, 1830, 1964, 2037, 2034, 2097, 2005, 1856, 1724, 1608, 1521, 1471, 1450, 1456, 1505, 1593, 1688, 1805, 1940, 2051, 2045, 1974, 2123, 2067, 1958, 1827, 1719, 1633, 1580, 1563, 1576, 1609, 1688, 1783, 1892, 2009, 2053, 1911, 1652, 2078, 2101, 2021, 1915, 1837, 1731, 1682, 1661, 1686, 1717, 1782, 1864, 1982, 2036, 2005, 1669, ]
+ b: [1439, 1756, 1796, 1808, 1716, 1631, 1568, 1537, 1530, 1546, 1578, 1608, 1676, 1744, 1796, 1756, 1456, 1685, 1858, 1830, 1764, 1687, 1603, 1529, 1486, 1489, 1486, 1493, 1552, 1628, 1721, 1812, 1858, 1727, 1837, 1888, 1825, 1726, 1628, 1548, 1478, 1449, 1423, 1434, 1462, 1521, 1566, 1688, 1809, 1888, 1837, 1889, 1857, 1775, 1680, 1576, 1467, 1403, 1336, 1309, 1329, 1369, 1429, 1529, 1623, 1733, 1822, 1868, 1852, 1828, 1704, 1585, 1486, 1377, 1285, 1237, 1216, 1232, 1268, 1344, 1438, 1536, 1667, 1764, 1813, 1853, 1815, 1675, 1576, 1436, 1333, 1226, 1158, 1145, 1158, 1216, 1298, 1407, 1503, 1640, 1754, 1816, 1908, 1800, 1691, 1536, 1422, 1296, 1188, 1114, 1095, 1114, 1174, 1268, 1388, 1485, 1623, 1742, 1851, 1865, 1783, 1646, 1513, 1378, 1236, 1124, 1071, 1050, 1074, 1132, 1211, 1333, 1463, 1603, 1713, 1829, 1822, 1736, 1621, 1486, 1358, 1211, 1109, 1040, 1024, 1037, 1101, 1197, 1314, 1423, 1559, 1683, 1788, 1829, 1769, 1635, 1513, 1371, 1231, 1128, 1057, 1033, 1057, 1112, 1202, 1327, 1455, 1572, 1700, 1794, 1870, 1831, 1679, 1554, 1430, 1290, 1170, 1103, 1091, 1107, 1165, 1263, 1374, 1501, 1623, 1742, 1833, 1911, 1863, 1724, 1586, 1459, 1352, 1236, 1171, 1153, 1171, 1221, 1315, 1414, 1520, 1663, 1799, 1872, 1913, 1861, 1730, 1626, 1511, 1397, 1296, 1242, 1221, 1227, 1279, 1350, 1446, 1555, 1691, 1779, 1852, 1934, 1893, 1804, 1703, 1576, 1475, 1396, 1329, 1309, 1336, 1363, 1437, 1538, 1634, 1747, 1839, 1868, 1955, 1991, 1910, 1808, 1696, 1596, 1537, 1472, 1445, 1457, 1494, 1539, 1617, 1739, 1825, 1928, 1860, 1818, 2015, 1981, 1906, 1778, 1680, 1627, 1585, 1551, 1566, 1596, 1646, 1725, 1824, 1902, 1945, 1794, 1571, 1937, 1977, 1932, 1866, 1784, 1714, 1674, 1642, 1662, 1678, 1730, 1788, 1859, 1913, 1912, 1592, ]
+ #2592x1944_D65_70 - D65
+ - ct: 6504
+ resolution: 2592x1944
+ r: [2457, 2985, 2981, 2763, 2587, 2383, 2222, 2123, 2089, 2123, 2167, 2270, 2466, 2638, 2823, 2805, 2457, 2770, 3097, 2893, 2640, 2410, 2169, 2039, 1933, 1908, 1914, 1973, 2117, 2295, 2514, 2728, 2953, 2735, 3009, 2991, 2771, 2467, 2201, 1985, 1825, 1726, 1679, 1703, 1791, 1924, 2085, 2345, 2583, 2806, 2898, 3015, 2906, 2586, 2267, 2005, 1790, 1629, 1527, 1488, 1505, 1597, 1734, 1923, 2169, 2447, 2714, 2876, 2953, 2756, 2435, 2120, 1832, 1617, 1462, 1359, 1326, 1351, 1423, 1573, 1774, 2014, 2285, 2612, 2857, 2963, 2676, 2324, 2016, 1735, 1499, 1334, 1234, 1201, 1227, 1313, 1452, 1649, 1893, 2177, 2503, 2754, 2883, 2582, 2252, 1912, 1634, 1401, 1236, 1144, 1106, 1135, 1215, 1365, 1570, 1804, 2091, 2443, 2715, 2839, 2555, 2196, 1860, 1576, 1346, 1180, 1084, 1046, 1077, 1161, 1305, 1501, 1767, 2056, 2384, 2678, 2797, 2546, 2165, 1832, 1546, 1314, 1150, 1060, 1024, 1046, 1133, 1275, 1474, 1726, 2030, 2378, 2667, 2811, 2555, 2169, 1843, 1564, 1321, 1161, 1069, 1032, 1057, 1146, 1289, 1496, 1751, 2021, 2350, 2653, 2883, 2603, 2195, 1884, 1614, 1388, 1219, 1116, 1077, 1107, 1196, 1335, 1529, 1787, 2079, 2406, 2689, 2900, 2630, 2293, 1963, 1677, 1462, 1294, 1194, 1157, 1181, 1274, 1403, 1622, 1847, 2163, 2464, 2727, 2920, 2731, 2400, 2071, 1798, 1567, 1404, 1301, 1264, 1293, 1376, 1514, 1711, 1949, 2224, 2568, 2767, 3015, 2820, 2545, 2196, 1933, 1719, 1554, 1452, 1422, 1442, 1525, 1661, 1847, 2078, 2358, 2639, 2780, 2971, 2927, 2674, 2396, 2110, 1904, 1767, 1654, 1611, 1627, 1720, 1848, 2026, 2250, 2540, 2722, 2863, 2842, 3023, 2864, 2576, 2311, 2105, 1952, 1857, 1808, 1830, 1912, 2033, 2205, 2417, 2652, 2822, 2667, 2489, 3024, 2981, 2737, 2546, 2317, 2180, 2086, 2041, 2050, 2140, 2255, 2391, 2615, 2735, 2840, 2366, ]
+ gr: [1766, 2092, 2109, 2006, 1875, 1775, 1707, 1659, 1633, 1646, 1679, 1754, 1844, 1954, 2045, 2041, 1740, 1981, 2142, 2048, 1911, 1779, 1678, 1597, 1549, 1529, 1539, 1570, 1630, 1728, 1848, 1970, 2064, 1971, 2109, 2107, 1982, 1820, 1673, 1563, 1494, 1442, 1423, 1433, 1472, 1538, 1630, 1751, 1899, 2019, 2058, 2121, 2066, 1892, 1719, 1584, 1472, 1386, 1331, 1311, 1326, 1370, 1441, 1533, 1673, 1820, 1956, 2062, 2080, 1982, 1807, 1636, 1493, 1379, 1293, 1236, 1213, 1230, 1280, 1353, 1458, 1580, 1729, 1885, 2017, 2074, 1934, 1756, 1584, 1435, 1318, 1220, 1163, 1142, 1154, 1207, 1280, 1393, 1522, 1666, 1844, 1990, 2041, 1886, 1711, 1535, 1392, 1269, 1165, 1106, 1086, 1103, 1151, 1240, 1356, 1479, 1635, 1802, 1969, 2006, 1856, 1673, 1506, 1359, 1220, 1131, 1067, 1041, 1056, 1113, 1201, 1312, 1446, 1594, 1771, 1937, 2000, 1841, 1654, 1489, 1334, 1201, 1105, 1046, 1024, 1038, 1096, 1183, 1299, 1428, 1577, 1746, 1925, 2006, 1850, 1656, 1490, 1339, 1210, 1112, 1054, 1028, 1044, 1098, 1188, 1296, 1431, 1574, 1754, 1923, 2033, 1868, 1692, 1518, 1366, 1242, 1143, 1085, 1060, 1074, 1133, 1214, 1329, 1460, 1602, 1780, 1938, 2040, 1900, 1722, 1547, 1409, 1291, 1192, 1131, 1107, 1125, 1174, 1258, 1363, 1488, 1644, 1813, 1958, 2052, 1939, 1770, 1592, 1461, 1346, 1254, 1192, 1174, 1186, 1236, 1312, 1410, 1535, 1690, 1846, 1975, 2071, 1986, 1843, 1664, 1533, 1424, 1338, 1280, 1256, 1269, 1309, 1387, 1475, 1596, 1753, 1898, 2006, 2058, 2045, 1906, 1756, 1622, 1517, 1432, 1380, 1363, 1372, 1412, 1480, 1566, 1691, 1835, 1955, 2008, 1971, 2083, 2008, 1842, 1718, 1606, 1530, 1488, 1463, 1468, 1506, 1574, 1675, 1772, 1904, 1992, 1922, 1748, 2103, 2063, 1961, 1838, 1724, 1648, 1600, 1596, 1592, 1627, 1690, 1780, 1890, 1969, 1992, 1713, ]
+ gb: [1749, 2093, 2072, 1983, 1869, 1765, 1684, 1638, 1621, 1629, 1666, 1734, 1838, 1925, 2019, 2021, 1722, 1981, 2142, 2048, 1904, 1774, 1660, 1582, 1535, 1512, 1528, 1563, 1626, 1728, 1854, 1970, 2064, 1961, 2088, 2107, 1975, 1809, 1668, 1556, 1481, 1424, 1406, 1421, 1456, 1528, 1626, 1761, 1886, 2028, 2068, 2111, 2049, 1873, 1715, 1569, 1465, 1376, 1323, 1300, 1321, 1363, 1432, 1536, 1660, 1808, 1956, 2062, 2089, 1975, 1797, 1632, 1493, 1374, 1284, 1228, 1205, 1226, 1273, 1351, 1449, 1577, 1729, 1898, 2035, 2083, 1934, 1751, 1584, 1441, 1307, 1214, 1156, 1134, 1153, 1203, 1280, 1393, 1526, 1675, 1844, 1998, 2049, 1905, 1702, 1535, 1390, 1265, 1160, 1103, 1078, 1100, 1150, 1238, 1351, 1485, 1631, 1814, 1984, 2014, 1868, 1678, 1506, 1356, 1218, 1123, 1065, 1039, 1055, 1112, 1201, 1317, 1446, 1602, 1782, 1952, 2008, 1853, 1658, 1496, 1344, 1203, 1110, 1046, 1024, 1037, 1091, 1179, 1292, 1428, 1588, 1757, 1947, 2030, 1856, 1660, 1493, 1346, 1212, 1116, 1049, 1024, 1040, 1093, 1190, 1303, 1440, 1590, 1760, 1937, 2041, 1886, 1688, 1522, 1376, 1240, 1146, 1083, 1057, 1074, 1131, 1218, 1331, 1466, 1614, 1785, 1953, 2066, 1920, 1737, 1558, 1415, 1289, 1186, 1130, 1110, 1123, 1172, 1254, 1368, 1492, 1644, 1814, 1974, 2080, 1953, 1775, 1612, 1461, 1343, 1254, 1194, 1174, 1186, 1236, 1309, 1413, 1528, 1695, 1852, 1983, 2081, 2009, 1837, 1678, 1543, 1424, 1338, 1278, 1254, 1273, 1306, 1390, 1485, 1604, 1758, 1905, 2016, 2078, 2062, 1926, 1777, 1626, 1517, 1441, 1388, 1363, 1367, 1412, 1487, 1574, 1686, 1835, 1962, 2018, 1981, 2112, 2016, 1848, 1733, 1614, 1541, 1488, 1469, 1468, 1520, 1570, 1666, 1789, 1911, 1992, 1913, 1776, 2082, 2072, 1968, 1856, 1739, 1657, 1600, 1577, 1592, 1627, 1695, 1786, 1883, 1977, 2002, 1722, ]
+ b: [1681, 1945, 1998, 1882, 1777, 1699, 1617, 1588, 1571, 1554, 1581, 1644, 1729, 1797, 1905, 1919, 1646, 1868, 2012, 1964, 1828, 1711, 1617, 1535, 1492, 1479, 1478, 1509, 1559, 1636, 1737, 1860, 1925, 1830, 1961, 2001, 1890, 1754, 1638, 1529, 1463, 1407, 1389, 1407, 1432, 1485, 1574, 1668, 1790, 1898, 1922, 1995, 1962, 1813, 1680, 1557, 1453, 1378, 1319, 1297, 1302, 1348, 1418, 1505, 1605, 1726, 1868, 1944, 2004, 1901, 1765, 1611, 1482, 1375, 1287, 1230, 1207, 1224, 1259, 1338, 1420, 1528, 1664, 1807, 1921, 1969, 1858, 1708, 1557, 1434, 1317, 1217, 1161, 1142, 1156, 1206, 1275, 1369, 1481, 1598, 1764, 1880, 1973, 1821, 1664, 1516, 1392, 1270, 1165, 1106, 1085, 1095, 1152, 1231, 1336, 1445, 1567, 1725, 1856, 1947, 1804, 1647, 1495, 1359, 1230, 1136, 1067, 1043, 1060, 1115, 1197, 1299, 1419, 1548, 1695, 1834, 1924, 1787, 1623, 1478, 1346, 1212, 1114, 1052, 1024, 1044, 1094, 1172, 1287, 1408, 1532, 1681, 1853, 1925, 1804, 1641, 1481, 1351, 1225, 1124, 1056, 1032, 1046, 1099, 1181, 1296, 1410, 1531, 1688, 1806, 1951, 1821, 1664, 1516, 1377, 1255, 1150, 1089, 1066, 1082, 1128, 1214, 1315, 1432, 1562, 1709, 1856, 1957, 1840, 1688, 1546, 1413, 1297, 1190, 1139, 1116, 1130, 1179, 1259, 1347, 1462, 1592, 1740, 1859, 1968, 1881, 1728, 1588, 1460, 1345, 1265, 1199, 1180, 1191, 1241, 1307, 1391, 1498, 1644, 1773, 1876, 2008, 1940, 1789, 1654, 1531, 1427, 1341, 1286, 1265, 1273, 1316, 1370, 1471, 1569, 1696, 1830, 1896, 2002, 1977, 1871, 1732, 1620, 1519, 1432, 1387, 1362, 1364, 1402, 1466, 1535, 1654, 1782, 1877, 1896, 1895, 2025, 1975, 1828, 1704, 1599, 1540, 1478, 1456, 1459, 1499, 1548, 1636, 1737, 1841, 1925, 1830, 1705, 2013, 2036, 1912, 1785, 1720, 1636, 1588, 1565, 1576, 1599, 1664, 1722, 1815, 1905, 1945, 1681, ]
+ #2592x1944_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 2592x1944
+ r: [2512, 2860, 2753, 2554, 2376, 2198, 2033, 1949, 1924, 1921, 2012, 2100, 2257, 2461, 2682, 2775, 2436, 2753, 2915, 2713, 2415, 2193, 2004, 1869, 1790, 1755, 1774, 1844, 1945, 2108, 2306, 2547, 2755, 2697, 2849, 2810, 2526, 2247, 2018, 1821, 1692, 1608, 1577, 1591, 1653, 1775, 1921, 2132, 2371, 2625, 2765, 2881, 2679, 2376, 2077, 1853, 1677, 1542, 1449, 1412, 1430, 1511, 1615, 1781, 1983, 2258, 2517, 2722, 2832, 2589, 2237, 1977, 1718, 1527, 1403, 1319, 1290, 1307, 1370, 1491, 1658, 1850, 2112, 2408, 2708, 2718, 2474, 2154, 1861, 1616, 1439, 1293, 1211, 1176, 1205, 1275, 1390, 1553, 1773, 2008, 2313, 2607, 2661, 2388, 2066, 1781, 1535, 1359, 1207, 1130, 1098, 1117, 1192, 1313, 1474, 1688, 1934, 2240, 2537, 2672, 2353, 2024, 1733, 1494, 1296, 1162, 1075, 1045, 1064, 1146, 1261, 1422, 1640, 1889, 2197, 2528, 2599, 2332, 1991, 1718, 1484, 1276, 1139, 1051, 1024, 1051, 1117, 1245, 1409, 1620, 1861, 2179, 2481, 2651, 2338, 2004, 1719, 1479, 1289, 1146, 1066, 1034, 1055, 1127, 1248, 1413, 1633, 1872, 2184, 2471, 2640, 2372, 2045, 1751, 1514, 1324, 1189, 1107, 1064, 1097, 1163, 1280, 1455, 1661, 1915, 2226, 2498, 2672, 2457, 2107, 1820, 1587, 1390, 1248, 1170, 1132, 1155, 1235, 1353, 1510, 1729, 1967, 2268, 2544, 2781, 2532, 2198, 1920, 1678, 1486, 1349, 1251, 1225, 1251, 1326, 1438, 1602, 1800, 2043, 2343, 2616, 2826, 2637, 2330, 2024, 1796, 1609, 1480, 1391, 1365, 1370, 1442, 1556, 1714, 1915, 2190, 2461, 2673, 2820, 2738, 2472, 2182, 1949, 1760, 1640, 1545, 1517, 1524, 1591, 1716, 1867, 2073, 2308, 2561, 2686, 2782, 2806, 2648, 2352, 2132, 1926, 1819, 1716, 1678, 1702, 1757, 1872, 2029, 2234, 2434, 2611, 2617, 2538, 2919, 2777, 2554, 2345, 2148, 2012, 1940, 1896, 1930, 1961, 2065, 2243, 2426, 2592, 2669, 2461, ]
+ gr: [2065, 2350, 2320, 2148, 2002, 1877, 1794, 1730, 1709, 1712, 1754, 1837, 1948, 2082, 2217, 2291, 2054, 2263, 2359, 2204, 2022, 1860, 1735, 1639, 1583, 1560, 1576, 1619, 1694, 1805, 1967, 2126, 2281, 2228, 2353, 2294, 2112, 1897, 1724, 1615, 1525, 1460, 1441, 1448, 1499, 1581, 1684, 1829, 2000, 2187, 2305, 2354, 2194, 1994, 1785, 1626, 1493, 1406, 1349, 1323, 1342, 1384, 1468, 1576, 1722, 1909, 2100, 2265, 2281, 2126, 1894, 1708, 1539, 1409, 1310, 1253, 1225, 1240, 1291, 1377, 1486, 1639, 1821, 2019, 2220, 2257, 2059, 1819, 1622, 1464, 1337, 1233, 1168, 1144, 1161, 1219, 1302, 1420, 1576, 1733, 1934, 2180, 2189, 1991, 1759, 1578, 1407, 1280, 1164, 1107, 1085, 1100, 1157, 1242, 1359, 1514, 1685, 1894, 2110, 2153, 1954, 1726, 1537, 1365, 1229, 1129, 1066, 1039, 1057, 1114, 1202, 1327, 1471, 1638, 1850, 2094, 2153, 1948, 1718, 1522, 1352, 1217, 1114, 1047, 1024, 1038, 1100, 1187, 1310, 1467, 1627, 1851, 2078, 2162, 1947, 1716, 1527, 1367, 1225, 1125, 1054, 1031, 1045, 1106, 1198, 1320, 1465, 1638, 1861, 2094, 2180, 1964, 1731, 1545, 1383, 1252, 1145, 1085, 1057, 1070, 1131, 1223, 1341, 1488, 1658, 1852, 2077, 2199, 2002, 1787, 1584, 1429, 1297, 1194, 1131, 1109, 1124, 1181, 1266, 1384, 1523, 1695, 1908, 2118, 2260, 2071, 1843, 1651, 1502, 1364, 1265, 1203, 1181, 1197, 1244, 1331, 1451, 1579, 1763, 1969, 2153, 2276, 2150, 1922, 1736, 1573, 1453, 1355, 1296, 1275, 1285, 1335, 1417, 1526, 1663, 1849, 2052, 2203, 2294, 2205, 2029, 1834, 1666, 1548, 1461, 1399, 1372, 1390, 1431, 1513, 1620, 1760, 1931, 2115, 2237, 2228, 2271, 2126, 1934, 1784, 1650, 1577, 1512, 1485, 1506, 1547, 1625, 1729, 1872, 2029, 2189, 2160, 2033, 2326, 2227, 2106, 1935, 1815, 1721, 1671, 1627, 1654, 1688, 1768, 1885, 2021, 2160, 2245, 2022, ]
+ gb: [2062, 2335, 2286, 2148, 1975, 1850, 1776, 1709, 1688, 1709, 1761, 1822, 1943, 2082, 2226, 2300, 2062, 2272, 2345, 2186, 2016, 1856, 1728, 1637, 1579, 1556, 1564, 1610, 1691, 1807, 1961, 2126, 2280, 2237, 2338, 2293, 2081, 1893, 1731, 1594, 1501, 1444, 1424, 1441, 1485, 1572, 1677, 1830, 2022, 2195, 2303, 2352, 2212, 1988, 1782, 1625, 1499, 1400, 1342, 1318, 1335, 1379, 1468, 1579, 1728, 1898, 2116, 2274, 2311, 2127, 1896, 1701, 1538, 1404, 1308, 1249, 1218, 1243, 1290, 1382, 1491, 1641, 1828, 2041, 2249, 2256, 2060, 1820, 1637, 1476, 1335, 1234, 1166, 1147, 1159, 1220, 1302, 1428, 1586, 1754, 1968, 2198, 2225, 2013, 1781, 1584, 1421, 1281, 1166, 1101, 1082, 1105, 1158, 1246, 1372, 1524, 1696, 1914, 2144, 2179, 1961, 1742, 1546, 1378, 1232, 1136, 1064, 1042, 1061, 1118, 1208, 1335, 1489, 1661, 1875, 2110, 2179, 1962, 1734, 1538, 1367, 1224, 1117, 1051, 1024, 1046, 1106, 1195, 1322, 1479, 1658, 1876, 2094, 2179, 1988, 1742, 1543, 1375, 1232, 1128, 1060, 1030, 1050, 1110, 1208, 1330, 1486, 1652, 1881, 2127, 2197, 2006, 1761, 1562, 1396, 1255, 1152, 1086, 1063, 1077, 1137, 1232, 1354, 1504, 1682, 1902, 2135, 2236, 2031, 1810, 1605, 1449, 1311, 1200, 1137, 1110, 1130, 1185, 1275, 1389, 1539, 1720, 1922, 2161, 2290, 2103, 1873, 1675, 1504, 1379, 1276, 1211, 1184, 1202, 1251, 1339, 1460, 1593, 1785, 1983, 2180, 2329, 2176, 1961, 1752, 1598, 1471, 1366, 1308, 1279, 1292, 1348, 1432, 1535, 1682, 1874, 2068, 2222, 2338, 2253, 2059, 1852, 1686, 1565, 1473, 1410, 1385, 1393, 1445, 1522, 1639, 1782, 1959, 2132, 2257, 2272, 2312, 2160, 1961, 1802, 1674, 1587, 1525, 1497, 1508, 1557, 1644, 1741, 1897, 2045, 2197, 2202, 2095, 2335, 2276, 2098, 1969, 1828, 1732, 1669, 1641, 1656, 1699, 1785, 1886, 2036, 2188, 2254, 2030, ]
+ b: [1957, 2184, 2113, 2000, 1876, 1757, 1686, 1620, 1614, 1596, 1649, 1687, 1805, 1914, 2027, 2082, 1880, 2101, 2170, 2056, 1894, 1763, 1659, 1571, 1527, 1501, 1506, 1541, 1608, 1694, 1809, 1964, 2094, 2040, 2156, 2121, 1964, 1796, 1654, 1563, 1485, 1419, 1399, 1407, 1447, 1499, 1587, 1724, 1859, 2019, 2076, 2184, 2063, 1888, 1705, 1586, 1470, 1383, 1330, 1299, 1315, 1352, 1421, 1513, 1633, 1794, 1956, 2125, 2153, 2012, 1821, 1660, 1511, 1395, 1302, 1241, 1219, 1232, 1275, 1352, 1453, 1570, 1726, 1914, 2080, 2106, 1953, 1751, 1601, 1462, 1333, 1235, 1171, 1142, 1156, 1207, 1285, 1403, 1520, 1656, 1838, 2038, 2081, 1885, 1704, 1553, 1398, 1266, 1166, 1101, 1079, 1097, 1151, 1240, 1340, 1471, 1616, 1780, 1970, 2041, 1882, 1686, 1513, 1364, 1235, 1125, 1065, 1037, 1054, 1108, 1196, 1299, 1429, 1576, 1756, 1935, 2049, 1853, 1665, 1504, 1363, 1227, 1118, 1049, 1024, 1035, 1099, 1188, 1298, 1434, 1582, 1752, 1929, 2073, 1870, 1677, 1520, 1364, 1240, 1131, 1057, 1037, 1048, 1102, 1188, 1308, 1442, 1600, 1756, 1921, 2048, 1885, 1695, 1525, 1387, 1248, 1148, 1085, 1064, 1076, 1131, 1215, 1325, 1458, 1591, 1780, 1926, 2089, 1926, 1731, 1563, 1432, 1304, 1191, 1132, 1112, 1129, 1172, 1258, 1359, 1492, 1647, 1814, 1975, 2115, 1983, 1799, 1626, 1491, 1368, 1270, 1212, 1188, 1204, 1249, 1322, 1416, 1548, 1697, 1874, 2045, 2164, 2047, 1888, 1705, 1571, 1451, 1357, 1296, 1276, 1291, 1336, 1404, 1499, 1616, 1772, 1956, 2069, 2177, 2139, 1964, 1785, 1654, 1549, 1459, 1402, 1376, 1385, 1423, 1493, 1587, 1704, 1847, 2003, 2057, 2144, 2190, 2056, 1906, 1753, 1642, 1556, 1506, 1488, 1485, 1534, 1592, 1684, 1809, 1935, 2076, 2081, 1997, 2228, 2150, 2030, 1888, 1799, 1704, 1637, 1631, 1629, 1667, 1716, 1816, 1914, 2043, 2122, 1917, ]
+ #2592x1944_D50_70 - D50
+ - ct: 5003
+ resolution: 2592x1944
+ r: [2445, 2929, 2967, 2734, 2576, 2380, 2211, 2113, 2074, 2072, 2166, 2255, 2383, 2626, 2861, 2812, 2411, 2795, 3067, 2915, 2660, 2369, 2162, 2038, 1940, 1900, 1919, 1978, 2106, 2281, 2519, 2702, 2875, 2718, 2953, 3006, 2761, 2452, 2197, 1964, 1815, 1720, 1676, 1712, 1769, 1899, 2070, 2268, 2581, 2739, 2798, 3022, 2895, 2570, 2275, 2011, 1793, 1619, 1512, 1486, 1506, 1577, 1740, 1898, 2123, 2420, 2659, 2869, 2939, 2776, 2457, 2132, 1863, 1619, 1479, 1366, 1332, 1356, 1435, 1571, 1769, 1978, 2272, 2543, 2736, 2905, 2703, 2360, 2023, 1747, 1516, 1355, 1247, 1214, 1243, 1332, 1457, 1651, 1898, 2194, 2488, 2714, 2945, 2615, 2257, 1937, 1653, 1419, 1242, 1151, 1117, 1138, 1219, 1374, 1575, 1795, 2080, 2417, 2695, 2795, 2558, 2207, 1875, 1586, 1350, 1182, 1089, 1046, 1084, 1158, 1305, 1497, 1736, 2027, 2351, 2624, 2840, 2547, 2201, 1863, 1566, 1323, 1172, 1068, 1024, 1057, 1142, 1288, 1484, 1725, 2010, 2343, 2584, 2857, 2580, 2222, 1875, 1573, 1355, 1182, 1086, 1046, 1072, 1151, 1301, 1509, 1762, 2052, 2371, 2707, 2912, 2615, 2257, 1904, 1631, 1389, 1227, 1129, 1090, 1122, 1197, 1331, 1529, 1777, 2040, 2397, 2639, 2905, 2628, 2290, 1987, 1698, 1457, 1296, 1202, 1154, 1181, 1259, 1398, 1607, 1826, 2119, 2466, 2684, 2939, 2748, 2399, 2078, 1796, 1584, 1424, 1310, 1276, 1297, 1377, 1519, 1708, 1943, 2222, 2543, 2736, 2982, 2863, 2570, 2243, 1964, 1740, 1570, 1470, 1435, 1448, 1537, 1683, 1856, 2094, 2342, 2632, 2798, 3037, 2970, 2681, 2413, 2111, 1920, 1769, 1672, 1616, 1634, 1709, 1847, 2019, 2234, 2488, 2709, 2835, 2836, 3026, 2851, 2611, 2315, 2106, 1932, 1836, 1801, 1807, 1899, 2027, 2199, 2392, 2620, 2805, 2644, 2515, 3013, 2967, 2792, 2553, 2343, 2181, 2046, 2035, 2033, 2108, 2239, 2444, 2575, 2731, 2812, 2411, ]
+ gr: [1764, 2120, 2133, 2015, 1886, 1783, 1704, 1644, 1626, 1631, 1666, 1739, 1792, 1938, 2020, 2014, 1727, 1988, 2163, 2079, 1945, 1797, 1681, 1595, 1551, 1526, 1533, 1567, 1619, 1707, 1833, 1963, 2052, 1936, 2115, 2119, 1964, 1824, 1676, 1555, 1486, 1428, 1406, 1425, 1447, 1526, 1623, 1720, 1866, 2001, 2030, 2142, 2062, 1902, 1716, 1580, 1465, 1376, 1321, 1301, 1314, 1355, 1428, 1513, 1645, 1791, 1941, 2022, 2104, 1988, 1816, 1663, 1515, 1388, 1294, 1235, 1215, 1225, 1271, 1350, 1449, 1571, 1719, 1880, 2028, 2113, 1963, 1766, 1588, 1445, 1325, 1231, 1168, 1142, 1155, 1213, 1284, 1392, 1517, 1662, 1835, 1980, 2065, 1897, 1712, 1544, 1394, 1268, 1163, 1105, 1080, 1097, 1147, 1225, 1348, 1464, 1603, 1780, 1948, 2044, 1877, 1672, 1512, 1355, 1223, 1127, 1057, 1038, 1052, 1107, 1193, 1312, 1437, 1593, 1741, 1931, 2004, 1873, 1674, 1501, 1350, 1211, 1113, 1048, 1024, 1038, 1095, 1180, 1301, 1424, 1571, 1738, 1895, 2027, 1871, 1681, 1506, 1361, 1227, 1123, 1064, 1035, 1057, 1104, 1189, 1310, 1440, 1573, 1758, 1916, 2048, 1884, 1707, 1526, 1374, 1248, 1154, 1087, 1069, 1073, 1128, 1205, 1317, 1455, 1590, 1757, 1925, 2031, 1907, 1720, 1557, 1406, 1289, 1193, 1129, 1104, 1116, 1170, 1244, 1348, 1478, 1621, 1792, 1947, 2075, 1973, 1777, 1615, 1465, 1355, 1269, 1195, 1176, 1184, 1234, 1302, 1412, 1532, 1669, 1826, 1975, 2100, 2028, 1870, 1687, 1542, 1443, 1352, 1294, 1264, 1278, 1324, 1393, 1492, 1602, 1757, 1911, 2031, 2093, 2054, 1935, 1763, 1631, 1529, 1441, 1393, 1361, 1371, 1419, 1480, 1569, 1690, 1827, 1960, 2020, 1957, 2091, 1979, 1864, 1722, 1619, 1529, 1484, 1458, 1471, 1497, 1557, 1654, 1761, 1918, 2005, 1907, 1783, 2076, 2094, 1938, 1829, 1729, 1657, 1592, 1571, 1572, 1616, 1664, 1769, 1880, 1968, 1994, 1718, ]
+ gb: [1771, 2117, 2122, 1999, 1887, 1768, 1691, 1633, 1619, 1633, 1668, 1736, 1836, 1923, 2010, 2002, 1734, 2040, 2161, 2070, 1925, 1777, 1678, 1601, 1532, 1528, 1518, 1562, 1625, 1724, 1840, 1956, 2079, 1954, 2091, 2109, 1965, 1826, 1669, 1561, 1472, 1419, 1400, 1422, 1450, 1521, 1608, 1732, 1867, 2001, 2028, 2151, 2053, 1877, 1718, 1579, 1465, 1379, 1319, 1296, 1309, 1350, 1428, 1530, 1647, 1792, 1934, 2030, 2112, 2003, 1824, 1656, 1511, 1388, 1296, 1240, 1206, 1228, 1271, 1347, 1458, 1577, 1725, 1894, 2018, 2112, 1978, 1778, 1602, 1451, 1325, 1231, 1165, 1141, 1154, 1207, 1292, 1397, 1530, 1687, 1849, 2030, 2056, 1911, 1723, 1554, 1396, 1271, 1165, 1103, 1077, 1100, 1148, 1236, 1343, 1477, 1626, 1798, 1972, 2027, 1885, 1692, 1522, 1358, 1225, 1126, 1068, 1038, 1055, 1105, 1194, 1313, 1443, 1583, 1771, 1931, 2037, 1868, 1690, 1514, 1355, 1216, 1116, 1053, 1024, 1046, 1096, 1191, 1306, 1433, 1586, 1762, 1925, 2061, 1891, 1688, 1522, 1363, 1236, 1128, 1067, 1037, 1059, 1110, 1196, 1318, 1439, 1596, 1765, 1977, 2056, 1898, 1709, 1535, 1391, 1264, 1157, 1089, 1069, 1076, 1131, 1216, 1335, 1467, 1596, 1775, 1948, 2048, 1929, 1737, 1567, 1427, 1294, 1198, 1130, 1106, 1120, 1168, 1260, 1353, 1491, 1641, 1811, 1963, 2112, 1988, 1795, 1626, 1484, 1374, 1274, 1198, 1174, 1190, 1237, 1317, 1427, 1538, 1695, 1840, 2000, 2140, 2045, 1877, 1708, 1567, 1443, 1360, 1304, 1267, 1288, 1337, 1398, 1491, 1621, 1781, 1919, 2039, 2112, 2109, 1936, 1792, 1633, 1539, 1450, 1396, 1377, 1376, 1422, 1496, 1579, 1697, 1835, 1976, 2028, 2029, 2089, 2028, 1884, 1734, 1638, 1543, 1490, 1460, 1466, 1514, 1579, 1670, 1774, 1910, 2013, 1904, 1790, 2117, 2065, 1961, 1854, 1752, 1672, 1616, 1590, 1599, 1623, 1700, 1782, 1867, 1984, 2022, 1698, ]
+ b: [1676, 1930, 1956, 1924, 1811, 1685, 1640, 1571, 1556, 1544, 1569, 1639, 1710, 1802, 1890, 1881, 1642, 1930, 2013, 1952, 1827, 1711, 1616, 1538, 1488, 1472, 1470, 1494, 1560, 1632, 1724, 1825, 1906, 1803, 1985, 2007, 1894, 1759, 1625, 1524, 1440, 1401, 1380, 1385, 1411, 1463, 1537, 1649, 1765, 1876, 1884, 1996, 1961, 1831, 1676, 1555, 1444, 1367, 1301, 1282, 1295, 1328, 1383, 1468, 1580, 1708, 1833, 1900, 2020, 1914, 1777, 1618, 1508, 1382, 1284, 1227, 1197, 1216, 1251, 1325, 1408, 1511, 1639, 1796, 1915, 1998, 1901, 1716, 1581, 1447, 1327, 1226, 1169, 1134, 1155, 1199, 1269, 1368, 1486, 1608, 1741, 1879, 1959, 1838, 1674, 1531, 1387, 1269, 1158, 1094, 1072, 1082, 1132, 1217, 1323, 1431, 1568, 1706, 1847, 1956, 1806, 1645, 1497, 1352, 1222, 1124, 1059, 1031, 1049, 1093, 1177, 1292, 1398, 1528, 1686, 1800, 1945, 1806, 1634, 1494, 1357, 1211, 1110, 1049, 1024, 1034, 1080, 1174, 1277, 1388, 1519, 1673, 1809, 1989, 1822, 1664, 1497, 1366, 1239, 1115, 1065, 1033, 1049, 1095, 1183, 1295, 1406, 1544, 1679, 1855, 1981, 1838, 1674, 1512, 1384, 1260, 1151, 1086, 1062, 1069, 1121, 1198, 1303, 1423, 1540, 1691, 1847, 1964, 1856, 1683, 1550, 1422, 1294, 1189, 1122, 1103, 1113, 1164, 1237, 1332, 1446, 1574, 1741, 1859, 2008, 1885, 1755, 1606, 1471, 1371, 1263, 1197, 1169, 1182, 1228, 1298, 1392, 1501, 1620, 1763, 1883, 2034, 1950, 1823, 1676, 1540, 1439, 1353, 1298, 1269, 1276, 1325, 1383, 1468, 1575, 1700, 1833, 1923, 2012, 1995, 1894, 1744, 1625, 1519, 1440, 1389, 1361, 1370, 1403, 1467, 1558, 1642, 1773, 1876, 1908, 1903, 2038, 1942, 1844, 1704, 1599, 1528, 1484, 1445, 1457, 1494, 1544, 1602, 1724, 1843, 1906, 1827, 1724, 2051, 2027, 1914, 1827, 1698, 1640, 1577, 1566, 1588, 1604, 1633, 1717, 1811, 1901, 1930, 1665, ]
+
+...
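
Both sensor files use the same LensShadingCorrection layout: the eight x-size/y-size fractions (0.0625 each, summing to 0.5) give the sector widths of one image half, mirrored to the other, so each axis has 16 sectors and 17 gain samples; each r/gr/gb/b table is a flattened 17 × 17 = 289-entry grid, with 1024 representing unity gain (the minimum value in every table above). A minimal C++ sketch of a bilinear lookup in such a grid follows; it assumes uniform sector sizes, and the function and its normalisation are illustrative, not libcamera API:

/*
 * Illustrative only, not libcamera code: bilinear lookup in one channel's
 * 17x17 lens shading gain table, assuming uniform sector sizes (all
 * 0.0625, as in the files above) and gains in 1/1024 fixed point.
 */
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>

constexpr int kSamples = 17; /* 17 x 17 = 289 entries per channel */

double lscGain(const std::array<uint16_t, kSamples * kSamples> &table,
	       double x, double y) /* normalised [0, 1] image coordinates */
{
	/* With uniform sectors, [0, 1] maps evenly onto 16 intervals. */
	double fx = x * (kSamples - 1);
	double fy = y * (kSamples - 1);
	int ix = std::min(static_cast<int>(fx), kSamples - 2);
	int iy = std::min(static_cast<int>(fy), kSamples - 2);
	double dx = fx - ix;
	double dy = fy - iy;

	auto at = [&](int i, int j) {
		return table[j * kSamples + i] / 1024.0; /* 1024 == 1.0 gain */
	};

	return (1 - dx) * (1 - dy) * at(ix, iy) +
	       dx * (1 - dy) * at(ix + 1, iy) +
	       (1 - dx) * dy * at(ix, iy + 1) +
	       dx * dy * at(ix + 1, iy + 1);
}

int main()
{
	std::array<uint16_t, kSamples * kSamples> flat;
	flat.fill(1024); /* unity gain everywhere */
	std::cout << lscGain(flat, 0.5, 0.5) << "\n"; /* prints 1 */
}
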
diff --git a/src/ipa/rkisp1/data/ov8858.yaml b/src/ipa/rkisp1/data/ov8858.yaml
new file mode 100644
index 00000000..f297b0e0
--- /dev/null
+++ b/src/ipa/rkisp1/data/ov8858.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - LensShadingCorrection:
+ x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
+ sets:
+ #3264x2448_A_70 - A
+ - ct: 2856
+ resolution: 3264x2448
+ r: [4095, 3932, 3584, 3324, 3113, 2934, 2747, 2619, 2566, 2579, 2671, 2816, 3009, 3217, 3444, 3843, 4095, 4095, 3658, 3343, 3088, 2867, 2620, 2404, 2271, 2207, 2229, 2315, 2485, 2727, 2965, 3232, 3500, 4057, 3926, 3482, 3187, 2914, 2612, 2330, 2112, 1976, 1917, 1931, 2028, 2198, 2456, 2762, 3042, 3335, 3770, 3739, 3331, 3029, 2720, 2364, 2070, 1852, 1718, 1655, 1669, 1765, 1940, 2207, 2538, 2878, 3183, 3565, 3590, 3209, 2910, 2524, 2156, 1860, 1642, 1493, 1431, 1446, 1551, 1734, 1986, 2338, 2721, 3075, 3405, 3484, 3116, 2778, 2373, 1997, 1698, 1466, 1315, 1254, 1272, 1374, 1562, 1825, 2169, 2587, 2946, 3317, 3415, 3044, 2682, 2252, 1873, 1574, 1336, 1192, 1126, 1146, 1249, 1437, 1712, 2050, 2462, 2877, 3238, 3355, 3002, 2619, 2171, 1800, 1490, 1259, 1112, 1051, 1073, 1173, 1359, 1635, 1977, 2388, 2813, 3182, 3348, 2969, 2587, 2138, 1768, 1457, 1228, 1085, 1024, 1043, 1144, 1326, 1603, 1950, 2364, 2783, 3170, 3344, 2984, 2594, 2152, 1776, 1468, 1239, 1098, 1041, 1061, 1161, 1342, 1617, 1962, 2373, 2798, 3177, 3388, 3011, 2637, 2207, 1829, 1528, 1298, 1158, 1100, 1120, 1217, 1408, 1677, 2018, 2429, 2841, 3192, 3442, 3064, 2718, 2301, 1929, 1633, 1405, 1263, 1205, 1224, 1326, 1513, 1777, 2119, 2525, 2903, 3274, 3557, 3138, 2822, 2435, 2066, 1775, 1558, 1414, 1355, 1378, 1478, 1663, 1927, 2255, 2657, 2987, 3369, 3682, 3256, 2940, 2604, 2252, 1958, 1748, 1609, 1557, 1576, 1677, 1857, 2106, 2445, 2793, 3096, 3526, 3874, 3380, 3075, 2783, 2472, 2189, 1974, 1846, 1790, 1811, 1909, 2086, 2342, 2643, 2934, 3247, 3743, 4095, 3583, 3218, 2950, 2708, 2456, 2257, 2114, 2064, 2083, 2185, 2364, 2598, 2856, 3111, 3444, 4045, 4095, 3842, 3474, 3155, 2950, 2731, 2575, 2440, 2388, 2413, 2499, 2659, 2846, 3056, 3334, 3796, 4095, ]
+ gr: [3246, 2753, 2547, 2359, 2249, 2148, 2052, 1977, 1938, 1947, 1995, 2082, 2183, 2277, 2411, 2655, 2957, 2906, 2568, 2361, 2223, 2092, 1964, 1850, 1767, 1735, 1740, 1790, 1881, 2002, 2124, 2265, 2437, 2751, 2740, 2449, 2261, 2106, 1950, 1798, 1681, 1604, 1570, 1577, 1626, 1714, 1846, 2012, 2149, 2322, 2581, 2628, 2348, 2169, 2000, 1808, 1654, 1539, 1460, 1419, 1429, 1483, 1576, 1710, 1881, 2062, 2231, 2443, 2541, 2279, 2102, 1891, 1687, 1536, 1420, 1330, 1289, 1298, 1362, 1459, 1589, 1773, 1967, 2168, 2352, 2459, 2226, 2027, 1797, 1599, 1442, 1313, 1221, 1179, 1190, 1253, 1359, 1497, 1675, 1898, 2100, 2286, 2406, 2180, 1976, 1732, 1531, 1369, 1231, 1140, 1096, 1109, 1174, 1284, 1431, 1608, 1824, 2055, 2245, 2374, 2148, 1928, 1684, 1484, 1317, 1178, 1084, 1043, 1058, 1122, 1234, 1387, 1562, 1785, 2020, 2218, 2363, 2140, 1910, 1663, 1464, 1292, 1156, 1063, 1024, 1036, 1102, 1214, 1363, 1547, 1762, 2004, 2194, 2366, 2136, 1917, 1670, 1469, 1302, 1163, 1073, 1032, 1047, 1111, 1223, 1373, 1552, 1775, 2009, 2206, 2383, 2158, 1940, 1703, 1506, 1339, 1201, 1112, 1072, 1087, 1150, 1265, 1408, 1584, 1805, 2030, 2228, 2434, 2189, 1994, 1757, 1557, 1400, 1270, 1181, 1142, 1154, 1218, 1328, 1468, 1640, 1860, 2068, 2267, 2497, 2235, 2043, 1837, 1630, 1477, 1360, 1273, 1238, 1249, 1310, 1412, 1544, 1725, 1924, 2124, 2329, 2592, 2305, 2109, 1925, 1731, 1576, 1460, 1384, 1350, 1364, 1422, 1513, 1648, 1818, 2009, 2174, 2427, 2699, 2379, 2188, 2022, 1860, 1696, 1588, 1510, 1480, 1489, 1543, 1637, 1771, 1937, 2072, 2269, 2546, 2862, 2514, 2276, 2120, 1983, 1850, 1737, 1664, 1628, 1642, 1695, 1787, 1914, 2043, 2182, 2390, 2734, 3175, 2661, 2434, 2232, 2119, 2004, 1921, 1849, 1813, 1816, 1874, 1959, 2049, 2159, 2317, 2604, 2891, ]
+ gb: [3248, 2762, 2549, 2352, 2241, 2135, 2024, 1949, 1910, 1923, 1970, 2058, 2167, 2278, 2427, 2679, 3003, 2939, 2581, 2369, 2212, 2084, 1945, 1829, 1743, 1710, 1713, 1773, 1861, 1999, 2127, 2278, 2456, 2799, 2766, 2468, 2268, 2114, 1949, 1788, 1666, 1587, 1550, 1557, 1612, 1711, 1849, 2022, 2168, 2354, 2627, 2659, 2372, 2185, 2003, 1808, 1646, 1531, 1447, 1404, 1415, 1474, 1573, 1711, 1896, 2082, 2269, 2494, 2572, 2297, 2122, 1903, 1694, 1534, 1411, 1322, 1278, 1294, 1356, 1459, 1599, 1796, 2003, 2204, 2415, 2494, 2259, 2053, 1813, 1609, 1442, 1310, 1216, 1174, 1186, 1254, 1368, 1512, 1699, 1934, 2147, 2352, 2450, 2219, 2006, 1751, 1543, 1372, 1233, 1134, 1096, 1108, 1175, 1292, 1449, 1639, 1865, 2103, 2311, 2424, 2182, 1960, 1705, 1498, 1324, 1181, 1086, 1041, 1059, 1127, 1245, 1404, 1594, 1828, 2078, 2281, 2405, 2182, 1937, 1687, 1480, 1301, 1161, 1062, 1024, 1038, 1107, 1224, 1384, 1581, 1812, 2057, 2272, 2417, 2181, 1951, 1695, 1487, 1312, 1167, 1074, 1032, 1050, 1118, 1235, 1397, 1586, 1820, 2069, 2278, 2450, 2196, 1974, 1724, 1522, 1348, 1205, 1113, 1075, 1089, 1153, 1276, 1430, 1619, 1849, 2095, 2291, 2483, 2229, 2022, 1779, 1573, 1408, 1272, 1181, 1142, 1156, 1223, 1339, 1488, 1673, 1905, 2123, 2343, 2541, 2277, 2079, 1856, 1643, 1485, 1361, 1270, 1235, 1248, 1313, 1421, 1566, 1751, 1971, 2173, 2399, 2635, 2339, 2138, 1944, 1745, 1580, 1458, 1380, 1344, 1359, 1418, 1519, 1661, 1849, 2048, 2222, 2487, 2743, 2413, 2216, 2037, 1864, 1702, 1579, 1500, 1467, 1479, 1537, 1642, 1777, 1958, 2108, 2315, 2617, 2890, 2544, 2293, 2131, 1988, 1842, 1726, 1651, 1612, 1628, 1684, 1783, 1920, 2060, 2213, 2432, 2804, 3189, 2693, 2445, 2245, 2116, 2000, 1902, 1826, 1789, 1798, 1857, 1950, 2045, 2170, 2337, 2642, 2952, ]
+ b: [3058, 2592, 2385, 2213, 2113, 2016, 1936, 1869, 1845, 1844, 1887, 1965, 2056, 2162, 2288, 2535, 2815, 2739, 2411, 2208, 2067, 1959, 1848, 1747, 1681, 1655, 1659, 1709, 1788, 1909, 2024, 2149, 2317, 2640, 2595, 2298, 2119, 1981, 1836, 1704, 1608, 1543, 1517, 1519, 1561, 1646, 1774, 1925, 2042, 2217, 2463, 2469, 2218, 2033, 1880, 1710, 1575, 1479, 1419, 1384, 1398, 1439, 1527, 1647, 1810, 1968, 2125, 2330, 2404, 2138, 1979, 1785, 1611, 1474, 1374, 1303, 1271, 1280, 1336, 1421, 1545, 1706, 1895, 2058, 2261, 2341, 2104, 1920, 1713, 1535, 1397, 1284, 1203, 1168, 1181, 1237, 1339, 1462, 1631, 1822, 2012, 2194, 2293, 2063, 1882, 1662, 1480, 1336, 1206, 1128, 1092, 1106, 1165, 1270, 1407, 1565, 1767, 1965, 2158, 2262, 2048, 1845, 1625, 1450, 1289, 1165, 1079, 1041, 1057, 1122, 1223, 1370, 1534, 1725, 1940, 2129, 2258, 2046, 1834, 1605, 1433, 1273, 1147, 1058, 1024, 1037, 1102, 1209, 1352, 1519, 1711, 1928, 2110, 2261, 2041, 1847, 1615, 1442, 1282, 1151, 1069, 1028, 1048, 1109, 1218, 1359, 1523, 1716, 1927, 2124, 2282, 2064, 1864, 1645, 1461, 1316, 1184, 1103, 1070, 1083, 1143, 1249, 1389, 1552, 1745, 1948, 2141, 2326, 2090, 1907, 1695, 1505, 1362, 1247, 1164, 1133, 1144, 1202, 1307, 1436, 1597, 1794, 1985, 2182, 2380, 2132, 1952, 1758, 1569, 1429, 1323, 1247, 1215, 1229, 1283, 1379, 1506, 1669, 1851, 2025, 2222, 2458, 2187, 2000, 1835, 1653, 1511, 1407, 1344, 1314, 1326, 1374, 1461, 1583, 1749, 1916, 2069, 2319, 2559, 2255, 2066, 1910, 1757, 1616, 1512, 1450, 1427, 1431, 1481, 1565, 1688, 1850, 1970, 2151, 2432, 2700, 2384, 2151, 1995, 1874, 1747, 1637, 1577, 1552, 1563, 1610, 1689, 1817, 1934, 2064, 2254, 2607, 3019, 2498, 2301, 2107, 1991, 1888, 1808, 1742, 1716, 1716, 1775, 1847, 1930, 2044, 2200, 2494, 2763, ]
+ #3264x2448_D50_70 - D50
+ - ct: 5003
+ resolution: 3264x2448
+ r: [4095, 3613, 3287, 3049, 2867, 2696, 2545, 2427, 2374, 2387, 2473, 2592, 2779, 2948, 3156, 3544, 3984, 3842, 3341, 3076, 2850, 2650, 2438, 2245, 2123, 2065, 2085, 2164, 2316, 2531, 2745, 2979, 3232, 3738, 3605, 3194, 2924, 2694, 2430, 2182, 1986, 1867, 1814, 1824, 1909, 2060, 2301, 2567, 2807, 3088, 3473, 3432, 3048, 2806, 2516, 2208, 1953, 1758, 1638, 1581, 1596, 1679, 1836, 2061, 2367, 2669, 2928, 3285, 3275, 2940, 2676, 2354, 2027, 1763, 1572, 1443, 1385, 1398, 1496, 1648, 1878, 2184, 2527, 2813, 3150, 3181, 2855, 2566, 2201, 1877, 1622, 1413, 1284, 1226, 1243, 1333, 1502, 1732, 2033, 2391, 2731, 3021, 3116, 2786, 2474, 2100, 1773, 1510, 1304, 1171, 1114, 1131, 1224, 1389, 1630, 1925, 2296, 2638, 2973, 3060, 2752, 2410, 2024, 1710, 1437, 1231, 1101, 1044, 1063, 1152, 1318, 1559, 1865, 2228, 2600, 2919, 3044, 2730, 2388, 2001, 1677, 1403, 1204, 1073, 1024, 1036, 1128, 1289, 1534, 1839, 2198, 2569, 2903, 3039, 2734, 2392, 2004, 1684, 1417, 1210, 1086, 1031, 1050, 1138, 1306, 1544, 1845, 2204, 2576, 2916, 3099, 2751, 2432, 2050, 1732, 1469, 1264, 1136, 1085, 1101, 1194, 1358, 1596, 1891, 2264, 2612, 2929, 3131, 2808, 2499, 2142, 1811, 1556, 1354, 1230, 1178, 1195, 1286, 1451, 1683, 1986, 2341, 2678, 2991, 3235, 2875, 2592, 2258, 1936, 1679, 1491, 1363, 1310, 1332, 1421, 1582, 1813, 2113, 2455, 2737, 3096, 3357, 2965, 2692, 2412, 2094, 1840, 1650, 1533, 1485, 1501, 1591, 1747, 1979, 2275, 2582, 2840, 3239, 3543, 3094, 2808, 2555, 2298, 2043, 1851, 1737, 1685, 1703, 1791, 1955, 2178, 2459, 2700, 2992, 3425, 3749, 3286, 2950, 2712, 2495, 2282, 2093, 1972, 1919, 1950, 2033, 2186, 2412, 2625, 2856, 3165, 3713, 4095, 3514, 3156, 2880, 2701, 2511, 2370, 2249, 2203, 2222, 2309, 2454, 2607, 2813, 3060, 3476, 3973, ]
+ gr: [3126, 2654, 2449, 2277, 2167, 2065, 1967, 1898, 1859, 1866, 1917, 2000, 2085, 2198, 2323, 2565, 2866, 2805, 2487, 2288, 2151, 2020, 1894, 1781, 1706, 1672, 1681, 1731, 1812, 1937, 2057, 2191, 2358, 2670, 2662, 2378, 2191, 2044, 1889, 1739, 1629, 1554, 1520, 1528, 1576, 1662, 1791, 1947, 2083, 2253, 2496, 2545, 2278, 2108, 1939, 1753, 1606, 1498, 1421, 1385, 1393, 1444, 1533, 1656, 1830, 2001, 2166, 2370, 2460, 2205, 2037, 1834, 1644, 1494, 1384, 1301, 1264, 1275, 1328, 1422, 1547, 1723, 1914, 2100, 2284, 2377, 2164, 1972, 1748, 1557, 1410, 1287, 1200, 1162, 1174, 1231, 1334, 1463, 1632, 1846, 2043, 2218, 2335, 2117, 1922, 1686, 1494, 1339, 1213, 1125, 1090, 1100, 1157, 1263, 1401, 1569, 1778, 1995, 2176, 2311, 2081, 1879, 1641, 1452, 1292, 1163, 1078, 1038, 1055, 1111, 1217, 1356, 1527, 1740, 1960, 2152, 2296, 2074, 1861, 1621, 1434, 1273, 1142, 1058, 1024, 1032, 1093, 1197, 1338, 1508, 1718, 1949, 2134, 2292, 2079, 1863, 1628, 1441, 1280, 1149, 1065, 1029, 1042, 1100, 1207, 1347, 1519, 1728, 1951, 2144, 2319, 2089, 1890, 1658, 1470, 1312, 1185, 1101, 1065, 1077, 1138, 1242, 1378, 1549, 1757, 1976, 2157, 2353, 2128, 1936, 1706, 1519, 1366, 1249, 1162, 1129, 1142, 1198, 1303, 1434, 1600, 1808, 2011, 2202, 2417, 2165, 1985, 1785, 1586, 1443, 1327, 1249, 1217, 1226, 1283, 1378, 1506, 1675, 1874, 2060, 2255, 2508, 2231, 2044, 1867, 1681, 1530, 1425, 1348, 1320, 1331, 1386, 1476, 1601, 1770, 1955, 2110, 2345, 2616, 2306, 2124, 1958, 1799, 1648, 1536, 1466, 1437, 1448, 1497, 1589, 1716, 1880, 2017, 2199, 2467, 2754, 2434, 2202, 2053, 1920, 1788, 1681, 1608, 1574, 1588, 1641, 1726, 1853, 1980, 2112, 2304, 2656, 3054, 2562, 2347, 2155, 2038, 1931, 1843, 1778, 1742, 1748, 1803, 1887, 1976, 2089, 2229, 2513, 2806, ]
+ gb: [3110, 2650, 2442, 2268, 2159, 2061, 1963, 1887, 1855, 1860, 1910, 1995, 2091, 2202, 2330, 2589, 2876, 2817, 2480, 2285, 2141, 2019, 1890, 1777, 1697, 1664, 1670, 1725, 1811, 1936, 2060, 2200, 2370, 2701, 2645, 2378, 2188, 2041, 1882, 1735, 1623, 1548, 1513, 1524, 1567, 1660, 1798, 1959, 2096, 2272, 2534, 2550, 2276, 2104, 1935, 1753, 1601, 1494, 1417, 1377, 1388, 1441, 1533, 1660, 1839, 2014, 2181, 2402, 2452, 2209, 2036, 1834, 1641, 1493, 1377, 1298, 1257, 1272, 1328, 1426, 1554, 1732, 1932, 2122, 2315, 2387, 2165, 1969, 1749, 1559, 1407, 1285, 1197, 1159, 1171, 1233, 1337, 1472, 1649, 1862, 2070, 2256, 2336, 2119, 1926, 1684, 1495, 1340, 1210, 1124, 1087, 1100, 1159, 1269, 1411, 1582, 1801, 2019, 2219, 2312, 2092, 1885, 1644, 1453, 1295, 1164, 1077, 1036, 1054, 1115, 1221, 1370, 1544, 1763, 1995, 2189, 2297, 2086, 1862, 1629, 1435, 1275, 1145, 1058, 1024, 1036, 1097, 1205, 1352, 1529, 1746, 1980, 2180, 2305, 2091, 1869, 1634, 1444, 1283, 1151, 1066, 1030, 1045, 1106, 1215, 1360, 1538, 1754, 1987, 2182, 2329, 2104, 1896, 1662, 1476, 1315, 1187, 1101, 1066, 1081, 1142, 1249, 1395, 1566, 1785, 2007, 2205, 2369, 2133, 1942, 1715, 1523, 1370, 1247, 1163, 1128, 1141, 1203, 1309, 1447, 1618, 1834, 2043, 2240, 2430, 2181, 1995, 1785, 1588, 1444, 1330, 1247, 1216, 1227, 1287, 1387, 1520, 1694, 1902, 2086, 2299, 2513, 2244, 2058, 1879, 1688, 1534, 1424, 1350, 1317, 1331, 1388, 1478, 1613, 1786, 1975, 2139, 2392, 2625, 2320, 2129, 1965, 1806, 1649, 1539, 1465, 1435, 1446, 1500, 1596, 1728, 1895, 2039, 2230, 2517, 2757, 2450, 2210, 2061, 1924, 1795, 1680, 1608, 1572, 1587, 1638, 1732, 1863, 1994, 2136, 2337, 2692, 3076, 2574, 2347, 2163, 2039, 1933, 1842, 1764, 1738, 1749, 1804, 1883, 1981, 2095, 2253, 2542, 2845, ]
+ b: [2915, 2480, 2280, 2121, 2025, 1929, 1854, 1793, 1773, 1769, 1815, 1879, 1970, 2069, 2185, 2406, 2670, 2610, 2321, 2132, 1997, 1889, 1781, 1681, 1616, 1587, 1598, 1642, 1721, 1831, 1945, 2068, 2221, 2492, 2485, 2222, 2043, 1913, 1775, 1639, 1541, 1485, 1457, 1466, 1500, 1579, 1705, 1855, 1972, 2122, 2360, 2380, 2127, 1969, 1815, 1647, 1516, 1427, 1367, 1342, 1342, 1390, 1463, 1577, 1739, 1901, 2041, 2243, 2297, 2061, 1914, 1722, 1549, 1418, 1325, 1261, 1233, 1241, 1287, 1369, 1483, 1638, 1820, 1994, 2158, 2233, 2025, 1852, 1646, 1474, 1347, 1242, 1171, 1142, 1152, 1203, 1293, 1409, 1559, 1758, 1931, 2104, 2198, 1987, 1808, 1594, 1424, 1290, 1178, 1104, 1079, 1088, 1139, 1232, 1358, 1505, 1700, 1893, 2077, 2165, 1972, 1772, 1561, 1393, 1250, 1139, 1065, 1035, 1051, 1101, 1196, 1323, 1473, 1656, 1867, 2046, 2166, 1960, 1769, 1542, 1381, 1234, 1121, 1048, 1024, 1034, 1084, 1178, 1308, 1462, 1651, 1855, 2036, 2166, 1961, 1774, 1548, 1380, 1240, 1126, 1054, 1025, 1041, 1092, 1186, 1315, 1464, 1654, 1862, 2041, 2184, 1975, 1794, 1576, 1408, 1268, 1155, 1082, 1056, 1066, 1118, 1211, 1338, 1492, 1678, 1877, 2063, 2222, 1999, 1826, 1623, 1441, 1314, 1208, 1137, 1109, 1120, 1171, 1261, 1383, 1533, 1724, 1912, 2071, 2265, 2043, 1871, 1684, 1507, 1372, 1276, 1211, 1183, 1193, 1242, 1327, 1447, 1600, 1781, 1941, 2132, 2351, 2095, 1928, 1760, 1588, 1454, 1357, 1297, 1271, 1282, 1326, 1406, 1523, 1684, 1849, 1988, 2215, 2439, 2167, 1992, 1847, 1695, 1551, 1455, 1397, 1372, 1381, 1422, 1507, 1622, 1785, 1897, 2068, 2323, 2564, 2289, 2068, 1923, 1803, 1684, 1581, 1520, 1495, 1504, 1546, 1623, 1752, 1866, 1990, 2170, 2488, 2838, 2390, 2201, 2026, 1908, 1814, 1736, 1669, 1643, 1654, 1700, 1774, 1862, 1964, 2101, 2363, 2613, ]
+ #3264x2448_D65_70 - D65
+ - ct: 6504
+ resolution: 3264x2448
+ r: [4095, 3609, 3293, 3044, 2858, 2708, 2555, 2426, 2383, 2390, 2485, 2610, 2769, 2948, 3150, 3554, 4002, 3858, 3341, 3067, 2851, 2656, 2436, 2251, 2136, 2083, 2092, 2169, 2327, 2531, 2747, 2983, 3227, 3713, 3579, 3194, 2920, 2704, 2441, 2187, 2002, 1873, 1824, 1838, 1920, 2070, 2308, 2573, 2812, 3074, 3487, 3428, 3039, 2791, 2525, 2213, 1962, 1775, 1650, 1593, 1609, 1691, 1852, 2077, 2379, 2680, 2932, 3261, 3283, 2933, 2685, 2353, 2038, 1779, 1582, 1449, 1395, 1407, 1501, 1661, 1893, 2189, 2527, 2825, 3136, 3179, 2846, 2572, 2206, 1894, 1626, 1426, 1292, 1234, 1250, 1343, 1513, 1744, 2046, 2404, 2725, 3037, 3115, 2787, 2479, 2109, 1786, 1520, 1312, 1180, 1120, 1136, 1229, 1399, 1641, 1938, 2296, 2645, 2956, 3052, 2747, 2419, 2039, 1716, 1448, 1238, 1106, 1047, 1068, 1160, 1326, 1572, 1876, 2228, 2597, 2913, 3044, 2732, 2389, 2006, 1687, 1415, 1208, 1079, 1024, 1040, 1132, 1296, 1542, 1843, 2206, 2571, 2901, 3049, 2721, 2397, 2016, 1694, 1426, 1215, 1091, 1035, 1055, 1145, 1312, 1550, 1859, 2211, 2575, 2919, 3078, 2759, 2434, 2063, 1737, 1478, 1271, 1141, 1088, 1106, 1199, 1367, 1603, 1905, 2267, 2616, 2927, 3143, 2793, 2505, 2140, 1828, 1564, 1364, 1237, 1183, 1202, 1290, 1461, 1695, 1996, 2340, 2676, 2993, 3228, 2867, 2595, 2268, 1942, 1689, 1499, 1370, 1316, 1340, 1431, 1593, 1823, 2117, 2461, 2756, 3077, 3371, 2972, 2696, 2408, 2104, 1852, 1661, 1541, 1491, 1505, 1599, 1758, 1987, 2276, 2582, 2849, 3235, 3523, 3088, 2811, 2565, 2302, 2046, 1860, 1745, 1694, 1716, 1800, 1961, 2188, 2460, 2699, 2987, 3420, 3757, 3276, 2947, 2706, 2497, 2283, 2099, 1979, 1929, 1947, 2032, 2199, 2409, 2626, 2852, 3158, 3715, 4095, 3473, 3168, 2886, 2708, 2514, 2365, 2251, 2203, 2229, 2315, 2440, 2623, 2806, 3061, 3472, 3935, ]
+ gr: [3109, 2638, 2434, 2267, 2147, 2051, 1954, 1871, 1847, 1848, 1903, 1981, 2080, 2184, 2312, 2555, 2821, 2799, 2481, 2275, 2132, 2010, 1885, 1775, 1698, 1665, 1670, 1719, 1802, 1926, 2045, 2182, 2346, 2660, 2643, 2361, 2180, 2032, 1880, 1730, 1618, 1547, 1513, 1520, 1566, 1652, 1785, 1940, 2074, 2238, 2491, 2534, 2272, 2096, 1934, 1743, 1597, 1491, 1416, 1379, 1389, 1437, 1526, 1653, 1822, 1991, 2156, 2356, 2445, 2203, 2031, 1828, 1639, 1492, 1376, 1298, 1261, 1270, 1325, 1418, 1540, 1717, 1908, 2093, 2270, 2374, 2153, 1965, 1746, 1552, 1404, 1282, 1198, 1160, 1173, 1228, 1331, 1459, 1629, 1836, 2038, 2206, 2328, 2111, 1916, 1679, 1490, 1336, 1208, 1123, 1087, 1097, 1156, 1260, 1398, 1564, 1772, 1985, 2174, 2292, 2087, 1871, 1639, 1448, 1292, 1161, 1077, 1038, 1051, 1111, 1214, 1355, 1521, 1732, 1955, 2142, 2290, 2067, 1852, 1619, 1430, 1271, 1141, 1055, 1024, 1033, 1091, 1194, 1335, 1507, 1715, 1939, 2133, 2285, 2073, 1861, 1623, 1436, 1278, 1147, 1065, 1028, 1042, 1099, 1204, 1345, 1514, 1723, 1945, 2131, 2312, 2082, 1884, 1653, 1467, 1308, 1181, 1100, 1065, 1076, 1133, 1240, 1377, 1543, 1754, 1968, 2151, 2350, 2114, 1928, 1703, 1515, 1364, 1244, 1161, 1126, 1138, 1197, 1300, 1429, 1595, 1803, 2003, 2192, 2404, 2166, 1977, 1775, 1581, 1435, 1322, 1245, 1213, 1223, 1278, 1375, 1504, 1671, 1872, 2048, 2255, 2499, 2220, 2040, 1859, 1678, 1526, 1416, 1345, 1314, 1327, 1380, 1468, 1596, 1763, 1948, 2105, 2337, 2607, 2299, 2116, 1951, 1792, 1638, 1534, 1458, 1431, 1443, 1492, 1583, 1709, 1873, 2004, 2191, 2463, 2733, 2429, 2197, 2044, 1912, 1782, 1670, 1601, 1568, 1581, 1630, 1719, 1847, 1973, 2107, 2304, 2637, 3045, 2548, 2338, 2143, 2029, 1920, 1832, 1762, 1736, 1737, 1795, 1871, 1961, 2070, 2227, 2493, 2794, ]
+ gb: [3118, 2634, 2434, 2259, 2154, 2052, 1949, 1888, 1844, 1853, 1900, 1987, 2084, 2192, 2325, 2571, 2855, 2786, 2469, 2271, 2125, 2010, 1882, 1775, 1690, 1662, 1669, 1719, 1805, 1928, 2050, 2192, 2362, 2674, 2635, 2358, 2173, 2030, 1872, 1729, 1620, 1547, 1508, 1516, 1565, 1654, 1790, 1947, 2082, 2257, 2516, 2527, 2260, 2094, 1923, 1744, 1598, 1486, 1411, 1374, 1388, 1438, 1525, 1657, 1830, 2001, 2169, 2382, 2431, 2196, 2021, 1824, 1634, 1486, 1376, 1296, 1254, 1269, 1325, 1422, 1547, 1722, 1922, 2106, 2297, 2367, 2146, 1960, 1736, 1550, 1402, 1281, 1196, 1157, 1169, 1230, 1333, 1466, 1640, 1848, 2055, 2232, 2320, 2105, 1909, 1675, 1489, 1335, 1208, 1120, 1083, 1099, 1158, 1265, 1405, 1575, 1794, 2006, 2206, 2295, 2075, 1873, 1634, 1447, 1292, 1162, 1076, 1037, 1052, 1113, 1220, 1363, 1541, 1748, 1982, 2173, 2278, 2071, 1850, 1619, 1430, 1271, 1144, 1056, 1024, 1035, 1096, 1202, 1348, 1521, 1736, 1966, 2162, 2290, 2073, 1856, 1626, 1439, 1279, 1150, 1065, 1029, 1043, 1104, 1211, 1355, 1532, 1744, 1973, 2166, 2302, 2090, 1883, 1651, 1466, 1313, 1184, 1100, 1065, 1078, 1139, 1246, 1388, 1557, 1771, 1995, 2185, 2344, 2122, 1927, 1706, 1513, 1368, 1245, 1163, 1126, 1140, 1200, 1305, 1441, 1612, 1823, 2030, 2225, 2411, 2166, 1983, 1776, 1584, 1439, 1324, 1245, 1213, 1225, 1283, 1383, 1513, 1688, 1887, 2074, 2281, 2493, 2226, 2042, 1867, 1679, 1535, 1418, 1349, 1317, 1329, 1382, 1476, 1607, 1780, 1968, 2128, 2376, 2613, 2305, 2120, 1955, 1797, 1642, 1536, 1460, 1430, 1446, 1496, 1591, 1722, 1887, 2029, 2217, 2500, 2745, 2434, 2202, 2052, 1917, 1784, 1676, 1603, 1572, 1584, 1634, 1731, 1857, 1986, 2128, 2326, 2675, 3059, 2546, 2342, 2153, 2041, 1930, 1833, 1767, 1731, 1739, 1795, 1880, 1970, 2091, 2242, 2528, 2816, ]
+ b: [2873, 2460, 2268, 2104, 2011, 1921, 1837, 1775, 1753, 1759, 1798, 1871, 1956, 2059, 2172, 2375, 2631, 2606, 2309, 2117, 1990, 1879, 1768, 1673, 1606, 1582, 1588, 1633, 1705, 1820, 1931, 2051, 2202, 2475, 2458, 2204, 2033, 1901, 1760, 1630, 1533, 1475, 1452, 1455, 1495, 1572, 1694, 1839, 1962, 2110, 2332, 2361, 2122, 1964, 1800, 1640, 1506, 1417, 1362, 1332, 1340, 1378, 1452, 1573, 1727, 1887, 2031, 2222, 2280, 2053, 1893, 1713, 1542, 1414, 1321, 1257, 1229, 1235, 1282, 1365, 1470, 1633, 1804, 1974, 2144, 2220, 2010, 1846, 1638, 1472, 1340, 1238, 1168, 1141, 1149, 1201, 1288, 1403, 1551, 1742, 1923, 2094, 2180, 1986, 1797, 1591, 1416, 1287, 1176, 1105, 1077, 1088, 1137, 1230, 1350, 1502, 1688, 1885, 2062, 2161, 1955, 1767, 1554, 1387, 1249, 1135, 1064, 1035, 1050, 1097, 1191, 1317, 1471, 1654, 1863, 2027, 2145, 1955, 1757, 1539, 1375, 1233, 1121, 1047, 1024, 1033, 1086, 1175, 1303, 1454, 1640, 1848, 2020, 2154, 1953, 1760, 1542, 1379, 1237, 1124, 1053, 1027, 1038, 1089, 1182, 1310, 1463, 1645, 1848, 2028, 2167, 1965, 1781, 1567, 1400, 1266, 1152, 1083, 1054, 1066, 1117, 1209, 1334, 1483, 1674, 1867, 2043, 2207, 1995, 1816, 1613, 1440, 1311, 1204, 1137, 1109, 1118, 1169, 1258, 1378, 1527, 1713, 1899, 2067, 2247, 2035, 1862, 1676, 1500, 1369, 1274, 1208, 1182, 1190, 1237, 1324, 1439, 1592, 1770, 1930, 2126, 2337, 2085, 1919, 1752, 1585, 1447, 1353, 1294, 1270, 1278, 1325, 1401, 1517, 1672, 1842, 1979, 2199, 2421, 2154, 1984, 1835, 1686, 1549, 1450, 1393, 1369, 1381, 1418, 1500, 1617, 1769, 1886, 2055, 2310, 2539, 2273, 2056, 1921, 1791, 1680, 1576, 1515, 1490, 1499, 1544, 1624, 1737, 1860, 1983, 2162, 2458, 2817, 2386, 2185, 2018, 1904, 1802, 1724, 1668, 1638, 1646, 1685, 1765, 1851, 1953, 2089, 2342, 2607, ]
+ #3264x2448_D75_70 - D75
+ - ct: 7504
+ resolution: 3264x2448
+ r: [4095, 3519, 3218, 2985, 2815, 2645, 2509, 2389, 2327, 2355, 2435, 2555, 2710, 2908, 3107, 3455, 3909, 3739, 3284, 3001, 2795, 2603, 2392, 2213, 2093, 2049, 2058, 2135, 2281, 2493, 2685, 2920, 3163, 3650, 3536, 3113, 2865, 2641, 2393, 2149, 1967, 1852, 1802, 1811, 1894, 2037, 2267, 2525, 2747, 3014, 3388, 3358, 2983, 2730, 2466, 2185, 1933, 1755, 1634, 1579, 1590, 1678, 1826, 2049, 2329, 2621, 2864, 3207, 3196, 2870, 2628, 2311, 2001, 1757, 1569, 1439, 1382, 1396, 1488, 1645, 1865, 2163, 2477, 2773, 3063, 3115, 2785, 2512, 2175, 1859, 1619, 1412, 1285, 1228, 1243, 1335, 1502, 1726, 2015, 2362, 2666, 2951, 3027, 2733, 2430, 2073, 1761, 1507, 1303, 1172, 1116, 1132, 1223, 1388, 1622, 1913, 2253, 2591, 2908, 2995, 2683, 2368, 2007, 1696, 1435, 1234, 1104, 1045, 1068, 1154, 1317, 1561, 1846, 2189, 2547, 2845, 2960, 2670, 2344, 1972, 1667, 1403, 1205, 1074, 1024, 1038, 1128, 1290, 1526, 1816, 2166, 2519, 2841, 2985, 2665, 2355, 1980, 1675, 1416, 1210, 1087, 1032, 1052, 1141, 1300, 1537, 1836, 2171, 2530, 2837, 3017, 2686, 2380, 2030, 1721, 1465, 1264, 1140, 1086, 1104, 1190, 1358, 1586, 1879, 2221, 2556, 2871, 3062, 2738, 2456, 2107, 1796, 1549, 1356, 1232, 1175, 1192, 1285, 1446, 1672, 1961, 2298, 2626, 2926, 3172, 2807, 2533, 2227, 1916, 1670, 1485, 1356, 1308, 1325, 1415, 1577, 1801, 2085, 2411, 2676, 3033, 3272, 2904, 2640, 2360, 2069, 1821, 1639, 1525, 1476, 1492, 1580, 1735, 1951, 2232, 2536, 2784, 3143, 3481, 3014, 2752, 2511, 2256, 2018, 1835, 1719, 1672, 1687, 1777, 1931, 2151, 2414, 2647, 2922, 3369, 3652, 3193, 2877, 2650, 2441, 2239, 2058, 1946, 1895, 1918, 1999, 2153, 2365, 2572, 2794, 3086, 3594, 4095, 3408, 3097, 2824, 2643, 2469, 2323, 2215, 2158, 2187, 2264, 2412, 2554, 2742, 2991, 3425, 3869, ]
+ gr: [3118, 2636, 2433, 2254, 2141, 2035, 1950, 1873, 1840, 1849, 1893, 1975, 2079, 2175, 2303, 2544, 2821, 2787, 2475, 2277, 2131, 2003, 1880, 1767, 1691, 1656, 1665, 1715, 1794, 1921, 2037, 2179, 2343, 2648, 2644, 2359, 2180, 2024, 1877, 1724, 1615, 1543, 1508, 1516, 1561, 1650, 1780, 1935, 2071, 2236, 2483, 2533, 2271, 2094, 1926, 1742, 1593, 1487, 1413, 1377, 1385, 1434, 1520, 1647, 1819, 1984, 2150, 2358, 2451, 2197, 2027, 1823, 1635, 1491, 1375, 1296, 1258, 1268, 1324, 1417, 1538, 1712, 1905, 2087, 2270, 2374, 2145, 1961, 1741, 1549, 1402, 1281, 1196, 1159, 1169, 1227, 1325, 1458, 1624, 1834, 2028, 2212, 2324, 2109, 1912, 1678, 1487, 1335, 1208, 1123, 1087, 1096, 1155, 1260, 1394, 1560, 1769, 1981, 2168, 2302, 2071, 1872, 1633, 1447, 1290, 1159, 1076, 1038, 1052, 1109, 1211, 1356, 1521, 1728, 1954, 2134, 2285, 2065, 1850, 1617, 1427, 1269, 1142, 1054, 1024, 1033, 1090, 1194, 1333, 1502, 1714, 1936, 2128, 2281, 2075, 1855, 1621, 1435, 1277, 1146, 1064, 1030, 1042, 1100, 1203, 1341, 1513, 1721, 1948, 2122, 2312, 2076, 1880, 1647, 1463, 1308, 1180, 1099, 1064, 1075, 1132, 1237, 1375, 1539, 1746, 1961, 2151, 2345, 2115, 1924, 1700, 1514, 1361, 1244, 1160, 1126, 1137, 1194, 1298, 1427, 1592, 1802, 2001, 2181, 2409, 2156, 1978, 1774, 1578, 1435, 1320, 1242, 1211, 1221, 1276, 1372, 1498, 1668, 1864, 2047, 2237, 2494, 2218, 2033, 1858, 1672, 1520, 1415, 1343, 1311, 1324, 1376, 1462, 1590, 1758, 1940, 2097, 2340, 2607, 2290, 2110, 1945, 1786, 1638, 1526, 1455, 1425, 1437, 1485, 1578, 1705, 1868, 1998, 2185, 2460, 2727, 2419, 2192, 2039, 1906, 1775, 1666, 1593, 1565, 1576, 1627, 1711, 1838, 1963, 2101, 2299, 2626, 3040, 2538, 2330, 2138, 2021, 1918, 1827, 1755, 1724, 1732, 1784, 1866, 1954, 2068, 2214, 2496, 2760, ]
+ gb: [3103, 2631, 2429, 2258, 2149, 2044, 1949, 1878, 1843, 1853, 1904, 1985, 2081, 2188, 2320, 2563, 2842, 2787, 2459, 2271, 2124, 2008, 1878, 1772, 1689, 1663, 1666, 1715, 1801, 1924, 2045, 2190, 2357, 2679, 2626, 2355, 2170, 2027, 1869, 1724, 1617, 1543, 1507, 1517, 1566, 1653, 1785, 1945, 2080, 2250, 2509, 2516, 2256, 2083, 1920, 1737, 1595, 1485, 1413, 1376, 1385, 1438, 1526, 1654, 1826, 1997, 2161, 2383, 2426, 2190, 2013, 1820, 1629, 1486, 1374, 1294, 1255, 1266, 1325, 1419, 1543, 1721, 1918, 2103, 2291, 2358, 2142, 1954, 1731, 1545, 1400, 1280, 1194, 1157, 1171, 1227, 1334, 1465, 1633, 1848, 2045, 2227, 2319, 2095, 1902, 1672, 1488, 1334, 1207, 1123, 1085, 1096, 1157, 1261, 1401, 1572, 1784, 2003, 2191, 2286, 2071, 1863, 1631, 1445, 1289, 1160, 1075, 1038, 1053, 1113, 1221, 1363, 1534, 1743, 1971, 2167, 2278, 2059, 1844, 1613, 1427, 1271, 1143, 1057, 1024, 1035, 1096, 1199, 1346, 1518, 1731, 1960, 2153, 2280, 2065, 1853, 1619, 1438, 1278, 1149, 1066, 1029, 1044, 1105, 1210, 1354, 1528, 1735, 1970, 2160, 2302, 2080, 1875, 1649, 1465, 1309, 1183, 1100, 1065, 1079, 1136, 1246, 1384, 1556, 1767, 1987, 2178, 2346, 2109, 1923, 1697, 1514, 1365, 1245, 1160, 1127, 1141, 1199, 1303, 1438, 1608, 1818, 2027, 2215, 2410, 2158, 1976, 1774, 1578, 1437, 1325, 1245, 1212, 1225, 1284, 1379, 1514, 1680, 1883, 2068, 2272, 2489, 2219, 2041, 1862, 1677, 1529, 1417, 1345, 1314, 1327, 1381, 1474, 1600, 1780, 1961, 2120, 2371, 2601, 2306, 2111, 1953, 1795, 1642, 1534, 1459, 1431, 1443, 1496, 1587, 1717, 1881, 2024, 2213, 2482, 2733, 2436, 2194, 2049, 1910, 1784, 1674, 1600, 1567, 1581, 1632, 1728, 1855, 1985, 2122, 2321, 2675, 3032, 2542, 2344, 2151, 2037, 1930, 1834, 1767, 1732, 1747, 1791, 1879, 1968, 2083, 2239, 2522, 2807, ]
+ b: [2879, 2455, 2264, 2106, 2006, 1922, 1836, 1777, 1750, 1753, 1802, 1870, 1949, 2055, 2160, 2385, 2620, 2609, 2309, 2119, 1990, 1882, 1764, 1668, 1603, 1583, 1586, 1625, 1704, 1818, 1933, 2054, 2201, 2478, 2465, 2208, 2038, 1897, 1760, 1627, 1531, 1477, 1450, 1453, 1492, 1569, 1686, 1838, 1960, 2103, 2342, 2362, 2116, 1967, 1802, 1637, 1506, 1416, 1359, 1332, 1340, 1379, 1453, 1574, 1722, 1888, 2030, 2214, 2284, 2053, 1896, 1715, 1540, 1412, 1320, 1257, 1227, 1236, 1282, 1363, 1468, 1629, 1806, 1969, 2149, 2217, 2010, 1841, 1638, 1470, 1340, 1237, 1168, 1140, 1146, 1199, 1286, 1401, 1552, 1740, 1932, 2082, 2182, 1981, 1791, 1589, 1418, 1287, 1175, 1104, 1076, 1087, 1137, 1227, 1352, 1497, 1690, 1883, 2059, 2158, 1964, 1767, 1551, 1387, 1247, 1135, 1065, 1036, 1048, 1100, 1190, 1318, 1466, 1651, 1858, 2037, 2149, 1951, 1756, 1539, 1373, 1233, 1121, 1047, 1024, 1035, 1085, 1174, 1302, 1457, 1637, 1845, 2021, 2153, 1952, 1760, 1542, 1378, 1236, 1126, 1054, 1026, 1040, 1090, 1181, 1308, 1458, 1645, 1852, 2025, 2172, 1964, 1780, 1565, 1398, 1266, 1151, 1085, 1055, 1066, 1116, 1209, 1333, 1484, 1667, 1864, 2036, 2200, 1989, 1822, 1612, 1435, 1311, 1202, 1135, 1108, 1117, 1169, 1259, 1374, 1526, 1714, 1895, 2075, 2259, 2034, 1860, 1674, 1500, 1363, 1275, 1208, 1180, 1192, 1237, 1319, 1437, 1591, 1767, 1932, 2119, 2327, 2081, 1914, 1750, 1580, 1445, 1350, 1292, 1269, 1279, 1320, 1400, 1515, 1671, 1835, 1975, 2198, 2428, 2152, 1983, 1838, 1684, 1546, 1448, 1394, 1367, 1377, 1417, 1501, 1615, 1768, 1890, 2056, 2310, 2536, 2273, 2059, 1919, 1794, 1676, 1576, 1512, 1487, 1499, 1543, 1621, 1741, 1856, 1980, 2155, 2463, 2820, 2387, 2189, 2014, 1906, 1806, 1722, 1672, 1639, 1645, 1687, 1758, 1846, 1950, 2094, 2345, 2609, ]
+ #3264x2448_F11_TL84_70 - F11_TL84
+ - ct: 4000
+ resolution: 3264x2448
+ r: [4002, 3309, 3035, 2794, 2634, 2461, 2319, 2207, 2157, 2168, 2244, 2370, 2537, 2712, 2917, 3269, 3672, 3551, 3103, 2825, 2625, 2420, 2214, 2037, 1922, 1874, 1882, 1956, 2100, 2302, 2511, 2738, 2969, 3444, 3298, 2949, 2692, 2463, 2213, 1969, 1792, 1686, 1640, 1646, 1721, 1857, 2074, 2333, 2576, 2831, 3187, 3157, 2805, 2562, 2298, 1998, 1762, 1596, 1491, 1444, 1454, 1521, 1655, 1863, 2142, 2432, 2691, 3014, 3030, 2709, 2454, 2128, 1831, 1597, 1435, 1335, 1291, 1302, 1366, 1495, 1686, 1971, 2291, 2593, 2883, 2940, 2627, 2345, 1995, 1701, 1475, 1311, 1216, 1176, 1186, 1246, 1372, 1564, 1831, 2173, 2490, 2788, 2868, 2575, 2259, 1900, 1604, 1387, 1231, 1136, 1095, 1105, 1167, 1286, 1475, 1735, 2074, 2418, 2721, 2826, 2533, 2203, 1835, 1548, 1332, 1177, 1084, 1042, 1056, 1116, 1233, 1422, 1676, 2015, 2370, 2679, 2812, 2511, 2176, 1810, 1521, 1303, 1157, 1063, 1024, 1034, 1095, 1216, 1398, 1657, 1989, 2342, 2677, 2816, 2517, 2185, 1816, 1530, 1312, 1161, 1070, 1031, 1041, 1109, 1224, 1410, 1665, 1999, 2359, 2664, 2839, 2531, 2218, 1856, 1571, 1350, 1197, 1106, 1065, 1080, 1142, 1263, 1451, 1708, 2046, 2389, 2703, 2896, 2578, 2281, 1935, 1636, 1421, 1265, 1171, 1135, 1147, 1209, 1335, 1527, 1788, 2123, 2454, 2753, 2994, 2638, 2366, 2046, 1749, 1522, 1365, 1268, 1231, 1245, 1310, 1442, 1638, 1912, 2230, 2518, 2840, 3101, 2741, 2467, 2183, 1895, 1664, 1502, 1402, 1363, 1376, 1451, 1582, 1789, 2057, 2362, 2609, 2977, 3260, 2841, 2581, 2342, 2083, 1842, 1676, 1575, 1534, 1553, 1625, 1769, 1977, 2240, 2474, 2752, 3175, 3489, 3019, 2716, 2496, 2274, 2077, 1899, 1789, 1751, 1769, 1847, 1991, 2189, 2409, 2631, 2927, 3411, 3949, 3229, 2910, 2647, 2477, 2296, 2156, 2049, 2010, 2022, 2104, 2237, 2398, 2579, 2812, 3226, 3666, ]
+ gr: [3132, 2654, 2457, 2283, 2168, 2064, 1974, 1892, 1855, 1864, 1922, 1997, 2100, 2202, 2331, 2576, 2861, 2822, 2487, 2297, 2143, 2021, 1891, 1780, 1697, 1664, 1669, 1720, 1809, 1934, 2058, 2197, 2364, 2674, 2652, 2374, 2189, 2039, 1882, 1732, 1618, 1541, 1502, 1512, 1561, 1654, 1788, 1943, 2081, 2250, 2503, 2542, 2272, 2100, 1925, 1743, 1592, 1482, 1408, 1367, 1378, 1429, 1517, 1644, 1816, 1993, 2163, 2364, 2454, 2203, 2028, 1824, 1624, 1481, 1366, 1286, 1249, 1256, 1312, 1409, 1527, 1709, 1905, 2097, 2279, 2368, 2158, 1956, 1731, 1540, 1390, 1275, 1189, 1153, 1165, 1219, 1318, 1446, 1615, 1833, 2032, 2220, 2332, 2110, 1908, 1667, 1473, 1322, 1200, 1119, 1085, 1095, 1149, 1249, 1383, 1550, 1760, 1983, 2175, 2300, 2074, 1859, 1619, 1428, 1273, 1154, 1072, 1038, 1052, 1105, 1203, 1339, 1506, 1722, 1951, 2146, 2289, 2061, 1844, 1602, 1410, 1256, 1134, 1053, 1024, 1031, 1089, 1183, 1320, 1490, 1702, 1938, 2137, 2282, 2067, 1845, 1605, 1418, 1260, 1141, 1061, 1027, 1041, 1095, 1194, 1328, 1497, 1713, 1942, 2139, 2318, 2083, 1870, 1634, 1448, 1296, 1173, 1096, 1062, 1073, 1129, 1226, 1363, 1528, 1741, 1967, 2157, 2345, 2113, 1918, 1691, 1495, 1351, 1233, 1154, 1119, 1132, 1189, 1286, 1418, 1583, 1795, 2001, 2190, 2416, 2159, 1976, 1767, 1568, 1424, 1311, 1232, 1202, 1211, 1268, 1363, 1490, 1661, 1868, 2047, 2256, 2502, 2222, 2037, 1855, 1670, 1518, 1407, 1333, 1302, 1313, 1369, 1457, 1591, 1756, 1941, 2106, 2352, 2619, 2304, 2118, 1948, 1789, 1638, 1523, 1449, 1418, 1432, 1483, 1578, 1706, 1875, 2011, 2197, 2473, 2758, 2433, 2198, 2052, 1915, 1783, 1674, 1593, 1566, 1576, 1629, 1721, 1852, 1976, 2115, 2312, 2657, 3071, 2569, 2344, 2154, 2039, 1930, 1841, 1773, 1734, 1748, 1795, 1881, 1974, 2089, 2231, 2521, 2802, ]
+ gb: [3133, 2656, 2457, 2275, 2154, 2053, 1951, 1877, 1838, 1848, 1901, 1985, 2088, 2205, 2345, 2598, 2891, 2824, 2492, 2292, 2135, 2015, 1879, 1765, 1681, 1647, 1653, 1708, 1800, 1928, 2056, 2208, 2384, 2708, 2667, 2381, 2198, 2039, 1879, 1723, 1610, 1527, 1492, 1502, 1553, 1645, 1781, 1953, 2093, 2277, 2545, 2558, 2287, 2108, 1931, 1743, 1586, 1472, 1400, 1359, 1367, 1424, 1513, 1652, 1830, 2012, 2188, 2417, 2474, 2212, 2042, 1831, 1630, 1477, 1365, 1283, 1242, 1255, 1313, 1408, 1538, 1723, 1930, 2127, 2323, 2395, 2169, 1970, 1738, 1548, 1392, 1272, 1187, 1151, 1161, 1222, 1322, 1459, 1633, 1861, 2066, 2263, 2356, 2130, 1922, 1679, 1479, 1325, 1200, 1118, 1082, 1094, 1151, 1254, 1396, 1573, 1792, 2024, 2227, 2337, 2095, 1883, 1627, 1438, 1279, 1156, 1074, 1038, 1054, 1110, 1211, 1352, 1530, 1752, 1997, 2195, 2306, 2095, 1861, 1616, 1421, 1258, 1139, 1055, 1024, 1035, 1094, 1193, 1335, 1513, 1741, 1986, 2182, 2315, 2094, 1867, 1622, 1427, 1266, 1143, 1064, 1029, 1044, 1100, 1202, 1344, 1523, 1746, 1989, 2193, 2342, 2108, 1890, 1648, 1458, 1299, 1176, 1096, 1061, 1075, 1132, 1236, 1376, 1557, 1773, 2010, 2203, 2377, 2140, 1939, 1704, 1508, 1353, 1232, 1154, 1120, 1131, 1193, 1292, 1432, 1608, 1828, 2044, 2251, 2443, 2185, 1992, 1782, 1577, 1428, 1315, 1233, 1199, 1214, 1271, 1370, 1504, 1685, 1895, 2093, 2305, 2519, 2249, 2058, 1869, 1675, 1519, 1406, 1331, 1298, 1313, 1371, 1462, 1599, 1781, 1976, 2139, 2405, 2637, 2326, 2130, 1962, 1792, 1637, 1521, 1445, 1412, 1428, 1481, 1578, 1713, 1888, 2035, 2238, 2529, 2777, 2458, 2215, 2053, 1917, 1776, 1662, 1588, 1554, 1568, 1624, 1722, 1851, 1992, 2136, 2351, 2708, 3076, 2575, 2354, 2161, 2036, 1925, 1834, 1757, 1723, 1732, 1779, 1874, 1972, 2093, 2258, 2546, 2857, ]
+ b: [2906, 2483, 2290, 2108, 2020, 1921, 1851, 1778, 1756, 1759, 1799, 1880, 1969, 2074, 2183, 2435, 2664, 2618, 2324, 2122, 1992, 1883, 1772, 1666, 1601, 1578, 1586, 1627, 1712, 1827, 1934, 2072, 2225, 2524, 2483, 2211, 2037, 1900, 1761, 1625, 1532, 1472, 1447, 1449, 1486, 1571, 1692, 1847, 1968, 2118, 2360, 2370, 2126, 1961, 1803, 1638, 1509, 1411, 1355, 1324, 1335, 1376, 1449, 1572, 1729, 1884, 2042, 2233, 2286, 2051, 1902, 1710, 1537, 1407, 1314, 1249, 1222, 1228, 1276, 1356, 1472, 1629, 1815, 1975, 2159, 2238, 2012, 1839, 1636, 1463, 1333, 1232, 1165, 1137, 1144, 1192, 1280, 1394, 1549, 1743, 1922, 2094, 2184, 1979, 1797, 1586, 1413, 1279, 1170, 1102, 1074, 1086, 1134, 1219, 1345, 1492, 1684, 1888, 2067, 2160, 1958, 1765, 1546, 1378, 1240, 1132, 1062, 1035, 1050, 1095, 1184, 1307, 1459, 1646, 1858, 2036, 2151, 1954, 1752, 1531, 1366, 1224, 1115, 1046, 1026, 1033, 1081, 1170, 1293, 1450, 1635, 1845, 2032, 2155, 1948, 1754, 1535, 1373, 1228, 1118, 1053, 1024, 1038, 1088, 1175, 1299, 1452, 1638, 1849, 2027, 2179, 1970, 1780, 1565, 1391, 1259, 1147, 1079, 1053, 1063, 1113, 1203, 1324, 1474, 1668, 1869, 2037, 2214, 1989, 1816, 1610, 1433, 1297, 1194, 1130, 1105, 1112, 1161, 1249, 1367, 1522, 1710, 1892, 2074, 2264, 2034, 1863, 1673, 1491, 1360, 1264, 1199, 1176, 1185, 1230, 1312, 1434, 1590, 1770, 1936, 2127, 2348, 2084, 1916, 1751, 1581, 1437, 1343, 1284, 1254, 1268, 1312, 1395, 1516, 1673, 1837, 1986, 2216, 2445, 2159, 1975, 1832, 1684, 1544, 1441, 1381, 1358, 1367, 1413, 1494, 1612, 1773, 1894, 2067, 2330, 2573, 2285, 2061, 1914, 1791, 1672, 1568, 1507, 1480, 1492, 1529, 1619, 1743, 1862, 1987, 2168, 2475, 2853, 2395, 2197, 2003, 1909, 1798, 1726, 1652, 1638, 1640, 1687, 1762, 1852, 1956, 2101, 2365, 2643, ]
+ #3264x2448_F2_CWF_70 - F2_CWF
+ - ct: 4230
+ resolution: 3264x2448
+ r: [3695, 3077, 2822, 2622, 2472, 2342, 2200, 2111, 2075, 2079, 2145, 2258, 2393, 2547, 2713, 3030, 3396, 3294, 2882, 2641, 2461, 2294, 2117, 1965, 1868, 1822, 1827, 1898, 2020, 2200, 2366, 2557, 2763, 3190, 3081, 2755, 2527, 2334, 2120, 1915, 1760, 1667, 1625, 1635, 1702, 1820, 2002, 2225, 2422, 2641, 2979, 2935, 2624, 2415, 2192, 1939, 1732, 1587, 1496, 1452, 1461, 1526, 1643, 1825, 2064, 2314, 2518, 2804, 2832, 2532, 2323, 2050, 1792, 1591, 1448, 1348, 1301, 1315, 1382, 1504, 1675, 1916, 2190, 2435, 2700, 2735, 2464, 2229, 1935, 1680, 1485, 1327, 1227, 1183, 1194, 1265, 1392, 1567, 1799, 2091, 2351, 2611, 2673, 2415, 2150, 1853, 1597, 1397, 1244, 1144, 1096, 1111, 1182, 1308, 1489, 1715, 2000, 2291, 2552, 2638, 2381, 2104, 1797, 1546, 1342, 1189, 1086, 1042, 1058, 1126, 1255, 1435, 1666, 1950, 2257, 2514, 2621, 2361, 2083, 1766, 1525, 1319, 1164, 1064, 1024, 1037, 1106, 1231, 1415, 1644, 1929, 2233, 2506, 2638, 2364, 2088, 1777, 1528, 1326, 1168, 1073, 1029, 1046, 1115, 1240, 1422, 1654, 1941, 2237, 2511, 2655, 2388, 2121, 1813, 1563, 1366, 1210, 1114, 1070, 1084, 1155, 1283, 1459, 1693, 1981, 2269, 2530, 2712, 2427, 2182, 1884, 1628, 1428, 1281, 1183, 1143, 1158, 1226, 1352, 1531, 1764, 2046, 2317, 2579, 2790, 2485, 2250, 1983, 1722, 1523, 1379, 1284, 1242, 1258, 1327, 1454, 1628, 1862, 2139, 2376, 2667, 2895, 2571, 2344, 2103, 1851, 1644, 1506, 1409, 1371, 1388, 1457, 1578, 1756, 1996, 2250, 2457, 2782, 3048, 2672, 2441, 2229, 2007, 1806, 1658, 1567, 1526, 1541, 1611, 1739, 1916, 2148, 2340, 2583, 2953, 3225, 2827, 2544, 2353, 2172, 1998, 1846, 1755, 1708, 1732, 1794, 1928, 2102, 2282, 2468, 2726, 3175, 3641, 3010, 2734, 2492, 2341, 2192, 2069, 1968, 1937, 1948, 2023, 2139, 2270, 2437, 2634, 2994, 3392, ]
+ gr: [3050, 2599, 2407, 2232, 2134, 2044, 1950, 1879, 1843, 1845, 1897, 1973, 2069, 2164, 2285, 2518, 2788, 2763, 2436, 2247, 2112, 1994, 1867, 1764, 1688, 1655, 1661, 1710, 1788, 1907, 2024, 2157, 2320, 2612, 2604, 2323, 2155, 2009, 1858, 1715, 1606, 1543, 1504, 1512, 1556, 1640, 1766, 1917, 2047, 2211, 2450, 2492, 2232, 2067, 1906, 1727, 1584, 1480, 1411, 1371, 1381, 1428, 1512, 1632, 1799, 1962, 2124, 2327, 2400, 2164, 1999, 1801, 1617, 1475, 1369, 1292, 1252, 1264, 1317, 1408, 1525, 1691, 1879, 2063, 2240, 2326, 2120, 1935, 1721, 1533, 1392, 1278, 1194, 1156, 1167, 1225, 1319, 1443, 1606, 1809, 2003, 2170, 2291, 2075, 1883, 1653, 1470, 1323, 1204, 1122, 1086, 1096, 1153, 1252, 1381, 1540, 1746, 1951, 2139, 2256, 2043, 1839, 1609, 1430, 1278, 1158, 1076, 1038, 1052, 1108, 1206, 1341, 1500, 1702, 1929, 2103, 2242, 2036, 1820, 1596, 1411, 1260, 1138, 1053, 1024, 1032, 1091, 1186, 1322, 1484, 1690, 1909, 2098, 2251, 2034, 1826, 1598, 1416, 1267, 1143, 1065, 1027, 1043, 1097, 1198, 1328, 1493, 1694, 1913, 2096, 2263, 2048, 1852, 1626, 1447, 1298, 1177, 1096, 1063, 1075, 1131, 1230, 1360, 1521, 1723, 1934, 2117, 2316, 2078, 1897, 1680, 1494, 1351, 1238, 1159, 1123, 1135, 1193, 1290, 1416, 1572, 1776, 1974, 2152, 2362, 2122, 1947, 1746, 1562, 1424, 1313, 1238, 1207, 1218, 1272, 1361, 1484, 1647, 1838, 2014, 2215, 2461, 2182, 2007, 1835, 1653, 1510, 1408, 1336, 1305, 1317, 1368, 1456, 1576, 1736, 1919, 2068, 2306, 2560, 2260, 2080, 1920, 1771, 1626, 1516, 1450, 1420, 1432, 1480, 1566, 1687, 1844, 1975, 2157, 2418, 2703, 2387, 2160, 2012, 1888, 1763, 1660, 1588, 1558, 1566, 1617, 1702, 1827, 1943, 2075, 2267, 2603, 2992, 2511, 2296, 2118, 2001, 1898, 1817, 1749, 1719, 1730, 1779, 1859, 1938, 2050, 2187, 2457, 2741, ]
+ gb: [3060, 2612, 2398, 2229, 2123, 2030, 1932, 1857, 1822, 1830, 1874, 1957, 2069, 2163, 2291, 2542, 2825, 2776, 2432, 2251, 2106, 1988, 1856, 1748, 1668, 1636, 1641, 1695, 1784, 1902, 2026, 2170, 2338, 2654, 2609, 2336, 2151, 2005, 1853, 1710, 1597, 1527, 1487, 1500, 1546, 1634, 1768, 1926, 2063, 2235, 2497, 2514, 2248, 2075, 1908, 1727, 1578, 1471, 1396, 1360, 1371, 1422, 1509, 1639, 1810, 1981, 2151, 2365, 2415, 2182, 2010, 1807, 1619, 1474, 1366, 1284, 1247, 1257, 1316, 1409, 1532, 1710, 1906, 2098, 2282, 2358, 2140, 1949, 1725, 1539, 1393, 1276, 1191, 1153, 1166, 1224, 1325, 1455, 1628, 1840, 2045, 2226, 2308, 2101, 1903, 1666, 1479, 1329, 1204, 1121, 1083, 1098, 1154, 1260, 1395, 1565, 1775, 2000, 2191, 2296, 2069, 1863, 1625, 1437, 1285, 1160, 1074, 1038, 1053, 1112, 1214, 1355, 1527, 1746, 1970, 2167, 2280, 2060, 1844, 1609, 1422, 1262, 1140, 1055, 1024, 1034, 1095, 1198, 1337, 1516, 1724, 1962, 2155, 2284, 2063, 1850, 1618, 1429, 1273, 1147, 1064, 1030, 1043, 1104, 1207, 1351, 1519, 1738, 1965, 2159, 2303, 2083, 1878, 1640, 1460, 1304, 1182, 1099, 1065, 1078, 1136, 1244, 1379, 1552, 1764, 1986, 2181, 2341, 2110, 1916, 1698, 1504, 1359, 1238, 1159, 1125, 1136, 1197, 1297, 1431, 1599, 1809, 2018, 2208, 2403, 2156, 1967, 1764, 1570, 1427, 1315, 1237, 1205, 1217, 1274, 1369, 1502, 1673, 1875, 2061, 2278, 2488, 2208, 2025, 1848, 1662, 1513, 1405, 1333, 1304, 1314, 1372, 1460, 1588, 1760, 1946, 2108, 2355, 2596, 2289, 2101, 1934, 1775, 1624, 1516, 1442, 1412, 1425, 1476, 1571, 1700, 1865, 2005, 2195, 2486, 2720, 2411, 2169, 2025, 1895, 1760, 1650, 1578, 1548, 1559, 1612, 1702, 1834, 1960, 2101, 2302, 2647, 3035, 2523, 2314, 2125, 2002, 1897, 1806, 1738, 1705, 1716, 1766, 1855, 1944, 2061, 2204, 2497, 2792, ]
+ b: [2861, 2421, 2239, 2078, 1980, 1893, 1811, 1762, 1723, 1742, 1779, 1851, 1933, 2034, 2151, 2359, 2635, 2562, 2279, 2088, 1949, 1859, 1748, 1650, 1585, 1562, 1570, 1607, 1691, 1798, 1909, 2028, 2181, 2467, 2428, 2166, 2009, 1873, 1736, 1613, 1518, 1461, 1436, 1441, 1480, 1557, 1676, 1814, 1932, 2087, 2311, 2326, 2088, 1923, 1779, 1621, 1492, 1404, 1351, 1322, 1329, 1368, 1445, 1557, 1708, 1863, 2004, 2200, 2250, 2013, 1869, 1687, 1522, 1398, 1309, 1250, 1218, 1231, 1273, 1354, 1457, 1615, 1779, 1941, 2113, 2187, 1979, 1812, 1617, 1454, 1331, 1231, 1163, 1137, 1145, 1195, 1277, 1392, 1537, 1720, 1899, 2061, 2161, 1947, 1769, 1567, 1405, 1273, 1171, 1101, 1078, 1087, 1132, 1222, 1336, 1483, 1665, 1849, 2018, 2122, 1923, 1740, 1530, 1369, 1239, 1131, 1064, 1037, 1049, 1096, 1182, 1306, 1452, 1625, 1829, 1999, 2115, 1919, 1730, 1520, 1360, 1222, 1117, 1046, 1024, 1033, 1086, 1169, 1288, 1439, 1617, 1815, 1991, 2121, 1918, 1736, 1524, 1359, 1227, 1119, 1053, 1025, 1040, 1088, 1173, 1295, 1442, 1624, 1817, 1995, 2136, 1934, 1750, 1546, 1384, 1254, 1147, 1079, 1053, 1063, 1114, 1203, 1321, 1464, 1649, 1837, 2004, 2179, 1955, 1795, 1587, 1423, 1294, 1195, 1131, 1105, 1112, 1161, 1247, 1362, 1506, 1688, 1872, 2037, 2228, 1999, 1833, 1656, 1480, 1353, 1263, 1197, 1172, 1182, 1228, 1311, 1423, 1574, 1751, 1903, 2078, 2309, 2047, 1889, 1724, 1558, 1425, 1336, 1277, 1252, 1263, 1308, 1382, 1500, 1654, 1806, 1954, 2164, 2390, 2114, 1949, 1802, 1660, 1524, 1429, 1373, 1352, 1360, 1401, 1482, 1597, 1748, 1863, 2031, 2287, 2520, 2231, 2019, 1882, 1760, 1651, 1549, 1494, 1466, 1478, 1519, 1597, 1715, 1827, 1947, 2124, 2444, 2788, 2355, 2157, 1974, 1878, 1770, 1701, 1637, 1615, 1612, 1661, 1743, 1824, 1925, 2064, 2315, 2599, ]
+
diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml
new file mode 100644
index 00000000..60901296
--- /dev/null
+++ b/src/ipa/rkisp1/data/uncalibrated.yaml
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - Agc:
+ - Awb:
+ - BlackLevelCorrection:
+...
diff --git a/src/ipa/rkisp1/ipa_context.cpp b/src/ipa/rkisp1/ipa_context.cpp
new file mode 100644
index 00000000..80b99df8
--- /dev/null
+++ b/src/ipa/rkisp1/ipa_context.cpp
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::rkisp1 {
+
+/**
+ * \struct IPAHwSettings
+ * \brief RkISP1 version-specific hardware parameters
+ */
+
+/**
+ * \var IPAHwSettings::numAeCells
+ * \brief Number of cells in the AE exposure means grid
+ *
+ * \var IPAHwSettings::numHistogramBins
+ * \brief Number of bins in the histogram
+ *
+ * \var IPAHwSettings::numHistogramWeights
+ * \brief Number of weights in the histogram grid
+ *
+ * \var IPAHwSettings::numGammaOutSamples
+ * \brief Number of samples in the gamma out table
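+ *
+ * \var IPAHwSettings::compand
+ * \brief Whether the ISP variant supports the companding parameter blocks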
+ */
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \var IPASessionConfiguration::agc
+ * \brief AGC parameters configuration of the IPA
+ *
+ * \var IPASessionConfiguration::agc.measureWindow
+ * \brief AGC measure window
+ */
+
+/**
+ * \var IPASessionConfiguration::awb
+ * \brief AWB parameters configuration of the IPA
+ *
+ * \var IPASessionConfiguration::awb.measureWindow
+ * \brief AWB measure window
+ *
+ * \var IPASessionConfiguration::awb.enabled
+ * \brief Indicates if the AWB hardware is enabled and applies colour gains
+ *
+ * The AWB module of the ISP applies colour gains and computes statistics. It is
+ * enabled when the AWB algorithm is loaded, regardless of whether the algorithm
+ * operates in manual or automatic mode.
+ */
+
+/**
+ * \var IPASessionConfiguration::lsc
+ * \brief Lens Shading Correction configuration of the IPA
+ *
+ * \var IPASessionConfiguration::lsc.enabled
+ * \brief Indicates if the LSC hardware is enabled
+ */
+
+/**
+ * \var IPASessionConfiguration::sensor
+ * \brief Sensor-specific configuration of the IPA
+ *
+ * \var IPASessionConfiguration::sensor.minExposureTime
+ * \brief Minimum exposure time supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.maxExposureTime
+ * \brief Maximum exposure time supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.minAnalogueGain
+ * \brief Minimum analogue gain supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.maxAnalogueGain
+ * \brief Maximum analogue gain supported with the sensor
+ *
+ * \var IPASessionConfiguration::sensor.defVBlank
+ * \brief The default vblank value of the sensor
+ *
+ * \var IPASessionConfiguration::sensor.lineDuration
+ * \brief Line duration in microseconds
+ *
+ * \var IPASessionConfiguration::sensor.size
+ * \brief Sensor output resolution
+ */
+
+/**
+ * \var IPASessionConfiguration::raw
+ * \brief Indicates if the camera is configured to capture raw frames
+ */
+
+/**
+ * \var IPASessionConfiguration::paramFormat
+ * \brief The FourCC of the parameters buffer format
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief Active state for algorithms
+ *
+ * The active state contains all algorithm-specific data that needs to be
+ * maintained by algorithms across frames. Unlike the session configuration,
+ * the active state is mutable and constantly updated by algorithms. The active
+ * state is accessible through the IPAContext structure.
+ *
+ * The active state stores two distinct categories of information:
+ *
+ * - The consolidated value of all algorithm controls. Requests passed to
+ * the queueRequest() function store values for controls that the
+ * application wants to modify for that particular frame, and the
+ * queueRequest() function updates the active state with those values.
+ * The active state thus contains a consolidated view of the value of all
+ * controls handled by the algorithm.
+ *
+ * - The value of parameters computed by the algorithm when running in auto
+ * mode. Algorithms running in auto mode compute new parameters every
+ * time statistics buffers are received (either synchronously, or
+ * possibly in a background thread). The latest computed value of those
+ * parameters is stored in the active state in the process() function.
+ *
+ * Each of the members in the active state belongs to a specific algorithm. A
+ * member may be read by any algorithm, but shall only be written by its owner.
+ */
+
+/**
+ * \var IPAActiveState::agc
+ * \brief State for the Automatic Gain Control algorithm
+ *
+ * The \a automatic variables track the latest values computed by the algorithm
+ * based on the latest processed statistics. All other variables track the
+ * consolidated controls requested in queued requests.
+ *
+ * \struct IPAActiveState::agc.manual
+ * \brief Manual exposure time and analog gain (set through requests)
+ *
+ * \var IPAActiveState::agc.manual.exposure
+ * \brief Manual exposure time expressed as a number of lines as set by the
+ * ExposureTime control
+ *
+ * \var IPAActiveState::agc.manual.gain
+ * \brief Manual analogue gain as set by the AnalogueGain control
+ *
+ * \struct IPAActiveState::agc.automatic
+ * \brief Automatic exposure time and analog gain (computed by the algorithm)
+ *
+ * \var IPAActiveState::agc.automatic.exposure
+ * \brief Automatic exposure time expressed as a number of lines
+ *
+ * \var IPAActiveState::agc.automatic.gain
+ * \brief Automatic analogue gain multiplier
+ *
+ * \var IPAActiveState::agc.autoEnabled
+ * \brief Manual/automatic AGC state as set by the AeEnable control
+ *
+ * \var IPAActiveState::agc.constraintMode
+ * \brief Constraint mode as set by the AeConstraintMode control
+ *
+ * \var IPAActiveState::agc.exposureMode
+ * \brief Exposure mode as set by the AeExposureMode control
+ *
+ * \var IPAActiveState::agc.meteringMode
+ * \brief Metering mode as set by the AeMeteringMode control
+ *
+ * \var IPAActiveState::agc.maxFrameDuration
+ * \brief Maximum frame duration as set by the FrameDurationLimits control
+ */
+
+/**
+ * \var IPAActiveState::awb
+ * \brief State for the Automatic White Balance algorithm
+ *
+ * \struct IPAActiveState::awb.gains
+ * \brief White balance gains
+ *
+ * \var IPAActiveState::awb.gains.manual
+ * \brief Manual white balance gains (set through requests)
+ *
+ * \var IPAActiveState::awb.gains.automatic
+ * \brief Automatic white balance gains (computed by the algorithm)
+ *
+ * \var IPAActiveState::awb.temperatureK
+ * \brief Estimated color temperature
+ *
+ * \var IPAActiveState::awb.autoEnabled
+ * \brief Whether the Auto White Balance algorithm is enabled
+ */
+
+/**
+ * \var IPAActiveState::cproc
+ * \brief State for the Color Processing algorithm
+ *
+ * \var IPAActiveState::cproc.brightness
+ * \brief Brightness level
+ *
+ * \var IPAActiveState::cproc.contrast
+ * \brief Contrast level
+ *
+ * \var IPAActiveState::cproc.saturation
+ * \brief Saturation level
+ */
+
+/**
+ * \var IPAActiveState::dpf
+ * \brief State for the Denoise Pre-Filter algorithm
+ *
+ * \var IPAActiveState::dpf.denoise
+ * \brief Indicates if denoise is activated
+ */
+
+/**
+ * \var IPAActiveState::filter
+ * \brief State for the Filter algorithm
+ *
+ * \var IPAActiveState::filter.denoise
+ * \brief Denoising level
+ *
+ * \var IPAActiveState::filter.sharpness
+ * \brief Sharpness level
+ */
+
+/**
+ * \var IPAActiveState::goc
+ * \brief State for the gamma out correction (GOC) algorithm
+ *
+ * \var IPAActiveState::goc.gamma
+ * \brief Gamma value applied as 1.0/gamma
+ */
+
+/**
+ * \struct IPAFrameContext
+ * \brief Per-frame context for algorithms
+ *
+ * The frame context stores two distinct categories of information:
+ *
+ * - The value of the controls to be applied to the frame. These values are
+ * typically set in the queueRequest() function, from the consolidated
+ * control values stored in the active state. The frame context thus stores
+ * values for all controls related to the algorithm, not limited to the
+ * controls specified in the corresponding request, but consolidated from all
+ * requests that have been queued so far.
+ *
+ * For controls that can be set manually or computed by an algorithm
+ * (depending on the algorithm operation mode), such as for instance the
+ * colour gains for the AWB algorithm, the control value will be stored in
+ * the frame context in the queueRequest() function only when operating in
+ * manual mode. When operating in auto mode, the values are computed by the
+ * algorithm in process(), stored in the active state, and copied to the
+ * frame context in prepare(), just before being stored in the ISP parameters
+ * buffer.
+ *
+ * The queueRequest() function can also store ancillary data in the frame
+ * context, such as flags to indicate if (and what) control values have
+ * changed compared to the previous request.
+ *
+ * - Status information computed by the algorithm for a frame. For instance,
+ * the colour temperature estimated by the AWB algorithm from ISP statistics
+ * calculated on a frame is stored in the frame context for that frame in
+ * the process() function.
+ */
+
+/**
+ * \var IPAFrameContext::agc
+ * \brief Automatic Gain Control parameters for this frame
+ *
+ * The exposure and gain are provided by the AGC algorithm, and are to be
+ * applied to the sensor in order to take effect for this frame.
+ *
+ * \var IPAFrameContext::agc.exposure
+ * \brief Exposure time expressed as a number of lines computed by the algorithm
+ *
+ * \var IPAFrameContext::agc.gain
+ * \brief Analogue gain multiplier computed by the algorithm
+ *
+ * The gain should be converted to the sensor-specific gain code before
+ * being applied.
+ *
+ * \var IPAFrameContext::agc.autoEnabled
+ * \brief Manual/automatic AGC state as set by the AeEnable control
+ *
+ * \var IPAFrameContext::agc.constraintMode
+ * \brief Constraint mode as set by the AeConstraintMode control
+ *
+ * \var IPAFrameContext::agc.exposureMode
+ * \brief Exposure mode as set by the AeExposureMode control
+ *
+ * \var IPAFrameContext::agc.meteringMode
+ * \brief Metering mode as set by the AeMeteringMode control
+ *
+ * \var IPAFrameContext::agc.maxFrameDuration
+ * \brief Maximum frame duration as set by the FrameDurationLimits control
+ *
+ * \var IPAFrameContext::agc.updateMetering
+ * \brief Indicates if new ISP AGC metering parameters need to be applied
+ */
+
+/**
+ * \var IPAFrameContext::awb
+ * \brief Automatic White Balance parameters for this frame
+ *
+ * \var IPAFrameContext::awb.gains
+ * \brief White balance gains
+ *
+ * \var IPAFrameContext::awb.temperatureK
+ * \brief Estimated color temperature
+ *
+ * \var IPAFrameContext::awb.autoEnabled
+ * \brief Whether the Auto White Balance algorithm is enabled
+ */
+
+/**
+ * \var IPAFrameContext::cproc
+ * \brief Color Processing parameters for this frame
+ *
+ * \var IPAFrameContext::cproc.brightness
+ * \brief Brightness level
+ *
+ * \var IPAFrameContext::cproc.contrast
+ * \brief Contrast level
+ *
+ * \var IPAFrameContext::cproc.saturation
+ * \brief Saturation level
+ *
+ * \var IPAFrameContext::cproc.update
+ * \brief Indicates if the color processing parameters have been updated
+ * compared to the previous frame
+ */
+
+/**
+ * \var IPAFrameContext::dpf
+ * \brief Denoise Pre-Filter parameters for this frame
+ *
+ * \var IPAFrameContext::dpf.denoise
+ * \brief Indicates if denoise is activated
+ *
+ * \var IPAFrameContext::dpf.update
+ * \brief Indicates if the denoise pre-filter parameters have been updated
+ * compared to the previous frame
+ */
+
+/**
+ * \var IPAFrameContext::filter
+ * \brief Filter parameters for this frame
+ *
+ * \var IPAFrameContext::filter.denoise
+ * \brief Denoising level
+ *
+ * \var IPAFrameContext::filter.sharpness
+ * \brief Sharpness level
+ *
+ * \var IPAFrameContext::filter.updateParams
+ * \brief Indicates if the filter parameters have been updated compared to the
+ * previous frame
+ */
+
+/**
+ * \var IPAFrameContext::goc
+ * \brief Gamma out correction parameters for this frame
+ *
+ * \var IPAFrameContext::goc.gamma
+ * \brief Gamma value applied as 1.0/gamma
+ *
+ * \var IPAFrameContext::goc.update
+ * \brief Indicates if the goc parameters have been updated compared to the
+ * previous frame
+ */
+
+/**
+ * \var IPAFrameContext::sensor
+ * \brief Sensor configuration that has been used for this frame
+ *
+ * \var IPAFrameContext::sensor.exposure
+ * \brief Exposure time expressed as a number of lines
+ *
+ * \var IPAFrameContext::sensor.gain
+ * \brief Analogue gain multiplier
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::hw
+ * \brief RkISP1 version-specific hardware parameters
+ *
+ * \var IPAContext::sensorInfo
+ * \brief The camera sensor information, immutable during the session
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::activeState
+ * \brief The IPA active state, storing the latest state for all algorithms
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of per-frame contexts
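+ *
+ * \var IPAContext::ctrlMap
+ * \brief Control info map for controls populated dynamically by the algorithms
+ *
+ * \var IPAContext::debugMetadata
+ * \brief Helper to record debug metadata for the frames
+ *
+ * \var IPAContext::camHelper
+ * \brief Interface to the camera sensor helper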
+ */
+
+} /* namespace libcamera::ipa::rkisp1 */
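Editor's note: the manual/auto split documented above can be summarised with a
short sketch. The following stand-alone model uses simplified stand-ins for the
real IPAActiveState and IPAFrameContext types (defined in ipa_context.h below);
the function bodies are illustrative, not the upstream implementation:

    /*
     * Toy model of the control flow described in the IPAActiveState and
     * IPAFrameContext documentation: queueRequest() consolidates request
     * controls into the active state, prepare() copies the values that
     * apply to a frame into its frame context, and process() stores the
     * values computed from statistics back into the active state.
     */
    struct Gains { double red = 1.0; double blue = 1.0; };

    struct ActiveState {
        struct {
            struct { Gains manual; Gains automatic; } gains;
            bool autoEnabled = true;
        } awb;
    };

    struct FrameContext {
        struct { Gains gains; bool autoEnabled; } awb;
    };

    /* Called for each request: consolidate controls into the active state. */
    void queueRequest(ActiveState &state, bool awbEnable, const Gains *manualGains)
    {
        state.awb.autoEnabled = awbEnable;
        if (manualGains)
            state.awb.gains.manual = *manualGains;
    }

    /* Called before filling ISP parameters: snapshot values for the frame. */
    void prepare(const ActiveState &state, FrameContext &frame)
    {
        frame.awb.autoEnabled = state.awb.autoEnabled;
        frame.awb.gains = state.awb.autoEnabled
                        ? state.awb.gains.automatic
                        : state.awb.gains.manual;
    }

    /* Called on new statistics: store the freshly computed auto values. */
    void process(ActiveState &state, const Gains &computed)
    {
        state.awb.gains.automatic = computed;
    }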
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
new file mode 100644
index 00000000..b83c1822
--- /dev/null
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021-2022, Ideas On Board
+ *
+ * RkISP1 IPA Context
+ *
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/debug_controls.h"
+#include "libcamera/internal/matrix.h"
+
+#include <libipa/camera_sensor_helper.h>
+#include <libipa/fc_queue.h>
+#include <libipa/vector.h>
+
+namespace libcamera {
+
+namespace ipa::rkisp1 {
+
+struct IPAHwSettings {
+ unsigned int numAeCells;
+ unsigned int numHistogramBins;
+ unsigned int numHistogramWeights;
+ unsigned int numGammaOutSamples;
+ bool compand;
+};
+
+struct IPASessionConfiguration {
+ struct {
+ struct rkisp1_cif_isp_window measureWindow;
+ } agc;
+
+ struct {
+ struct rkisp1_cif_isp_window measureWindow;
+ bool enabled;
+ } awb;
+
+ struct {
+ bool enabled;
+ } lsc;
+
+ struct {
+ utils::Duration minExposureTime;
+ utils::Duration maxExposureTime;
+ double minAnalogueGain;
+ double maxAnalogueGain;
+
+ int32_t defVBlank;
+ utils::Duration lineDuration;
+ Size size;
+ } sensor;
+
+ bool raw;
+ uint32_t paramFormat;
+};
+
+struct IPAActiveState {
+ struct {
+ struct {
+ uint32_t exposure;
+ double gain;
+ } manual;
+ struct {
+ uint32_t exposure;
+ double gain;
+ } automatic;
+
+ bool autoEnabled;
+ controls::AeConstraintModeEnum constraintMode;
+ controls::AeExposureModeEnum exposureMode;
+ controls::AeMeteringModeEnum meteringMode;
+ utils::Duration maxFrameDuration;
+ } agc;
+
+ struct {
+ struct {
+ RGB<double> manual;
+ RGB<double> automatic;
+ } gains;
+
+ unsigned int temperatureK;
+ bool autoEnabled;
+ } awb;
+
+ struct {
+ Matrix<float, 3, 3> ccm;
+ } ccm;
+
+ struct {
+ int8_t brightness;
+ uint8_t contrast;
+ uint8_t saturation;
+ } cproc;
+
+ struct {
+ bool denoise;
+ } dpf;
+
+ struct {
+ uint8_t denoise;
+ uint8_t sharpness;
+ } filter;
+
+ struct {
+ double gamma;
+ } goc;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ uint32_t exposure;
+ double gain;
+ bool autoEnabled;
+ controls::AeConstraintModeEnum constraintMode;
+ controls::AeExposureModeEnum exposureMode;
+ controls::AeMeteringModeEnum meteringMode;
+ utils::Duration maxFrameDuration;
+ bool updateMetering;
+ } agc;
+
+ struct {
+ RGB<double> gains;
+ bool autoEnabled;
+ unsigned int temperatureK;
+ } awb;
+
+ struct {
+ int8_t brightness;
+ uint8_t contrast;
+ uint8_t saturation;
+ bool update;
+ } cproc;
+
+ struct {
+ bool denoise;
+ bool update;
+ } dpf;
+
+ struct {
+ uint8_t denoise;
+ uint8_t sharpness;
+ bool update;
+ } filter;
+
+ struct {
+ double gamma;
+ bool update;
+ } goc;
+
+ struct {
+ uint32_t exposure;
+ double gain;
+ } sensor;
+
+ struct {
+ Matrix<float, 3, 3> ccm;
+ } ccm;
+
+ struct {
+ double lux;
+ } lux;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : hw(nullptr), frameContexts(frameContextSize)
+ {
+ }
+
+ const IPAHwSettings *hw;
+ IPACameraSensorInfo sensorInfo;
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+
+ FCQueue<IPAFrameContext> frameContexts;
+
+ ControlInfoMap::Map ctrlMap;
+
+ DebugMetadata debugMetadata;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper;
+};
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
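Editor's note: the FCQueue member above is a ring buffer of IPAFrameContext
entries, cycled once per frame by the IPA (alloc() in queueRequest(), get() in
computeParams() and processStats(), clear() on stop and reconfiguration, as
seen in rkisp1.cpp below). A trivial stand-in conveying the idea, with the real
wrap-around checks and error handling of libipa/fc_queue.h omitted:

    #include <cstdint>
    #include <vector>

    /* Toy stand-in for libipa's FCQueue: a fixed-size ring indexed by the
     * frame number. FrameContext must be default-constructible. */
    template<typename FrameContext>
    class ToyFCQueue
    {
    public:
        ToyFCQueue(unsigned int size) : contexts_(size) {}

        /* Reset and return the slot for a newly queued frame. */
        FrameContext &alloc(uint32_t frame)
        {
            FrameContext &ctx = contexts_[frame % contexts_.size()];
            ctx = {};
            return ctx;
        }

        /* Retrieve the slot for an in-flight frame. */
        FrameContext &get(uint32_t frame)
        {
            return contexts_[frame % contexts_.size()];
        }

        void clear()
        {
            for (FrameContext &ctx : contexts_)
                ctx = {};
        }

    private:
        std::vector<FrameContext> contexts_;
    };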
diff --git a/src/ipa/rkisp1/meson.build b/src/ipa/rkisp1/meson.build
index 521518bd..26a9fa40 100644
--- a/src/ipa/rkisp1/meson.build
+++ b/src/ipa/rkisp1/meson.build
@@ -1,8 +1,32 @@
-rkisp1_ipa = shared_module('ipa_rkisp1',
- 'rkisp1.cpp',
- name_prefix : '',
- include_directories : [ipa_includes, libipa_includes],
- dependencies : libcamera_dep,
- link_with : libipa,
- install : true,
- install_dir : ipa_install_dir)
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_rkisp1'
+
+rkisp1_ipa_sources = files([
+ 'ipa_context.cpp',
+ 'params.cpp',
+ 'rkisp1.cpp',
+])
+
+rkisp1_ipa_sources += rkisp1_ipa_algorithms
+
+mod = shared_module(ipa_name, rkisp1_ipa_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/rkisp1/module.h b/src/ipa/rkisp1/module.h
new file mode 100644
index 00000000..69e9bc82
--- /dev/null
+++ b/src/ipa/rkisp1/module.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Ideas On Board
+ *
+ * RkISP1 IPA Module
+ */
+
+#pragma once
+
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/ipa/rkisp1_ipa_interface.h>
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+#include "params.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1 {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo,
+ RkISP1Params, rkisp1_stat_buffer>;
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/params.cpp b/src/ipa/rkisp1/params.cpp
new file mode 100644
index 00000000..4c0b051c
--- /dev/null
+++ b/src/ipa/rkisp1/params.cpp
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 ISP Parameters
+ */
+
+#include "params.h"
+
+#include <map>
+#include <stddef.h>
+#include <string.h>
+
+#include <linux/rkisp1-config.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RkISP1Params)
+
+namespace ipa::rkisp1 {
+
+namespace {
+
+struct BlockTypeInfo {
+ enum rkisp1_ext_params_block_type type;
+ size_t size;
+ size_t offset;
+ uint32_t enableBit;
+};
+
+#define RKISP1_BLOCK_TYPE_ENTRY(block, id, type, category, bit) \
+ { BlockType::block, { \
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \
+ sizeof(struct rkisp1_cif_isp_##type##_config), \
+ offsetof(struct rkisp1_params_cfg, category.type##_config), \
+ RKISP1_CIF_ISP_MODULE_##bit, \
+ } }
+
+#define RKISP1_BLOCK_TYPE_ENTRY_MEAS(block, id, type) \
+ RKISP1_BLOCK_TYPE_ENTRY(block, id##_MEAS, type, meas, id)
+
+#define RKISP1_BLOCK_TYPE_ENTRY_OTHERS(block, id, type) \
+ RKISP1_BLOCK_TYPE_ENTRY(block, id, type, others, id)
+
+#define RKISP1_BLOCK_TYPE_ENTRY_EXT(block, id, type) \
+ { BlockType::block, { \
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \
+ sizeof(struct rkisp1_cif_isp_##type##_config), \
+ 0, 0, \
+ } }
+
+const std::map<BlockType, BlockTypeInfo> kBlockTypeInfo = {
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bls, BLS, bls),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpcc, DPCC, dpcc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Sdg, SDG, sdg),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(AwbGain, AWB_GAIN, awb_gain),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Flt, FLT, flt),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bdm, BDM, bdm),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ctk, CTK, ctk),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Goc, GOC, goc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpf, DPF, dpf),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(DpfStrength, DPF_STRENGTH, dpf_strength),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Cproc, CPROC, cproc),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ie, IE, ie),
+ RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Lsc, LSC, lsc),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Awb, AWB, awb_meas),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Hst, HST, hst),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Aec, AEC, aec),
+ RKISP1_BLOCK_TYPE_ENTRY_MEAS(Afc, AFC, afc),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandBls, COMPAND_BLS, compand_bls),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandExpand, COMPAND_EXPAND, compand_curve),
+ RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandCompress, COMPAND_COMPRESS, compand_curve),
+};
+
+} /* namespace */
+
+RkISP1ParamsBlockBase::RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type,
+ const Span<uint8_t> &data)
+ : params_(params), type_(type)
+{
+ if (params_->format() == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) {
+ header_ = data.subspan(0, sizeof(rkisp1_ext_params_block_header));
+ data_ = data.subspan(sizeof(rkisp1_ext_params_block_header));
+ } else {
+ data_ = data;
+ }
+}
+
+void RkISP1ParamsBlockBase::setEnabled(bool enabled)
+{
+ /*
+ * For the legacy fixed format, blocks are enabled in the top-level
+ * header. Delegate to the RkISP1Params class.
+ */
+ if (params_->format() == V4L2_META_FMT_RK_ISP1_PARAMS)
+ return params_->setBlockEnabled(type_, enabled);
+
+ /*
+ * For the extensible format, set the enable and disable flags in the
+ * block header directly.
+ */
+ struct rkisp1_ext_params_block_header *header =
+ reinterpret_cast<struct rkisp1_ext_params_block_header *>(header_.data());
+ header->flags &= ~(RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE |
+ RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE);
+ header->flags |= enabled ? RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE
+ : RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE;
+}
+
+RkISP1Params::RkISP1Params(uint32_t format, Span<uint8_t> data)
+ : format_(format), data_(data), used_(0)
+{
+ if (format_ == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) {
+ struct rkisp1_ext_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_ext_params_cfg *>(data.data());
+
+ cfg->version = RKISP1_EXT_PARAM_BUFFER_V1;
+ cfg->data_size = 0;
+
+ used_ += offsetof(struct rkisp1_ext_params_cfg, data);
+ } else {
+ memset(data.data(), 0, data.size());
+ used_ = sizeof(struct rkisp1_params_cfg);
+ }
+}
+
+void RkISP1Params::setBlockEnabled(BlockType type, bool enabled)
+{
+ const BlockTypeInfo &info = kBlockTypeInfo.at(type);
+
+ struct rkisp1_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_params_cfg *>(data_.data());
+ if (enabled)
+ cfg->module_ens |= info.enableBit;
+ else
+ cfg->module_ens &= ~info.enableBit;
+}
+
+Span<uint8_t> RkISP1Params::block(BlockType type)
+{
+ auto infoIt = kBlockTypeInfo.find(type);
+ if (infoIt == kBlockTypeInfo.end()) {
+ LOG(RkISP1Params, Error)
+ << "Invalid parameters block type "
+ << utils::to_underlying(type);
+ return {};
+ }
+
+ const BlockTypeInfo &info = infoIt->second;
+
+ /*
+ * For the legacy format, return a block referencing the fixed location
+ * of the data.
+ */
+ if (format_ == V4L2_META_FMT_RK_ISP1_PARAMS) {
+ /*
+ * Blocks available only in extended parameters have an offset
+ * of 0. Return an empty span in that case.
+ */
+ if (info.offset == 0) {
+ LOG(RkISP1Params, Error)
+ << "Block type " << utils::to_underlying(type)
+ << " unavailable in fixed parameters format";
+ return {};
+ }
+
+ struct rkisp1_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_params_cfg *>(data_.data());
+
+ cfg->module_cfg_update |= info.enableBit;
+ cfg->module_en_update |= info.enableBit;
+
+ return data_.subspan(info.offset, info.size);
+ }
+
+ /*
+ * For the extensible format, allocate memory for the block, including
+ * the header. Look up the block in the cache first. If an algorithm
+ * requests the same block type twice, it should get the same block.
+ */
+ auto cacheIt = blocks_.find(type);
+ if (cacheIt != blocks_.end())
+ return cacheIt->second;
+
+ /* Make sure we don't run out of space. */
+ size_t size = sizeof(struct rkisp1_ext_params_block_header)
+ + ((info.size + 7) & ~7);
+ if (size > data_.size() - used_) {
+ LOG(RkISP1Params, Error)
+ << "Out of memory to allocate block type "
+ << utils::to_underlying(type);
+ return {};
+ }
+
+ /* Allocate a new block, clear its memory, and initialize its header. */
+ Span<uint8_t> block = data_.subspan(used_, size);
+ used_ += size;
+
+ struct rkisp1_ext_params_cfg *cfg =
+ reinterpret_cast<struct rkisp1_ext_params_cfg *>(data_.data());
+ cfg->data_size += size;
+
+ memset(block.data(), 0, block.size());
+
+ struct rkisp1_ext_params_block_header *header =
+ reinterpret_cast<struct rkisp1_ext_params_block_header *>(block.data());
+ header->type = info.type;
+ header->size = block.size();
+
+ /* Update the cache. */
+ blocks_[type] = block;
+
+ return block;
+}
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
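Editor's note: to make the size bookkeeping in RkISP1Params::block() concrete,
each extensible-format block is the payload rounded up to an 8-byte boundary,
plus the block header. A worked example with a hypothetical 42-byte payload
(the header size below is a stand-in for
sizeof(struct rkisp1_ext_params_block_header)):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t headerSize = 8; /* stand-in for the real header size */
        const size_t payload = 42;   /* hypothetical block config size */

        /* Same rounding as in block(): (info.size + 7) & ~7 */
        size_t aligned = (payload + 7) & ~static_cast<size_t>(7); /* -> 48 */
        size_t total = headerSize + aligned;                      /* -> 56 */

        printf("aligned payload: %zu, total block size: %zu\n",
               aligned, total);
        return 0;
    }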
diff --git a/src/ipa/rkisp1/params.h b/src/ipa/rkisp1/params.h
new file mode 100644
index 00000000..40450e34
--- /dev/null
+++ b/src/ipa/rkisp1/params.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * RkISP1 ISP Parameters
+ */
+
+#pragma once
+
+#include <map>
+#include <stdint.h>
+
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+namespace ipa::rkisp1 {
+
+enum class BlockType {
+ Bls,
+ Dpcc,
+ Sdg,
+ AwbGain,
+ Flt,
+ Bdm,
+ Ctk,
+ Goc,
+ Dpf,
+ DpfStrength,
+ Cproc,
+ Ie,
+ Lsc,
+ Awb,
+ Hst,
+ Aec,
+ Afc,
+ CompandBls,
+ CompandExpand,
+ CompandCompress,
+};
+
+namespace details {
+
+template<BlockType B>
+struct block_type {
+};
+
+#define RKISP1_DEFINE_BLOCK_TYPE(blockType, blockStruct) \
+template<> \
+struct block_type<BlockType::blockType> { \
+ using type = struct rkisp1_cif_isp_##blockStruct##_config; \
+};
+
+RKISP1_DEFINE_BLOCK_TYPE(Bls, bls)
+RKISP1_DEFINE_BLOCK_TYPE(Dpcc, dpcc)
+RKISP1_DEFINE_BLOCK_TYPE(Sdg, sdg)
+RKISP1_DEFINE_BLOCK_TYPE(AwbGain, awb_gain)
+RKISP1_DEFINE_BLOCK_TYPE(Flt, flt)
+RKISP1_DEFINE_BLOCK_TYPE(Bdm, bdm)
+RKISP1_DEFINE_BLOCK_TYPE(Ctk, ctk)
+RKISP1_DEFINE_BLOCK_TYPE(Goc, goc)
+RKISP1_DEFINE_BLOCK_TYPE(Dpf, dpf)
+RKISP1_DEFINE_BLOCK_TYPE(DpfStrength, dpf_strength)
+RKISP1_DEFINE_BLOCK_TYPE(Cproc, cproc)
+RKISP1_DEFINE_BLOCK_TYPE(Ie, ie)
+RKISP1_DEFINE_BLOCK_TYPE(Lsc, lsc)
+RKISP1_DEFINE_BLOCK_TYPE(Awb, awb_meas)
+RKISP1_DEFINE_BLOCK_TYPE(Hst, hst)
+RKISP1_DEFINE_BLOCK_TYPE(Aec, aec)
+RKISP1_DEFINE_BLOCK_TYPE(Afc, afc)
+RKISP1_DEFINE_BLOCK_TYPE(CompandBls, compand_bls)
+RKISP1_DEFINE_BLOCK_TYPE(CompandExpand, compand_curve)
+RKISP1_DEFINE_BLOCK_TYPE(CompandCompress, compand_curve)
+
+} /* namespace details */
+
+class RkISP1Params;
+
+class RkISP1ParamsBlockBase
+{
+public:
+ RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type,
+ const Span<uint8_t> &data);
+
+ Span<uint8_t> data() const { return data_; }
+
+ void setEnabled(bool enabled);
+
+private:
+ LIBCAMERA_DISABLE_COPY(RkISP1ParamsBlockBase)
+
+ RkISP1Params *params_;
+ BlockType type_;
+ Span<uint8_t> header_;
+ Span<uint8_t> data_;
+};
+
+template<BlockType B>
+class RkISP1ParamsBlock : public RkISP1ParamsBlockBase
+{
+public:
+ using Type = typename details::block_type<B>::type;
+
+ RkISP1ParamsBlock(RkISP1Params *params, const Span<uint8_t> &data)
+ : RkISP1ParamsBlockBase(params, B, data)
+ {
+ }
+
+ const Type *operator->() const
+ {
+ return reinterpret_cast<const Type *>(data().data());
+ }
+
+ Type *operator->()
+ {
+ return reinterpret_cast<Type *>(data().data());
+ }
+
+ const Type &operator*() const &
+ {
+ return *reinterpret_cast<const Type *>(data().data());
+ }
+
+ Type &operator*() &
+ {
+ return *reinterpret_cast<Type *>(data().data());
+ }
+};
+
+class RkISP1Params
+{
+public:
+ RkISP1Params(uint32_t format, Span<uint8_t> data);
+
+ template<BlockType B>
+ RkISP1ParamsBlock<B> block()
+ {
+ return RkISP1ParamsBlock<B>(this, block(B));
+ }
+
+ uint32_t format() const { return format_; }
+ size_t size() const { return used_; }
+
+private:
+ friend class RkISP1ParamsBlockBase;
+
+ Span<uint8_t> block(BlockType type);
+ void setBlockEnabled(BlockType type, bool enabled);
+
+ uint32_t format_;
+
+ Span<uint8_t> data_;
+ size_t used_;
+
+ std::map<BlockType, Span<uint8_t>> blocks_;
+};
+
+} /* namespace ipa::rkisp1 */
+
+} /* namespace libcamera */
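Editor's note: a hypothetical caller, showing how an algorithm would use the
typed block accessor from its prepare() hook. Everything here comes from
params.h above except enable_auto, which is a field of
struct rkisp1_cif_isp_bls_config in the kernel's rkisp1-config.h:

    #include "params.h"

    namespace libcamera::ipa::rkisp1 {

    /* Illustrative only, not part of the patch. */
    void exampleBls(RkISP1Params &params)
    {
        /* Allocates (or returns the cached) BLS block for this buffer. */
        auto bls = params.block<BlockType::Bls>();

        /* Sets module_ens or the block header flags, format-dependent. */
        bls.setEnabled(true);

        /* operator->() exposes the typed kernel configuration struct. */
        bls->enable_auto = 0;
    }

    } /* namespace libcamera::ipa::rkisp1 */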
diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp
index 438b3c66..2ffdd99b 100644
--- a/src/ipa/rkisp1/rkisp1.cpp
+++ b/src/ipa/rkisp1/rkisp1.cpp
@@ -2,103 +2,296 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - RkISP1 Image Processing Algorithms
+ * RkISP1 Image Processing Algorithms
*/
#include <algorithm>
-#include <math.h>
-#include <queue>
+#include <array>
+#include <chrono>
#include <stdint.h>
#include <string.h>
-#include <sys/mman.h>
#include <linux/rkisp1-config.h>
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-#include <ipa/rkisp1.h>
-#include <libcamera/buffer.h>
#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
-#include <libipa/ipa_interface_wrapper.h>
-#include "log.h"
-#include "utils.h"
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/rkisp1_ipa_interface.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithms/algorithm.h"
+
+#include "ipa_context.h"
+#include "params.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPARkISP1)
-class IPARkISP1 : public IPAInterface
+using namespace std::literals::chrono_literals;
+
+namespace ipa::rkisp1 {
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+class IPARkISP1 : public IPARkISP1Interface, public Module
{
public:
- int init() override { return 0; }
-
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override;
+ IPARkISP1();
+
+ int init(const IPASettings &settings, unsigned int hwRevision,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls) override;
+ int start() override;
+ void stop() override;
+
+ int configure(const IPAConfigInfo &ipaConfig,
+ const std::map<uint32_t, IPAStream> &streamConfig,
+ ControlInfoMap *ipaControls) override;
void mapBuffers(const std::vector<IPABuffer> &buffers) override;
void unmapBuffers(const std::vector<unsigned int> &ids) override;
- void processEvent(const IPAOperationData &event) override;
-private:
- void queueRequest(unsigned int frame, rkisp1_isp_params_cfg *params,
- const ControlList &controls);
- void updateStatistics(unsigned int frame,
- const rkisp1_stat_buffer *stats);
+ void queueRequest(const uint32_t frame, const ControlList &controls) override;
+ void computeParams(const uint32_t frame, const uint32_t bufferId) override;
+ void processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls) override;
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls);
void setControls(unsigned int frame);
- void metadataReady(unsigned int frame, unsigned int aeState);
std::map<unsigned int, FrameBuffer> buffers_;
- std::map<unsigned int, void *> buffersMemory_;
-
- ControlInfoMap ctrls_;
-
- /* Camera sensor controls. */
- bool autoExposure_;
- uint32_t exposure_;
- uint32_t minExposure_;
- uint32_t maxExposure_;
- uint32_t gain_;
- uint32_t minGain_;
- uint32_t maxGain_;
+ std::map<unsigned int, MappedFrameBuffer> mappedBuffers_;
+
+ ControlInfoMap sensorControls_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+namespace {
+
+const IPAHwSettings ipaHwSettingsV10{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V10,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10,
+ false,
+};
+
+const IPAHwSettings ipaHwSettingsIMX8MP{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V10,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10,
+ true,
+};
+
+const IPAHwSettings ipaHwSettingsV12{
+ RKISP1_CIF_ISP_AE_MEAN_MAX_V12,
+ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12,
+ RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12,
+ RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12,
+ false,
+};
+
+/* List of controls handled by the RkISP1 IPA */
+const ControlInfoMap::Map rkisp1Controls{
+ { &controls::AwbEnable, ControlInfo(false, true) },
+ { &controls::ColourGains, ControlInfo(0.0f, 3.996f, 1.0f) },
+ { &controls::DebugMetadataEnable, ControlInfo(false, true, false) },
+ { &controls::Sharpness, ControlInfo(0.0f, 10.0f, 1.0f) },
+ { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
};
-void IPARkISP1::configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls)
+} /* namespace */
+
+IPARkISP1::IPARkISP1()
+ : context_(kMaxFrameContexts)
{
- if (entityControls.empty())
- return;
+}
- ctrls_ = entityControls.at(0);
+std::string IPARkISP1::logPrefix() const
+{
+ return "rkisp1";
+}
- const auto itExp = ctrls_.find(V4L2_CID_EXPOSURE);
- if (itExp == ctrls_.end()) {
- LOG(IPARkISP1, Error) << "Can't find exposure control";
- return;
+int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
+ const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
+{
+ /* \todo Add support for other revisions */
+ switch (hwRevision) {
+ case RKISP1_V10:
+ context_.hw = &ipaHwSettingsV10;
+ break;
+ case RKISP1_V_IMX8MP:
+ context_.hw = &ipaHwSettingsIMX8MP;
+ break;
+ case RKISP1_V12:
+ context_.hw = &ipaHwSettingsV12;
+ break;
+ default:
+ LOG(IPARkISP1, Error)
+ << "Hardware revision " << hwRevision
+ << " is currently not supported";
+ return -ENODEV;
}
- const auto itGain = ctrls_.find(V4L2_CID_ANALOGUE_GAIN);
- if (itGain == ctrls_.end()) {
- LOG(IPARkISP1, Error) << "Can't find gain control";
- return;
+ LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision;
+
+ context_.sensorInfo = sensorInfo;
+
+ context_.camHelper = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!context_.camHelper) {
+ LOG(IPARkISP1, Error)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ return -ENODEV;
+ }
+
+ context_.configuration.sensor.lineDuration =
+ sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate;
+
+ /* Load the tuning data file. */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPARkISP1, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ if (version != 1) {
+ LOG(IPARkISP1, Error)
+ << "Invalid tuning file version " << version;
+ return -EINVAL;
}
- autoExposure_ = true;
+ if (!data->contains("algorithms")) {
+ LOG(IPARkISP1, Error)
+ << "Tuning file doesn't contain any algorithm";
+ return -EINVAL;
+ }
- minExposure_ = std::max<uint32_t>(itExp->second.min().get<int32_t>(), 1);
- maxExposure_ = itExp->second.max().get<int32_t>();
- exposure_ = minExposure_;
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
- minGain_ = std::max<uint32_t>(itGain->second.min().get<int32_t>(), 1);
- maxGain_ = itGain->second.max().get<int32_t>();
- gain_ = minGain_;
+ /* Initialize controls. */
+ updateControls(sensorInfo, sensorControls, ipaControls);
- LOG(IPARkISP1, Info)
- << "Exposure: " << minExposure_ << "-" << maxExposure_
- << " Gain: " << minGain_ << "-" << maxGain_;
+ return 0;
+}
+int IPARkISP1::start()
+{
setControls(0);
+
+ return 0;
+}
+
+void IPARkISP1::stop()
+{
+ context_.frameContexts.clear();
+}
+
+int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
+ const std::map<uint32_t, IPAStream> &streamConfig,
+ ControlInfoMap *ipaControls)
+{
+ sensorControls_ = ipaConfig.sensorControls;
+
+ const auto itExp = sensorControls_.find(V4L2_CID_EXPOSURE);
+ int32_t minExposure = itExp->second.min().get<int32_t>();
+ int32_t maxExposure = itExp->second.max().get<int32_t>();
+
+ const auto itGain = sensorControls_.find(V4L2_CID_ANALOGUE_GAIN);
+ int32_t minGain = itGain->second.min().get<int32_t>();
+ int32_t maxGain = itGain->second.max().get<int32_t>();
+
+ LOG(IPARkISP1, Debug)
+ << "Exposure: [" << minExposure << ", " << maxExposure
+ << "], gain: [" << minGain << ", " << maxGain << "]";
+
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
+
+ context_.configuration.paramFormat = ipaConfig.paramFormat;
+
+ const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
+ const ControlInfo vBlank = sensorControls_.find(V4L2_CID_VBLANK)->second;
+ context_.configuration.sensor.defVBlank = vBlank.def().get<int32_t>();
+ context_.configuration.sensor.size = info.outputSize;
+ context_.configuration.sensor.lineDuration = info.minLineLength * 1.0s / info.pixelRate;
+
+ /* Update the camera controls using the new sensor settings. */
+ updateControls(info, sensorControls_, ipaControls);
+
+ /*
+ * When the AGC computes the new exposure values for a frame, it needs
+	 * to know the limits for exposure time and analogue gain. As these
+	 * depend on the sensor, update them from the sensor controls.
+ *
+ * \todo take VBLANK into account for maximum exposure time
+ */
+ context_.configuration.sensor.minExposureTime =
+ minExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.sensor.maxExposureTime =
+ maxExposure * context_.configuration.sensor.lineDuration;
+ context_.configuration.sensor.minAnalogueGain =
+ context_.camHelper->gain(minGain);
+ context_.configuration.sensor.maxAnalogueGain =
+ context_.camHelper->gain(maxGain);
+
+ context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(),
+ [](auto &cfg) -> bool {
+ PixelFormat pixelFormat{ cfg.second.pixelFormat };
+ const PixelFormatInfo &format = PixelFormatInfo::info(pixelFormat);
+ return format.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ });
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+ /* Disable algorithms that don't support raw formats. */
+ algo->disabled_ = context_.configuration.raw && !algo->supportsRaw_;
+ if (algo->disabled_)
+ continue;
+
+ int ret = algo->configure(context_, info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void IPARkISP1::mapBuffers(const std::vector<IPABuffer> &buffers)
@@ -109,22 +302,13 @@ void IPARkISP1::mapBuffers(const std::vector<IPABuffer> &buffers)
std::forward_as_tuple(buffer.planes));
const FrameBuffer &fb = elem.first->second;
- /*
- * \todo Provide a helper to mmap() buffers (possibly exposed
- * to applications).
- */
- buffersMemory_[buffer.id] = mmap(NULL,
- fb.planes()[0].length,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- fb.planes()[0].fd.fd(),
- 0);
-
- if (buffersMemory_[buffer.id] == MAP_FAILED) {
- int ret = -errno;
+ MappedFrameBuffer mappedBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite);
+ if (!mappedBuffer.isValid()) {
LOG(IPARkISP1, Fatal) << "Failed to mmap buffer: "
- << strerror(-ret);
+ << strerror(mappedBuffer.error());
}
+
+ mappedBuffers_.emplace(buffer.id, std::move(mappedBuffer));
}
}
@@ -135,134 +319,150 @@ void IPARkISP1::unmapBuffers(const std::vector<unsigned int> &ids)
if (fb == buffers_.end())
continue;
- munmap(buffersMemory_[id], fb->second.planes()[0].length);
- buffersMemory_.erase(id);
+ mappedBuffers_.erase(id);
buffers_.erase(id);
}
}
-void IPARkISP1::processEvent(const IPAOperationData &event)
+void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls)
{
- switch (event.operation) {
- case RKISP1_IPA_EVENT_SIGNAL_STAT_BUFFER: {
- unsigned int frame = event.data[0];
- unsigned int bufferId = event.data[1];
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+ context_.debugMetadata.enableByControl(controls);
- const rkisp1_stat_buffer *stats =
- static_cast<rkisp1_stat_buffer *>(buffersMemory_[bufferId]);
-
- updateStatistics(frame, stats);
- break;
- }
- case RKISP1_IPA_EVENT_QUEUE_REQUEST: {
- unsigned int frame = event.data[0];
- unsigned int bufferId = event.data[1];
-
- rkisp1_isp_params_cfg *params =
- static_cast<rkisp1_isp_params_cfg *>(buffersMemory_[bufferId]);
-
- queueRequest(frame, params, event.controls[0]);
- break;
- }
- default:
- LOG(IPARkISP1, Error) << "Unkown event " << event.operation;
- break;
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+ if (algo->disabled_)
+ continue;
+ algo->queueRequest(context_, frame, frameContext, controls);
}
}
-void IPARkISP1::queueRequest(unsigned int frame, rkisp1_isp_params_cfg *params,
- const ControlList &controls)
+void IPARkISP1::computeParams(const uint32_t frame, const uint32_t bufferId)
{
- /* Prepare parameters buffer. */
- memset(params, 0, sizeof(*params));
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
- /* Auto Exposure on/off. */
- if (controls.contains(controls::AeEnable)) {
- autoExposure_ = controls.get(controls::AeEnable);
- if (autoExposure_)
- params->module_ens = CIFISP_MODULE_AEC;
+ RkISP1Params params(context_.configuration.paramFormat,
+ mappedBuffers_.at(bufferId).planes()[0]);
- params->module_en_update = CIFISP_MODULE_AEC;
- }
-
- IPAOperationData op;
- op.operation = RKISP1_IPA_ACTION_PARAM_FILLED;
+ for (auto const &algo : algorithms())
+ algo->prepare(context_, frame, frameContext, &params);
- queueFrameAction.emit(frame, op);
+ paramsComputed.emit(frame, params.size());
}
-void IPARkISP1::updateStatistics(unsigned int frame,
- const rkisp1_stat_buffer *stats)
+void IPARkISP1::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
{
- const cifisp_stat *params = &stats->params;
- unsigned int aeState = 0;
-
- if (stats->meas_type & CIFISP_STAT_AUTOEXP) {
- const cifisp_ae_stat *ae = &params->ae;
-
- const unsigned int target = 60;
-
- unsigned int value = 0;
- unsigned int num = 0;
- for (int i = 0; i < CIFISP_AE_MEAN_MAX; i++) {
- if (ae->exp_mean[i] <= 15)
- continue;
-
- value += ae->exp_mean[i];
- num++;
- }
- value /= num;
-
- double factor = (double)target / value;
-
- if (frame % 3 == 0) {
- double exposure;
-
- exposure = factor * exposure_ * gain_ / minGain_;
- exposure_ = utils::clamp<uint64_t>((uint64_t)exposure,
- minExposure_,
- maxExposure_);
-
- exposure = exposure / exposure_ * minGain_;
- gain_ = utils::clamp<uint64_t>((uint64_t)exposure,
- minGain_, maxGain_);
-
- setControls(frame + 1);
- }
-
- aeState = fabs(factor - 1.0f) < 0.05f ? 2 : 1;
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+ /*
+ * In raw capture mode, the ISP is bypassed and no statistics buffer is
+ * provided.
+ */
+ const rkisp1_stat_buffer *stats = nullptr;
+ if (!context_.configuration.raw)
+ stats = reinterpret_cast<rkisp1_stat_buffer *>(
+ mappedBuffers_.at(bufferId).planes()[0].data());
+
+ frameContext.sensor.exposure =
+ sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ frameContext.sensor.gain =
+ context_.camHelper->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+
+ ControlList metadata(controls::controls);
+
+ for (auto const &a : algorithms()) {
+ Algorithm *algo = static_cast<Algorithm *>(a.get());
+ if (algo->disabled_)
+ continue;
+ algo->process(context_, frame, frameContext, stats, metadata);
}
- metadataReady(frame, aeState);
+ setControls(frame);
+
+ context_.debugMetadata.moveEntries(metadata);
+ metadataReady.emit(frame, metadata);
}
-void IPARkISP1::setControls(unsigned int frame)
+void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
+ const ControlInfoMap &sensorControls,
+ ControlInfoMap *ipaControls)
{
- IPAOperationData op;
- op.operation = RKISP1_IPA_ACTION_V4L2_SET;
+ ControlInfoMap::Map ctrlMap = rkisp1Controls;
+
+ /*
+ * Compute exposure time limits from the V4L2_CID_EXPOSURE control
+ * limits and the line duration.
+ */
+ double lineDuration = context_.configuration.sensor.lineDuration.get<std::micro>();
+ const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+ int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+ int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+ int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+ ctrlMap.emplace(std::piecewise_construct,
+ std::forward_as_tuple(&controls::ExposureTime),
+ std::forward_as_tuple(minExposure, maxExposure, defExposure));
+
+ /* Compute the analogue gain limits. */
+ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+ float minGain = context_.camHelper->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = context_.camHelper->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = context_.camHelper->gain(v4l2Gain.def().get<int32_t>());
+ ctrlMap.emplace(std::piecewise_construct,
+ std::forward_as_tuple(&controls::AnalogueGain),
+ std::forward_as_tuple(minGain, maxGain, defGain));
+
+ /*
+ * Compute the frame duration limits.
+ *
+ * The frame length is computed assuming a fixed line length combined
+ * with the vertical frame sizes.
+ */
+ const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+ uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+ uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+ const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+ std::array<uint32_t, 3> frameHeights{
+ v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+ v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+ };
+
+ std::array<int64_t, 3> frameDurations;
+ for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+ uint64_t frameSize = lineLength * frameHeights[i];
+ frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+ }
- ControlList ctrls(ctrls_);
- ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure_));
- ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain_));
- op.controls.push_back(ctrls);
+ ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+ frameDurations[1],
+ frameDurations[2]);
- queueFrameAction.emit(frame, op);
+ ctrlMap.insert(context_.ctrlMap.begin(), context_.ctrlMap.end());
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
}
-void IPARkISP1::metadataReady(unsigned int frame, unsigned int aeState)
+void IPARkISP1::setControls(unsigned int frame)
{
- ControlList ctrls(controls::controls);
+ /*
+ * \todo The frame number is most likely wrong here, we need to take
+ * internal sensor delays and other timing parameters into account.
+ */
- if (aeState)
- ctrls.set(controls::AeLocked, aeState == 2);
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+ uint32_t exposure = frameContext.agc.exposure;
+ uint32_t gain = context_.camHelper->gainCode(frameContext.agc.gain);
- IPAOperationData op;
- op.operation = RKISP1_IPA_ACTION_METADATA;
- op.controls.push_back(ctrls);
+ ControlList ctrls(sensorControls_);
+ ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain));
- queueFrameAction.emit(frame, op);
+ setSensorControls.emit(frame, ctrls);
}
+} /* namespace ipa::rkisp1 */
+
/*
* External IPA module interface
*/
@@ -271,14 +471,13 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
1,
- "PipelineHandlerRkISP1",
- "RkISP1 IPA",
- "LGPL-2.1-or-later",
+ "rkisp1",
+ "rkisp1",
};
-struct ipa_context *ipaCreate()
+IPAInterface *ipaCreate()
{
- return new IPAInterfaceWrapper(std::make_unique<IPARkISP1>());
+ return new ipa::rkisp1::IPARkISP1();
}
}
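
The updateControls() hunk above derives FrameDurationLimits by combining a fixed line length (width plus default horizontal blanking) with the vertical blanking limits. A minimal standalone sketch of the same integer arithmetic, using hypothetical 1080p-style numbers (the resolution, blanking and pixel rate below are illustrative, not taken from any real sensor mode):

#include <cstdint>
#include <iostream>

int main()
{
	/* Hypothetical mode: 1920x1080, 280px hblank, 20-line vblank. */
	const uint32_t width = 1920, height = 1080;
	const uint32_t hblank = 280, vblank = 20;
	const uint64_t pixelRate = 74250000; /* pixels per second */

	uint64_t lineLength = width + hblank;                /* pixels per line */
	uint64_t frameSize = lineLength * (height + vblank); /* pixels per frame */

	/* As in the IPA: divide by the pixel rate in MHz to get microseconds. */
	uint64_t frameDuration = frameSize / (pixelRate / 1000000U);
	std::cout << frameDuration << "us\n"; /* 2200 * 1100 / 74 = 32702us */

	return 0;
}
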
diff --git a/src/ipa/rpi/README.md b/src/ipa/rpi/README.md
new file mode 100644
index 00000000..94a8ccc8
--- /dev/null
+++ b/src/ipa/rpi/README.md
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: BSD-2-Clause
+
+# _libcamera_ for the Raspberry Pi
+
+Raspberry Pi provides a fully featured pipeline handler and control algorithms
+(IPAs, or "Image Processing Algorithms") to work with _libcamera_. Support is
+included for all existing Raspberry Pi camera modules.
+
+_libcamera_ for the Raspberry Pi allows users to:
+
+1. Use their existing Raspberry Pi cameras.
+1. Change the tuning of the image processing for their Raspberry Pi cameras.
+1. Alter or amend the control algorithms (such as AGC/AEC, AWB or any others)
+ that control the sensor and ISP.
+1. Implement their own custom control algorithms.
+1. Supply new tunings and/or algorithms for completely new sensors.
+
+## How to install and run _libcamera_ on the Raspberry Pi
+
+Please follow the instructions [here](https://www.raspberrypi.com/documentation/accessories/camera.html).
+
+## Documentation
+
+Full documentation for the _Raspberry Pi Camera Algorithm and Tuning Guide_ can
+be found [here](https://datasheets.raspberrypi.com/camera/raspberry-pi-camera-guide.pdf).
diff --git a/src/ipa/rpi/cam_helper/cam_helper.cpp b/src/ipa/rpi/cam_helper/cam_helper.cpp
new file mode 100644
index 00000000..a78db9c1
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper.cpp
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * helper information for different sensors
+ */
+
+#include <linux/videodev2.h>
+
+#include <limits>
+#include <map>
+#include <string.h>
+
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+namespace {
+
+std::map<std::string, CamHelperCreateFunc> &camHelpers()
+{
+ static std::map<std::string, CamHelperCreateFunc> helpers;
+ return helpers;
+}
+
+} /* namespace */
+
+CamHelper *CamHelper::create(std::string const &camName)
+{
+ /*
+ * CamHelpers get registered by static RegisterCamHelper
+ * initialisers.
+ */
+ for (auto &p : camHelpers()) {
+ if (camName.find(p.first) != std::string::npos)
+ return p.second();
+ }
+
+ return nullptr;
+}
+
+CamHelper::CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff)
+ : parser_(std::move(parser)), frameIntegrationDiff_(frameIntegrationDiff)
+{
+}
+
+CamHelper::~CamHelper()
+{
+}
+
+void CamHelper::prepare(Span<const uint8_t> buffer,
+ Metadata &metadata)
+{
+ parseEmbeddedData(buffer, metadata);
+}
+
+void CamHelper::process([[maybe_unused]] StatisticsPtr &stats,
+ [[maybe_unused]] Metadata &metadata)
+{
+}
+
+uint32_t CamHelper::exposureLines(const Duration exposure, const Duration lineLength) const
+{
+ return exposure / lineLength;
+}
+
+Duration CamHelper::exposure(uint32_t exposureLines, const Duration lineLength) const
+{
+ return exposureLines * lineLength;
+}
+
+std::pair<uint32_t, uint32_t> CamHelper::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLengthMin, frameLengthMax, vblank, hblank;
+ Duration lineLength = mode_.minLineLength;
+
+ /*
+ * minFrameDuration and maxFrameDuration are clamped by the caller
+ * based on the limits for the active sensor mode.
+ *
+ * frameLengthMax gets calculated on the smallest line length as we do
+ * not want to extend that unless absolutely necessary.
+ */
+ frameLengthMin = minFrameDuration / mode_.minLineLength;
+ frameLengthMax = maxFrameDuration / mode_.minLineLength;
+
+ /*
+ * Watch out for (exposureLines + frameIntegrationDiff_) overflowing a
+ * uint32_t in the std::clamp() below when the exposure time is
+ * extremely (extremely!) long - as happens when the IPA calculates the
+ * maximum possible exposure time.
+ */
+ uint32_t exposureLines = std::min(CamHelper::exposureLines(exposure, lineLength),
+ std::numeric_limits<uint32_t>::max() - frameIntegrationDiff_);
+ uint32_t frameLengthLines = std::clamp(exposureLines + frameIntegrationDiff_,
+ frameLengthMin, frameLengthMax);
+
+ /*
+ * If our frame length lines is above the maximum allowed, see if we can
+ * extend the line length to accommodate the requested frame length.
+ */
+ if (frameLengthLines > mode_.maxFrameLength) {
+ Duration lineLengthAdjusted = lineLength * frameLengthLines / mode_.maxFrameLength;
+ lineLength = std::min(mode_.maxLineLength, lineLengthAdjusted);
+ frameLengthLines = mode_.maxFrameLength;
+ }
+
+ hblank = lineLengthToHblank(lineLength);
+ vblank = frameLengthLines - mode_.height;
+
+ /*
+ * Limit the exposure to the maximum frame duration requested, and
+ * re-calculate if it has been clipped.
+ */
+ exposureLines = std::min(frameLengthLines - frameIntegrationDiff_,
+ CamHelper::exposureLines(exposure, lineLength));
+ exposure = CamHelper::exposure(exposureLines, lineLength);
+
+ return { vblank, hblank };
+}
+
+Duration CamHelper::hblankToLineLength(uint32_t hblank) const
+{
+ return (mode_.width + hblank) * (1.0s / mode_.pixelRate);
+}
+
+uint32_t CamHelper::lineLengthToHblank(const Duration &lineLength) const
+{
+ return (lineLength * mode_.pixelRate / 1.0s) - mode_.width;
+}
+
+Duration CamHelper::lineLengthPckToDuration(uint32_t lineLengthPck) const
+{
+ return lineLengthPck * (1.0s / mode_.pixelRate);
+}
+
+void CamHelper::setCameraMode(const CameraMode &mode)
+{
+ mode_ = mode;
+ if (parser_) {
+ parser_->reset();
+ parser_->setBitsPerPixel(mode.bitdepth);
+		parser_->setLineLengthBytes(0); /* Use the buffer size instead. */
+ }
+}
+
+void CamHelper::setHwConfig(const Controller::HardwareConfig &hwConfig)
+{
+ hwConfig_ = hwConfig;
+}
+
+bool CamHelper::sensorEmbeddedDataPresent() const
+{
+ return false;
+}
+
+double CamHelper::getModeSensitivity([[maybe_unused]] const CameraMode &mode) const
+{
+ /*
+ * Most sensors have the same sensitivity in every mode, but this
+ * function can be overridden for those that do not. Note that it is
+ * called before mode_ is set, so it must return the sensitivity
+ * of the mode that is passed in.
+ */
+ return 1.0;
+}
+
+unsigned int CamHelper::hideFramesStartup() const
+{
+ /*
+ * The number of frames when a camera first starts that shouldn't be
+ * displayed as they are invalid in some way.
+ */
+ return 0;
+}
+
+unsigned int CamHelper::hideFramesModeSwitch() const
+{
+ /* After a mode switch, many sensors return valid frames immediately. */
+ return 0;
+}
+
+unsigned int CamHelper::mistrustFramesStartup() const
+{
+ /* Many sensors return a single bad frame on start-up. */
+ return 1;
+}
+
+unsigned int CamHelper::mistrustFramesModeSwitch() const
+{
+ /* Many sensors return valid metadata immediately. */
+ return 0;
+}
+
+void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer,
+ Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ Metadata parsedMetadata;
+
+ if (buffer.empty())
+ return;
+
+ if (parser_->parse(buffer, registers) != MdParser::Status::OK) {
+ LOG(IPARPI, Error) << "Embedded data buffer parsing failed";
+ return;
+ }
+
+ populateMetadata(registers, parsedMetadata);
+ metadata.merge(parsedMetadata);
+
+ /*
+ * Overwrite the exposure/gain, line/frame length and sensor temperature values
+ * in the existing DeviceStatus with values from the parsed embedded buffer.
+ * Fetch it first in case any other fields were set meaningfully.
+ */
+ DeviceStatus deviceStatus, parsedDeviceStatus;
+ if (metadata.get("device.status", deviceStatus) ||
+ parsedMetadata.get("device.status", parsedDeviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found";
+ return;
+ }
+
+ deviceStatus.exposureTime = parsedDeviceStatus.exposureTime;
+ deviceStatus.analogueGain = parsedDeviceStatus.analogueGain;
+ deviceStatus.frameLength = parsedDeviceStatus.frameLength;
+ deviceStatus.lineLength = parsedDeviceStatus.lineLength;
+ if (parsedDeviceStatus.sensorTemperature)
+ deviceStatus.sensorTemperature = parsedDeviceStatus.sensorTemperature;
+
+ LOG(IPARPI, Debug) << "Metadata updated - " << deviceStatus;
+
+ metadata.set("device.status", deviceStatus);
+}
+
+void CamHelper::populateMetadata([[maybe_unused]] const MdParser::RegisterMap &registers,
+ [[maybe_unused]] Metadata &metadata) const
+{
+}
+
+RegisterCamHelper::RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc)
+{
+ camHelpers()[std::string(camName)] = createFunc;
+}
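
The registration mechanism above depends on static RegisterCamHelper objects inserting a factory function into the camHelpers() map before CamHelper::create() runs. A minimal sketch of what a new helper has to supply; the "mysensor" name, the x16 linear gain model and the frame integration difference of 4 lines are all hypothetical:

#include "cam_helper.h"

using namespace RPiController;

class CamHelperMySensor : public CamHelper
{
public:
	/* No embedded-data parser; 4 lines between frame length and exposure. */
	CamHelperMySensor()
		: CamHelper({}, 4)
	{
	}

	/* Hypothetical linear gain model: code = 16 * gain. */
	uint32_t gainCode(double gain) const override
	{
		return static_cast<uint32_t>(gain * 16.0);
	}

	double gain(uint32_t gainCode) const override
	{
		return static_cast<double>(gainCode) / 16.0;
	}
};

static CamHelper *create()
{
	return new CamHelperMySensor();
}

/*
 * The static initialiser runs at load time, so a subsequent
 * CamHelper::create("mysensor") finds this helper by substring match.
 */
static RegisterCamHelper reg("mysensor", &create);
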
diff --git a/src/ipa/rpi/cam_helper/cam_helper.h b/src/ipa/rpi/cam_helper/cam_helper.h
new file mode 100644
index 00000000..4a826690
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * helper class providing camera information
+ */
+#pragma once
+
+#include <memory>
+#include <string>
+#include <utility>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include "controller/camera_mode.h"
+#include "controller/controller.h"
+#include "controller/metadata.h"
+#include "md_parser.h"
+
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace RPiController {
+
+/*
+ * The CamHelper class provides a number of facilities that anyone trying
+ * to drive a camera will need to know, but which are not provided by the
+ * standard driver framework. Specifically, it provides:
+ *
+ * A "CameraMode" structure to describe extra information about the chosen
+ * mode of the driver. For example, how it is cropped from the full sensor
+ * area, how it is scaled, whether pixels are averaged compared to the full
+ * resolution.
+ *
+ * The ability to convert between number of lines of exposure and actual
+ * exposure time, and to convert between the sensor's gain codes and actual
+ * gains.
+ *
+ * A function to query if the sensor outputs embedded data that can be parsed.
+ *
+ * A function to return the sensitivity of a given camera mode.
+ *
+ * A parser to parse the embedded data buffers provided by some sensors (for
+ * example, the imx219 does; the ov5647 doesn't). This allows us to know for
+ * sure the exposure and gain of the frame we're looking at. CamHelper
+ * provides functions for converting analogue gains to and from the sensor's
+ * native gain codes.
+ *
+ * Finally, a set of functions that determine how to handle the vagaries of
+ * different camera modules on start-up or when switching modes. Some
+ * modules may produce one or more frames that are not yet correctly exposed,
+ * or where the metadata may be suspect. We have the following functions:
+ * hideFramesStartup(): Tell the pipeline handler not to return this many
+ *     frames at start-up. This can also be used to hide initial frames
+ *     while the AGC and other algorithms are sorting themselves out.
+ * hideFramesModeSwitch(): Tell the pipeline handler not to return this
+ *     many frames after a mode switch (other than start-up). Some sensors
+ *     may produce invalid frames after a mode switch; others may not.
+ * mistrustFramesStartup(): At start-up a sensor may return frames for
+ *     which we should not run any control algorithms (for example, metadata
+ *     may be invalid).
+ * mistrustFramesModeSwitch(): The number of frames, after a mode switch
+ *     (other than start-up), for which control algorithms should not run
+ *     (for example, metadata may be unreliable).
+ */
+
+class CamHelper
+{
+public:
+ static CamHelper *create(std::string const &camName);
+ CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
+ virtual ~CamHelper();
+ void setCameraMode(const CameraMode &mode);
+ void setHwConfig(const Controller::HardwareConfig &hwConfig);
+ virtual void prepare(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void process(StatisticsPtr &stats, Metadata &metadata);
+ virtual uint32_t exposureLines(const libcamera::utils::Duration exposure,
+ const libcamera::utils::Duration lineLength) const;
+ virtual libcamera::utils::Duration exposure(uint32_t exposureLines,
+ const libcamera::utils::Duration lineLength) const;
+ virtual std::pair<uint32_t, uint32_t> getBlanking(libcamera::utils::Duration &exposure,
+ libcamera::utils::Duration minFrameDuration,
+ libcamera::utils::Duration maxFrameDuration) const;
+ libcamera::utils::Duration hblankToLineLength(uint32_t hblank) const;
+ uint32_t lineLengthToHblank(const libcamera::utils::Duration &duration) const;
+ libcamera::utils::Duration lineLengthPckToDuration(uint32_t lineLengthPck) const;
+ virtual uint32_t gainCode(double gain) const = 0;
+ virtual double gain(uint32_t gainCode) const = 0;
+ virtual bool sensorEmbeddedDataPresent() const;
+ virtual double getModeSensitivity(const CameraMode &mode) const;
+ virtual unsigned int hideFramesStartup() const;
+ virtual unsigned int hideFramesModeSwitch() const;
+ virtual unsigned int mistrustFramesStartup() const;
+ virtual unsigned int mistrustFramesModeSwitch() const;
+
+protected:
+ void parseEmbeddedData(libcamera::Span<const uint8_t> buffer,
+ Metadata &metadata);
+ virtual void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const;
+
+ std::unique_ptr<MdParser> parser_;
+ CameraMode mode_;
+ Controller::HardwareConfig hwConfig_;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ unsigned int frameIntegrationDiff_;
+};
+
+/*
+ * This is for registering camera helpers with the system, so that the
+ * CamHelper::create() function picks them up automatically.
+ */
+
+typedef CamHelper *(*CamHelperCreateFunc)();
+struct RegisterCamHelper
+{
+ RegisterCamHelper(char const *camName,
+ CamHelperCreateFunc createFunc);
+};
+
+} /* namespace RPiController */
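
The exposureLines()/exposure() pair declared above are plain divisions and multiplications by the line length, so they invert each other up to integer truncation. A short usage sketch, assuming the imx219 helper is registered and an illustrative 20us line length:

#include <memory>

#include "cam_helper.h"

using namespace RPiController;
using libcamera::utils::Duration;
using namespace std::literals::chrono_literals;

int main()
{
	std::unique_ptr<CamHelper> helper(CamHelper::create("imx219"));
	if (!helper)
		return 1;

	Duration lineLength = 20us;
	/* 10ms of exposure at 20us per line is 500 lines... */
	uint32_t lines = helper->exposureLines(10ms, lineLength);
	/* ...and converting back recovers the 10ms. */
	Duration exposure = helper->exposure(lines, lineLength);

	return (lines == 500 && exposure == 10ms) ? 0 : 1;
}
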
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
new file mode 100644
index 00000000..ba01153e
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * camera helper for imx219 sensor
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/*
+ * We have observed that the imx219 embedded data stream randomly returns junk
+ * register values. Do not rely on embedded data until this has been resolved.
+ */
+#define ENABLE_EMBEDDED_DATA 0
+
+#include "cam_helper.h"
+#if ENABLE_EMBEDDED_DATA
+#include "md_parser.h"
+#endif
+
+using namespace RPiController;
+
+/*
+ * We care about one gain register and a pair of exposure registers. Their I2C
+ * addresses from the Sony IMX219 datasheet:
+ */
+constexpr uint32_t gainReg = 0x157;
+constexpr uint32_t expHiReg = 0x15a;
+constexpr uint32_t expLoReg = 0x15b;
+constexpr uint32_t frameLengthHiReg = 0x160;
+constexpr uint32_t frameLengthLoReg = 0x161;
+constexpr uint32_t lineLengthHiReg = 0x162;
+constexpr uint32_t lineLengthLoReg = 0x163;
+constexpr std::initializer_list<uint32_t> registerList [[maybe_unused]]
+ = { expHiReg, expLoReg, gainReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg };
+
+class CamHelperImx219 : public CamHelper
+{
+public:
+ CamHelperImx219();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int mistrustFramesModeSwitch() const override;
+ bool sensorEmbeddedDataPresent() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+
+ void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const override;
+};
+
+CamHelperImx219::CamHelperImx219()
+#if ENABLE_EMBEDDED_DATA
+ : CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
+#else
+ : CamHelper({}, frameIntegrationDiff)
+#endif
+{
+}
+
+uint32_t CamHelperImx219::gainCode(double gain) const
+{
+ return (uint32_t)(256 - 256 / gain);
+}
+
+double CamHelperImx219::gain(uint32_t gainCode) const
+{
+ return 256.0 / (256 - gainCode);
+}
+
+unsigned int CamHelperImx219::mistrustFramesModeSwitch() const
+{
+ /*
+ * For reasons unknown, we do occasionally get a bogus metadata frame
+ * at a mode switch (though not at start-up). Possibly warrants some
+ * investigation, though not a big deal.
+ */
+ return 1;
+}
+
+bool CamHelperImx219::sensorEmbeddedDataPresent() const
+{
+ return ENABLE_EMBEDDED_DATA;
+}
+
+void CamHelperImx219::populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const
+{
+ DeviceStatus deviceStatus;
+
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+
+ metadata.set("device.status", deviceStatus);
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx219();
+}
+
+static RegisterCamHelper reg("imx219", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp
new file mode 100644
index 00000000..efc03193
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2024, Raspberry Pi Ltd
+ *
+ * camera information for imx283 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx283 : public CamHelper
+{
+public:
+ CamHelperImx283();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * The imx283 doesn't output metadata, so we have to use delayed controls,
+ * which work by counting frames.
+ */
+
+CamHelperImx283::CamHelperImx283()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx283::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(2048.0 - 2048.0 / gain);
+}
+
+double CamHelperImx283::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(2048.0 / (2048 - gainCode));
+}
+
+unsigned int CamHelperImx283::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx283();
+}
+
+static RegisterCamHelper reg("imx283", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
new file mode 100644
index 00000000..c1aa8528
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera helper for imx290 sensor
+ */
+
+#include <cmath>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx290 : public CamHelper
+{
+public:
+ CamHelperImx290();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 2;
+};
+
+CamHelperImx290::CamHelperImx290()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx290::gainCode(double gain) const
+{
+ int code = 66.6667 * std::log10(gain);
+ return std::max(0, std::min(code, 0xf0));
+}
+
+double CamHelperImx290::gain(uint32_t gainCode) const
+{
+ return std::pow(10, 0.015 * gainCode);
+}
+
+unsigned int CamHelperImx290::hideFramesStartup() const
+{
+ /* On startup, we seem to get 1 bad frame. */
+ return 1;
+}
+
+unsigned int CamHelperImx290::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx290();
+}
+
+static RegisterCamHelper reg("imx290", &create);
+static RegisterCamHelper reg327("imx327", &create);
+static RegisterCamHelper reg462("imx462", &create);
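
The constants above describe a logarithmic gain register: 66.6667 is 1/0.015, so one code step is 0.015 in log10 terms, i.e. 0.3dB. A numeric sketch of the round trip; the x8 request is illustrative:

#include <cmath>
#include <iostream>

int main()
{
	double requested = 8.0; /* x8 analogue gain, roughly 18dB */

	/* Encode: 0.3dB steps, so ~60 for x8. */
	int code = 66.6667 * std::log10(requested);

	/* Decode: 10^(0.015 * 60) is ~7.94, the nearest achievable gain. */
	double actual = std::pow(10, 0.015 * code);

	std::cout << "code " << code << " -> x" << actual << "\n";
	return 0;
}
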
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
new file mode 100644
index 00000000..ac7ee2ea
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Camera helper for IMX296 sensor
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <stddef.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+class CamHelperImx296 : public CamHelper
+{
+public:
+ CamHelperImx296();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ uint32_t exposureLines(const Duration exposure, const Duration lineLength) const override;
+ Duration exposure(uint32_t exposureLines, const Duration lineLength) const override;
+
+private:
+ static constexpr uint32_t minExposureLines = 1;
+ static constexpr uint32_t maxGainCode = 239;
+ static constexpr Duration timePerLine = 550.0 / 37.125e6 * 1.0s;
+
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+CamHelperImx296::CamHelperImx296()
+ : CamHelper(nullptr, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx296::gainCode(double gain) const
+{
+ uint32_t code = 20 * std::log10(gain) * 10;
+ return std::min(code, maxGainCode);
+}
+
+double CamHelperImx296::gain(uint32_t gainCode) const
+{
+ return std::pow(10.0, gainCode / 200.0);
+}
+
+uint32_t CamHelperImx296::exposureLines(const Duration exposure,
+ [[maybe_unused]] const Duration lineLength) const
+{
+ return std::max<uint32_t>(minExposureLines, (exposure - 14.26us) / timePerLine);
+}
+
+Duration CamHelperImx296::exposure(uint32_t exposureLines,
+ [[maybe_unused]] const Duration lineLength) const
+{
+ return std::max<uint32_t>(minExposureLines, exposureLines) * timePerLine + 14.26us;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx296();
+}
+
+static RegisterCamHelper reg("imx296", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
new file mode 100644
index 00000000..c0a09eee
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2025, Raspberry Pi Ltd
+ *
+ * camera helper for imx415 sensor
+ */
+
+#include <cmath>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx415 : public CamHelper
+{
+public:
+ CamHelperImx415();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 8;
+};
+
+CamHelperImx415::CamHelperImx415()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx415::gainCode(double gain) const
+{
+ int code = 66.6667 * std::log10(gain);
+ return std::max(0, std::min(code, 0xf0));
+}
+
+double CamHelperImx415::gain(uint32_t gainCode) const
+{
+ return std::pow(10, 0.015 * gainCode);
+}
+
+unsigned int CamHelperImx415::hideFramesStartup() const
+{
+ /* On startup, we seem to get 1 bad frame. */
+ return 1;
+}
+
+unsigned int CamHelperImx415::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx415();
+}
+
+static RegisterCamHelper reg("imx415", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
new file mode 100644
index 00000000..a72ac67d
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * camera helper for imx477 sensor
+ */
+
+#include <algorithm>
+#include <assert.h>
+#include <cmath>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+/*
+ * We care about two gain registers and a pair of exposure registers. Their
+ * I2C addresses from the Sony IMX477 datasheet:
+ */
+constexpr uint32_t expHiReg = 0x0202;
+constexpr uint32_t expLoReg = 0x0203;
+constexpr uint32_t gainHiReg = 0x0204;
+constexpr uint32_t gainLoReg = 0x0205;
+constexpr uint32_t frameLengthHiReg = 0x0340;
+constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
+constexpr uint32_t temperatureReg = 0x013a;
+constexpr std::initializer_list<uint32_t> registerList =
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg, temperatureReg };
+
+class CamHelperImx477 : public CamHelper
+{
+public:
+ CamHelperImx477();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ bool sensorEmbeddedDataPresent() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 22;
+ /* Maximum frame length allowable for long exposure calculations. */
+ static constexpr int frameLengthMax = 0xffdc;
+ /* Largest long exposure scale factor given as a left shift on the frame length. */
+ static constexpr int longExposureShiftMax = 7;
+
+ void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const override;
+};
+
+CamHelperImx477::CamHelperImx477()
+ : CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx477::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(1024 - 1024 / gain);
+}
+
+double CamHelperImx477::gain(uint32_t gainCode) const
+{
+ return 1024.0 / (1024 - gainCode);
+}
+
+void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ DeviceStatus deviceStatus;
+
+ if (metadata.get("device.status", deviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
+ return;
+ }
+
+ parseEmbeddedData(buffer, metadata);
+
+ /*
+ * The DeviceStatus struct is first populated with values obtained from
+ * DelayedControls. If this reports frame length is > frameLengthMax,
+ * it means we are using a long exposure mode. Since the long exposure
+	 * scale factor is not returned through embedded data, we must rely
+ * on the existing exposure lines and frame length values returned by
+ * DelayedControls.
+ *
+ * Otherwise, all values are updated with what is reported in the
+ * embedded data.
+ */
+ if (deviceStatus.frameLength > frameLengthMax) {
+ DeviceStatus parsedDeviceStatus;
+
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
+
+ LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
+ << parsedDeviceStatus;
+ }
+}
+
+std::pair<uint32_t, uint32_t> CamHelperImx477::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLength, exposureLines;
+ unsigned int shift = 0;
+
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
+ /*
+ * Check if the frame length calculated needs to be setup for long
+ * exposure mode. This will require us to use a long exposure scale
+ * factor provided by a shift operation in the sensor.
+ */
+ while (frameLength > frameLengthMax) {
+ if (++shift > longExposureShiftMax) {
+ shift = longExposureShiftMax;
+ frameLength = frameLengthMax;
+ break;
+ }
+ frameLength >>= 1;
+ }
+
+ if (shift) {
+ /* Account for any rounding in the scaled frame length value. */
+ frameLength <<= shift;
+ exposureLines = CamHelperImx477::exposureLines(exposure, lineLength);
+ exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
+ exposure = CamHelperImx477::exposure(exposureLines, lineLength);
+ }
+
+ return { frameLength - mode_.height, hblank };
+}
+
+bool CamHelperImx477::sensorEmbeddedDataPresent() const
+{
+ return true;
+}
+
+void CamHelperImx477::populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const
+{
+ DeviceStatus deviceStatus;
+
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
+
+ metadata.set("device.status", deviceStatus);
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx477();
+}
+
+static RegisterCamHelper reg("imx477", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
new file mode 100644
index 00000000..10cbea48
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Based on cam_helper_imx477.cpp
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * camera helper for imx519 sensor
+ * Copyright (C) 2021, Arducam Technology co., Ltd.
+ */
+
+#include <assert.h>
+#include <cmath>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+/*
+ * We care about two gain registers and a pair of exposure registers. Their
+ * I2C addresses from the Sony IMX519 datasheet:
+ */
+constexpr uint32_t expHiReg = 0x0202;
+constexpr uint32_t expLoReg = 0x0203;
+constexpr uint32_t gainHiReg = 0x0204;
+constexpr uint32_t gainLoReg = 0x0205;
+constexpr uint32_t frameLengthHiReg = 0x0340;
+constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
+constexpr std::initializer_list<uint32_t> registerList =
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, frameLengthHiReg, frameLengthLoReg,
+ lineLengthHiReg, lineLengthLoReg };
+
+class CamHelperImx519 : public CamHelper
+{
+public:
+ CamHelperImx519();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ bool sensorEmbeddedDataPresent() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 32;
+ /* Maximum frame length allowable for long exposure calculations. */
+ static constexpr int frameLengthMax = 0xffdc;
+ /* Largest long exposure scale factor given as a left shift on the frame length. */
+ static constexpr int longExposureShiftMax = 7;
+
+ void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const override;
+};
+
+CamHelperImx519::CamHelperImx519()
+ : CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx519::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(1024 - 1024 / gain);
+}
+
+double CamHelperImx519::gain(uint32_t gainCode) const
+{
+ return 1024.0 / (1024 - gainCode);
+}
+
+void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ DeviceStatus deviceStatus;
+
+ if (metadata.get("device.status", deviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
+ return;
+ }
+
+ parseEmbeddedData(buffer, metadata);
+
+ /*
+ * The DeviceStatus struct is first populated with values obtained from
+ * DelayedControls. If this reports frame length is > frameLengthMax,
+ * it means we are using a long exposure mode. Since the long exposure
+	 * scale factor is not returned through embedded data, we must rely
+ * on the existing exposure lines and frame length values returned by
+ * DelayedControls.
+ *
+ * Otherwise, all values are updated with what is reported in the
+ * embedded data.
+ */
+ if (deviceStatus.frameLength > frameLengthMax) {
+ DeviceStatus parsedDeviceStatus;
+
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
+
+ LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
+ << parsedDeviceStatus;
+ }
+}
+
+std::pair<uint32_t, uint32_t> CamHelperImx519::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLength, exposureLines;
+ unsigned int shift = 0;
+
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
+ /*
+ * Check if the frame length calculated needs to be setup for long
+ * exposure mode. This will require us to use a long exposure scale
+ * factor provided by a shift operation in the sensor.
+ */
+ while (frameLength > frameLengthMax) {
+ if (++shift > longExposureShiftMax) {
+ shift = longExposureShiftMax;
+ frameLength = frameLengthMax;
+ break;
+ }
+ frameLength >>= 1;
+ }
+
+ if (shift) {
+ /* Account for any rounding in the scaled frame length value. */
+ frameLength <<= shift;
+ exposureLines = CamHelperImx519::exposureLines(exposure, lineLength);
+ exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
+ exposure = CamHelperImx519::exposure(exposureLines, lineLength);
+ }
+
+ return { frameLength - mode_.height, hblank };
+}
+
+bool CamHelperImx519::sensorEmbeddedDataPresent() const
+{
+ return true;
+}
+
+void CamHelperImx519::populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const
+{
+ DeviceStatus deviceStatus;
+
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+
+ metadata.set("device.status", deviceStatus);
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx519();
+}
+
+static RegisterCamHelper reg("imx519", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
new file mode 100644
index 00000000..6150909c
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * camera helper for imx708 sensor
+ */
+
+#include <cmath>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include "controller/pdaf_data.h"
+
+#include "cam_helper.h"
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+
+using namespace std::literals::chrono_literals;
+
+namespace libcamera {
+LOG_DECLARE_CATEGORY(IPARPI)
+}
+
+/*
+ * We care about two gain registers and a pair of exposure registers. Their
+ * I2C addresses from the Sony imx708 datasheet:
+ */
+constexpr uint32_t expHiReg = 0x0202;
+constexpr uint32_t expLoReg = 0x0203;
+constexpr uint32_t gainHiReg = 0x0204;
+constexpr uint32_t gainLoReg = 0x0205;
+constexpr uint32_t frameLengthHiReg = 0x0340;
+constexpr uint32_t frameLengthLoReg = 0x0341;
+constexpr uint32_t lineLengthHiReg = 0x0342;
+constexpr uint32_t lineLengthLoReg = 0x0343;
+constexpr uint32_t temperatureReg = 0x013a;
+constexpr std::initializer_list<uint32_t> registerList =
+ { expHiReg, expLoReg, gainHiReg, gainLoReg, lineLengthHiReg,
+ lineLengthLoReg, frameLengthHiReg, frameLengthLoReg, temperatureReg };
+
+class CamHelperImx708 : public CamHelper
+{
+public:
+ CamHelperImx708();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gain_code) const override;
+ void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override;
+ void process(StatisticsPtr &stats, Metadata &metadata) override;
+ std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration,
+ Duration maxFrameDuration) const override;
+ bool sensorEmbeddedDataPresent() const override;
+ double getModeSensitivity(const CameraMode &mode) const override;
+ unsigned int hideFramesModeSwitch() const override;
+ unsigned int hideFramesStartup() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 48;
+ /* Maximum frame length allowable for long exposure calculations. */
+ static constexpr int frameLengthMax = 0xffdc;
+ /* Largest long exposure scale factor given as a left shift on the frame length. */
+ static constexpr int longExposureShiftMax = 7;
+
+ static constexpr int pdafStatsRows = 12;
+ static constexpr int pdafStatsCols = 16;
+
+ void populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const override;
+
+ static bool parsePdafData(const uint8_t *ptr, size_t len, unsigned bpp,
+ PdafRegions &pdaf);
+
+ bool parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp);
+ void putAGCStatistics(StatisticsPtr stats);
+
+ Histogram aeHistLinear_;
+ uint32_t aeHistAverage_;
+ bool aeHistValid_;
+};
+
+CamHelperImx708::CamHelperImx708()
+ : CamHelper(std::make_unique<MdParserSmia>(registerList), frameIntegrationDiff),
+ aeHistLinear_{}, aeHistAverage_(0), aeHistValid_(false)
+{
+}
+
+uint32_t CamHelperImx708::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(1024 - 1024 / gain);
+}
+
+double CamHelperImx708::gain(uint32_t gain_code) const
+{
+ return 1024.0 / (1024 - gain_code);
+}
+
+void CamHelperImx708::prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata)
+{
+ MdParser::RegisterMap registers;
+ DeviceStatus deviceStatus;
+
+ LOG(IPARPI, Debug) << "Embedded buffer size: " << buffer.size();
+
+ if (metadata.get("device.status", deviceStatus)) {
+ LOG(IPARPI, Error) << "DeviceStatus not found from DelayedControls";
+ return;
+ }
+
+ parseEmbeddedData(buffer, metadata);
+
+ /*
+ * Parse PDAF data, which we expect to occupy the third scanline
+ * of embedded data. As PDAF is quite sensor-specific, it's parsed here.
+ */
+ size_t bytesPerLine = (mode_.width * mode_.bitdepth) >> 3;
+
+ if (buffer.size() > 2 * bytesPerLine) {
+ PdafRegions pdaf;
+ if (parsePdafData(&buffer[2 * bytesPerLine],
+ buffer.size() - 2 * bytesPerLine,
+ mode_.bitdepth, pdaf))
+ metadata.set("pdaf.regions", pdaf);
+ }
+
+ /* Parse AE-HIST data where present */
+ if (buffer.size() > 3 * bytesPerLine) {
+ aeHistValid_ = parseAEHist(&buffer[3 * bytesPerLine],
+ buffer.size() - 3 * bytesPerLine,
+ mode_.bitdepth);
+ }
+
+ /*
+ * The DeviceStatus struct is first populated with values obtained from
+ * DelayedControls. If this reports frame length is > frameLengthMax,
+ * it means we are using a long exposure mode. Since the long exposure
+	 * scale factor is not returned through embedded data, we must rely
+ * on the existing exposure lines and frame length values returned by
+ * DelayedControls.
+ *
+ * Otherwise, all values are updated with what is reported in the
+ * embedded data.
+ */
+ if (deviceStatus.frameLength > frameLengthMax) {
+ DeviceStatus parsedDeviceStatus;
+
+ metadata.get("device.status", parsedDeviceStatus);
+ parsedDeviceStatus.exposureTime = deviceStatus.exposureTime;
+ parsedDeviceStatus.frameLength = deviceStatus.frameLength;
+ metadata.set("device.status", parsedDeviceStatus);
+
+ LOG(IPARPI, Debug) << "Metadata updated for long exposure: "
+ << parsedDeviceStatus;
+ }
+}
+
+void CamHelperImx708::process(StatisticsPtr &stats, [[maybe_unused]] Metadata &metadata)
+{
+ if (aeHistValid_)
+ putAGCStatistics(stats);
+}
+
+std::pair<uint32_t, uint32_t> CamHelperImx708::getBlanking(Duration &exposure,
+ Duration minFrameDuration,
+ Duration maxFrameDuration) const
+{
+ uint32_t frameLength, exposureLines;
+ unsigned int shift = 0;
+
+ auto [vblank, hblank] = CamHelper::getBlanking(exposure, minFrameDuration,
+ maxFrameDuration);
+
+ frameLength = mode_.height + vblank;
+ Duration lineLength = hblankToLineLength(hblank);
+
+ /*
+ * Check if the frame length calculated needs to be setup for long
+ * exposure mode. This will require us to use a long exposure scale
+ * factor provided by a shift operation in the sensor.
+ */
+ while (frameLength > frameLengthMax) {
+ if (++shift > longExposureShiftMax) {
+ shift = longExposureShiftMax;
+ frameLength = frameLengthMax;
+ break;
+ }
+ frameLength >>= 1;
+ }
+
+ if (shift) {
+ /* Account for any rounding in the scaled frame length value. */
+ frameLength <<= shift;
+ exposureLines = CamHelper::exposureLines(exposure, lineLength);
+ exposureLines = std::min(exposureLines, frameLength - frameIntegrationDiff);
+ exposure = CamHelper::exposure(exposureLines, lineLength);
+ }
+
+ return { frameLength - mode_.height, hblank };
+}
+
+bool CamHelperImx708::sensorEmbeddedDataPresent() const
+{
+ return true;
+}
+
+double CamHelperImx708::getModeSensitivity(const CameraMode &mode) const
+{
+ /* In binned modes, sensitivity increases by a factor of 2 */
+ return (mode.width > 2304) ? 1.0 : 2.0;
+}
+
+unsigned int CamHelperImx708::hideFramesModeSwitch() const
+{
+ /*
+ * We need to drop the first startup frame in HDR mode.
+	 * Unfortunately, the only way to determine whether the sensor is in
+	 * HDR mode is currently to match the resolution and framerate - the
+	 * HDR mode only runs up to 30fps.
+ */
+ if (mode_.width == 2304 && mode_.height == 1296 &&
+ mode_.minFrameDuration > 1.0s / 32)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int CamHelperImx708::hideFramesStartup() const
+{
+ return hideFramesModeSwitch();
+}
+
+void CamHelperImx708::populateMetadata(const MdParser::RegisterMap &registers,
+ Metadata &metadata) const
+{
+ DeviceStatus deviceStatus;
+
+ deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 +
+ registers.at(lineLengthLoReg));
+ deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg),
+ deviceStatus.lineLength);
+ deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg));
+ deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg);
+ deviceStatus.sensorTemperature = std::clamp<int8_t>(registers.at(temperatureReg), -20, 80);
+
+ metadata.set("device.status", deviceStatus);
+}
+
+bool CamHelperImx708::parsePdafData(const uint8_t *ptr, size_t len,
+ unsigned bpp, PdafRegions &pdaf)
+{
+ size_t step = bpp >> 1; /* bytes per PDAF grid entry */
+
+ if (bpp < 10 || bpp > 14 || len < 194 * step || ptr[0] != 0 || ptr[1] >= 0x40) {
+ LOG(IPARPI, Error) << "PDAF data in unsupported format";
+ return false;
+ }
+
+ pdaf.init({ pdafStatsCols, pdafStatsRows });
+
+ ptr += 2 * step;
+ for (unsigned i = 0; i < pdafStatsRows; ++i) {
+ for (unsigned j = 0; j < pdafStatsCols; ++j) {
+ unsigned c = (ptr[0] << 3) | (ptr[1] >> 5);
+ int p = (((ptr[1] & 0x0F) - (ptr[1] & 0x10)) << 6) | (ptr[2] >> 2);
+ PdafData pdafData;
+ pdafData.conf = c;
+ pdafData.phase = c ? p : 0;
+ pdaf.set(libcamera::Point(j, i), { pdafData, 1, 0 });
+ ptr += step;
+ }
+ }
+
+ return true;
+}
+
+bool CamHelperImx708::parseAEHist(const uint8_t *ptr, size_t len, unsigned bpp)
+{
+ static constexpr unsigned int PipelineBits = Statistics::NormalisationFactorPow2;
+
+ uint64_t count = 0, sum = 0;
+ size_t step = bpp >> 1; /* bytes per histogram bin */
+ uint32_t hist[128];
+
+ if (len < 144 * step)
+ return false;
+
+ /*
+ * Read the 128 bin linear histogram, which by default covers
+ * the full range of the HDR shortest exposure (small values are
+ * expected to dominate, so pixel-value resolution will be poor).
+ */
+ for (unsigned i = 0; i < 128; ++i) {
+ if (ptr[3] != 0x55)
+ return false;
+ uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
+ hist[i] = c >> 2; /* pixels to quads */
+ if (i != 0) {
+ count += c;
+ sum += c *
+ (i * (1u << (PipelineBits - 7)) +
+ (1u << (PipelineBits - 8)));
+ }
+ ptr += step;
+ }
+
+ /*
+ * Now use the first 9 bins of the log histogram (these should be
+ * subdivisions of the smallest linear bin), to get a more accurate
+ * average value. Don't assume that AEHIST1_AVERAGE is present.
+ */
+ for (unsigned i = 0; i < 9; ++i) {
+ if (ptr[3] != 0x55)
+ return false;
+ uint32_t c = (ptr[0] << 14) + (ptr[1] << 6) + (ptr[2] >> 2);
+ count += c;
+ sum += c *
+ ((3u << PipelineBits) >> (17 - i));
+ ptr += step;
+ }
+ if ((unsigned)((ptr[0] << 12) + (ptr[1] << 4) + (ptr[2] >> 4)) !=
+ hist[1]) {
+ LOG(IPARPI, Error) << "Lin/Log histogram mismatch";
+ return false;
+ }
+
+ aeHistLinear_ = Histogram(hist, 128);
+ aeHistAverage_ = count ? (sum / count) : 0;
+
+ return count != 0;
+}
+
+void CamHelperImx708::putAGCStatistics(StatisticsPtr stats)
+{
+ /*
+ * For HDR mode, copy sensor's AE/AGC statistics over ISP's, so the
+ * AGC algorithm sees a linear response to exposure and gain changes.
+ *
+ * Histogram: Just copy the "raw" histogram over the tone-mapped one,
+ * although they have different distributions (raw values are lower).
+ * Tuning should either ignore it, or constrain for highlights only.
+ *
+ * Average: Overwrite all regional averages with a global raw average,
+ * scaled by a fiddle-factor so that a conventional (non-HDR) y_target
+ * of e.g. 0.17 will map to a suitable level for HDR.
+ */
+ stats->yHist = aeHistLinear_;
+
+ constexpr unsigned int HdrHeadroomFactor = 4;
+ uint64_t v = HdrHeadroomFactor * aeHistAverage_;
+ for (auto &region : stats->agcRegions) {
+ region.val.rSum = region.val.gSum = region.val.bSum = region.counted * v;
+ }
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx708();
+}
+
+static RegisterCamHelper reg("imx708", &create);
+static RegisterCamHelper regWide("imx708_wide", &create);
+static RegisterCamHelper regNoIr("imx708_noir", &create);
+static RegisterCamHelper regWideNoIr("imx708_wide_noir", &create);
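
parsePdafData() above extracts, for every grid cell, an 11-bit confidence and an 11-bit signed phase from the first three bytes of each entry. A minimal sketch of that decode for a single entry; the sample bytes are arbitrary:

#include <cstdint>
#include <iostream>

int main()
{
	const uint8_t entry[3] = { 0x12, 0x9c, 0x74 }; /* arbitrary sample */

	/* Top 8 bits from byte 0, next 3 from the high bits of byte 1. */
	unsigned int conf = (entry[0] << 3) | (entry[1] >> 5);

	/* Bit 4 of byte 1 is the sign; the subtraction sign-extends it. */
	int phase = (((entry[1] & 0x0f) - (entry[1] & 0x10)) << 6) |
		    (entry[2] >> 2);

	std::cout << "conf=" << conf << " phase=" << phase << "\n"; /* 148, -227 */
	return 0;
}
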
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
new file mode 100644
index 00000000..40d6b6d7
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * camera information for ov5647 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv5647 : public CamHelper
+{
+public:
+ CamHelperOv5647();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+ unsigned int mistrustFramesStartup() const override;
+ unsigned int mistrustFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * OV5647 doesn't output metadata, so we have to use the "unicam parser" which
+ * works by counting frames.
+ */
+
+CamHelperOv5647::CamHelperOv5647()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv5647::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 16.0);
+}
+
+double CamHelperOv5647::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 16.0;
+}
+
+unsigned int CamHelperOv5647::hideFramesStartup() const
+{
+ /*
+ * On startup, we get a couple of under-exposed frames which
+ * we don't want shown.
+ */
+ return 2;
+}
+
+unsigned int CamHelperOv5647::hideFramesModeSwitch() const
+{
+ /*
+ * After a mode switch, we get a couple of under-exposed frames which
+ * we don't want shown.
+ */
+ return 2;
+}
+
+unsigned int CamHelperOv5647::mistrustFramesStartup() const
+{
+ /*
+ * First couple of frames are under-exposed and are no good for control
+ * algos.
+ */
+ return 2;
+}
+
+unsigned int CamHelperOv5647::mistrustFramesModeSwitch() const
+{
+ /*
+ * First couple of frames are under-exposed even after a simple
+ * mode switch, and are no good for control algos.
+ */
+ return 2;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv5647();
+}
+
+static RegisterCamHelper reg("ov5647", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
new file mode 100644
index 00000000..980495a8
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ * Copyright (C) 2023, Ideas on Board Oy.
+ *
+ * camera information for ov64a40 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv64a40 : public CamHelper
+{
+public:
+ CamHelperOv64a40();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ double getModeSensitivity(const CameraMode &mode) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 32;
+};
+
+CamHelperOv64a40::CamHelperOv64a40()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv64a40::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 128.0);
+}
+
+double CamHelperOv64a40::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 128.0;
+}
+
+double CamHelperOv64a40::getModeSensitivity(const CameraMode &mode) const
+{
+ if (mode.binX >= 2 && mode.scaleX >= 4) {
+ return 4.0;
+ } else if (mode.binX >= 2 && mode.scaleX >= 2) {
+ return 2.0;
+ } else {
+ return 1.0;
+ }
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv64a40();
+}
+
+static RegisterCamHelper reg("ov64a40", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp
new file mode 100644
index 00000000..fc7b999f
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera information for ov7251 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv7251 : public CamHelper
+{
+public:
+ CamHelperOv7251();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 4;
+};
+
+/*
+ * OV7251 doesn't output metadata, so we have to use the "unicam parser" which
+ * works by counting frames.
+ */
+
+CamHelperOv7251::CamHelperOv7251()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv7251::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 16.0);
+}
+
+double CamHelperOv7251::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 16.0;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv7251();
+}
+
+static RegisterCamHelper reg("ov7251", &create);
diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
new file mode 100644
index 00000000..e93a4691
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * camera information for ov9281 sensor
+ */
+
+#include <assert.h>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperOv9281 : public CamHelper
+{
+public:
+ CamHelperOv9281();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 25;
+};
+
+/*
+ * OV9281 doesn't output metadata, so we have to use the "unicam parser" which
+ * works by counting frames.
+ */
+
+CamHelperOv9281::CamHelperOv9281()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperOv9281::gainCode(double gain) const
+{
+ return static_cast<uint32_t>(gain * 16.0);
+}
+
+double CamHelperOv9281::gain(uint32_t gainCode) const
+{
+ return static_cast<double>(gainCode) / 16.0;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperOv9281();
+}
+
+static RegisterCamHelper reg("ov9281", &create);
diff --git a/src/ipa/rpi/cam_helper/md_parser.h b/src/ipa/rpi/cam_helper/md_parser.h
new file mode 100644
index 00000000..227c376c
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/md_parser.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * image sensor metadata parser interface
+ */
+#pragma once
+
+#include <initializer_list>
+#include <map>
+#include <optional>
+#include <stdint.h>
+
+#include <libcamera/base/span.h>
+
+/*
+ * Camera metadata parser class. Usage as shown below.
+ *
+ * Setup:
+ *
+ * Usually the metadata parser will be made as part of the CamHelper class so
+ * application code doesn't have to worry which kind to instantiate. But for
+ * the sake of example let's suppose we're parsing imx219 metadata.
+ *
+ * MdParser *parser = new MdParserSmia({ expHiReg, expLoReg, gainReg });
+ * parser->setBitsPerPixel(bpp);
+ * parser->setLineLengthBytes(pitch);
+ * parser->setNumLines(2);
+ *
+ * Note 1: if you don't know how many lines there are, the size of the input
+ * buffer is used as a limit instead.
+ *
+ * Note 2: if you don't know the line length, you can leave the line length unset
+ * (or set to zero) and the parser will hunt for the line start instead.
+ *
+ * Then on every frame:
+ *
+ * RegisterMap registers;
+ * if (parser->parse(buffer, registers) != MdParser::OK)
+ * much badness;
+ * Metadata metadata;
+ * CamHelper::populateMetadata(registers, metadata);
+ *
+ * (Note that the CamHelper class converts to/from exposure lines and time,
+ * and gain_code / actual gain.)
+ *
+ * If you suspect the embedded data layout may have changed, update any line
+ * lengths, number of lines, bits per pixel etc. that differ, and
+ * then:
+ *
+ * parser->reset();
+ *
+ * before calling parse() again.
+ */
+
+namespace RPiController {
+
+/* Abstract base class from which other metadata parsers are derived. */
+
+class MdParser
+{
+public:
+ using RegisterMap = std::map<uint32_t, uint32_t>;
+
+ /*
+ * Parser status codes:
+ * OK - success
+ * NOTFOUND - value such as exposure or gain was not found
+ * ERROR - all other errors
+ */
+ enum Status {
+ OK = 0,
+ NOTFOUND = 1,
+ ERROR = 2
+ };
+
+ MdParser()
+ : reset_(true), bitsPerPixel_(0), numLines_(0), lineLengthBytes_(0)
+ {
+ }
+
+ virtual ~MdParser() = default;
+
+ void reset()
+ {
+ reset_ = true;
+ }
+
+ void setBitsPerPixel(int bpp)
+ {
+ bitsPerPixel_ = bpp;
+ }
+
+ void setNumLines(unsigned int numLines)
+ {
+ numLines_ = numLines;
+ }
+
+ void setLineLengthBytes(unsigned int numBytes)
+ {
+ lineLengthBytes_ = numBytes;
+ }
+
+ virtual Status parse(libcamera::Span<const uint8_t> buffer,
+ RegisterMap &registers) = 0;
+
+protected:
+ bool reset_;
+ int bitsPerPixel_;
+ unsigned int numLines_;
+ unsigned int lineLengthBytes_;
+};
+
+/*
+ * This isn't a full implementation of a metadata parser for SMIA sensors,
+ * however, it does provide the findRegs function which will prove useful and
+ * make it easier to implement parsers for other SMIA-like sensors (see
+ * md_parser_imx219.cpp for an example).
+ */
+
+class MdParserSmia final : public MdParser
+{
+public:
+ MdParserSmia(std::initializer_list<uint32_t> registerList);
+
+ MdParser::Status parse(libcamera::Span<const uint8_t> buffer,
+ RegisterMap &registers) override;
+
+private:
+ /* Maps register address to offset in the buffer. */
+ using OffsetMap = std::map<uint32_t, std::optional<uint32_t>>;
+
+ /*
+ * Note that error codes > 0 are regarded as non-fatal; codes < 0
+ * indicate a bad data buffer. Status codes are:
+ * ParseOk - found all registers, much happiness
+ * MissingRegs - some registers found; should this be a hard error?
+ * The remaining codes are all hard errors.
+ */
+ enum ParseStatus {
+ ParseOk = 0,
+ MissingRegs = 1,
+ NoLineStart = -1,
+ IllegalTag = -2,
+ BadDummy = -3,
+ BadLineEnd = -4,
+ BadPadding = -5
+ };
+
+ ParseStatus findRegs(libcamera::Span<const uint8_t> buffer);
+
+ OffsetMap offsets_;
+};
+
+} /* namespace RPiController */
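The usage notes at the top of this header translate into only a few calls. Below is a minimal sketch of the setup/parse cycle, assuming IMX219-style register addresses (0x015a/0x015b for coarse exposure, 0x0157 for analogue gain) and two lines of embedded data as in the header's own example; the addresses and the example() wrapper are illustrative only.

#include <cstdint>

#include <libcamera/base/span.h>

#include "md_parser.h"

using namespace RPiController;

/* IMX219-style register addresses; treat these as illustrative values. */
constexpr uint32_t expHiReg = 0x015a, expLoReg = 0x015b, gainReg = 0x0157;

/* One-time setup, then reuse across frames as the header comment suggests. */
void example(libcamera::Span<const uint8_t> embedded)
{
    MdParserSmia parser({ expHiReg, expLoReg, gainReg });
    parser.setBitsPerPixel(10);
    parser.setNumLines(2);
    /* Line length left unset: the parser hunts for each line start. */

    MdParser::RegisterMap regs;
    if (parser.parse(embedded, regs) == MdParser::OK) {
        uint32_t exposureLines = (regs[expHiReg] << 8) | regs[expLoReg];
        (void)exposureLines; /* hand off to the CamHelper in real code */
    }
}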
diff --git a/src/ipa/rpi/cam_helper/md_parser_smia.cpp b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
new file mode 100644
index 00000000..c7bdcf94
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/md_parser_smia.cpp
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * SMIA specification based embedded data parser
+ */
+
+#include <libcamera/base/log.h>
+#include "md_parser.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+/*
+ * This function goes through the embedded data to find the offsets (not
+ * values!) in the data block where the values of the given registers can
+ * subsequently be found.
+ *
+ * Embedded data tag bytes, from Sony IMX219 datasheet but general to all SMIA
+ * sensors, I think.
+ */
+
+constexpr unsigned int LineStart = 0x0a;
+constexpr unsigned int LineEndTag = 0x07;
+constexpr unsigned int RegHiBits = 0xaa;
+constexpr unsigned int RegLowBits = 0xa5;
+constexpr unsigned int RegValue = 0x5a;
+constexpr unsigned int RegSkip = 0x55;
+
+MdParserSmia::MdParserSmia(std::initializer_list<uint32_t> registerList)
+{
+ for (auto r : registerList)
+ offsets_[r] = {};
+}
+
+MdParser::Status MdParserSmia::parse(libcamera::Span<const uint8_t> buffer,
+ RegisterMap &registers)
+{
+ if (reset_) {
+ /*
+ * Search again through the metadata for all the registers
+ * requested.
+ */
+ ASSERT(bitsPerPixel_);
+
+ for (const auto &kv : offsets_)
+ offsets_[kv.first] = {};
+
+ ParseStatus ret = findRegs(buffer);
+ /*
+ * > 0 means "worked partially but parse again next time",
+ * < 0 means "hard error".
+ *
+ * In either case, we retry parsing on the next frame.
+ */
+ if (ret != ParseOk)
+ return ERROR;
+
+ reset_ = false;
+ }
+
+ /* Populate the register values requested. */
+ registers.clear();
+ for (const auto &[reg, offset] : offsets_) {
+ if (!offset) {
+ reset_ = true;
+ return NOTFOUND;
+ }
+ registers[reg] = buffer[offset.value()];
+ }
+
+ return OK;
+}
+
+MdParserSmia::ParseStatus MdParserSmia::findRegs(libcamera::Span<const uint8_t> buffer)
+{
+ ASSERT(offsets_.size());
+
+ if (buffer[0] != LineStart)
+ return NoLineStart;
+
+ unsigned int currentOffset = 1; /* after the LineStart */
+ unsigned int currentLineStart = 0, currentLine = 0;
+ unsigned int regNum = 0, regsDone = 0;
+
+ while (1) {
+ int tag = buffer[currentOffset++];
+
+ /* Non-dummy bytes come in even-sized blocks: skip can only ever follow tag */
+ while ((bitsPerPixel_ == 10 &&
+ (currentOffset + 1 - currentLineStart) % 5 == 0) ||
+ (bitsPerPixel_ == 12 &&
+ (currentOffset + 1 - currentLineStart) % 3 == 0) ||
+ (bitsPerPixel_ == 14 &&
+ (currentOffset - currentLineStart) % 7 >= 4)) {
+ if (buffer[currentOffset++] != RegSkip)
+ return BadDummy;
+ }
+
+ int dataByte = buffer[currentOffset++];
+
+ if (tag == LineEndTag) {
+ if (dataByte != LineEndTag)
+ return BadLineEnd;
+
+ if (numLines_ && ++currentLine == numLines_)
+ return MissingRegs;
+
+ if (lineLengthBytes_) {
+ currentOffset = currentLineStart + lineLengthBytes_;
+
+ /* Require whole line to be in the buffer (if buffer size set). */
+ if (buffer.size() &&
+ currentOffset + lineLengthBytes_ > buffer.size())
+ return MissingRegs;
+
+ if (buffer[currentOffset] != LineStart)
+ return NoLineStart;
+ } else {
+ /* Allow a zero line length to mean "hunt for the next line". */
+ while (currentOffset < buffer.size() &&
+ buffer[currentOffset] != LineStart)
+ currentOffset++;
+
+ if (currentOffset == buffer.size())
+ return NoLineStart;
+ }
+
+ /* inc currentOffset to after LineStart */
+ currentLineStart = currentOffset++;
+ } else {
+ if (tag == RegHiBits)
+ regNum = (regNum & 0xff) | (dataByte << 8);
+ else if (tag == RegLowBits)
+ regNum = (regNum & 0xff00) | dataByte;
+ else if (tag == RegSkip)
+ regNum++;
+ else if (tag == RegValue) {
+ auto reg = offsets_.find(regNum);
+
+ if (reg != offsets_.end()) {
+ offsets_[regNum] = currentOffset - 1;
+
+ if (++regsDone == offsets_.size())
+ return ParseOk;
+ }
+ regNum++;
+ } else
+ return IllegalTag;
+ }
+ }
+}
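The tag layout is easiest to see on a hand-built buffer. The sketch below encodes a single register (0x0157, holding the arbitrary value 0x20) in one embedded-data line at 10 bits per pixel, including the RegSkip dummy byte the packing rule above demands at that position, and checks that parse() recovers it. The register address and value are assumptions for illustration.

#include <cassert>
#include <cstdint>

#include <libcamera/base/span.h>

#include "md_parser.h"

using namespace RPiController;

int main()
{
    /*
     * One synthetic embedded-data line: line start, register high
     * bits, register low bits (with the 10bpp dummy byte in between)
     * and finally the register value.
     */
    const uint8_t line[] = {
        0x0a,               /* LineStart */
        0xaa, 0x01,         /* RegHiBits: regNum = 0x01xx */
        0xa5, 0x55, 0x57,   /* RegLowBits (0x55 dummy): regNum = 0x0157 */
        0x5a, 0x20,         /* RegValue: register 0x0157 = 0x20 */
    };

    MdParserSmia parser({ 0x0157 });
    parser.setBitsPerPixel(10);

    MdParser::RegisterMap regs;
    assert(parser.parse(libcamera::Span<const uint8_t>(line), regs) == MdParser::OK);
    assert(regs[0x0157] == 0x20);
    return 0;
}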
diff --git a/src/ipa/rpi/cam_helper/meson.build b/src/ipa/rpi/cam_helper/meson.build
new file mode 100644
index 00000000..abf02147
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/meson.build
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_cam_helper_sources = files([
+ 'cam_helper.cpp',
+ 'cam_helper_ov5647.cpp',
+ 'cam_helper_imx219.cpp',
+ 'cam_helper_imx283.cpp',
+ 'cam_helper_imx290.cpp',
+ 'cam_helper_imx296.cpp',
+ 'cam_helper_imx415.cpp',
+ 'cam_helper_imx477.cpp',
+ 'cam_helper_imx519.cpp',
+ 'cam_helper_imx708.cpp',
+ 'cam_helper_ov64a40.cpp',
+ 'cam_helper_ov7251.cpp',
+ 'cam_helper_ov9281.cpp',
+ 'md_parser_smia.cpp',
+])
+
+rpi_ipa_cam_helper_includes = [
+ include_directories('..'),
+]
+
+rpi_ipa_cam_helper_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_cam_helper_lib = static_library('rpi_ipa_cam_helper', rpi_ipa_cam_helper_sources,
+ include_directories : rpi_ipa_cam_helper_includes,
+ dependencies : rpi_ipa_cam_helper_deps)
diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp
new file mode 100644
index 00000000..6ff1e22b
--- /dev/null
+++ b/src/ipa/rpi/common/ipa_base.cpp
@@ -0,0 +1,1542 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Raspberry Pi IPA base class
+ */
+
+#include "ipa_base.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "controller/af_algorithm.h"
+#include "controller/af_status.h"
+#include "controller/agc_algorithm.h"
+#include "controller/awb_algorithm.h"
+#include "controller/awb_status.h"
+#include "controller/black_level_status.h"
+#include "controller/ccm_algorithm.h"
+#include "controller/ccm_status.h"
+#include "controller/contrast_algorithm.h"
+#include "controller/denoise_algorithm.h"
+#include "controller/hdr_algorithm.h"
+#include "controller/lux_status.h"
+#include "controller/sharpen_algorithm.h"
+#include "controller/statistics.h"
+
+namespace libcamera {
+
+using namespace std::literals::chrono_literals;
+using utils::Duration;
+
+namespace {
+
+/* Number of frame length times to hold in the queue. */
+constexpr unsigned int FrameLengthsQueueSize = 10;
+
+/* Configure the sensor with these values initially. */
+constexpr double defaultAnalogueGain = 1.0;
+constexpr Duration defaultExposureTime = 20.0ms;
+constexpr Duration defaultMinFrameDuration = 1.0s / 30.0;
+constexpr Duration defaultMaxFrameDuration = 250.0s;
+
+/*
+ * Determine the minimum allowable inter-frame duration to run the controller
+ * algorithms. If the pipeline handler provides frames at a rate higher than this,
+ * we rate-limit the controller prepare() and process() calls to run at or
+ * below this rate.
+ */
+constexpr Duration controllerMinFrameDuration = 1.0s / 30.0;
+
+/* List of controls handled by the Raspberry Pi IPA */
+const ControlInfoMap::Map ipaControls{
+ { &controls::AeEnable, ControlInfo(false, true) },
+ { &controls::ExposureTime, ControlInfo(0, 66666) },
+ { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f) },
+ { &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) },
+ { &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) },
+ { &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) },
+ { &controls::ExposureValue, ControlInfo(-8.0f, 8.0f, 0.0f) },
+ { &controls::AeFlickerMode, ControlInfo(static_cast<int>(controls::FlickerOff),
+ static_cast<int>(controls::FlickerManual),
+ static_cast<int>(controls::FlickerOff)) },
+ { &controls::AeFlickerPeriod, ControlInfo(100, 1000000) },
+ { &controls::Brightness, ControlInfo(-1.0f, 1.0f, 0.0f) },
+ { &controls::Contrast, ControlInfo(0.0f, 32.0f, 1.0f) },
+ { &controls::HdrMode, ControlInfo(controls::HdrModeValues) },
+ { &controls::Sharpness, ControlInfo(0.0f, 16.0f, 1.0f) },
+ { &controls::ScalerCrop, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
+ { &controls::FrameDurationLimits, ControlInfo(INT64_C(33333), INT64_C(120000)) },
+ { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) },
+ { &controls::rpi::StatsOutputEnable, ControlInfo(false, true, false) },
+};
+
+/* IPA controls handled conditionally, if the sensor is not mono */
+const ControlInfoMap::Map ipaColourControls{
+ { &controls::AwbEnable, ControlInfo(false, true) },
+ { &controls::AwbMode, ControlInfo(controls::AwbModeValues) },
+ { &controls::ColourGains, ControlInfo(0.0f, 32.0f) },
+ { &controls::ColourTemperature, ControlInfo(100, 100000) },
+ { &controls::Saturation, ControlInfo(0.0f, 32.0f, 1.0f) },
+};
+
+/* IPA controls handled conditionally, if the lens has a focus control */
+const ControlInfoMap::Map ipaAfControls{
+ { &controls::AfMode, ControlInfo(controls::AfModeValues) },
+ { &controls::AfRange, ControlInfo(controls::AfRangeValues) },
+ { &controls::AfSpeed, ControlInfo(controls::AfSpeedValues) },
+ { &controls::AfMetering, ControlInfo(controls::AfMeteringValues) },
+ { &controls::AfWindows, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) },
+ { &controls::AfTrigger, ControlInfo(controls::AfTriggerValues) },
+ { &controls::AfPause, ControlInfo(controls::AfPauseValues) },
+ { &controls::LensPosition, ControlInfo(0.0f, 32.0f, 1.0f) }
+};
+
+/* Platform specific controls */
+const std::map<const std::string, ControlInfoMap::Map> platformControls {
+ { "pisp", {
+ { &controls::rpi::ScalerCrops, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) }
+ } },
+};
+
+} /* namespace */
+
+LOG_DEFINE_CATEGORY(IPARPI)
+
+namespace ipa::RPi {
+
+IpaBase::IpaBase()
+ : controller_(), frameLengths_(FrameLengthsQueueSize, 0s), statsMetadataOutput_(false),
+ stitchSwapBuffers_(false), frameCount_(0), mistrustCount_(0), lastRunTimestamp_(0),
+ firstStart_(true), flickerState_({ 0, 0s })
+{
+}
+
+IpaBase::~IpaBase()
+{
+}
+
+int32_t IpaBase::init(const IPASettings &settings, const InitParams &params, InitResult *result)
+{
+ /*
+ * Load the "helper" for this sensor. This tells us all the device specific stuff
+ * that the kernel driver doesn't. We only do this the first time; there is
+ * no need to re-parse the metadata after a simple mode switch.
+ */
+ helper_ = std::unique_ptr<RPiController::CamHelper>(RPiController::CamHelper::create(settings.sensorModel));
+ if (!helper_) {
+ LOG(IPARPI, Error) << "Could not create camera helper for "
+ << settings.sensorModel;
+ return -EINVAL;
+ }
+
+ /* Pass out the sensor metadata to the pipeline handler */
+ int sensorMetadata = helper_->sensorEmbeddedDataPresent();
+ result->sensorConfig.sensorMetadata = sensorMetadata;
+
+ /* Load the tuning file for this sensor. */
+ int ret = controller_.read(settings.configurationFile.c_str());
+ if (ret) {
+ LOG(IPARPI, Error)
+ << "Failed to load tuning data file "
+ << settings.configurationFile;
+ return ret;
+ }
+
+ lensPresent_ = params.lensPresent;
+
+ controller_.initialise();
+ helper_->setHwConfig(controller_.getHardwareConfig());
+
+ /* Return the controls handled by the IPA */
+ ControlInfoMap::Map ctrlMap = ipaControls;
+ if (lensPresent_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaAfControls));
+
+ auto platformCtrlsIt = platformControls.find(controller_.getTarget());
+ if (platformCtrlsIt != platformControls.end())
+ ctrlMap.merge(ControlInfoMap::Map(platformCtrlsIt->second));
+
+ monoSensor_ = params.sensorInfo.cfaPattern == properties::draft::ColorFilterArrangementEnum::MONO;
+ if (!monoSensor_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaColourControls));
+
+ result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ return platformInit(params, result);
+}
+
+int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigParams &params,
+ ConfigResult *result)
+{
+ sensorCtrls_ = params.sensorControls;
+
+ if (!validateSensorControls()) {
+ LOG(IPARPI, Error) << "Sensor control validation failed.";
+ return -1;
+ }
+
+ if (lensPresent_) {
+ lensCtrls_ = params.lensControls;
+ if (!validateLensControls()) {
+ LOG(IPARPI, Warning) << "Lens validation failed, "
+ << "no lens control will be available.";
+ lensPresent_ = false;
+ }
+ }
+
+ /* Setup a metadata ControlList to output metadata. */
+ libcameraMetadata_ = ControlList(controls::controls);
+
+ /* Re-assemble camera mode using the sensor info. */
+ setMode(sensorInfo);
+
+ mode_.transform = static_cast<libcamera::Transform>(params.transform);
+
+ /* Pass the camera mode to the CamHelper to setup algorithms. */
+ helper_->setCameraMode(mode_);
+
+ /*
+ * Initialise this ControlList correctly, even if empty, in case the IPA is
+ * running in isolation mode (passing the ControlList through the IPC layer).
+ */
+ ControlList ctrls(sensorCtrls_);
+
+ /* The pipeline handler passes out the mode's sensitivity. */
+ result->modeSensitivity = mode_.sensitivity;
+
+ if (firstStart_) {
+ /* Supply initial values for frame durations. */
+ applyFrameDurations(defaultMinFrameDuration, defaultMaxFrameDuration);
+
+ /* Supply initial values for gain and exposure. */
+ AgcStatus agcStatus;
+ agcStatus.exposureTime = defaultExposureTime;
+ agcStatus.analogueGain = defaultAnalogueGain;
+ applyAGC(&agcStatus, ctrls);
+
+ /*
+ * Set the lens to the default (typically hyperfocal) position
+ * on first start.
+ */
+ if (lensPresent_) {
+ RPiController::AfAlgorithm *af =
+ dynamic_cast<RPiController::AfAlgorithm *>(controller_.getAlgorithm("af"));
+
+ if (af) {
+ float defaultPos =
+ ipaAfControls.at(&controls::LensPosition).def().get<float>();
+ ControlList lensCtrl(lensCtrls_);
+ int32_t hwpos;
+
+ af->setLensPosition(defaultPos, &hwpos);
+ lensCtrl.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
+ result->lensControls = std::move(lensCtrl);
+ }
+ }
+ }
+
+ result->sensorControls = std::move(ctrls);
+
+ /*
+ * Apply the correct limits to the exposure, gain and frame duration controls
+ * based on the current sensor mode.
+ */
+ ControlInfoMap::Map ctrlMap = ipaControls;
+ ctrlMap[&controls::FrameDurationLimits] =
+ ControlInfo(static_cast<int64_t>(mode_.minFrameDuration.get<std::micro>()),
+ static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>()));
+
+ ctrlMap[&controls::AnalogueGain] =
+ ControlInfo(static_cast<float>(mode_.minAnalogueGain),
+ static_cast<float>(mode_.maxAnalogueGain));
+
+ ctrlMap[&controls::ExposureTime] =
+ ControlInfo(static_cast<int32_t>(mode_.minExposureTime.get<std::micro>()),
+ static_cast<int32_t>(mode_.maxExposureTime.get<std::micro>()));
+
+ /* Declare colour processing related controls for non-mono sensors. */
+ if (!monoSensor_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaColourControls));
+
+ /* Declare Autofocus controls, only if we have a controllable lens */
+ if (lensPresent_)
+ ctrlMap.merge(ControlInfoMap::Map(ipaAfControls));
+
+ result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ return platformConfigure(params, result);
+}
+
+void IpaBase::start(const ControlList &controls, StartResult *result)
+{
+ RPiController::Metadata metadata;
+
+ if (!controls.empty()) {
+ /* We have been given some controls to action before start. */
+ applyControls(controls);
+ }
+
+ controller_.switchMode(mode_, &metadata);
+
+ /* Reset the frame lengths queue state. */
+ lastTimeout_ = 0s;
+ frameLengths_.clear();
+ frameLengths_.resize(FrameLengthsQueueSize, 0s);
+
+ /* SwitchMode may supply updated exposure/gain values to use. */
+ AgcStatus agcStatus;
+ agcStatus.exposureTime = 0.0s;
+ agcStatus.analogueGain = 0.0;
+
+ metadata.get("agc.status", agcStatus);
+ if (agcStatus.exposureTime && agcStatus.analogueGain) {
+ ControlList ctrls(sensorCtrls_);
+ applyAGC(&agcStatus, ctrls);
+ result->controls = std::move(ctrls);
+ setCameraTimeoutValue();
+ }
+ /* Make a note of this as it tells us the HDR status of the first few frames. */
+ hdrStatus_ = agcStatus.hdr;
+
+ /*
+ * Initialise frame counts, and decide how many frames must be hidden or
+ * "mistrusted", which depends on whether this is a startup from cold,
+ * or merely a mode switch in a running system.
+ */
+ frameCount_ = 0;
+ if (firstStart_) {
+ dropFrameCount_ = helper_->hideFramesStartup();
+ mistrustCount_ = helper_->mistrustFramesStartup();
+
+ /*
+ * Query the AGC/AWB for how many frames they may take to
+ * converge sufficiently. Where these numbers are non-zero
+ * we must allow for the frames with bad statistics
+ * (mistrustCount_) that they won't see. But if zero (i.e.
+ * no convergence necessary), no frames need to be dropped.
+ */
+ unsigned int agcConvergenceFrames = 0;
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (agc) {
+ agcConvergenceFrames = agc->getConvergenceFrames();
+ if (agcConvergenceFrames)
+ agcConvergenceFrames += mistrustCount_;
+ }
+
+ unsigned int awbConvergenceFrames = 0;
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (awb) {
+ awbConvergenceFrames = awb->getConvergenceFrames();
+ if (awbConvergenceFrames)
+ awbConvergenceFrames += mistrustCount_;
+ }
+
+ dropFrameCount_ = std::max({ dropFrameCount_, agcConvergenceFrames, awbConvergenceFrames });
+ LOG(IPARPI, Debug) << "Drop " << dropFrameCount_ << " frames on startup";
+ } else {
+ dropFrameCount_ = helper_->hideFramesModeSwitch();
+ mistrustCount_ = helper_->mistrustFramesModeSwitch();
+ }
+
+ result->dropFrameCount = dropFrameCount_;
+
+ firstStart_ = false;
+ lastRunTimestamp_ = 0;
+
+ platformStart(controls, result);
+}
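The drop-count logic above only folds mistrustCount_ into a convergence figure when that figure is non-zero. A minimal sketch of the arithmetic with assumed counts (2 hidden frames, 2 mistrusted, AGC converging in 6 frames, AWB needing none):

#include <algorithm>
#include <cassert>

unsigned int computeDropFrames(unsigned int hideStartup, unsigned int mistrust,
                               unsigned int agcConvergence, unsigned int awbConvergence)
{
    /* Convergence counts, when non-zero, must also cover mistrusted frames. */
    if (agcConvergence)
        agcConvergence += mistrust;
    if (awbConvergence)
        awbConvergence += mistrust;

    return std::max({ hideStartup, agcConvergence, awbConvergence });
}

int main()
{
    /* AGC needs 6 frames to converge but cannot use the 2 mistrusted ones. */
    assert(computeDropFrames(2, 2, 6, 0) == 8);
    /* With no convergence needed, only the sensor's own hide count remains. */
    assert(computeDropFrames(2, 2, 0, 0) == 2);
    return 0;
}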
+
+void IpaBase::mapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(buffer.id,
+ MappedFrameBuffer(&fb, MappedFrameBuffer::MapFlag::ReadWrite));
+ }
+}
+
+void IpaBase::unmapBuffers(const std::vector<unsigned int> &ids)
+{
+ for (unsigned int id : ids) {
+ auto it = buffers_.find(id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(id);
+ }
+}
+
+void IpaBase::prepareIsp(const PrepareParams &params)
+{
+ applyControls(params.requestControls);
+
+ /*
+ * At start-up, or after a mode-switch, we may want to
+ * avoid running the control algos for a few frames in case
+ * they are "unreliable".
+ */
+ int64_t frameTimestamp = params.sensorControls.get(controls::SensorTimestamp).value_or(0);
+ unsigned int ipaContext = params.ipaContext % rpiMetadata_.size();
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+ Span<uint8_t> embeddedBuffer;
+
+ rpiMetadata.clear();
+ fillDeviceStatus(params.sensorControls, ipaContext);
+
+ if (params.buffers.embedded) {
+ /*
+ * Pipeline handler has supplied us with an embedded data buffer,
+ * we must pass it to the CamHelper for parsing.
+ */
+ auto it = buffers_.find(params.buffers.embedded);
+ ASSERT(it != buffers_.end());
+ embeddedBuffer = it->second.planes()[0];
+ }
+
+ /*
+ * AGC wants to know the algorithm status from the time it actioned the
+ * sensor exposure/gain changes. So fetch it from the metadata list
+ * indexed by the IPA cookie returned, and put it in the current frame
+ * metadata.
+ *
+ * Note if the HDR mode has changed, as things like tonemaps may need updating.
+ */
+ AgcStatus agcStatus;
+ bool hdrChange = false;
+ RPiController::Metadata &delayedMetadata = rpiMetadata_[params.delayContext];
+ if (!delayedMetadata.get<AgcStatus>("agc.status", agcStatus)) {
+ rpiMetadata.set("agc.delayed_status", agcStatus);
+ hdrChange = agcStatus.hdr.mode != hdrStatus_.mode;
+ hdrStatus_ = agcStatus.hdr;
+ }
+
+ /*
+ * This may overwrite the DeviceStatus using values from the sensor
+ * metadata, and may also do additional custom processing.
+ */
+ helper_->prepare(embeddedBuffer, rpiMetadata);
+
+ /* Allow a 10% margin on the comparison below. */
+ Duration delta = (frameTimestamp - lastRunTimestamp_) * 1.0ns;
+ if (lastRunTimestamp_ && frameCount_ > dropFrameCount_ &&
+ delta < controllerMinFrameDuration * 0.9 && !hdrChange) {
+ /*
+ * Ensure we merge the previous frame's metadata with the current
+ * frame. This will not overwrite exposure/gain values for the
+ * current frame, or any other bits of metadata that were added
+ * in helper_->prepare().
+ */
+ RPiController::Metadata &lastMetadata =
+ rpiMetadata_[(ipaContext ? ipaContext : rpiMetadata_.size()) - 1];
+ rpiMetadata.mergeCopy(lastMetadata);
+ processPending_ = false;
+ } else {
+ processPending_ = true;
+ lastRunTimestamp_ = frameTimestamp;
+ }
+
+ /*
+ * If the statistics are inline (i.e. already available with the Bayer
+ * frame), call processStats() now before prepare().
+ */
+ if (controller_.getHardwareConfig().statsInline)
+ processStats({ params.buffers, params.ipaContext });
+
+ /* Do we need/want to call prepare? */
+ if (processPending_) {
+ controller_.prepare(&rpiMetadata);
+ /* Actually prepare the ISP parameters for the frame. */
+ platformPrepareIsp(params, rpiMetadata);
+ }
+
+ frameCount_++;
+
+ /* If the statistics are inline the metadata can be returned early. */
+ if (controller_.getHardwareConfig().statsInline)
+ reportMetadata(ipaContext);
+
+ /* Ready to push the input buffer into the ISP. */
+ prepareIspComplete.emit(params.buffers, stitchSwapBuffers_);
+}
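Condensed, the rate limit in prepareIsp() skips a controller run when the frame arrived sooner than 90% of controllerMinFrameDuration after the last run (the real test also requires frameCount_ > dropFrameCount_ and no HDR mode change). A sketch of just the timing term, with assumed timestamps:

#include <cassert>
#include <cstdint>

#include <libcamera/base/utils.h>

using libcamera::utils::Duration;
using namespace std::literals::chrono_literals;

constexpr Duration minInterval = 1.0s / 30.0;

/* Mirrors the skip condition above; timestamps are in nanoseconds. */
bool shouldSkipControllerRun(int64_t lastNs, int64_t nowNs)
{
    Duration delta = (nowNs - lastNs) * 1.0ns;
    return lastNs && delta < minInterval * 0.9;
}

int main()
{
    /* A zero last-run timestamp means the first frame: always run. */
    assert(shouldSkipControllerRun(0, 10000000) == false);
    /* Frames 10 ms apart (~100 fps) are under the ~30 ms threshold: skip. */
    assert(shouldSkipControllerRun(10000000, 20000000) == true);
    return 0;
}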
+
+void IpaBase::processStats(const ProcessParams &params)
+{
+ unsigned int ipaContext = params.ipaContext % rpiMetadata_.size();
+
+ if (processPending_ && frameCount_ >= mistrustCount_) {
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+
+ auto it = buffers_.find(params.buffers.stats);
+ if (it == buffers_.end()) {
+ LOG(IPARPI, Error) << "Could not find stats buffer!";
+ return;
+ }
+
+ RPiController::StatisticsPtr statistics = platformProcessStats(it->second.planes()[0]);
+
+ /* reportMetadata() will pick this up and set the FocusFoM metadata */
+ rpiMetadata.set("focus.status", statistics->focusRegions);
+
+ helper_->process(statistics, rpiMetadata);
+ controller_.process(statistics, &rpiMetadata);
+
+ struct AgcStatus agcStatus;
+ if (rpiMetadata.get("agc.status", agcStatus) == 0) {
+ ControlList ctrls(sensorCtrls_);
+ applyAGC(&agcStatus, ctrls);
+ setDelayedControls.emit(ctrls, ipaContext);
+ setCameraTimeoutValue();
+ }
+ }
+
+ /*
+ * If the statistics are not inline the metadata must be returned now,
+ * before the processStatsComplete signal.
+ */
+ if (!controller_.getHardwareConfig().statsInline)
+ reportMetadata(ipaContext);
+
+ processStatsComplete.emit(params.buffers);
+}
+
+void IpaBase::setMode(const IPACameraSensorInfo &sensorInfo)
+{
+ mode_.bitdepth = sensorInfo.bitsPerPixel;
+ mode_.width = sensorInfo.outputSize.width;
+ mode_.height = sensorInfo.outputSize.height;
+ mode_.sensorWidth = sensorInfo.activeAreaSize.width;
+ mode_.sensorHeight = sensorInfo.activeAreaSize.height;
+ mode_.cropX = sensorInfo.analogCrop.x;
+ mode_.cropY = sensorInfo.analogCrop.y;
+ mode_.pixelRate = sensorInfo.pixelRate;
+
+ /*
+ * Calculate scaling parameters. The scale[XY] factors are determined
+ * by the ratio between the crop rectangle size and the output size.
+ */
+ mode_.scaleX = sensorInfo.analogCrop.width / sensorInfo.outputSize.width;
+ mode_.scaleY = sensorInfo.analogCrop.height / sensorInfo.outputSize.height;
+
+ /*
+ * We're not told by the pipeline handler how scaling is split between
+ * binning and digital scaling. For now, as a heuristic, assume that
+ * downscaling up to 2 is achieved through binning, and that any
+ * additional scaling is achieved through digital scaling.
+ *
+ * \todo Get the pipeline handler to provide the full data
+ */
+ mode_.binX = std::min(2, static_cast<int>(mode_.scaleX));
+ mode_.binY = std::min(2, static_cast<int>(mode_.scaleY));
+
+ /* The noise factor is the square root of the total binning factor. */
+ mode_.noiseFactor = std::sqrt(mode_.binX * mode_.binY);
+
+ /*
+ * Calculate the line length durations as the ratio between the line
+ * length in pixels and the pixel rate.
+ */
+ mode_.minLineLength = sensorInfo.minLineLength * (1.0s / sensorInfo.pixelRate);
+ mode_.maxLineLength = sensorInfo.maxLineLength * (1.0s / sensorInfo.pixelRate);
+
+ /*
+ * Ensure that the maximum pixel processing rate does not exceed the ISP
+ * hardware capabilities. If it does, try adjusting the minimum line
+ * length to compensate if possible.
+ */
+ Duration minPixelTime = controller_.getHardwareConfig().minPixelProcessingTime;
+ Duration pixelTime = mode_.minLineLength / mode_.width;
+ if (minPixelTime && pixelTime < minPixelTime) {
+ Duration adjustedLineLength = minPixelTime * mode_.width;
+ if (adjustedLineLength <= mode_.maxLineLength) {
+ LOG(IPARPI, Info)
+ << "Adjusting mode minimum line length from " << mode_.minLineLength
+ << " to " << adjustedLineLength << " because of ISP constraints.";
+ mode_.minLineLength = adjustedLineLength;
+ } else {
+ LOG(IPARPI, Error)
+ << "Sensor minimum line length of " << pixelTime * mode_.width
+ << " (" << 1us / pixelTime << " MPix/s)"
+ << " is below the minimum allowable ISP limit of "
+ << adjustedLineLength
+ << " (" << 1us / minPixelTime << " MPix/s) ";
+ LOG(IPARPI, Error)
+ << "THIS WILL CAUSE IMAGE CORRUPTION!!! "
+ << "Please update the camera sensor driver to allow more horizontal blanking control.";
+ }
+ }
+
+ /*
+ * Set the frame length limits for the mode to ensure exposure and
+ * framerate calculations are clipped appropriately.
+ */
+ mode_.minFrameLength = sensorInfo.minFrameLength;
+ mode_.maxFrameLength = sensorInfo.maxFrameLength;
+
+ /* Store these for convenience. */
+ mode_.minFrameDuration = mode_.minFrameLength * mode_.minLineLength;
+ mode_.maxFrameDuration = mode_.maxFrameLength * mode_.maxLineLength;
+
+ /*
+ * Some sensors may have different sensitivities in different modes;
+ * the CamHelper will know the correct value.
+ */
+ mode_.sensitivity = helper_->getModeSensitivity(mode_);
+
+ const ControlInfo &gainCtrl = sensorCtrls_.at(V4L2_CID_ANALOGUE_GAIN);
+ const ControlInfo &exposureTimeCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE);
+
+ mode_.minAnalogueGain = helper_->gain(gainCtrl.min().get<int32_t>());
+ mode_.maxAnalogueGain = helper_->gain(gainCtrl.max().get<int32_t>());
+
+ /*
+ * We need to give the helper the min/max frame durations so it can calculate
+ * the correct exposure limits below.
+ */
+ helper_->setCameraMode(mode_);
+
+ /*
+ * Exposure time is calculated based on the limits of the frame
+ * durations.
+ */
+ mode_.minExposureTime = helper_->exposure(exposureTimeCtrl.min().get<int32_t>(),
+ mode_.minLineLength);
+ mode_.maxExposureTime = Duration::max();
+ helper_->getBlanking(mode_.maxExposureTime, mode_.minFrameDuration,
+ mode_.maxFrameDuration);
+}
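To put numbers on the binning heuristic above: an IMX219-style 2x2-binned mode (3280x2464 analogue crop, 1640x1232 output) yields scale factors of 2, bin factors of 2 and a noise factor of 2. A minimal sketch, with the geometry reduced to four assumed numbers:

#include <algorithm>
#include <cassert>
#include <cmath>

struct Scaling { double scaleX, scaleY; int binX, binY; double noiseFactor; };

/* Mirrors the heuristic above: binning accounts for up to 2x of the scaling. */
Scaling computeScaling(double cropW, double cropH, double outW, double outH)
{
    Scaling s;
    s.scaleX = cropW / outW;
    s.scaleY = cropH / outH;
    s.binX = std::min(2, static_cast<int>(s.scaleX));
    s.binY = std::min(2, static_cast<int>(s.scaleY));
    s.noiseFactor = std::sqrt(s.binX * s.binY);
    return s;
}

int main()
{
    /* IMX219-like 2x2 binned mode: 3280x2464 crop to a 1640x1232 output. */
    Scaling s = computeScaling(3280, 2464, 1640, 1232);
    assert(s.binX == 2 && s.binY == 2 && s.noiseFactor == 2.0);
    return 0;
}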
+
+void IpaBase::setCameraTimeoutValue()
+{
+ /*
+ * Take the maximum value of the exposure queue as the camera timeout
+ * value to pass back to the pipeline handler. Only signal if it has changed
+ * from the last set value.
+ */
+ auto max = std::max_element(frameLengths_.begin(), frameLengths_.end());
+
+ if (*max != lastTimeout_) {
+ setCameraTimeout.emit(max->get<std::milli>());
+ lastTimeout_ = *max;
+ }
+}
+
+bool IpaBase::validateSensorControls()
+{
+ static const uint32_t ctrls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_VBLANK,
+ V4L2_CID_HBLANK,
+ };
+
+ for (auto c : ctrls) {
+ if (sensorCtrls_.find(c) == sensorCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find sensor control "
+ << utils::hex(c);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool IpaBase::validateLensControls()
+{
+ if (lensCtrls_.find(V4L2_CID_FOCUS_ABSOLUTE) == lensCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find Lens control V4L2_CID_FOCUS_ABSOLUTE";
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Converting between enums (used in the libcamera API) and the names that
+ * we use to identify different modes. Unfortunately, the conversion tables
+ * must be kept up-to-date by hand.
+ */
+static const std::map<int32_t, std::string> MeteringModeTable = {
+ { controls::MeteringCentreWeighted, "centre-weighted" },
+ { controls::MeteringSpot, "spot" },
+ { controls::MeteringMatrix, "matrix" },
+ { controls::MeteringCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> ConstraintModeTable = {
+ { controls::ConstraintNormal, "normal" },
+ { controls::ConstraintHighlight, "highlight" },
+ { controls::ConstraintShadows, "shadows" },
+ { controls::ConstraintCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> ExposureModeTable = {
+ { controls::ExposureNormal, "normal" },
+ { controls::ExposureShort, "short" },
+ { controls::ExposureLong, "long" },
+ { controls::ExposureCustom, "custom" },
+};
+
+static const std::map<int32_t, std::string> AwbModeTable = {
+ { controls::AwbAuto, "auto" },
+ { controls::AwbIncandescent, "incandescent" },
+ { controls::AwbTungsten, "tungsten" },
+ { controls::AwbFluorescent, "fluorescent" },
+ { controls::AwbIndoor, "indoor" },
+ { controls::AwbDaylight, "daylight" },
+ { controls::AwbCloudy, "cloudy" },
+ { controls::AwbCustom, "custom" },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfMode> AfModeTable = {
+ { controls::AfModeManual, RPiController::AfAlgorithm::AfModeManual },
+ { controls::AfModeAuto, RPiController::AfAlgorithm::AfModeAuto },
+ { controls::AfModeContinuous, RPiController::AfAlgorithm::AfModeContinuous },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfRange> AfRangeTable = {
+ { controls::AfRangeNormal, RPiController::AfAlgorithm::AfRangeNormal },
+ { controls::AfRangeMacro, RPiController::AfAlgorithm::AfRangeMacro },
+ { controls::AfRangeFull, RPiController::AfAlgorithm::AfRangeFull },
+};
+
+static const std::map<int32_t, RPiController::AfAlgorithm::AfPause> AfPauseTable = {
+ { controls::AfPauseImmediate, RPiController::AfAlgorithm::AfPauseImmediate },
+ { controls::AfPauseDeferred, RPiController::AfAlgorithm::AfPauseDeferred },
+ { controls::AfPauseResume, RPiController::AfAlgorithm::AfPauseResume },
+};
+
+static const std::map<int32_t, std::string> HdrModeTable = {
+ { controls::HdrModeOff, "Off" },
+ { controls::HdrModeMultiExposureUnmerged, "MultiExposureUnmerged" },
+ { controls::HdrModeMultiExposure, "MultiExposure" },
+ { controls::HdrModeSingleExposure, "SingleExposure" },
+ { controls::HdrModeNight, "Night" },
+};
+
+void IpaBase::applyControls(const ControlList &controls)
+{
+ using RPiController::AgcAlgorithm;
+ using RPiController::AfAlgorithm;
+ using RPiController::ContrastAlgorithm;
+ using RPiController::DenoiseAlgorithm;
+ using RPiController::HdrAlgorithm;
+
+ /* Clear the return metadata buffer. */
+ libcameraMetadata_.clear();
+
+ /* Because some AF controls are mode-specific, handle AF mode change first. */
+ if (controls.contains(controls::AF_MODE)) {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_MODE - no AF algorithm";
+ }
+
+ int32_t idx = controls.get(controls::AF_MODE).get<int32_t>();
+ auto mode = AfModeTable.find(idx);
+ if (mode == AfModeTable.end()) {
+ LOG(IPARPI, Error) << "AF mode " << idx
+ << " not recognised";
+ } else if (af)
+ af->setMode(mode->second);
+ }
+
+ /* Iterate over controls */
+ for (auto const &ctrl : controls) {
+ LOG(IPARPI, Debug) << "Request ctrl: "
+ << controls::controls.at(ctrl.first)->name()
+ << " = " << ctrl.second.toString();
+
+ switch (ctrl.first) {
+ case controls::AE_ENABLE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_ENABLE - no AGC algorithm";
+ break;
+ }
+
+ if (ctrl.second.get<bool>() == false)
+ agc->disableAuto();
+ else
+ agc->enableAuto();
+
+ libcameraMetadata_.set(controls::AeEnable, ctrl.second.get<bool>());
+ break;
+ }
+
+ case controls::EXPOSURE_TIME: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set EXPOSURE_TIME - no AGC algorithm";
+ break;
+ }
+
+ /* The control provides units of microseconds. */
+ agc->setFixedExposureTime(0, ctrl.second.get<int32_t>() * 1.0us);
+
+ libcameraMetadata_.set(controls::ExposureTime, ctrl.second.get<int32_t>());
+ break;
+ }
+
+ case controls::ANALOGUE_GAIN: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set ANALOGUE_GAIN - no AGC algorithm";
+ break;
+ }
+
+ agc->setFixedAnalogueGain(0, ctrl.second.get<float>());
+
+ libcameraMetadata_.set(controls::AnalogueGain,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::AE_METERING_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_METERING_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (MeteringModeTable.count(idx)) {
+ agc->setMeteringMode(MeteringModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeMeteringMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Metering mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::AE_CONSTRAINT_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_CONSTRAINT_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (ConstraintModeTable.count(idx)) {
+ agc->setConstraintMode(ConstraintModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeConstraintMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Constraint mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::AE_EXPOSURE_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AE_EXPOSURE_MODE - no AGC algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (ExposureModeTable.count(idx)) {
+ agc->setExposureMode(ExposureModeTable.at(idx));
+ libcameraMetadata_.set(controls::AeExposureMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "Exposure mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::EXPOSURE_VALUE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set EXPOSURE_VALUE - no AGC algorithm";
+ break;
+ }
+
+ /*
+ * The SetEv() function takes in a direct exposure multiplier.
+ * So convert to 2^EV
+ */
+ double ev = pow(2.0, ctrl.second.get<float>());
+ agc->setEv(0, ev);
+ libcameraMetadata_.set(controls::ExposureValue,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::AE_FLICKER_MODE: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AeFlickerMode - no AGC algorithm";
+ break;
+ }
+
+ int32_t mode = ctrl.second.get<int32_t>();
+ bool modeValid = true;
+
+ switch (mode) {
+ case controls::FlickerOff:
+ agc->setFlickerPeriod(0us);
+
+ break;
+
+ case controls::FlickerManual:
+ agc->setFlickerPeriod(flickerState_.manualPeriod);
+
+ break;
+
+ default:
+ LOG(IPARPI, Error) << "Flicker mode " << mode << " is not supported";
+ modeValid = false;
+
+ break;
+ }
+
+ if (modeValid)
+ flickerState_.mode = mode;
+
+ break;
+ }
+
+ case controls::AE_FLICKER_PERIOD: {
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning)
+ << "Could not set AeFlickerPeriod - no AGC algorithm";
+ break;
+ }
+
+ uint32_t manualPeriod = ctrl.second.get<int32_t>();
+ flickerState_.manualPeriod = manualPeriod * 1.0us;
+
+ /*
+ * We note that it makes no difference if the mode gets set to "manual"
+ * first, and the period updated after, or vice versa.
+ */
+ if (flickerState_.mode == controls::FlickerManual)
+ agc->setFlickerPeriod(flickerState_.manualPeriod);
+
+ break;
+ }
+
+ case controls::AWB_ENABLE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set AWB_ENABLE - no AWB algorithm";
+ break;
+ }
+
+ if (ctrl.second.get<bool>() == false)
+ awb->disableAuto();
+ else
+ awb->enableAuto();
+
+ libcameraMetadata_.set(controls::AwbEnable,
+ ctrl.second.get<bool>());
+ break;
+ }
+
+ case controls::AWB_MODE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set AWB_MODE - no AWB algorithm";
+ break;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ if (AwbModeTable.count(idx)) {
+ awb->setMode(AwbModeTable.at(idx));
+ libcameraMetadata_.set(controls::AwbMode, idx);
+ } else {
+ LOG(IPARPI, Error) << "AWB mode " << idx
+ << " not recognised";
+ }
+ break;
+ }
+
+ case controls::COLOUR_GAINS: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ auto gains = ctrl.second.get<Span<const float>>();
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set COLOUR_GAINS - no AWB algorithm";
+ break;
+ }
+
+ awb->setManualGains(gains[0], gains[1]);
+ if (gains[0] != 0.0f && gains[1] != 0.0f)
+ /* A gain of 0.0f will switch back to auto mode. */
+ libcameraMetadata_.set(controls::ColourGains,
+ { gains[0], gains[1] });
+ break;
+ }
+
+ case controls::COLOUR_TEMPERATURE: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ auto temperatureK = ctrl.second.get<int32_t>();
+ RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>(
+ controller_.getAlgorithm("awb"));
+ if (!awb) {
+ LOG(IPARPI, Warning)
+ << "Could not set COLOUR_TEMPERATURE - no AWB algorithm";
+ break;
+ }
+
+ awb->setColourTemperature(temperatureK);
+ /* This metadata will get reported back automatically. */
+ break;
+ }
+
+ case controls::BRIGHTNESS: {
+ RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
+ controller_.getAlgorithm("contrast"));
+ if (!contrast) {
+ LOG(IPARPI, Warning)
+ << "Could not set BRIGHTNESS - no contrast algorithm";
+ break;
+ }
+
+ contrast->setBrightness(ctrl.second.get<float>() * 65536);
+ libcameraMetadata_.set(controls::Brightness,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::CONTRAST: {
+ RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>(
+ controller_.getAlgorithm("contrast"));
+ if (!contrast) {
+ LOG(IPARPI, Warning)
+ << "Could not set CONTRAST - no contrast algorithm";
+ break;
+ }
+
+ contrast->setContrast(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Contrast,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::SATURATION: {
+ /* Silently ignore this control for a mono sensor. */
+ if (monoSensor_)
+ break;
+
+ RPiController::CcmAlgorithm *ccm = dynamic_cast<RPiController::CcmAlgorithm *>(
+ controller_.getAlgorithm("ccm"));
+ if (!ccm) {
+ LOG(IPARPI, Warning)
+ << "Could not set SATURATION - no ccm algorithm";
+ break;
+ }
+
+ ccm->setSaturation(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Saturation,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::SHARPNESS: {
+ RPiController::SharpenAlgorithm *sharpen = dynamic_cast<RPiController::SharpenAlgorithm *>(
+ controller_.getAlgorithm("sharpen"));
+ if (!sharpen) {
+ LOG(IPARPI, Warning)
+ << "Could not set SHARPNESS - no sharpen algorithm";
+ break;
+ }
+
+ sharpen->setStrength(ctrl.second.get<float>());
+ libcameraMetadata_.set(controls::Sharpness,
+ ctrl.second.get<float>());
+ break;
+ }
+
+ case controls::rpi::SCALER_CROPS:
+ case controls::SCALER_CROP: {
+ /* We do nothing with this, but should avoid the warning below. */
+ break;
+ }
+
+ case controls::FRAME_DURATION_LIMITS: {
+ auto frameDurations = ctrl.second.get<Span<const int64_t>>();
+ applyFrameDurations(frameDurations[0] * 1.0us, frameDurations[1] * 1.0us);
+ break;
+ }
+
+ case controls::draft::NOISE_REDUCTION_MODE:
+ /* Handled below in handleControls() */
+ libcameraMetadata_.set(controls::draft::NoiseReductionMode,
+ ctrl.second.get<int32_t>());
+ break;
+
+ case controls::AF_MODE:
+ break; /* We already handled this one above */
+
+ case controls::AF_RANGE: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_RANGE - no focus algorithm";
+ break;
+ }
+
+ auto range = AfRangeTable.find(ctrl.second.get<int32_t>());
+ if (range == AfRangeTable.end()) {
+ LOG(IPARPI, Error) << "AF range " << ctrl.second.get<int32_t>()
+ << " not recognised";
+ break;
+ }
+ af->setRange(range->second);
+ break;
+ }
+
+ case controls::AF_SPEED: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_SPEED - no focus algorithm";
+ break;
+ }
+
+ AfAlgorithm::AfSpeed speed = ctrl.second.get<int32_t>() == controls::AfSpeedFast ?
+ AfAlgorithm::AfSpeedFast : AfAlgorithm::AfSpeedNormal;
+ af->setSpeed(speed);
+ break;
+ }
+
+ case controls::AF_METERING: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_METERING - no AF algorithm";
+ break;
+ }
+ af->setMetering(ctrl.second.get<int32_t>() == controls::AfMeteringWindows);
+ break;
+ }
+
+ case controls::AF_WINDOWS: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_WINDOWS - no AF algorithm";
+ break;
+ }
+ af->setWindows(ctrl.second.get<Span<const Rectangle>>());
+ break;
+ }
+
+ case controls::AF_PAUSE: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af || af->getMode() != AfAlgorithm::AfModeContinuous) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_PAUSE - no AF algorithm or not Continuous";
+ break;
+ }
+ auto pause = AfPauseTable.find(ctrl.second.get<int32_t>());
+ if (pause == AfPauseTable.end()) {
+ LOG(IPARPI, Error) << "AF pause " << ctrl.second.get<int32_t>()
+ << " not recognised";
+ break;
+ }
+ af->pause(pause->second);
+ break;
+ }
+
+ case controls::AF_TRIGGER: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (!af || af->getMode() != AfAlgorithm::AfModeAuto) {
+ LOG(IPARPI, Warning)
+ << "Could not set AF_TRIGGER - no AF algorithm or not Auto";
+ break;
+ } else {
+ if (ctrl.second.get<int32_t>() == controls::AfTriggerStart)
+ af->triggerScan();
+ else
+ af->cancelScan();
+ }
+ break;
+ }
+
+ case controls::LENS_POSITION: {
+ AfAlgorithm *af = dynamic_cast<AfAlgorithm *>(controller_.getAlgorithm("af"));
+ if (af) {
+ int32_t hwpos;
+ if (af->setLensPosition(ctrl.second.get<float>(), &hwpos)) {
+ ControlList lensCtrls(lensCtrls_);
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
+ setLensControls.emit(lensCtrls);
+ }
+ } else {
+ LOG(IPARPI, Warning)
+ << "Could not set LENS_POSITION - no AF algorithm";
+ }
+ break;
+ }
+
+ case controls::HDR_MODE: {
+ HdrAlgorithm *hdr = dynamic_cast<HdrAlgorithm *>(controller_.getAlgorithm("hdr"));
+ if (!hdr) {
+ LOG(IPARPI, Warning) << "No HDR algorithm available";
+ break;
+ }
+
+ auto mode = HdrModeTable.find(ctrl.second.get<int32_t>());
+ if (mode == HdrModeTable.end()) {
+ LOG(IPARPI, Warning) << "Unrecognised HDR mode";
+ break;
+ }
+
+ AgcAlgorithm *agc = dynamic_cast<AgcAlgorithm *>(controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Warning) << "HDR requires an AGC algorithm";
+ break;
+ }
+
+ if (hdr->setMode(mode->second) == 0) {
+ agc->setActiveChannels(hdr->getChannels());
+
+ /* We also disable adaptive contrast enhancement if HDR is running. */
+ ContrastAlgorithm *contrast =
+ dynamic_cast<ContrastAlgorithm *>(controller_.getAlgorithm("contrast"));
+ if (contrast) {
+ if (mode->second == "Off")
+ contrast->restoreCe();
+ else
+ contrast->enableCe(false);
+ }
+
+ DenoiseAlgorithm *denoise =
+ dynamic_cast<DenoiseAlgorithm *>(controller_.getAlgorithm("denoise"));
+ if (denoise) {
+ /* \todo - make the HDR mode say what denoise it wants? */
+ if (mode->second == "Night")
+ denoise->setConfig("night");
+ else if (mode->second == "SingleExposure")
+ denoise->setConfig("hdr");
+ /* MultiExposure doesn't need extra denoise. */
+ else
+ denoise->setConfig("normal");
+ }
+ } else
+ LOG(IPARPI, Warning)
+ << "HDR mode " << mode->second << " not supported";
+
+ break;
+ }
+
+ case controls::rpi::STATS_OUTPUT_ENABLE:
+ statsMetadataOutput_ = ctrl.second.get<bool>();
+ break;
+
+ default:
+ LOG(IPARPI, Warning)
+ << "Ctrl " << controls::controls.at(ctrl.first)->name()
+ << " is not handled.";
+ break;
+ }
+ }
+
+ /* Give derived classes a chance to examine the new controls. */
+ handleControls(controls);
+}
+
+void IpaBase::fillDeviceStatus(const ControlList &sensorControls, unsigned int ipaContext)
+{
+ DeviceStatus deviceStatus = {};
+
+ int32_t exposureLines = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ int32_t gainCode = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+ int32_t vblank = sensorControls.get(V4L2_CID_VBLANK).get<int32_t>();
+ int32_t hblank = sensorControls.get(V4L2_CID_HBLANK).get<int32_t>();
+
+ deviceStatus.lineLength = helper_->hblankToLineLength(hblank);
+ deviceStatus.exposureTime = helper_->exposure(exposureLines, deviceStatus.lineLength);
+ deviceStatus.analogueGain = helper_->gain(gainCode);
+ deviceStatus.frameLength = mode_.height + vblank;
+
+ RPiController::AfAlgorithm *af = dynamic_cast<RPiController::AfAlgorithm *>(
+ controller_.getAlgorithm("af"));
+ if (af)
+ deviceStatus.lensPosition = af->getLensPosition();
+
+ LOG(IPARPI, Debug) << "Metadata - " << deviceStatus;
+
+ rpiMetadata_[ipaContext].set("device.status", deviceStatus);
+}
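The DeviceStatus fields above are straightforward unit conversions. With assumed values (a 1080-line mode, 200 lines of vblank, a 20 us line and 500 exposure lines) the frame length is 1280 lines and the exposure time 10 ms. The sketch below mirrors the arithmetic, with the CamHelper conversions reduced to a fixed line duration:

#include <cassert>
#include <cstdint>

/* All times in microseconds; a stand-in for the CamHelper conversions. */
struct DeviceStatusSketch {
    double lineLengthUs;
    double exposureTimeUs;
    uint32_t frameLength; /* in lines, as above */
};

DeviceStatusSketch fillStatus(int32_t exposureLines, int32_t vblank,
                              uint32_t modeHeight, double lineLengthUs)
{
    DeviceStatusSketch s;
    s.lineLengthUs = lineLengthUs;
    s.exposureTimeUs = exposureLines * lineLengthUs;
    s.frameLength = modeHeight + vblank;
    return s;
}

int main()
{
    DeviceStatusSketch s = fillStatus(500, 200, 1080, 20.0);
    assert(s.exposureTimeUs == 10000.0 && s.frameLength == 1280);
    return 0;
}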
+
+void IpaBase::reportMetadata(unsigned int ipaContext)
+{
+ RPiController::Metadata &rpiMetadata = rpiMetadata_[ipaContext];
+ std::unique_lock<RPiController::Metadata> lock(rpiMetadata);
+
+ /*
+ * Certain information about the current frame and how it will be
+ * processed can be extracted and placed into the libcamera metadata
+ * buffer, where an application could query it.
+ */
+ DeviceStatus *deviceStatus = rpiMetadata.getLocked<DeviceStatus>("device.status");
+ if (deviceStatus) {
+ libcameraMetadata_.set(controls::ExposureTime,
+ deviceStatus->exposureTime.get<std::micro>());
+ libcameraMetadata_.set(controls::AnalogueGain, deviceStatus->analogueGain);
+ libcameraMetadata_.set(controls::FrameDuration,
+ helper_->exposure(deviceStatus->frameLength, deviceStatus->lineLength).get<std::micro>());
+ if (deviceStatus->sensorTemperature)
+ libcameraMetadata_.set(controls::SensorTemperature, *deviceStatus->sensorTemperature);
+ if (deviceStatus->lensPosition)
+ libcameraMetadata_.set(controls::LensPosition, *deviceStatus->lensPosition);
+ }
+
+ AgcPrepareStatus *agcPrepareStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status");
+ if (agcPrepareStatus) {
+ libcameraMetadata_.set(controls::AeLocked, agcPrepareStatus->locked);
+ libcameraMetadata_.set(controls::DigitalGain, agcPrepareStatus->digitalGain);
+ }
+
+ LuxStatus *luxStatus = rpiMetadata.getLocked<LuxStatus>("lux.status");
+ if (luxStatus)
+ libcameraMetadata_.set(controls::Lux, luxStatus->lux);
+
+ AwbStatus *awbStatus = rpiMetadata.getLocked<AwbStatus>("awb.status");
+ if (awbStatus) {
+ libcameraMetadata_.set(controls::ColourGains, { static_cast<float>(awbStatus->gainR),
+ static_cast<float>(awbStatus->gainB) });
+ libcameraMetadata_.set(controls::ColourTemperature, awbStatus->temperatureK);
+ }
+
+ BlackLevelStatus *blackLevelStatus = rpiMetadata.getLocked<BlackLevelStatus>("black_level.status");
+ if (blackLevelStatus)
+ libcameraMetadata_.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelStatus->blackLevelR),
+ static_cast<int32_t>(blackLevelStatus->blackLevelG),
+ static_cast<int32_t>(blackLevelStatus->blackLevelG),
+ static_cast<int32_t>(blackLevelStatus->blackLevelB) });
+
+ RPiController::FocusRegions *focusStatus =
+ rpiMetadata.getLocked<RPiController::FocusRegions>("focus.status");
+ if (focusStatus) {
+ /*
+ * Calculate the average FoM over the central (symmetric) positions
+ * to give an overall scene FoM. This can change later if it is
+ * not deemed suitable.
+ */
+ libcamera::Size size = focusStatus->size();
+ unsigned rows = size.height;
+ unsigned cols = size.width;
+
+ uint64_t sum = 0;
+ unsigned int numRegions = 0;
+ for (unsigned r = rows / 3; r < rows - rows / 3; ++r) {
+ for (unsigned c = cols / 4; c < cols - cols / 4; ++c) {
+ sum += focusStatus->get({ (int)c, (int)r }).val;
+ numRegions++;
+ }
+ }
+
+ uint32_t focusFoM = sum / numRegions;
+ libcameraMetadata_.set(controls::FocusFoM, focusFoM);
+ }
+
+ CcmStatus *ccmStatus = rpiMetadata.getLocked<CcmStatus>("ccm.status");
+ if (ccmStatus) {
+ float m[9];
+ for (unsigned int i = 0; i < 9; i++)
+ m[i] = ccmStatus->matrix[i];
+ libcameraMetadata_.set(controls::ColourCorrectionMatrix, m);
+ }
+
+ const AfStatus *afStatus = rpiMetadata.getLocked<AfStatus>("af.status");
+ if (afStatus) {
+ int32_t s, p;
+ switch (afStatus->state) {
+ case AfState::Scanning:
+ s = controls::AfStateScanning;
+ break;
+ case AfState::Focused:
+ s = controls::AfStateFocused;
+ break;
+ case AfState::Failed:
+ s = controls::AfStateFailed;
+ break;
+ default:
+ s = controls::AfStateIdle;
+ }
+ switch (afStatus->pauseState) {
+ case AfPauseState::Pausing:
+ p = controls::AfPauseStatePausing;
+ break;
+ case AfPauseState::Paused:
+ p = controls::AfPauseStatePaused;
+ break;
+ default:
+ p = controls::AfPauseStateRunning;
+ }
+ libcameraMetadata_.set(controls::AfState, s);
+ libcameraMetadata_.set(controls::AfPauseState, p);
+ }
+
+ /*
+ * The HDR algorithm sets the HDR channel into the agc.status at the time that those
+ * AGC parameters were calculated several frames ago, so it comes back to us now in
+ * the delayed_status. If this frame is too soon after a mode switch for the
+ * delayed_status to be available, we use the HDR status that came out of the
+ * switchMode call.
+ */
+ const AgcStatus *agcStatus = rpiMetadata.getLocked<AgcStatus>("agc.delayed_status");
+ const HdrStatus &hdrStatus = agcStatus ? agcStatus->hdr : hdrStatus_;
+ if (!hdrStatus.mode.empty() && hdrStatus.mode != "Off") {
+ int32_t hdrMode = controls::HdrModeOff;
+ for (auto const &[mode, name] : HdrModeTable) {
+ if (hdrStatus.mode == name) {
+ hdrMode = mode;
+ break;
+ }
+ }
+ libcameraMetadata_.set(controls::HdrMode, hdrMode);
+
+ if (hdrStatus.channel == "short")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelShort);
+ else if (hdrStatus.channel == "long")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelLong);
+ else if (hdrStatus.channel == "medium")
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelMedium);
+ else
+ libcameraMetadata_.set(controls::HdrChannel, controls::HdrChannelNone);
+ }
+
+ metadataReady.emit(libcameraMetadata_);
+}
+
+void IpaBase::applyFrameDurations(Duration minFrameDuration, Duration maxFrameDuration)
+{
+ /*
+	 * These frame durations will only take effect once the AGC next recalculates.
+	 * The values may also be clamped by the sensor mode capabilities.
+ */
+ minFrameDuration_ = minFrameDuration ? minFrameDuration : defaultMinFrameDuration;
+ maxFrameDuration_ = maxFrameDuration ? maxFrameDuration : defaultMaxFrameDuration;
+ minFrameDuration_ = std::clamp(minFrameDuration_,
+ mode_.minFrameDuration, mode_.maxFrameDuration);
+ maxFrameDuration_ = std::clamp(maxFrameDuration_,
+ mode_.minFrameDuration, mode_.maxFrameDuration);
+ maxFrameDuration_ = std::max(maxFrameDuration_, minFrameDuration_);
+
+ /* Return the validated limits via metadata. */
+ libcameraMetadata_.set(controls::FrameDurationLimits,
+ { static_cast<int64_t>(minFrameDuration_.get<std::micro>()),
+ static_cast<int64_t>(maxFrameDuration_.get<std::micro>()) });
+
+ /*
+ * Calculate the maximum exposure time possible for the AGC to use.
+ * getBlanking() will update maxExposureTime with the largest exposure
+ * value possible.
+ */
+ Duration maxExposureTime = Duration::max();
+ helper_->getBlanking(maxExposureTime, minFrameDuration_, maxFrameDuration_);
+
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ agc->setMaxExposureTime(maxExposureTime);
+}
+
+void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls)
+{
+ const int32_t minGainCode = helper_->gainCode(mode_.minAnalogueGain);
+ const int32_t maxGainCode = helper_->gainCode(mode_.maxAnalogueGain);
+ int32_t gainCode = helper_->gainCode(agcStatus->analogueGain);
+
+ /*
+ * Ensure anything larger than the max gain code will not be passed to
+ * DelayedControls. The AGC will correctly handle a lower gain returned
+ * by the sensor, provided it knows the actual gain used.
+ */
+ gainCode = std::clamp<int32_t>(gainCode, minGainCode, maxGainCode);
+
+ /* getBlanking might clip exposure time to the fps limits. */
+ Duration exposure = agcStatus->exposureTime;
+ auto [vblank, hblank] = helper_->getBlanking(exposure, minFrameDuration_, maxFrameDuration_);
+ int32_t exposureLines = helper_->exposureLines(exposure,
+ helper_->hblankToLineLength(hblank));
+
+ LOG(IPARPI, Debug) << "Applying AGC Exposure: " << exposure
+ << " (Exposure lines: " << exposureLines << ", AGC requested "
+ << agcStatus->exposureTime << ") Gain: "
+ << agcStatus->analogueGain << " (Gain Code: "
+ << gainCode << ")";
+
+ ctrls.set(V4L2_CID_VBLANK, static_cast<int32_t>(vblank));
+ ctrls.set(V4L2_CID_EXPOSURE, exposureLines);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN, gainCode);
+
+ /*
+ * At present, there is no way of knowing if a control is read-only.
+ * As a workaround, assume that if the minimum and maximum values of
+ * the V4L2_CID_HBLANK control are the same, it implies the control
+ * is read-only. This seems to be the case for all the cameras our IPA
+ * works with.
+ *
+ * \todo The control API ought to have a flag to specify if a control
+ * is read-only which could be used below.
+ */
+ if (mode_.minLineLength != mode_.maxLineLength)
+ ctrls.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblank));
+
+ /*
+	 * Store the frame durations in a circular queue holding up to
+	 * FrameLengthsQueueSize elements. This will be used to advertise a camera
+	 * timeout value to the pipeline handler.
+ */
+ frameLengths_.pop_front();
+ frameLengths_.push_back(helper_->exposure(vblank + mode_.height,
+ helper_->hblankToLineLength(hblank)));
+}
+
+} /* namespace ipa::RPi */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rpi/common/ipa_base.h b/src/ipa/rpi/common/ipa_base.h
new file mode 100644
index 00000000..1a811beb
--- /dev/null
+++ b/src/ipa/rpi/common/ipa_base.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * Raspberry Pi IPA base class
+ */
+#pragma once
+
+#include <array>
+#include <deque>
+#include <map>
+#include <stdint.h>
+
+#include <libcamera/base/utils.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "cam_helper/cam_helper.h"
+#include "controller/agc_status.h"
+#include "controller/camera_mode.h"
+#include "controller/controller.h"
+#include "controller/hdr_status.h"
+#include "controller/metadata.h"
+
+namespace libcamera {
+
+namespace ipa::RPi {
+
+class IpaBase : public IPARPiInterface
+{
+public:
+ IpaBase();
+ ~IpaBase();
+
+ int32_t init(const IPASettings &settings, const InitParams &params, InitResult *result) override;
+ int32_t configure(const IPACameraSensorInfo &sensorInfo, const ConfigParams &params,
+ ConfigResult *result) override;
+
+ void start(const ControlList &controls, StartResult *result) override;
+ void stop() override {}
+
+ void mapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void unmapBuffers(const std::vector<unsigned int> &ids) override;
+
+ void prepareIsp(const PrepareParams &params) override;
+ void processStats(const ProcessParams &params) override;
+
+protected:
+ bool monoSensor() const
+ {
+ return monoSensor_;
+ }
+
+ /* Raspberry Pi controller specific defines. */
+ std::unique_ptr<RPiController::CamHelper> helper_;
+ RPiController::Controller controller_;
+
+ ControlInfoMap sensorCtrls_;
+ ControlInfoMap lensCtrls_;
+
+ /* Camera sensor params. */
+ CameraMode mode_;
+
+	/* Track the frame durations over the last FrameLengthsQueueSize frames. */
+ std::deque<utils::Duration> frameLengths_;
+ utils::Duration lastTimeout_;
+ ControlList libcameraMetadata_;
+ bool statsMetadataOutput_;
+
+ /* Remember the HDR status after a mode switch. */
+ HdrStatus hdrStatus_;
+
+ /* Whether the stitch block (if available) needs to swap buffers. */
+ bool stitchSwapBuffers_;
+
+private:
+ /* Number of metadata objects available in the context list. */
+ static constexpr unsigned int numMetadataContexts = 16;
+
+ virtual int32_t platformInit(const InitParams &params, InitResult *result) = 0;
+ virtual int32_t platformStart(const ControlList &controls, StartResult *result) = 0;
+ virtual int32_t platformConfigure(const ConfigParams &params, ConfigResult *result) = 0;
+
+ virtual void platformPrepareIsp(const PrepareParams &params,
+ RPiController::Metadata &rpiMetadata) = 0;
+ virtual RPiController::StatisticsPtr platformProcessStats(Span<uint8_t> mem) = 0;
+
+ void setMode(const IPACameraSensorInfo &sensorInfo);
+ void setCameraTimeoutValue();
+ bool validateSensorControls();
+ bool validateLensControls();
+ void applyControls(const ControlList &controls);
+ virtual void handleControls(const ControlList &controls) = 0;
+ void fillDeviceStatus(const ControlList &sensorControls, unsigned int ipaContext);
+ void reportMetadata(unsigned int ipaContext);
+ void applyFrameDurations(utils::Duration minFrameDuration, utils::Duration maxFrameDuration);
+ void applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls);
+
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
+
+ bool lensPresent_;
+ bool monoSensor_;
+
+ std::array<RPiController::Metadata, numMetadataContexts> rpiMetadata_;
+
+ /*
+ * We count frames to decide if the frame must be hidden (e.g. from
+ * display) or mistrusted (i.e. not given to the control algos).
+ */
+ uint64_t frameCount_;
+
+ /* How many frames we should avoid running control algos on. */
+ unsigned int mistrustCount_;
+
+ /* Number of frames that need to be dropped on startup. */
+ unsigned int dropFrameCount_;
+
+ /* Frame timestamp for the last run of the controller. */
+ uint64_t lastRunTimestamp_;
+
+ /* Do we run a Controller::process() for this frame? */
+ bool processPending_;
+
+ /* Distinguish the first camera start from others. */
+ bool firstStart_;
+
+ /* Frame duration (1/fps) limits. */
+ utils::Duration minFrameDuration_;
+ utils::Duration maxFrameDuration_;
+
+ /* The current state of flicker avoidance. */
+ struct FlickerState {
+ int32_t mode;
+ utils::Duration manualPeriod;
+ } flickerState_;
+};
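+
+/*
+ * For illustration, a platform implementation derives from IpaBase and fills
+ * in the pure virtual hooks; the outline below is a sketch only ("IpaVc4" is
+ * assumed, not defined here):
+ *
+ *   class IpaVc4 final : public IpaBase
+ *   {
+ *   private:
+ *       int32_t platformInit(const InitParams &params, InitResult *result) override;
+ *       int32_t platformStart(const ControlList &controls, StartResult *result) override;
+ *       int32_t platformConfigure(const ConfigParams &params, ConfigResult *result) override;
+ *       void platformPrepareIsp(const PrepareParams &params,
+ *                               RPiController::Metadata &rpiMetadata) override;
+ *       RPiController::StatisticsPtr platformProcessStats(Span<uint8_t> mem) override;
+ *       void handleControls(const ControlList &controls) override;
+ *   };
+ */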
+
+} /* namespace ipa::RPi */
+
+} /* namespace libcamera */
diff --git a/src/ipa/rpi/common/meson.build b/src/ipa/rpi/common/meson.build
new file mode 100644
index 00000000..73d2ee73
--- /dev/null
+++ b/src/ipa/rpi/common/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_common_sources = files([
+ 'ipa_base.cpp',
+])
+
+rpi_ipa_common_includes = [
+ include_directories('..'),
+]
+
+rpi_ipa_common_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_common_lib = static_library('rpi_ipa_common', rpi_ipa_common_sources,
+ include_directories : rpi_ipa_common_includes,
+ dependencies : rpi_ipa_common_deps)
diff --git a/src/ipa/rpi/controller/af_algorithm.h b/src/ipa/rpi/controller/af_algorithm.h
new file mode 100644
index 00000000..ad9b5754
--- /dev/null
+++ b/src/ipa/rpi/controller/af_algorithm.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * auto focus algorithm interface
+ */
+#pragma once
+
+#include <optional>
+
+#include <libcamera/base/span.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AfAlgorithm : public Algorithm
+{
+public:
+ AfAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+
+ /*
+ * An autofocus algorithm should provide the following calls.
+ *
+ * Where a ControlList combines a change of AfMode with other AF
+ * controls, setMode() should be called first, to ensure the
+ * algorithm will be in the correct state to handle controls.
+ *
+ * setLensPosition() returns true if the mode was AfModeManual and
+ * the lens position has changed, otherwise returns false. When it
+ * returns true, hwpos should be sent immediately to the lens driver.
+ *
+ * getMode() is provided mainly for validating controls.
+ * getLensPosition() is provided for populating DeviceStatus.
+ */
+
+ enum AfRange { AfRangeNormal = 0,
+ AfRangeMacro,
+ AfRangeFull,
+ AfRangeMax };
+
+ enum AfSpeed { AfSpeedNormal = 0,
+ AfSpeedFast,
+ AfSpeedMax };
+
+ enum AfMode { AfModeManual = 0,
+ AfModeAuto,
+ AfModeContinuous };
+
+ enum AfPause { AfPauseImmediate = 0,
+ AfPauseDeferred,
+ AfPauseResume };
+
+ virtual void setRange([[maybe_unused]] AfRange range)
+ {
+ }
+ virtual void setSpeed([[maybe_unused]] AfSpeed speed)
+ {
+ }
+	virtual void setMetering([[maybe_unused]] bool useWindows)
+ {
+ }
+ virtual void setWindows([[maybe_unused]] libcamera::Span<libcamera::Rectangle const> const &wins)
+ {
+ }
+ virtual void setMode(AfMode mode) = 0;
+ virtual AfMode getMode() const = 0;
+ virtual bool setLensPosition(double dioptres, int32_t *hwpos) = 0;
+ virtual std::optional<double> getLensPosition() const = 0;
+ virtual void triggerScan() = 0;
+ virtual void cancelScan() = 0;
+ virtual void pause(AfPause pause) = 0;
+};
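+
+/*
+ * For illustration only, applying a manual lens position might look like the
+ * sketch below ("lensCtrls" is an assumed V4L2 lens ControlList, and error
+ * handling is omitted):
+ *
+ *   af->setMode(AfAlgorithm::AfModeManual);
+ *   int32_t hwpos;
+ *   if (af->setLensPosition(2.0, &hwpos))   // 2 dioptres, i.e. focus at 0.5m
+ *       lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos);
+ */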
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/af_status.h b/src/ipa/rpi/controller/af_status.h
new file mode 100644
index 00000000..c1487cc4
--- /dev/null
+++ b/src/ipa/rpi/controller/af_status.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * AF control algorithm status
+ */
+#pragma once
+
+#include <optional>
+
+/*
+ * The AF algorithm should post the following structure into the image's
+ * "af.status" metadata. lensSetting should control the lens.
+ */
+
+enum class AfState {
+ Idle = 0,
+ Scanning,
+ Focused,
+ Failed
+};
+
+enum class AfPauseState {
+ Running = 0,
+ Pausing,
+ Paused
+};
+
+struct AfStatus {
+ /* state for reporting */
+ AfState state;
+ AfPauseState pauseState;
+ /* lensSetting should be sent to the lens driver, when valid */
+ std::optional<int> lensSetting;
+};
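+
+/*
+ * For example (an illustrative sketch only), an AF implementation might
+ * publish its state from prepare() as follows:
+ *
+ *   AfStatus status;
+ *   status.state = AfState::Focused;
+ *   status.pauseState = AfPauseState::Running;
+ *   status.lensSetting = hwpos;   // omit when no lens movement is needed
+ *   imageMetadata->set("af.status", status);
+ */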
diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h
new file mode 100644
index 00000000..c9782857
--- /dev/null
+++ b/src/ipa/rpi/controller/agc_algorithm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm interface
+ */
+#pragma once
+
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AgcAlgorithm : public Algorithm
+{
+public:
+ AgcAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AGC algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual std::vector<double> const &getWeights() const = 0;
+ virtual void setEv(unsigned int channel, double ev) = 0;
+ virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0;
+ virtual void setFixedExposureTime(unsigned int channel,
+ libcamera::utils::Duration fixedExposureTime) = 0;
+ virtual void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) = 0;
+ virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0;
+ virtual void setMeteringMode(std::string const &meteringModeName) = 0;
+ virtual void setExposureMode(std::string const &exposureModeName) = 0;
+	virtual void setConstraintMode(std::string const &constraintModeName) = 0;
+ virtual void enableAuto() = 0;
+ virtual void disableAuto() = 0;
+ virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h
new file mode 100644
index 00000000..9308b156
--- /dev/null
+++ b/src/ipa/rpi/controller/agc_status.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm status
+ */
+#pragma once
+
+#include <string>
+
+#include <libcamera/base/utils.h>
+
+#include "hdr_status.h"
+
+/*
+ * The AGC algorithm process method should post an AgcStatus into the image
+ * metadata under the tag "agc.status".
+ * The AGC algorithm prepare method should post an AgcPrepareStatus instead
+ * under "agc.prepare_status".
+ */
+
+/*
+ * Note: totalExposureValue will be reported as zero until the algorithm has
+ * seen statistics and calculated meaningful values. The contents should be
+ * ignored until then.
+ */
+
+struct AgcStatus {
+ libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */
+ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */
+ libcamera::utils::Duration exposureTime;
+ double analogueGain;
+ std::string exposureMode;
+ std::string constraintMode;
+ std::string meteringMode;
+ double ev;
+ libcamera::utils::Duration flickerPeriod;
+ int floatingRegionEnable;
+ libcamera::utils::Duration fixedExposureTime;
+ double fixedAnalogueGain;
+ unsigned int channel;
+ HdrStatus hdr;
+};
+
+struct AgcPrepareStatus {
+ double digitalGain;
+ int locked;
+};
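+
+/*
+ * Consumers typically retrieve the status with Metadata::get(), e.g. (an
+ * illustrative sketch; "totalExposure" is an assumed local variable):
+ *
+ *   AgcStatus agcStatus;
+ *   if (imageMetadata->get("agc.status", agcStatus) == 0)
+ *       totalExposure = agcStatus.totalExposureValue;
+ */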
diff --git a/src/ipa/rpi/controller/algorithm.cpp b/src/ipa/rpi/controller/algorithm.cpp
new file mode 100644
index 00000000..beed47a1
--- /dev/null
+++ b/src/ipa/rpi/controller/algorithm.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP control algorithms
+ */
+
+#include "algorithm.h"
+
+using namespace RPiController;
+
+int Algorithm::read([[maybe_unused]] const libcamera::YamlObject &params)
+{
+ return 0;
+}
+
+void Algorithm::initialise()
+{
+}
+
+void Algorithm::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+}
+
+void Algorithm::prepare([[maybe_unused]] Metadata *imageMetadata)
+{
+}
+
+void Algorithm::process([[maybe_unused]] StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
+{
+}
+
+/* For registering algorithms with the system: */
+
+namespace {
+
+std::map<std::string, AlgoCreateFunc> &algorithms()
+{
+ static std::map<std::string, AlgoCreateFunc> algorithms;
+ return algorithms;
+}
+
+} /* namespace */
+
+std::map<std::string, AlgoCreateFunc> const &RPiController::getAlgorithms()
+{
+ return algorithms();
+}
+
+RegisterAlgorithm::RegisterAlgorithm(char const *name,
+ AlgoCreateFunc createFunc)
+{
+ algorithms()[std::string(name)] = createFunc;
+}
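+
+/*
+ * A typical registration, placed at file scope in the algorithm's own source
+ * file, looks like this sketch ("MyAlgo" is a hypothetical algorithm class):
+ *
+ *   static Algorithm *create(Controller *controller)
+ *   {
+ *       return new MyAlgo(controller);
+ *   }
+ *   static RegisterAlgorithm reg("rpi.myalgo", &create);
+ */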
diff --git a/src/ipa/rpi/controller/algorithm.h b/src/ipa/rpi/controller/algorithm.h
new file mode 100644
index 00000000..1971bfdc
--- /dev/null
+++ b/src/ipa/rpi/controller/algorithm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP control algorithm interface
+ */
+#pragma once
+
+/*
+ * All algorithms should be derived from this class and made available to the
+ * Controller.
+ */
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "controller.h"
+
+namespace RPiController {
+
+/* This defines the basic interface for all control algorithms. */
+
+class Algorithm
+{
+public:
+ Algorithm(Controller *controller)
+ : controller_(controller)
+ {
+ }
+ virtual ~Algorithm() = default;
+ virtual char const *name() const = 0;
+ virtual int read(const libcamera::YamlObject &params);
+ virtual void initialise();
+ virtual void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ virtual void prepare(Metadata *imageMetadata);
+ virtual void process(StatisticsPtr &stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata() const
+ {
+ return controller_->getGlobalMetadata();
+ }
+ const std::string &getTarget() const
+ {
+ return controller_->getTarget();
+ }
+ const Controller::HardwareConfig &getHardwareConfig() const
+ {
+ return controller_->getHardwareConfig();
+ }
+
+private:
+ Controller *controller_;
+};
+
+/*
+ * This code is for automatic registration of Front End algorithms with the
+ * system.
+ */
+
+typedef Algorithm *(*AlgoCreateFunc)(Controller *controller);
+struct RegisterAlgorithm {
+ RegisterAlgorithm(char const *name, AlgoCreateFunc createFunc);
+};
+std::map<std::string, AlgoCreateFunc> const &getAlgorithms();
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/alsc_status.h b/src/ipa/rpi/controller/alsc_status.h
new file mode 100644
index 00000000..329e8a37
--- /dev/null
+++ b/src/ipa/rpi/controller/alsc_status.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm status
+ */
+#pragma once
+
+#include <vector>
+
+/*
+ * The ALSC algorithm should post the following structure into the image's
+ * "alsc.status" metadata.
+ */
+
+struct AlscStatus {
+ std::vector<double> r;
+ std::vector<double> g;
+ std::vector<double> b;
+ unsigned int rows;
+ unsigned int cols;
+};
diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h
new file mode 100644
index 00000000..d941ed4e
--- /dev/null
+++ b/src/ipa/rpi/controller/awb_algorithm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class AwbAlgorithm : public Algorithm
+{
+public:
+ AwbAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* An AWB algorithm must provide the following: */
+ virtual unsigned int getConvergenceFrames() const = 0;
+ virtual void initialValues(double &gainR, double &gainB) = 0;
+ virtual void setMode(std::string const &modeName) = 0;
+ virtual void setManualGains(double manualR, double manualB) = 0;
+ virtual void setColourTemperature(double temperatureK) = 0;
+ virtual void enableAuto() = 0;
+ virtual void disableAuto() = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/awb_status.h b/src/ipa/rpi/controller/awb_status.h
new file mode 100644
index 00000000..125df1a0
--- /dev/null
+++ b/src/ipa/rpi/controller/awb_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm status
+ */
+#pragma once
+
+/*
+ * The AWB algorithm places its results into both the image and global metadata,
+ * under the tag "awb.status".
+ */
+
+struct AwbStatus {
+ char mode[32];
+ double temperatureK;
+ double gainR;
+ double gainG;
+ double gainB;
+};
diff --git a/src/ipa/rpi/controller/black_level_algorithm.h b/src/ipa/rpi/controller/black_level_algorithm.h
new file mode 100644
index 00000000..ce044e59
--- /dev/null
+++ b/src/ipa/rpi/controller/black_level_algorithm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * black level control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class BlackLevelAlgorithm : public Algorithm
+{
+public:
+ BlackLevelAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+ /* A black level algorithm must provide the following: */
+ virtual void initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/black_level_status.h b/src/ipa/rpi/controller/black_level_status.h
new file mode 100644
index 00000000..57a0705a
--- /dev/null
+++ b/src/ipa/rpi/controller/black_level_status.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm status
+ */
+#pragma once
+
+#include <stdint.h>
+
+/* The "black level" algorithm stores the black levels to use. */
+
+struct BlackLevelStatus {
+ uint16_t blackLevelR; /* out of 16 bits */
+ uint16_t blackLevelG;
+ uint16_t blackLevelB;
+};
diff --git a/src/ipa/rpi/controller/cac_status.h b/src/ipa/rpi/controller/cac_status.h
new file mode 100644
index 00000000..adffce41
--- /dev/null
+++ b/src/ipa/rpi/controller/cac_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * CAC (Chromatic Aberration Correction) algorithm status
+ */
+#pragma once
+
+#include <vector>
+
+struct CacStatus {
+ std::vector<double> lutRx;
+ std::vector<double> lutRy;
+ std::vector<double> lutBx;
+ std::vector<double> lutBy;
+};
diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h
new file mode 100644
index 00000000..61162b32
--- /dev/null
+++ b/src/ipa/rpi/controller/camera_mode.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2020, Raspberry Pi Ltd
+ *
+ * description of a particular operating mode of a sensor
+ */
+#pragma once
+
+#include <libcamera/transform.h>
+
+#include <libcamera/base/utils.h>
+
+/*
+ * Description of a "camera mode", holding enough information for control
+ * algorithms to adapt their behaviour to the different modes of the camera,
+ * including binning, scaling, cropping etc.
+ */
+
+struct CameraMode {
+ /* bit depth of the raw camera output */
+ uint32_t bitdepth;
+ /* size in pixels of frames in this mode */
+ uint16_t width;
+ uint16_t height;
+ /* size of full resolution uncropped frame ("sensor frame") */
+ uint16_t sensorWidth;
+ uint16_t sensorHeight;
+ /* binning factor (1 = no binning, 2 = 2-pixel binning etc.) */
+ uint8_t binX;
+ uint8_t binY;
+ /* location of top left pixel in the sensor frame */
+ uint16_t cropX;
+ uint16_t cropY;
+ /* scaling factor (so if uncropped, width*scaleX is sensorWidth) */
+ double scaleX;
+ double scaleY;
+ /* scaling of the noise compared to the native sensor mode */
+ double noiseFactor;
+ /* minimum and maximum line time and frame durations */
+ libcamera::utils::Duration minLineLength;
+ libcamera::utils::Duration maxLineLength;
+ libcamera::utils::Duration minFrameDuration;
+ libcamera::utils::Duration maxFrameDuration;
+ /* any camera transform *not* reflected already in the camera tuning */
+ libcamera::Transform transform;
+ /* minimum and maximum frame lengths in units of lines */
+ uint32_t minFrameLength;
+ uint32_t maxFrameLength;
+ /* sensitivity of this mode */
+ double sensitivity;
+ /* pixel clock rate */
+ uint64_t pixelRate;
+ /* Mode specific exposure time limits */
+ libcamera::utils::Duration minExposureTime;
+ libcamera::utils::Duration maxExposureTime;
+ /* Mode specific analogue gain limits */
+ double minAnalogueGain;
+ double maxAnalogueGain;
+};
diff --git a/src/ipa/rpi/controller/ccm_algorithm.h b/src/ipa/rpi/controller/ccm_algorithm.h
new file mode 100644
index 00000000..6678ba75
--- /dev/null
+++ b/src/ipa/rpi/controller/ccm_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class CcmAlgorithm : public Algorithm
+{
+public:
+ CcmAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A CCM algorithm must provide the following: */
+ virtual void setSaturation(double saturation) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/ccm_status.h b/src/ipa/rpi/controller/ccm_status.h
new file mode 100644
index 00000000..c81bcd42
--- /dev/null
+++ b/src/ipa/rpi/controller/ccm_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm status
+ */
+#pragma once
+
+/* The "ccm" algorithm generates an appropriate colour matrix. */
+
+struct CcmStatus {
+ double matrix[9];
+ double saturation;
+};
diff --git a/src/ipa/rpi/controller/contrast_algorithm.h b/src/ipa/rpi/controller/contrast_algorithm.h
new file mode 100644
index 00000000..2e983350
--- /dev/null
+++ b/src/ipa/rpi/controller/contrast_algorithm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class ContrastAlgorithm : public Algorithm
+{
+public:
+ ContrastAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A contrast algorithm must provide the following: */
+ virtual void setBrightness(double brightness) = 0;
+ virtual void setContrast(double contrast) = 0;
+ virtual void enableCe(bool enable) = 0;
+ virtual void restoreCe() = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/contrast_status.h b/src/ipa/rpi/controller/contrast_status.h
new file mode 100644
index 00000000..1f175872
--- /dev/null
+++ b/src/ipa/rpi/controller/contrast_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm status
+ */
+#pragma once
+
+#include "libipa/pwl.h"
+
+/*
+ * The "contrast" algorithm creates a gamma curve, optionally doing a little bit
+ * of contrast stretching based on the AGC histogram.
+ */
+
+struct ContrastStatus {
+ libcamera::ipa::Pwl gammaCurve;
+ double brightness;
+ double contrast;
+};
diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp
new file mode 100644
index 00000000..651fff63
--- /dev/null
+++ b/src/ipa/rpi/controller/controller.cpp
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP controller
+ */
+
+#include <assert.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include "algorithm.h"
+#include "controller.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiController)
+
+static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap = {
+ {
+ "bcm2835",
+ {
+ /*
+ * There are only ever 15 AGC regions computed by the firmware
+ * due to zoning, but the HW defines AGC_REGIONS == 16!
+ */
+			.agcRegions = { 15, 1 },
+			.agcZoneWeights = { 15, 1 },
+ .awbRegions = { 16, 12 },
+ .cacRegions = { 0, 0 },
+ .focusRegions = { 4, 3 },
+ .numHistogramBins = 128,
+ .numGammaPoints = 33,
+ .pipelineWidth = 13,
+ .statsInline = false,
+ .minPixelProcessingTime = 0s,
+ .dataBufferStrided = true,
+ }
+ },
+ {
+ "pisp",
+ {
+ .agcRegions = { 0, 0 },
+ .agcZoneWeights = { 15, 15 },
+ .awbRegions = { 32, 32 },
+ .cacRegions = { 8, 8 },
+ .focusRegions = { 8, 8 },
+ .numHistogramBins = 1024,
+ .numGammaPoints = 64,
+ .pipelineWidth = 16,
+ .statsInline = true,
+
+ /*
+ * The constraint below is on the rate of pixels going
+ * from CSI2 peripheral to ISP-FE (400Mpix/s, plus tiny
+ * overheads per scanline, for which 380Mpix/s is a
+ * conservative bound).
+ *
+ * There is a 64kbit data FIFO before the bottleneck,
+ * which means that in all reasonable cases the
+ * constraint applies at a timescale >= 1 scanline, so
+ * adding horizontal blanking can prevent loss.
+ *
+ * If the backlog were to grow beyond 64kbit during a
+ * single scanline, there could still be loss. This
+ * could happen using 4 lanes at 1.5Gbps at 10bpp with
+ * frames wider than ~16,000 pixels.
+ */
+ .minPixelProcessingTime = 1.0us / 380,
+ .dataBufferStrided = false,
+ }
+ },
+};
+
+Controller::Controller()
+ : switchModeCalled_(false)
+{
+}
+
+Controller::~Controller() {}
+
+int Controller::read(char const *filename)
+{
+ File file(filename);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPiController, Warning)
+ << "Failed to open tuning file '" << filename << "'";
+ return -EINVAL;
+ }
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root)
+ return -EINVAL;
+
+ double version = (*root)["version"].get<double>(1.0);
+ target_ = (*root)["target"].get<std::string>("bcm2835");
+
+ if (version < 2.0) {
+ LOG(RPiController, Warning)
+ << "This format of the tuning file will be deprecated soon!"
+ << " Please use the convert_tuning.py utility to update to version 2.0.";
+
+ for (auto const &[key, value] : root->asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else if (version < 3.0) {
+ if (!root->contains("algorithms")) {
+ LOG(RPiController, Error)
+ << "Tuning file " << filename
+ << " does not have an \"algorithms\" list!";
+ return -EINVAL;
+ }
+
+ for (auto const &rootAlgo : (*root)["algorithms"].asList())
+ for (auto const &[key, value] : rootAlgo.asDict()) {
+ int ret = createAlgorithm(key, value);
+ if (ret)
+ return ret;
+ }
+ } else {
+ LOG(RPiController, Error)
+ << "Unrecognised version " << version
+ << " for the tuning file " << filename;
+ return -EINVAL;
+ }
+
+ return 0;
+}
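+
+/*
+ * For reference, a minimal version 2.0 tuning file has this shape (an
+ * illustrative sketch only; the values shown are assumptions, not defaults):
+ *
+ *   {
+ *       "version": 2.0,
+ *       "target": "bcm2835",
+ *       "algorithms": [
+ *           { "rpi.black_level": { "black_level": 4096 } },
+ *           { "rpi.awb": { } }
+ *       ]
+ *   }
+ */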
+
+int Controller::createAlgorithm(const std::string &name, const YamlObject &params)
+{
+ auto it = getAlgorithms().find(name);
+ if (it == getAlgorithms().end()) {
+ LOG(RPiController, Warning)
+ << "No algorithm found for \"" << name << "\"";
+ return 0;
+ }
+
+ Algorithm *algo = (*it->second)(this);
+ int ret = algo->read(params);
+ if (ret)
+ return ret;
+
+ algorithms_.push_back(AlgorithmPtr(algo));
+ return 0;
+}
+
+void Controller::initialise()
+{
+ for (auto &algo : algorithms_)
+ algo->initialise();
+}
+
+void Controller::switchMode(CameraMode const &cameraMode, Metadata *metadata)
+{
+ for (auto &algo : algorithms_)
+ algo->switchMode(cameraMode, metadata);
+ switchModeCalled_ = true;
+}
+
+void Controller::prepare(Metadata *imageMetadata)
+{
+ assert(switchModeCalled_);
+ for (auto &algo : algorithms_)
+ algo->prepare(imageMetadata);
+}
+
+void Controller::process(StatisticsPtr stats, Metadata *imageMetadata)
+{
+ assert(switchModeCalled_);
+ for (auto &algo : algorithms_)
+ algo->process(stats, imageMetadata);
+}
+
+Metadata &Controller::getGlobalMetadata()
+{
+ return globalMetadata_;
+}
+
+Algorithm *Controller::getAlgorithm(std::string const &name) const
+{
+ /*
+ * The passed name must be the entire algorithm name, or must match the
+ * last part of it with a period (.) just before.
+ */
+ size_t nameLen = name.length();
+ for (auto &algo : algorithms_) {
+ char const *algoName = algo->name();
+ size_t algoNameLen = strlen(algoName);
+ if (algoNameLen >= nameLen &&
+ strcasecmp(name.c_str(),
+ algoName + algoNameLen - nameLen) == 0 &&
+ (nameLen == algoNameLen ||
+ algoName[algoNameLen - nameLen - 1] == '.'))
+ return algo.get();
+ }
+ return nullptr;
+}
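+
+/*
+ * For example, getAlgorithm("agc") matches an algorithm named "rpi.agc",
+ * because "agc" is the complete final component after the period, but it
+ * would not match one named "rpi.myagc".
+ */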
+
+const std::string &Controller::getTarget() const
+{
+ return target_;
+}
+
+const Controller::HardwareConfig &Controller::getHardwareConfig() const
+{
+ auto cfg = HardwareConfigMap.find(getTarget());
+
+ /*
+ * This really should not happen, the IPA ought to validate the target
+ * on initialisation.
+ */
+ ASSERT(cfg != HardwareConfigMap.end());
+ return cfg->second;
+}
diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h
new file mode 100644
index 00000000..fdb46557
--- /dev/null
+++ b/src/ipa/rpi/controller/controller.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ISP controller interface
+ */
+#pragma once
+
+/*
+ * The Controller is simply a container for collecting together a number of
+ * "control algorithms" (such as AWB etc.) and for running them all in a
+ * convenient manner.
+ */
+
+#include <string>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+#include "libcamera/internal/yaml_parser.h"
+
+#include "camera_mode.h"
+#include "device_status.h"
+#include "metadata.h"
+#include "statistics.h"
+
+namespace RPiController {
+
+class Algorithm;
+typedef std::unique_ptr<Algorithm> AlgorithmPtr;
+
+/*
+ * The Controller holds some global metadata, which is how different
+ * controllers and the control algorithms within them can exchange
+ * information. Callers pass a pointer to the metadata for a specific image
+ * into prepare(), and the same metadata should be passed on to process().
+ */
+
+class Controller
+{
+public:
+ struct HardwareConfig {
+ libcamera::Size agcRegions;
+ libcamera::Size agcZoneWeights;
+ libcamera::Size awbRegions;
+ libcamera::Size cacRegions;
+ libcamera::Size focusRegions;
+ unsigned int numHistogramBins;
+ unsigned int numGammaPoints;
+ unsigned int pipelineWidth;
+ bool statsInline;
+ libcamera::utils::Duration minPixelProcessingTime;
+ bool dataBufferStrided;
+ };
+
+ Controller();
+ ~Controller();
+ int read(char const *filename);
+ void initialise();
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ void prepare(Metadata *imageMetadata);
+ void process(StatisticsPtr stats, Metadata *imageMetadata);
+ Metadata &getGlobalMetadata();
+ Algorithm *getAlgorithm(std::string const &name) const;
+ const std::string &getTarget() const;
+ const HardwareConfig &getHardwareConfig() const;
+
+protected:
+ int createAlgorithm(const std::string &name, const libcamera::YamlObject &params);
+
+ Metadata globalMetadata_;
+ std::vector<AlgorithmPtr> algorithms_;
+ bool switchModeCalled_;
+
+private:
+ std::string target_;
+};
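+
+/*
+ * A typical lifecycle is sketched below for illustration ("tuning.json",
+ * "mode", "stats" and the metadata objects are assumed to exist):
+ *
+ *   Controller controller;
+ *   controller.read("tuning.json");
+ *   controller.initialise();
+ *   controller.switchMode(mode, &metadata);
+ *   // ...then, for every frame:
+ *   controller.prepare(&imageMetadata);
+ *   controller.process(stats, &imageMetadata);
+ */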
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/denoise_algorithm.h b/src/ipa/rpi/controller/denoise_algorithm.h
new file mode 100644
index 00000000..b9a2a33c
--- /dev/null
+++ b/src/ipa/rpi/controller/denoise_algorithm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * Denoise control algorithm interface
+ */
+#pragma once
+
+#include <string>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+enum class DenoiseMode { Off, ColourOff, ColourFast, ColourHighQuality };
+
+class DenoiseAlgorithm : public Algorithm
+{
+public:
+ DenoiseAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A Denoise algorithm must provide the following: */
+ virtual void setMode(DenoiseMode mode) = 0;
+ /* Some platforms may not be able to define this, so supply a default. */
+ virtual void setConfig([[maybe_unused]] std::string const &name) {}
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/denoise_status.h b/src/ipa/rpi/controller/denoise_status.h
new file mode 100644
index 00000000..eead6086
--- /dev/null
+++ b/src/ipa/rpi/controller/denoise_status.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * Denoise control algorithm status
+ */
+#pragma once
+
+/* This stores the parameters required for Denoise. */
+
+struct DenoiseStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double strength;
+ unsigned int mode;
+};
+
+struct SdnStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double noiseConstant2;
+ double noiseSlope2;
+ double strength;
+};
+
+struct CdnStatus {
+ double strength;
+ double threshold;
+};
+
+struct TdnStatus {
+ double noiseConstant;
+ double noiseSlope;
+ double threshold;
+};
diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp
new file mode 100644
index 00000000..1695764d
--- /dev/null
+++ b/src/ipa/rpi/controller/device_status.cpp
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * device (image sensor) status
+ */
+#include "device_status.h"
+
+using namespace libcamera; /* for the Duration operator<< overload */
+
+std::ostream &operator<<(std::ostream &out, const DeviceStatus &d)
+{
+ out << "Exposure time: " << d.exposureTime
+ << " Frame length: " << d.frameLength
+ << " Line length: " << d.lineLength
+ << " Gain: " << d.analogueGain;
+
+ if (d.aperture)
+ out << " Aperture: " << *d.aperture;
+
+ if (d.lensPosition)
+ out << " Lens: " << *d.lensPosition;
+
+ if (d.flashIntensity)
+ out << " Flash: " << *d.flashIntensity;
+
+ if (d.sensorTemperature)
+ out << " Temperature: " << *d.sensorTemperature;
+
+ return out;
+}
diff --git a/src/ipa/rpi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h
new file mode 100644
index 00000000..b1792035
--- /dev/null
+++ b/src/ipa/rpi/controller/device_status.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * device (image sensor) status
+ */
+#pragma once
+
+#include <iostream>
+#include <optional>
+
+#include <libcamera/base/utils.h>
+
+/*
+ * Definition of "device metadata" which stores things like exposure time and
+ * analogue gain that downstream control algorithms will want to know.
+ */
+
+struct DeviceStatus {
+ DeviceStatus()
+ : exposureTime(std::chrono::seconds(0)), frameLength(0),
+ lineLength(std::chrono::seconds(0)), analogueGain(0.0)
+ {
+ }
+
+ friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d);
+
+ /* time the image is exposed */
+ libcamera::utils::Duration exposureTime;
+ /* frame length given in number of lines */
+ uint32_t frameLength;
+ /* line length for the current frame */
+ libcamera::utils::Duration lineLength;
+ double analogueGain;
+ /* 1.0/distance-in-metres */
+ std::optional<double> lensPosition;
+ /* 1/f so that brightness quadruples when this doubles */
+ std::optional<double> aperture;
+ /* proportional to brightness with 0 = no flash, 1 = maximum flash */
+ std::optional<double> flashIntensity;
+ /* Sensor reported temperature value (in degrees) */
+ std::optional<double> sensorTemperature;
+};
diff --git a/src/ipa/rpi/controller/dpc_status.h b/src/ipa/rpi/controller/dpc_status.h
new file mode 100644
index 00000000..9f30d5d9
--- /dev/null
+++ b/src/ipa/rpi/controller/dpc_status.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm status
+ */
+#pragma once
+
+/* The "DPC" algorithm sets defective pixel correction strength. */
+
+struct DpcStatus {
+ int strength; /* 0 = "off", 1 = "normal", 2 = "strong" */
+};
diff --git a/src/ipa/rpi/controller/geq_status.h b/src/ipa/rpi/controller/geq_status.h
new file mode 100644
index 00000000..cb107a48
--- /dev/null
+++ b/src/ipa/rpi/controller/geq_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm status
+ */
+#pragma once
+
+#include <stdint.h>
+
+/* The "GEQ" algorithm calculates the green equalisation thresholds */
+
+struct GeqStatus {
+ uint16_t offset;
+ double slope;
+};
diff --git a/src/ipa/rpi/controller/hdr_algorithm.h b/src/ipa/rpi/controller/hdr_algorithm.h
new file mode 100644
index 00000000..b889d8fd
--- /dev/null
+++ b/src/ipa/rpi/controller/hdr_algorithm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * HDR control algorithm interface
+ */
+#pragma once
+
+#include <vector>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class HdrAlgorithm : public Algorithm
+{
+public:
+ HdrAlgorithm(Controller *controller)
+ : Algorithm(controller) {}
+ /* An HDR algorithm must provide the following: */
+ virtual int setMode(std::string const &modeName) = 0;
+ virtual std::vector<unsigned int> getChannels() const = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/hdr_status.h b/src/ipa/rpi/controller/hdr_status.h
new file mode 100644
index 00000000..a4955778
--- /dev/null
+++ b/src/ipa/rpi/controller/hdr_status.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * HDR control algorithm status
+ */
+#pragma once
+
+#include <string>
+
+/*
+ * The HDR algorithm process method should post an HdrStatus into the image
+ * metadata under the tag "hdr.status".
+ */
+
+struct HdrStatus {
+ std::string mode;
+ std::string channel;
+};
diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp
new file mode 100644
index 00000000..13089839
--- /dev/null
+++ b/src/ipa/rpi/controller/histogram.cpp
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculations
+ */
+#include <cmath>
+#include <stdio.h>
+
+#include "histogram.h"
+
+using namespace RPiController;
+
+uint64_t Histogram::cumulativeFreq(double bin) const
+{
+ if (bin <= 0)
+ return 0;
+ else if (bin >= bins())
+ return total();
+ int b = (int)bin;
+ return cumulative_[b] +
+ (bin - b) * (cumulative_[b + 1] - cumulative_[b]);
+}
+
+double Histogram::quantile(double q, int first, int last) const
+{
+ if (first == -1)
+ first = 0;
+ if (last == -1)
+ last = cumulative_.size() - 2;
+ assert(first <= last);
+ uint64_t items = q * total();
+	while (first < last) { /* binary search to find the right bin */
+ int middle = (first + last) / 2;
+ if (cumulative_[middle + 1] > items)
+ last = middle; /* between first and middle */
+ else
+ first = middle + 1; /* after middle */
+ }
+ assert(items >= cumulative_[first] && items <= cumulative_[last + 1]);
+ double frac = cumulative_[first + 1] == cumulative_[first] ? 0
+ : (double)(items - cumulative_[first]) /
+ (cumulative_[first + 1] - cumulative_[first]);
+ return first + frac;
+}
+
+double Histogram::interBinMean(double binLo, double binHi) const
+{
+ assert(binHi >= binLo);
+ double sumBinFreq = 0, cumulFreq = 0;
+ for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi);
+ binLo = binNext, binNext += 1.0) {
+ int bin = std::floor(binLo);
+ double freq = (cumulative_[bin + 1] - cumulative_[bin]) *
+ (std::min(binNext, binHi) - binLo);
+ sumBinFreq += bin * freq;
+ cumulFreq += freq;
+ }
+
+ if (cumulFreq == 0) {
+ /* interval had zero width or contained no weight? */
+ return binHi;
+ }
+
+ /* add 0.5 to give an average for bin mid-points */
+ return sumBinFreq / cumulFreq + 0.5;
+}
+
+double Histogram::interQuantileMean(double qLo, double qHi) const
+{
+ assert(qHi >= qLo);
+ double pLo = quantile(qLo);
+ double pHi = quantile(qHi, (int)pLo);
+ return interBinMean(pLo, pHi);
+}
diff --git a/src/ipa/rpi/controller/histogram.h b/src/ipa/rpi/controller/histogram.h
new file mode 100644
index 00000000..ab4e5e31
--- /dev/null
+++ b/src/ipa/rpi/controller/histogram.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * histogram calculation interface
+ */
+#pragma once
+
+#include <cassert>
+#include <stdint.h>
+#include <vector>
+
+/*
+ * A simple histogram class, for use in particular to find "quantiles" and
+ * averages between "quantiles".
+ */
+
+namespace RPiController {
+
+class Histogram
+{
+public:
+ Histogram()
+ {
+ cumulative_.push_back(0);
+ }
+
+ template<typename T> Histogram(T *histogram, int num)
+ {
+ assert(num);
+ cumulative_.reserve(num + 1);
+ cumulative_.push_back(0);
+ for (int i = 0; i < num; i++)
+ cumulative_.push_back(cumulative_.back() +
+ histogram[i]);
+ }
+ uint32_t bins() const { return cumulative_.size() - 1; }
+ uint64_t total() const { return cumulative_[cumulative_.size() - 1]; }
+ /* Cumulative frequency up to a (fractional) point in a bin. */
+ uint64_t cumulativeFreq(double bin) const;
+ /* Return the mean value between two (fractional) bins. */
+ double interBinMean(double binLo, double binHi) const;
+ /*
+ * Return the (fractional) bin of the point q (0 <= q <= 1) through the
+ * histogram. Optionally provide limits to help.
+ */
+ double quantile(double q, int first = -1, int last = -1) const;
+ /* Return the average histogram bin value between the two quantiles. */
+ double interQuantileMean(double qLo, double qHi) const;
+
+private:
+ std::vector<uint64_t> cumulative_;
+};
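+
+/*
+ * Usage sketch (illustrative): given an array of 128 bin counts in "bins",
+ * estimate the mean bin of the brightest 2% of pixels:
+ *
+ *   Histogram h(bins, 128);
+ *   double brightMean = h.interQuantileMean(0.98, 1.0);
+ */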
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/lux_status.h b/src/ipa/rpi/controller/lux_status.h
new file mode 100644
index 00000000..d8729f43
--- /dev/null
+++ b/src/ipa/rpi/controller/lux_status.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm status
+ */
+#pragma once
+
+/*
+ * The "lux" algorithm looks at the (AGC) histogram statistics of the frame and
+ * estimates the current lux level of the scene. It does this by a simple ratio
+ * calculation comparing to a reference image that was taken in known conditions
+ * with known statistics and a properly measured lux level. There is a slight
+ * problem with aperture, in that it may be variable without the system knowing
+ * or being aware of it. In this case an external application may set a
+ * "current_aperture" value if it wishes, which would be used in place of the
+ * (presumably meaningless) value in the image metadata.
+ */
+
+struct LuxStatus {
+ double lux;
+ double aperture;
+};
diff --git a/src/ipa/rpi/controller/meson.build b/src/ipa/rpi/controller/meson.build
new file mode 100644
index 00000000..74b74888
--- /dev/null
+++ b/src/ipa/rpi/controller/meson.build
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: CC0-1.0
+
+rpi_ipa_controller_sources = files([
+ 'algorithm.cpp',
+ 'controller.cpp',
+ 'device_status.cpp',
+ 'histogram.cpp',
+ 'rpi/af.cpp',
+ 'rpi/agc.cpp',
+ 'rpi/agc_channel.cpp',
+ 'rpi/alsc.cpp',
+ 'rpi/awb.cpp',
+ 'rpi/black_level.cpp',
+ 'rpi/cac.cpp',
+ 'rpi/ccm.cpp',
+ 'rpi/contrast.cpp',
+ 'rpi/denoise.cpp',
+ 'rpi/dpc.cpp',
+ 'rpi/geq.cpp',
+ 'rpi/hdr.cpp',
+ 'rpi/lux.cpp',
+ 'rpi/noise.cpp',
+ 'rpi/saturation.cpp',
+ 'rpi/sdn.cpp',
+ 'rpi/sharpen.cpp',
+ 'rpi/tonemap.cpp',
+])
+
+rpi_ipa_controller_deps = [
+ libcamera_private,
+]
+
+rpi_ipa_controller_lib = static_library('rpi_ipa_controller', rpi_ipa_controller_sources,
+ include_directories : libipa_includes,
+ dependencies : rpi_ipa_controller_deps)
diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h
new file mode 100644
index 00000000..77d3b074
--- /dev/null
+++ b/src/ipa/rpi/controller/metadata.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * general metadata class
+ */
+#pragma once
+
+/* A simple class for carrying arbitrary metadata, for example about an image. */
+
+#include <any>
+#include <map>
+#include <mutex>
+#include <string>
+#include <utility>
+
+#include <libcamera/base/thread_annotations.h>
+
+namespace RPiController {
+
+class LIBCAMERA_TSA_CAPABILITY("mutex") Metadata
+{
+public:
+ Metadata() = default;
+
+ Metadata(Metadata const &other)
+ {
+ std::scoped_lock otherLock(other.mutex_);
+ data_ = other.data_;
+ }
+
+ Metadata(Metadata &&other)
+ {
+ std::scoped_lock otherLock(other.mutex_);
+ data_ = std::move(other.data_);
+ other.data_.clear();
+ }
+
+ template<typename T>
+ void set(std::string const &tag, T &&value)
+ {
+ std::scoped_lock lock(mutex_);
+ data_[tag] = std::forward<T>(value);
+ }
+
+ template<typename T>
+ int get(std::string const &tag, T &value) const
+ {
+ std::scoped_lock lock(mutex_);
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return -1;
+ value = std::any_cast<T>(it->second);
+ return 0;
+ }
+
+ void clear()
+ {
+ std::scoped_lock lock(mutex_);
+ data_.clear();
+ }
+
+ Metadata &operator=(Metadata const &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_ = other.data_;
+ return *this;
+ }
+
+ Metadata &operator=(Metadata &&other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_ = std::move(other.data_);
+ other.data_.clear();
+ return *this;
+ }
+
+ void merge(Metadata &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ data_.merge(other.data_);
+ }
+
+ void mergeCopy(const Metadata &other)
+ {
+ std::scoped_lock lock(mutex_, other.mutex_);
+ /*
+		 * std::map::insert() does not overwrite existing entries, so
+		 * this copies only key/value pairs not already present here.
+ */
+ data_.insert(other.data_.begin(), other.data_.end());
+ }
+
+ void erase(std::string const &tag)
+ {
+ std::scoped_lock lock(mutex_);
+ eraseLocked(tag);
+ }
+
+ template<typename T>
+ T *getLocked(std::string const &tag)
+ {
+ /*
+ * This allows in-place access to the Metadata contents,
+ * for which you should be holding the lock.
+ */
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return nullptr;
+ return std::any_cast<T>(&it->second);
+ }
+
+ template<typename T>
+ void setLocked(std::string const &tag, T &&value)
+ {
+ /* Use this only if you're holding the lock yourself. */
+ data_[tag] = std::forward<T>(value);
+ }
+
+ void eraseLocked(std::string const &tag)
+ {
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return;
+ data_.erase(it);
+ }
+
+ /*
+ * Note: use of (lowercase) lock and unlock means you can create scoped
+ * locks with the standard lock classes.
+ * e.g. std::lock_guard<RPiController::Metadata> lock(metadata)
+ */
+ void lock() LIBCAMERA_TSA_ACQUIRE() { mutex_.lock(); }
+ auto try_lock() LIBCAMERA_TSA_ACQUIRE() { return mutex_.try_lock(); }
+ void unlock() LIBCAMERA_TSA_RELEASE() { mutex_.unlock(); }
+
+private:
+ mutable std::mutex mutex_;
+ std::map<std::string, std::any> data_;
+};
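+
+/*
+ * An illustrative sketch of the locked accessors: hold the lock across the
+ * getLocked() call and any use of the returned pointer, so the contents
+ * cannot change underneath it ("metadata" is an existing Metadata object).
+ *
+ *   std::lock_guard<RPiController::Metadata> lock(metadata);
+ *   AwbStatus *awb = metadata.getLocked<AwbStatus>("awb.status");
+ *   if (awb)
+ *       awb->gainR *= 2.0;
+ */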
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/noise_status.h b/src/ipa/rpi/controller/noise_status.h
new file mode 100644
index 00000000..1919da32
--- /dev/null
+++ b/src/ipa/rpi/controller/noise_status.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm status
+ */
+#pragma once
+
+/* The "noise" algorithm stores an estimate of the noise profile for this image. */
+
+struct NoiseStatus {
+ double noiseConstant;
+ double noiseSlope;
+};
diff --git a/src/ipa/rpi/controller/pdaf_data.h b/src/ipa/rpi/controller/pdaf_data.h
new file mode 100644
index 00000000..779b987d
--- /dev/null
+++ b/src/ipa/rpi/controller/pdaf_data.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * PDAF Metadata
+ */
+#pragma once
+
+#include <stdint.h>
+
+#include "region_stats.h"
+
+namespace RPiController {
+
+struct PdafData {
+ /* Confidence, in arbitrary units */
+ uint16_t conf;
+ /* Phase error, in s16 Q4 format (S.11.4) */
+ int16_t phase;
+};
+
+using PdafRegions = RegionStats<PdafData>;
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/region_stats.h b/src/ipa/rpi/controller/region_stats.h
new file mode 100644
index 00000000..c60f7d9a
--- /dev/null
+++ b/src/ipa/rpi/controller/region_stats.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Raspberry Pi region based statistics container
+ */
+#pragma once
+
+#include <array>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+namespace RPiController {
+
+template<typename T>
+class RegionStats
+{
+public:
+ struct Region {
+ T val;
+ uint32_t counted;
+ uint32_t uncounted;
+ };
+
+ RegionStats()
+ : size_({}), numFloating_(0), default_({})
+ {
+ }
+
+ void init(const libcamera::Size &size, unsigned int numFloating = 0)
+ {
+ size_ = size;
+ numFloating_ = numFloating;
+ regions_.clear();
+ regions_.resize(size_.width * size_.height + numFloating_);
+ }
+
+ void init(unsigned int num)
+ {
+ size_ = libcamera::Size(num, 1);
+ numFloating_ = 0;
+ regions_.clear();
+ regions_.resize(num);
+ }
+
+ unsigned int numRegions() const
+ {
+ return size_.width * size_.height;
+ }
+
+ unsigned int numFloatingRegions() const
+ {
+ return numFloating_;
+ }
+
+ libcamera::Size size() const
+ {
+ return size_;
+ }
+
+ void set(unsigned int index, const Region &region)
+ {
+ if (index >= numRegions())
+ return;
+ set_(index, region);
+ }
+
+ void set(const libcamera::Point &pos, const Region &region)
+ {
+ set(pos.y * size_.width + pos.x, region);
+ }
+
+ void setFloating(unsigned int index, const Region &region)
+ {
+ if (index >= numFloatingRegions())
+ return;
+ set(numRegions() + index, region);
+ }
+
+ const Region &get(unsigned int index) const
+ {
+ if (index >= numRegions())
+ return default_;
+ return get_(index);
+ }
+
+ const Region &get(const libcamera::Point &pos) const
+ {
+ return get(pos.y * size_.width + pos.x);
+ }
+
+ const Region &getFloating(unsigned int index) const
+ {
+ if (index >= numFloatingRegions())
+ return default_;
+ return get_(numRegions() + index);
+ }
+
+ typename std::vector<Region>::iterator begin() { return regions_.begin(); }
+ typename std::vector<Region>::iterator end() { return regions_.end(); }
+ typename std::vector<Region>::const_iterator begin() const { return regions_.begin(); }
+ typename std::vector<Region>::const_iterator end() const { return regions_.end(); }
+
+private:
+ void set_(unsigned int index, const Region &region)
+ {
+ regions_[index] = region;
+ }
+
+ const Region &get_(unsigned int index) const
+ {
+ return regions_[index];
+ }
+
+ libcamera::Size size_;
+ unsigned int numFloating_;
+ std::vector<Region> regions_;
+ Region default_;
+};
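+
+/*
+ * Usage sketch (illustrative): a 4x3 grid of focus regions, filled and then
+ * summed ("fom" stands in for a value computed elsewhere).
+ *
+ *   RegionStats<uint64_t> focus;
+ *   focus.init(libcamera::Size(4, 3));
+ *   focus.set(libcamera::Point(1, 2), { fom, 1, 0 });
+ *   uint64_t total = 0;
+ *   for (const auto &region : focus)
+ *       total += region.val;
+ */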
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp
new file mode 100644
index 00000000..2157eb94
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/af.cpp
@@ -0,0 +1,797 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022-2023, Raspberry Pi Ltd
+ *
+ * Autofocus control algorithm
+ */
+
+#include "af.h"
+
+#include <cmath>
+#include <iomanip>
+#include <stdlib.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAf)
+
+#define NAME "rpi.af"
+
+/*
+ * Default values for parameters. All may be overridden in the tuning file.
+ * Many of these values are sensor- or module-dependent; the defaults here
+ * assume IMX708 in a Raspberry Pi V3 camera with the standard lens.
+ *
+ * Here all focus values are in dioptres (1/m). They are converted to hardware
+ * units when written to status.lensSetting or returned from setLensPosition().
+ *
+ * Gain and delay values are relative to the update rate, since much (not all)
+ * of the delay is in the sensor and (for CDAF) ISP, not the lens mechanism;
+ * but note that algorithms are updated at no more than 30 Hz.
+ */
+
+Af::RangeDependentParams::RangeDependentParams()
+ : focusMin(0.0),
+ focusMax(12.0),
+ focusDefault(1.0)
+{
+}
+
+Af::SpeedDependentParams::SpeedDependentParams()
+ : stepCoarse(1.0),
+ stepFine(0.25),
+ contrastRatio(0.75),
+ pdafGain(-0.02),
+ pdafSquelch(0.125),
+ maxSlew(2.0),
+ pdafFrames(20),
+ dropoutFrames(6),
+ stepFrames(4)
+{
+}
+
+Af::CfgParams::CfgParams()
+ : confEpsilon(8),
+ confThresh(16),
+ confClip(512),
+ skipFrames(5),
+ map()
+{
+}
+
+template<typename T>
+static void readNumber(T &dest, const libcamera::YamlObject &params, char const *name)
+{
+ auto value = params[name].get<T>();
+ if (value)
+ dest = *value;
+ else
+ LOG(RPiAf, Warning) << "Missing parameter \"" << name << "\"";
+}
+
+void Af::RangeDependentParams::read(const libcamera::YamlObject &params)
+{
+ readNumber<double>(focusMin, params, "min");
+ readNumber<double>(focusMax, params, "max");
+ readNumber<double>(focusDefault, params, "default");
+}
+
+void Af::SpeedDependentParams::read(const libcamera::YamlObject &params)
+{
+ readNumber<double>(stepCoarse, params, "step_coarse");
+ readNumber<double>(stepFine, params, "step_fine");
+ readNumber<double>(contrastRatio, params, "contrast_ratio");
+ readNumber<double>(pdafGain, params, "pdaf_gain");
+ readNumber<double>(pdafSquelch, params, "pdaf_squelch");
+ readNumber<double>(maxSlew, params, "max_slew");
+ readNumber<uint32_t>(pdafFrames, params, "pdaf_frames");
+ readNumber<uint32_t>(dropoutFrames, params, "dropout_frames");
+ readNumber<uint32_t>(stepFrames, params, "step_frames");
+}
+
+int Af::CfgParams::read(const libcamera::YamlObject &params)
+{
+ if (params.contains("ranges")) {
+ auto &rr = params["ranges"];
+
+ if (rr.contains("normal"))
+ ranges[AfRangeNormal].read(rr["normal"]);
+ else
+ LOG(RPiAf, Warning) << "Missing range \"normal\"";
+
+ ranges[AfRangeMacro] = ranges[AfRangeNormal];
+ if (rr.contains("macro"))
+ ranges[AfRangeMacro].read(rr["macro"]);
+
+ ranges[AfRangeFull].focusMin = std::min(ranges[AfRangeNormal].focusMin,
+ ranges[AfRangeMacro].focusMin);
+ ranges[AfRangeFull].focusMax = std::max(ranges[AfRangeNormal].focusMax,
+ ranges[AfRangeMacro].focusMax);
+ ranges[AfRangeFull].focusDefault = ranges[AfRangeNormal].focusDefault;
+ if (rr.contains("full"))
+ ranges[AfRangeFull].read(rr["full"]);
+ } else
+ LOG(RPiAf, Warning) << "No ranges defined";
+
+ if (params.contains("speeds")) {
+ auto &ss = params["speeds"];
+
+ if (ss.contains("normal"))
+ speeds[AfSpeedNormal].read(ss["normal"]);
+ else
+ LOG(RPiAf, Warning) << "Missing speed \"normal\"";
+
+ speeds[AfSpeedFast] = speeds[AfSpeedNormal];
+ if (ss.contains("fast"))
+ speeds[AfSpeedFast].read(ss["fast"]);
+ } else
+ LOG(RPiAf, Warning) << "No speeds defined";
+
+ readNumber<uint32_t>(confEpsilon, params, "conf_epsilon");
+ readNumber<uint32_t>(confThresh, params, "conf_thresh");
+ readNumber<uint32_t>(confClip, params, "conf_clip");
+ readNumber<uint32_t>(skipFrames, params, "skip_frames");
+
+ if (params.contains("map"))
+ map = params["map"].get<ipa::Pwl>(ipa::Pwl{});
+ else
+ LOG(RPiAf, Warning) << "No map defined";
+
+ return 0;
+}
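+
+/*
+ * For reference, a minimal sketch of the tuning-file fragment that
+ * CfgParams::read() parses. The keys are exactly those read above; the
+ * values are merely the built-in defaults and are not taken from any real
+ * tuning file:
+ *
+ *     rpi.af:
+ *         ranges:
+ *             normal: { min: 0.0, max: 12.0, default: 1.0 }
+ *         speeds:
+ *             normal:
+ *                 step_coarse: 1.0
+ *                 step_fine: 0.25
+ *                 contrast_ratio: 0.75
+ *                 pdaf_gain: -0.02
+ *                 pdaf_squelch: 0.125
+ *                 max_slew: 2.0
+ *                 pdaf_frames: 20
+ *                 dropout_frames: 6
+ *                 step_frames: 4
+ *         conf_epsilon: 8
+ *         conf_thresh: 16
+ *         conf_clip: 512
+ *         skip_frames: 5
+ *         map: [ 0.0, 445, 15.0, 925 ]
+ */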
+
+void Af::CfgParams::initialise()
+{
+ if (map.empty()) {
+ /* Default mapping from dioptres to hardware setting */
+ static constexpr double DefaultMapX0 = 0.0;
+ static constexpr double DefaultMapY0 = 445.0;
+ static constexpr double DefaultMapX1 = 15.0;
+ static constexpr double DefaultMapY1 = 925.0;
+
+ map.append(DefaultMapX0, DefaultMapY0);
+ map.append(DefaultMapX1, DefaultMapY1);
+ }
+}
+
+/* Af Algorithm class */
+
+static constexpr unsigned MaxWindows = 10;
+
+Af::Af(Controller *controller)
+ : AfAlgorithm(controller),
+ cfg_(),
+ range_(AfRangeNormal),
+ speed_(AfSpeedNormal),
+ mode_(AfAlgorithm::AfModeManual),
+ pauseFlag_(false),
+ statsRegion_(0, 0, 0, 0),
+ windows_(),
+ useWindows_(false),
+ phaseWeights_(),
+ contrastWeights_(),
+ scanState_(ScanState::Idle),
+ initted_(false),
+ ftarget_(-1.0),
+ fsmooth_(-1.0),
+ prevContrast_(0.0),
+ skipCount_(0),
+ stepCount_(0),
+ dropCount_(0),
+ scanMaxContrast_(0.0),
+ scanMinContrast_(1.0e9),
+ scanData_(),
+ reportState_(AfState::Idle)
+{
+ /*
+ * Reserve space for data, to reduce memory fragmentation. It's too early
+ * to query the size of the PDAF (from camera) and Contrast (from ISP)
+ * statistics, but these are plausible upper bounds.
+ */
+ phaseWeights_.w.reserve(16 * 12);
+ contrastWeights_.w.reserve(getHardwareConfig().focusRegions.width *
+ getHardwareConfig().focusRegions.height);
+ scanData_.reserve(32);
+}
+
+Af::~Af()
+{
+}
+
+char const *Af::name() const
+{
+ return NAME;
+}
+
+int Af::read(const libcamera::YamlObject &params)
+{
+ return cfg_.read(params);
+}
+
+void Af::initialise()
+{
+ cfg_.initialise();
+}
+
+void Af::switchMode(CameraMode const &cameraMode, [[maybe_unused]] Metadata *metadata)
+{
+ /* Assume that PDAF and Focus stats grids cover the visible area */
+ statsRegion_.x = (int)cameraMode.cropX;
+ statsRegion_.y = (int)cameraMode.cropY;
+ statsRegion_.width = (unsigned)(cameraMode.width * cameraMode.scaleX);
+ statsRegion_.height = (unsigned)(cameraMode.height * cameraMode.scaleY);
+ LOG(RPiAf, Debug) << "switchMode: statsRegion: "
+ << statsRegion_.x << ','
+ << statsRegion_.y << ','
+ << statsRegion_.width << ','
+ << statsRegion_.height;
+ invalidateWeights();
+
+ if (scanState_ >= ScanState::Coarse && scanState_ < ScanState::Settle) {
+ /*
+ * If a scan was in progress, re-start it, as CDAF statistics
+ * may have changed. Though if the application is just about
+ * to take a still picture, this will not help...
+ */
+ startProgrammedScan();
+ }
+ skipCount_ = cfg_.skipFrames;
+}
+
+void Af::computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols)
+{
+ wgts->rows = rows;
+ wgts->cols = cols;
+ wgts->sum = 0;
+ wgts->w.resize(rows * cols);
+ std::fill(wgts->w.begin(), wgts->w.end(), 0);
+
+ if (rows > 0 && cols > 0 && useWindows_ &&
+ statsRegion_.height >= rows && statsRegion_.width >= cols) {
+ /*
+ * Here we just merge all of the given windows, weighted by area.
+ * \todo Perhaps a better approach might be to find the phase in each
+ * window and choose either the closest or the highest-confidence one?
+ * Ensure weights sum to less than (1<<16). 46080 is a "round number"
+ * below 65536, for better rounding when window size is a simple
+ * fraction of image dimensions.
+ */
+ const unsigned maxCellWeight = 46080u / (MaxWindows * rows * cols);
+ const unsigned cellH = statsRegion_.height / rows;
+ const unsigned cellW = statsRegion_.width / cols;
+ const unsigned cellA = cellH * cellW;
+
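+		/*
+		 * Add each window's contribution to each cell in proportion
+		 * to their area of overlap (rounded up).
+		 */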
+ for (auto &w : windows_) {
+ for (unsigned r = 0; r < rows; ++r) {
+ int y0 = std::max(statsRegion_.y + (int)(cellH * r), w.y);
+ int y1 = std::min(statsRegion_.y + (int)(cellH * (r + 1)),
+ w.y + (int)(w.height));
+ if (y0 >= y1)
+ continue;
+ y1 -= y0;
+ for (unsigned c = 0; c < cols; ++c) {
+ int x0 = std::max(statsRegion_.x + (int)(cellW * c), w.x);
+ int x1 = std::min(statsRegion_.x + (int)(cellW * (c + 1)),
+ w.x + (int)(w.width));
+ if (x0 >= x1)
+ continue;
+ unsigned a = y1 * (x1 - x0);
+ a = (maxCellWeight * a + cellA - 1) / cellA;
+ wgts->w[r * cols + c] += a;
+ wgts->sum += a;
+ }
+ }
+ }
+ }
+
+ if (wgts->sum == 0) {
+ /* Default AF window is the middle 1/2 width of the middle 1/3 height */
+ for (unsigned r = rows / 3; r < rows - rows / 3; ++r) {
+ for (unsigned c = cols / 4; c < cols - cols / 4; ++c) {
+ wgts->w[r * cols + c] = 1;
+ wgts->sum += 1;
+ }
+ }
+ }
+}
+
+void Af::invalidateWeights()
+{
+ phaseWeights_.sum = 0;
+ contrastWeights_.sum = 0;
+}
+
+bool Af::getPhase(PdafRegions const &regions, double &phase, double &conf)
+{
+ libcamera::Size size = regions.size();
+ if (size.height != phaseWeights_.rows || size.width != phaseWeights_.cols ||
+ phaseWeights_.sum == 0) {
+ LOG(RPiAf, Debug) << "Recompute Phase weights " << size.width << 'x' << size.height;
+ computeWeights(&phaseWeights_, size.height, size.width);
+ }
+
+ uint32_t sumWc = 0;
+ int64_t sumWcp = 0;
+ for (unsigned i = 0; i < regions.numRegions(); ++i) {
+ unsigned w = phaseWeights_.w[i];
+ if (w) {
+ const PdafData &data = regions.get(i).val;
+ unsigned c = data.conf;
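+			/*
+			 * Discount each confidence value by confThresh/4 when
+			 * summing confidences, and by confThresh/2 when
+			 * weighting phases, so that marginal cells contribute
+			 * less to the phase average than to the reported
+			 * confidence.
+			 */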
+ if (c >= cfg_.confThresh) {
+ if (c > cfg_.confClip)
+ c = cfg_.confClip;
+ c -= (cfg_.confThresh >> 2);
+ sumWc += w * c;
+ c -= (cfg_.confThresh >> 2);
+ sumWcp += (int64_t)(w * c) * (int64_t)data.phase;
+ }
+ }
+ }
+
+ if (0 < phaseWeights_.sum && phaseWeights_.sum <= sumWc) {
+ phase = (double)sumWcp / (double)sumWc;
+ conf = (double)sumWc / (double)phaseWeights_.sum;
+ return true;
+ } else {
+ phase = 0.0;
+ conf = 0.0;
+ return false;
+ }
+}
+
+double Af::getContrast(const FocusRegions &focusStats)
+{
+ libcamera::Size size = focusStats.size();
+ if (size.height != contrastWeights_.rows ||
+ size.width != contrastWeights_.cols || contrastWeights_.sum == 0) {
+ LOG(RPiAf, Debug) << "Recompute Contrast weights "
+ << size.width << 'x' << size.height;
+ computeWeights(&contrastWeights_, size.height, size.width);
+ }
+
+ uint64_t sumWc = 0;
+ for (unsigned i = 0; i < focusStats.numRegions(); ++i)
+ sumWc += contrastWeights_.w[i] * focusStats.get(i).val;
+
+ return (contrastWeights_.sum > 0) ? ((double)sumWc / (double)contrastWeights_.sum) : 0.0;
+}
+
+void Af::doPDAF(double phase, double conf)
+{
+ /* Apply loop gain */
+ phase *= cfg_.speeds[speed_].pdafGain;
+
+ if (mode_ == AfModeContinuous) {
+ /*
+ * PDAF in Continuous mode. Scale down lens movement when
+ * delta is small or confidence is low, to suppress wobble.
+ */
+ phase *= conf / (conf + cfg_.confEpsilon);
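+		/*
+		 * Below the squelch threshold the response becomes cubic
+		 * (phase^3 / squelch^2), further damping small corrections.
+		 */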
+ if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch) {
+ double a = phase / cfg_.speeds[speed_].pdafSquelch;
+ phase *= a * a;
+ }
+ } else {
+ /*
+ * PDAF in triggered-auto mode. Allow early termination when
+ * phase delta is small; scale down lens movements towards
+ * the end of the sequence, to ensure a stable image.
+ */
+ if (stepCount_ >= cfg_.speeds[speed_].stepFrames) {
+ if (std::abs(phase) < cfg_.speeds[speed_].pdafSquelch)
+ stepCount_ = cfg_.speeds[speed_].stepFrames;
+ } else
+			phase *= stepCount_ / (double)cfg_.speeds[speed_].stepFrames;
+ }
+
+ /* Apply slew rate limit. Report failure if out of bounds. */
+ if (phase < -cfg_.speeds[speed_].maxSlew) {
+ phase = -cfg_.speeds[speed_].maxSlew;
+ reportState_ = (ftarget_ <= cfg_.ranges[range_].focusMin) ? AfState::Failed
+ : AfState::Scanning;
+ } else if (phase > cfg_.speeds[speed_].maxSlew) {
+ phase = cfg_.speeds[speed_].maxSlew;
+ reportState_ = (ftarget_ >= cfg_.ranges[range_].focusMax) ? AfState::Failed
+ : AfState::Scanning;
+ } else
+ reportState_ = AfState::Focused;
+
+ ftarget_ = fsmooth_ + phase;
+}
+
+bool Af::earlyTerminationByPhase(double phase)
+{
+ if (scanData_.size() > 0 &&
+ scanData_[scanData_.size() - 1].conf >= cfg_.confEpsilon) {
+ double oldFocus = scanData_[scanData_.size() - 1].focus;
+ double oldPhase = scanData_[scanData_.size() - 1].phase;
+
+ /*
+ * Check that the gradient is finite and has the expected sign;
+ * Interpolate/extrapolate the lens position for zero phase.
+ * Check that the extrapolation is well-conditioned.
+ */
+ if ((ftarget_ - oldFocus) * (phase - oldPhase) > 0.0) {
+ double param = phase / (phase - oldPhase);
+ if (-3.0 <= param && param <= 3.5) {
+ ftarget_ += param * (oldFocus - ftarget_);
+ LOG(RPiAf, Debug) << "ETBP: param=" << param;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+double Af::findPeak(unsigned i) const
+{
+ double f = scanData_[i].focus;
+
+ if (i > 0 && i + 1 < scanData_.size()) {
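+		/*
+		 * Shift the estimate towards whichever neighbour has the
+		 * smaller contrast drop. With r the ratio of the smaller drop
+		 * to the larger, the weight 0.3125 * (1 - r) * (1.6 - r) gives
+		 * a half-step when a neighbour matches the peak contrast
+		 * (r = 0) and no shift when the drops are equal (r = 1).
+		 */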
+ double dropLo = scanData_[i].contrast - scanData_[i - 1].contrast;
+ double dropHi = scanData_[i].contrast - scanData_[i + 1].contrast;
+ if (0.0 <= dropLo && dropLo < dropHi) {
+ double param = 0.3125 * (1.0 - dropLo / dropHi) * (1.6 - dropLo / dropHi);
+ f += param * (scanData_[i - 1].focus - f);
+ } else if (0.0 <= dropHi && dropHi < dropLo) {
+ double param = 0.3125 * (1.0 - dropHi / dropLo) * (1.6 - dropHi / dropLo);
+ f += param * (scanData_[i + 1].focus - f);
+ }
+ }
+
+ LOG(RPiAf, Debug) << "FindPeak: " << f;
+ return f;
+}
+
+void Af::doScan(double contrast, double phase, double conf)
+{
+ /* Record lens position, contrast and phase values for the current scan */
+ if (scanData_.empty() || contrast > scanMaxContrast_) {
+ scanMaxContrast_ = contrast;
+ scanMaxIndex_ = scanData_.size();
+ }
+ if (contrast < scanMinContrast_)
+ scanMinContrast_ = contrast;
+ scanData_.emplace_back(ScanRecord{ ftarget_, contrast, phase, conf });
+
+ if (scanState_ == ScanState::Coarse) {
+ if (ftarget_ >= cfg_.ranges[range_].focusMax ||
+ contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
+ /*
+			 * Finished coarse scan, or termination based on contrast.
+ * Jump to just after max contrast and start fine scan.
+ */
+ ftarget_ = std::min(ftarget_, findPeak(scanMaxIndex_) +
+ 2.0 * cfg_.speeds[speed_].stepFine);
+ scanState_ = ScanState::Fine;
+ scanData_.clear();
+ } else
+ ftarget_ += cfg_.speeds[speed_].stepCoarse;
+ } else { /* ScanState::Fine */
+ if (ftarget_ <= cfg_.ranges[range_].focusMin || scanData_.size() >= 5 ||
+ contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) {
+ /*
+ * Finished fine scan, or termination based on contrast.
+ * Use quadratic peak-finding to find best contrast position.
+ */
+ ftarget_ = findPeak(scanMaxIndex_);
+ scanState_ = ScanState::Settle;
+ } else
+ ftarget_ -= cfg_.speeds[speed_].stepFine;
+ }
+
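+	/* Allow stepFrames for statistics to reflect the new lens position. */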
+ stepCount_ = (ftarget_ == fsmooth_) ? 0 : cfg_.speeds[speed_].stepFrames;
+}
+
+void Af::doAF(double contrast, double phase, double conf)
+{
+ /* Skip frames at startup and after sensor mode change */
+ if (skipCount_ > 0) {
+ LOG(RPiAf, Debug) << "SKIP";
+ skipCount_--;
+ return;
+ }
+
+ if (scanState_ == ScanState::Pdaf) {
+ /*
+ * Use PDAF closed-loop control whenever available, in both CAF
+ * mode and (for a limited number of iterations) when triggered.
+ * If PDAF fails (due to poor contrast, noise or large defocus),
+ * fall back to a CDAF-based scan. To avoid "nuisance" scans,
+ * scan only after a number of frames with low PDAF confidence.
+ */
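+		/*
+		 * Hysteresis: the confidence threshold is only a quarter of
+		 * confEpsilon until frames start dropping out.
+		 */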
+ if (conf > (dropCount_ ? 1.0 : 0.25) * cfg_.confEpsilon) {
+ doPDAF(phase, conf);
+ if (stepCount_ > 0)
+ stepCount_--;
+ else if (mode_ != AfModeContinuous)
+ scanState_ = ScanState::Idle;
+ dropCount_ = 0;
+ } else if (++dropCount_ == cfg_.speeds[speed_].dropoutFrames)
+ startProgrammedScan();
+ } else if (scanState_ >= ScanState::Coarse && fsmooth_ == ftarget_) {
+ /*
+ * Scanning sequence. This means PDAF has become unavailable.
+ * Allow a delay between steps for CDAF FoM statistics to be
+ * updated, and a "settling time" at the end of the sequence.
+ * [A coarse or fine scan can be abandoned if two PDAF samples
+ * allow direct interpolation of the zero-phase lens position.]
+ */
+ if (stepCount_ > 0)
+ stepCount_--;
+ else if (scanState_ == ScanState::Settle) {
+ if (prevContrast_ >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_ &&
+ scanMinContrast_ <= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_)
+ reportState_ = AfState::Focused;
+ else
+ reportState_ = AfState::Failed;
+ if (mode_ == AfModeContinuous && !pauseFlag_ &&
+ cfg_.speeds[speed_].dropoutFrames > 0)
+ scanState_ = ScanState::Pdaf;
+ else
+ scanState_ = ScanState::Idle;
+ scanData_.clear();
+ } else if (conf >= cfg_.confEpsilon && earlyTerminationByPhase(phase)) {
+ scanState_ = ScanState::Settle;
+ stepCount_ = (mode_ == AfModeContinuous) ? 0
+ : cfg_.speeds[speed_].stepFrames;
+ } else
+ doScan(contrast, phase, conf);
+ }
+}
+
+void Af::updateLensPosition()
+{
+ if (scanState_ >= ScanState::Pdaf) {
+ ftarget_ = std::clamp(ftarget_,
+ cfg_.ranges[range_].focusMin,
+ cfg_.ranges[range_].focusMax);
+ }
+
+ if (initted_) {
+ /* from a known lens position: apply slew rate limit */
+ fsmooth_ = std::clamp(ftarget_,
+ fsmooth_ - cfg_.speeds[speed_].maxSlew,
+ fsmooth_ + cfg_.speeds[speed_].maxSlew);
+ } else {
+ /* from an unknown position: go straight to target, but add delay */
+ fsmooth_ = ftarget_;
+ initted_ = true;
+ skipCount_ = cfg_.skipFrames;
+ }
+}
+
+void Af::startAF()
+{
+ /* Use PDAF if the tuning file allows it; else CDAF. */
+ if (cfg_.speeds[speed_].dropoutFrames > 0 &&
+ (mode_ == AfModeContinuous || cfg_.speeds[speed_].pdafFrames > 0)) {
+ if (!initted_) {
+ ftarget_ = cfg_.ranges[range_].focusDefault;
+ updateLensPosition();
+ }
+ stepCount_ = (mode_ == AfModeContinuous) ? 0 : cfg_.speeds[speed_].pdafFrames;
+ scanState_ = ScanState::Pdaf;
+ scanData_.clear();
+ dropCount_ = 0;
+ reportState_ = AfState::Scanning;
+ } else
+ startProgrammedScan();
+}
+
+void Af::startProgrammedScan()
+{
+ ftarget_ = cfg_.ranges[range_].focusMin;
+ updateLensPosition();
+ scanState_ = ScanState::Coarse;
+ scanMaxContrast_ = 0.0;
+ scanMinContrast_ = 1.0e9;
+ scanMaxIndex_ = 0;
+ scanData_.clear();
+ stepCount_ = cfg_.speeds[speed_].stepFrames;
+ reportState_ = AfState::Scanning;
+}
+
+void Af::goIdle()
+{
+ scanState_ = ScanState::Idle;
+ reportState_ = AfState::Idle;
+ scanData_.clear();
+}
+
+/*
+ * PDAF phase data are available in prepare(), but CDAF statistics are not
+ * available until process(). We are gambling on the availability of PDAF.
+ * To expedite feedback control using PDAF, issue the V4L2 lens control from
+ * prepare(). Conversely, during scans, we must allow an extra frame delay
+ * between steps, to retrieve CDAF statistics from the previous process()
+ * so we can terminate the scan early without having to change our minds.
+ */
+
+void Af::prepare(Metadata *imageMetadata)
+{
+ /* Initialize for triggered scan or start of CAF mode */
+ if (scanState_ == ScanState::Trigger)
+ startAF();
+
+ if (initted_) {
+ /* Get PDAF from the embedded metadata, and run AF algorithm core */
+ PdafRegions regions;
+ double phase = 0.0, conf = 0.0;
+ double oldFt = ftarget_;
+ double oldFs = fsmooth_;
+ ScanState oldSs = scanState_;
+ uint32_t oldSt = stepCount_;
+ if (imageMetadata->get("pdaf.regions", regions) == 0)
+ getPhase(regions, phase, conf);
+ doAF(prevContrast_, phase, conf);
+ updateLensPosition();
+ LOG(RPiAf, Debug) << std::fixed << std::setprecision(2)
+ << static_cast<unsigned int>(reportState_)
+ << " sst" << static_cast<unsigned int>(oldSs)
+ << "->" << static_cast<unsigned int>(scanState_)
+ << " stp" << oldSt << "->" << stepCount_
+ << " ft" << oldFt << "->" << ftarget_
+ << " fs" << oldFs << "->" << fsmooth_
+ << " cont=" << (int)prevContrast_
+ << " phase=" << (int)phase << " conf=" << (int)conf;
+ }
+
+ /* Report status and produce new lens setting */
+ AfStatus status;
+ if (pauseFlag_)
+ status.pauseState = (scanState_ == ScanState::Idle) ? AfPauseState::Paused
+ : AfPauseState::Pausing;
+ else
+ status.pauseState = AfPauseState::Running;
+
+ if (mode_ == AfModeAuto && scanState_ != ScanState::Idle)
+ status.state = AfState::Scanning;
+ else
+ status.state = reportState_;
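+	/* The tuning map converts the smoothed dioptre position to a lens driver setting. */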
+ status.lensSetting = initted_ ? std::optional<int>(cfg_.map.eval(fsmooth_))
+ : std::nullopt;
+ imageMetadata->set("af.status", status);
+}
+
+void Af::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata)
+{
+ prevContrast_ = getContrast(stats->focusRegions);
+}
+
+/* Controls */
+
+void Af::setRange(AfRange r)
+{
+ LOG(RPiAf, Debug) << "setRange: " << (unsigned)r;
+ if (r < AfAlgorithm::AfRangeMax)
+ range_ = r;
+}
+
+void Af::setSpeed(AfSpeed s)
+{
+ LOG(RPiAf, Debug) << "setSpeed: " << (unsigned)s;
+ if (s < AfAlgorithm::AfSpeedMax) {
+ if (scanState_ == ScanState::Pdaf &&
+ cfg_.speeds[s].pdafFrames > cfg_.speeds[speed_].pdafFrames)
+ stepCount_ += cfg_.speeds[s].pdafFrames - cfg_.speeds[speed_].pdafFrames;
+ speed_ = s;
+ }
+}
+
+void Af::setMetering(bool mode)
+{
+ if (useWindows_ != mode) {
+ useWindows_ = mode;
+ invalidateWeights();
+ }
+}
+
+void Af::setWindows(libcamera::Span<libcamera::Rectangle const> const &wins)
+{
+ windows_.clear();
+ for (auto &w : wins) {
+ LOG(RPiAf, Debug) << "Window: "
+ << w.x << ", "
+ << w.y << ", "
+ << w.width << ", "
+ << w.height;
+ windows_.push_back(w);
+ if (windows_.size() >= MaxWindows)
+ break;
+ }
+
+ if (useWindows_)
+ invalidateWeights();
+}
+
+bool Af::setLensPosition(double dioptres, int32_t *hwpos)
+{
+ bool changed = false;
+
+ if (mode_ == AfModeManual) {
+ LOG(RPiAf, Debug) << "setLensPosition: " << dioptres;
+ ftarget_ = cfg_.map.domain().clamp(dioptres);
+ changed = !(initted_ && fsmooth_ == ftarget_);
+ updateLensPosition();
+ }
+
+ if (hwpos)
+ *hwpos = cfg_.map.eval(fsmooth_);
+
+ return changed;
+}
+
+std::optional<double> Af::getLensPosition() const
+{
+ /*
+ * \todo We ought to perform some precise timing here to determine
+ * the current lens position.
+ */
+ return initted_ ? std::optional<double>(fsmooth_) : std::nullopt;
+}
+
+void Af::cancelScan()
+{
+ LOG(RPiAf, Debug) << "cancelScan";
+ if (mode_ == AfModeAuto)
+ goIdle();
+}
+
+void Af::triggerScan()
+{
+ LOG(RPiAf, Debug) << "triggerScan";
+ if (mode_ == AfModeAuto && scanState_ == ScanState::Idle)
+ scanState_ = ScanState::Trigger;
+}
+
+void Af::setMode(AfAlgorithm::AfMode mode)
+{
+ LOG(RPiAf, Debug) << "setMode: " << (unsigned)mode;
+ if (mode_ != mode) {
+ mode_ = mode;
+ pauseFlag_ = false;
+ if (mode == AfModeContinuous)
+ scanState_ = ScanState::Trigger;
+ else if (mode != AfModeAuto || scanState_ < ScanState::Coarse)
+ goIdle();
+ }
+}
+
+AfAlgorithm::AfMode Af::getMode() const
+{
+ return mode_;
+}
+
+void Af::pause(AfAlgorithm::AfPause pause)
+{
+ LOG(RPiAf, Debug) << "pause: " << (unsigned)pause;
+ if (mode_ == AfModeContinuous) {
+ if (pause == AfPauseResume && pauseFlag_) {
+ pauseFlag_ = false;
+ if (scanState_ < ScanState::Coarse)
+ scanState_ = ScanState::Trigger;
+ } else if (pause != AfPauseResume && !pauseFlag_) {
+ pauseFlag_ = true;
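+			/* A deferred pause lets a scan already in progress run to completion. */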
+ if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse)
+ goIdle();
+ }
+ }
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Af(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/af.h b/src/ipa/rpi/controller/rpi/af.h
new file mode 100644
index 00000000..317a51f3
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/af.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022-2023, Raspberry Pi Ltd
+ *
+ * Autofocus control algorithm
+ */
+#pragma once
+
+#include "../af_algorithm.h"
+#include "../af_status.h"
+#include "../pdaf_data.h"
+
+#include "libipa/pwl.h"
+
+/*
+ * This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF.
+ *
+ * Whenever PDAF is available, it is used in a continuous feedback loop.
+ * When triggered in auto mode, we simply enable AF for a limited number
+ * of frames (it may terminate early if the delta becomes small enough).
+ *
+ * When PDAF confidence is low (due e.g. to low contrast or extreme defocus)
+ * or PDAF data are absent, fall back to CDAF with a programmed scan pattern.
+ * A coarse and fine scan are performed, using the ISP's CDAF focus FoM to
+ * estimate the lens position with peak contrast. This is slower due to
+ * extra latency in the ISP, and requires a settling time between steps.
+ *
+ * Some hysteresis is applied to the switch between PDAF and CDAF, to avoid
+ * "nuisance" scans. During each interval where PDAF is not working, only
+ * ONE scan will be performed; CAF cannot track objects using CDAF alone.
+ */
+
+namespace RPiController {
+
+class Af : public AfAlgorithm
+{
+public:
+	Af(Controller *controller = nullptr);
+ ~Af();
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+
+ /* IPA calls */
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+ /* controls */
+ void setRange(AfRange range) override;
+ void setSpeed(AfSpeed speed) override;
+ void setMetering(bool use_windows) override;
+ void setWindows(libcamera::Span<libcamera::Rectangle const> const &wins) override;
+ void setMode(AfMode mode) override;
+ AfMode getMode() const override;
+ bool setLensPosition(double dioptres, int32_t *hwpos) override;
+ std::optional<double> getLensPosition() const override;
+ void triggerScan() override;
+ void cancelScan() override;
+ void pause(AfPause pause) override;
+
+private:
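+	/*
+	 * The ordering of these states matters: the code compares them with
+	 * relational operators, so the scanning states must stay in sequence.
+	 */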
+ enum class ScanState {
+ Idle = 0,
+ Trigger,
+ Pdaf,
+ Coarse,
+ Fine,
+ Settle
+ };
+
+ struct RangeDependentParams {
+		double focusMin; /* lower (far) limit in dioptres */
+ double focusMax; /* upper (near) limit in dioptres */
+ double focusDefault; /* default setting ("hyperfocal") */
+
+ RangeDependentParams();
+ void read(const libcamera::YamlObject &params);
+ };
+
+ struct SpeedDependentParams {
+ double stepCoarse; /* used for scans */
+ double stepFine; /* used for scans */
+ double contrastRatio; /* used for scan termination and reporting */
+ double pdafGain; /* coefficient for PDAF feedback loop */
+ double pdafSquelch; /* PDAF stability parameter (device-specific) */
+ double maxSlew; /* limit for lens movement per frame */
+ uint32_t pdafFrames; /* number of iterations when triggered */
+ uint32_t dropoutFrames; /* number of non-PDAF frames to switch to CDAF */
+ uint32_t stepFrames; /* frames to skip in between steps of a scan */
+
+ SpeedDependentParams();
+ void read(const libcamera::YamlObject &params);
+ };
+
+ struct CfgParams {
+ RangeDependentParams ranges[AfRangeMax];
+ SpeedDependentParams speeds[AfSpeedMax];
+ uint32_t confEpsilon; /* PDAF hysteresis threshold (sensor-specific) */
+ uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */
+ uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */
+ uint32_t skipFrames; /* frames to skip at start or modeswitch */
+ libcamera::ipa::Pwl map; /* converts dioptres -> lens driver position */
+
+ CfgParams();
+ int read(const libcamera::YamlObject &params);
+ void initialise();
+ };
+
+ struct ScanRecord {
+ double focus;
+ double contrast;
+ double phase;
+ double conf;
+ };
+
+ struct RegionWeights {
+ unsigned rows;
+ unsigned cols;
+ uint32_t sum;
+ std::vector<uint16_t> w;
+
+ RegionWeights()
+ : rows(0), cols(0), sum(0), w() {}
+ };
+
+ void computeWeights(RegionWeights *wgts, unsigned rows, unsigned cols);
+ void invalidateWeights();
+ bool getPhase(PdafRegions const &regions, double &phase, double &conf);
+ double getContrast(const FocusRegions &focusStats);
+ void doPDAF(double phase, double conf);
+ bool earlyTerminationByPhase(double phase);
+ double findPeak(unsigned index) const;
+ void doScan(double contrast, double phase, double conf);
+ void doAF(double contrast, double phase, double conf);
+ void updateLensPosition();
+ void startAF();
+ void startProgrammedScan();
+ void goIdle();
+
+ /* Configuration and settings */
+ CfgParams cfg_;
+ AfRange range_;
+ AfSpeed speed_;
+ AfMode mode_;
+ bool pauseFlag_;
+ libcamera::Rectangle statsRegion_;
+ std::vector<libcamera::Rectangle> windows_;
+ bool useWindows_;
+ RegionWeights phaseWeights_;
+ RegionWeights contrastWeights_;
+
+ /* Working state. */
+ ScanState scanState_;
+ bool initted_;
+ double ftarget_, fsmooth_;
+ double prevContrast_;
+ unsigned skipCount_, stepCount_, dropCount_;
+ unsigned scanMaxIndex_;
+ double scanMaxContrast_, scanMinContrast_;
+ std::vector<ScanRecord> scanData_;
+ AfState reportState_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp
new file mode 100644
index 00000000..c48fdf15
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc.cpp
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+
+#include "agc.h"
+
+#include <libcamera/base/log.h>
+
+#include "../metadata.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiAgc)
+
+#define NAME "rpi.agc"
+
+Agc::Agc(Controller *controller)
+ : AgcAlgorithm(controller),
+ activeChannels_({ 0 }), index_(0)
+{
+}
+
+char const *Agc::name() const
+{
+ return NAME;
+}
+
+int Agc::read(const libcamera::YamlObject &params)
+{
+ /*
+ * When there is only a single channel we can read the old style syntax.
+ * Otherwise we expect a "channels" keyword followed by a list of configurations.
+ */
+ if (!params.contains("channels")) {
+ LOG(RPiAgc, Debug) << "Single channel only";
+ channelTotalExposures_.resize(1, 0s);
+ channelData_.emplace_back();
+ return channelData_.back().channel.read(params, getHardwareConfig());
+ }
+
+ const auto &channels = params["channels"].asList();
+ for (auto ch = channels.begin(); ch != channels.end(); ch++) {
+ LOG(RPiAgc, Debug) << "Read AGC channel";
+ channelData_.emplace_back();
+ int ret = channelData_.back().channel.read(*ch, getHardwareConfig());
+ if (ret)
+ return ret;
+ }
+
+ LOG(RPiAgc, Debug) << "Read " << channelData_.size() << " channel(s)";
+ if (channelData_.empty()) {
+ LOG(RPiAgc, Error) << "No AGC channels provided";
+ return -1;
+ }
+
+ channelTotalExposures_.resize(channelData_.size(), 0s);
+
+ return 0;
+}
+
+int Agc::checkChannel(unsigned int channelIndex) const
+{
+ if (channelIndex >= channelData_.size()) {
+ LOG(RPiAgc, Warning) << "AGC channel " << channelIndex << " not available";
+ return -1;
+ }
+
+ return 0;
+}
+
+void Agc::disableAuto()
+{
+ LOG(RPiAgc, Debug) << "disableAuto";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.disableAuto();
+}
+
+void Agc::enableAuto()
+{
+ LOG(RPiAgc, Debug) << "enableAuto";
+
+ /* All channels are enabled/disabled together. */
+ for (auto &data : channelData_)
+ data.channel.enableAuto();
+}
+
+unsigned int Agc::getConvergenceFrames() const
+{
+ /* If there are n channels, it presumably takes n times as long to converge. */
+ return channelData_[0].channel.getConvergenceFrames() * activeChannels_.size();
+}
+
+std::vector<double> const &Agc::getWeights() const
+{
+ /*
+ * In future the metering weights may be determined differently, making it
+	 * difficult to associate different sets of weights with different channels.
+ * Therefore we shall impose a limitation, at least for now, that all
+ * channels will use the same weights.
+ */
+ return channelData_[0].channel.getWeights();
+}
+
+void Agc::setEv(unsigned int channelIndex, double ev)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setEv " << ev << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setEv(ev);
+}
+
+void Agc::setFlickerPeriod(Duration flickerPeriod)
+{
+ LOG(RPiAgc, Debug) << "setFlickerPeriod " << flickerPeriod;
+
+ /* Flicker period will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setFlickerPeriod(flickerPeriod);
+}
+
+void Agc::setMaxExposureTime(Duration maxExposureTime)
+{
+ /* Frame durations will be the same across all channels too. */
+ for (auto &data : channelData_)
+ data.channel.setMaxExposureTime(maxExposureTime);
+}
+
+void Agc::setFixedExposureTime(unsigned int channelIndex, Duration fixedExposureTime)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setFixedExposureTime " << fixedExposureTime
+ << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setFixedExposureTime(fixedExposureTime);
+}
+
+void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain)
+{
+ if (checkChannel(channelIndex))
+ return;
+
+ LOG(RPiAgc, Debug) << "setFixedAnalogueGain " << fixedAnalogueGain
+ << " for channel " << channelIndex;
+ channelData_[channelIndex].channel.setFixedAnalogueGain(fixedAnalogueGain);
+}
+
+void Agc::setMeteringMode(std::string const &meteringModeName)
+{
+ /* Metering modes will be the same across all channels too. */
+ for (auto &data : channelData_)
+ data.channel.setMeteringMode(meteringModeName);
+}
+
+void Agc::setExposureMode(std::string const &exposureModeName)
+{
+ LOG(RPiAgc, Debug) << "setExposureMode " << exposureModeName;
+
+ /* Exposure mode will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setExposureMode(exposureModeName);
+}
+
+void Agc::setConstraintMode(std::string const &constraintModeName)
+{
+ LOG(RPiAgc, Debug) << "setConstraintMode " << constraintModeName;
+
+ /* Constraint mode will be the same across all channels. */
+ for (auto &data : channelData_)
+ data.channel.setConstraintMode(constraintModeName);
+}
+
+template<typename T>
+std::ostream &operator<<(std::ostream &os, const std::vector<T> &v)
+{
+ os << "{";
+ for (const auto &e : v)
+ os << " " << e;
+ os << " }";
+ return os;
+}
+
+void Agc::setActiveChannels(const std::vector<unsigned int> &activeChannels)
+{
+ if (activeChannels.empty()) {
+ LOG(RPiAgc, Warning) << "No active AGC channels supplied";
+ return;
+ }
+
+ for (auto index : activeChannels)
+ if (checkChannel(index))
+ return;
+
+ LOG(RPiAgc, Debug) << "setActiveChannels " << activeChannels;
+ activeChannels_ = activeChannels;
+ index_ = 0;
+}
+
+void Agc::switchMode(CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /*
+ * We run switchMode on every channel, and then we're going to start over
+	 * with the first active channel again, which means that this channel's
+ * status needs to be the one we leave in the metadata.
+ */
+ AgcStatus status;
+
+ for (unsigned int channelIndex = 0; channelIndex < channelData_.size(); channelIndex++) {
+ LOG(RPiAgc, Debug) << "switchMode for channel " << channelIndex;
+ channelData_[channelIndex].channel.switchMode(cameraMode, metadata);
+ if (channelIndex == activeChannels_[0])
+ metadata->get("agc.status", status);
+ }
+
+ status.channel = activeChannels_[0];
+ metadata->set("agc.status", status);
+ index_ = 0;
+}
+
+static void getDelayedChannelIndex(Metadata *metadata, const char *message, unsigned int &channelIndex)
+{
+ std::unique_lock<RPiController::Metadata> lock(*metadata);
+ AgcStatus *status = metadata->getLocked<AgcStatus>("agc.delayed_status");
+ if (status)
+ channelIndex = status->channel;
+ else {
+ /* This does happen at startup, otherwise it would be a Warning or Error. */
+ LOG(RPiAgc, Debug) << message;
+ }
+}
+
+static libcamera::utils::Duration
+setCurrentChannelIndexGetExposure(Metadata *metadata, const char *message, unsigned int channelIndex)
+{
+ std::unique_lock<RPiController::Metadata> lock(*metadata);
+ AgcStatus *status = metadata->getLocked<AgcStatus>("agc.status");
+ libcamera::utils::Duration dur = 0s;
+
+ if (status) {
+ status->channel = channelIndex;
+ dur = status->totalExposureValue;
+ } else {
+ /* This does happen at startup, otherwise it would be a Warning or Error. */
+ LOG(RPiAgc, Debug) << message;
+ }
+
+ return dur;
+}
+
+void Agc::prepare(Metadata *imageMetadata)
+{
+ /*
+ * The DeviceStatus in the metadata should be correct for the image we
+ * are processing. The delayed status should tell us what channel this frame
+ * was from, so we will use that channel's prepare method.
+ *
+ * \todo To be honest, there's not much that's stateful in the prepare methods
+ * so we should perhaps re-evaluate whether prepare even needs to be done
+ * "per channel".
+ */
+ unsigned int channelIndex = activeChannels_[0];
+ getDelayedChannelIndex(imageMetadata, "prepare: no delayed status", channelIndex);
+
+ LOG(RPiAgc, Debug) << "prepare for channel " << channelIndex;
+ channelData_[channelIndex].channel.prepare(imageMetadata);
+}
+
+void Agc::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /*
+ * We want to generate values for the next channel in round robin fashion
+	 * We want to generate values for the next channel in round-robin fashion
+	 * (i.e. the channel at location index_ in the activeChannels_ list), even though
+ * again from the delayed status).
+ */
+
+	/* Generate updated AGC values for the new channel that we are requesting. */
+ unsigned int channelIndex = activeChannels_[index_];
+ AgcChannelData &channelData = channelData_[channelIndex];
+ /* The stats that arrived with this image correspond to the following channel. */
+ unsigned int statsIndex = 0;
+ getDelayedChannelIndex(imageMetadata, "process: no delayed status for stats", statsIndex);
+ LOG(RPiAgc, Debug) << "process for channel " << channelIndex;
+
+ /*
+ * We keep a cache of the most recent DeviceStatus and stats for each channel,
+	 * so that we can invoke the next channel's process method with the most up-to-date
+ * values.
+ */
+ LOG(RPiAgc, Debug) << "Save DeviceStatus and stats for channel " << statsIndex;
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get<DeviceStatus>("device.status", deviceStatus) == 0)
+ channelData_[statsIndex].deviceStatus = deviceStatus;
+ else
+ /* Every frame should have a DeviceStatus. */
+ LOG(RPiAgc, Error) << "process: no device status found";
+ channelData_[statsIndex].statistics = stats;
+
+ /*
+ * Finally fetch the most recent DeviceStatus and stats for the new channel, if both
+ * exist, and call process(). We must make the agc.status metadata record correctly
+ * which channel this is.
+ */
+ StatisticsPtr *statsPtr = &stats;
+ if (channelData.statistics && channelData.deviceStatus) {
+ deviceStatus = *channelData.deviceStatus;
+ statsPtr = &channelData.statistics;
+ } else {
+ /* Can also happen when new channels start. */
+ LOG(RPiAgc, Debug) << "process: channel " << channelIndex << " not seen yet";
+ }
+
+ channelData.channel.process(*statsPtr, deviceStatus, imageMetadata, channelTotalExposures_);
+ auto dur = setCurrentChannelIndexGetExposure(imageMetadata, "process: no AGC status found",
+ channelIndex);
+ if (dur)
+ channelTotalExposures_[channelIndex] = dur;
+
+ /* And onto the next channel for the next call. */
+ index_ = (index_ + 1) % activeChannels_.size();
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Agc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h
new file mode 100644
index 00000000..3aca000b
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "../agc_algorithm.h"
+
+#include "agc_channel.h"
+
+namespace RPiController {
+
+struct AgcChannelData {
+ AgcChannel channel;
+ std::optional<DeviceStatus> deviceStatus;
+ StatisticsPtr statistics;
+};
+
+class Agc : public AgcAlgorithm
+{
+public:
+ Agc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ unsigned int getConvergenceFrames() const override;
+ std::vector<double> const &getWeights() const override;
+ void setEv(unsigned int channel, double ev) override;
+ void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override;
+ void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) override;
+ void setFixedExposureTime(unsigned int channelIndex,
+ libcamera::utils::Duration fixedExposureTime) override;
+ void setFixedAnalogueGain(unsigned int channelIndex,
+ double fixedAnalogueGain) override;
+ void setMeteringMode(std::string const &meteringModeName) override;
+ void setExposureMode(std::string const &exposureModeName) override;
+	void setConstraintMode(std::string const &constraintModeName) override;
+ void enableAuto() override;
+ void disableAuto() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ void setActiveChannels(const std::vector<unsigned int> &activeChannels) override;
+
+private:
+ int checkChannel(unsigned int channel) const;
+ std::vector<AgcChannelData> channelData_;
+ std::vector<unsigned int> activeChannels_;
+ unsigned int index_; /* index into the activeChannels_ */
+ AgcChannelTotalExposures channelTotalExposures_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp
new file mode 100644
index 00000000..79c45973
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+
+#include "agc_channel.h"
+
+#include <algorithm>
+#include <tuple>
+
+#include <libcamera/base/log.h>
+
+#include "libipa/colours.h"
+#include "libipa/vector.h"
+
+#include "../awb_status.h"
+#include "../device_status.h"
+#include "../histogram.h"
+#include "../lux_status.h"
+#include "../metadata.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using libcamera::utils::Duration;
+using namespace std::literals::chrono_literals;
+
+LOG_DECLARE_CATEGORY(RPiAgc)
+
+int AgcMeteringMode::read(const libcamera::YamlObject &params)
+{
+ const YamlObject &yamlWeights = params["weights"];
+
+ for (const auto &p : yamlWeights.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ weights.push_back(*value);
+ }
+
+ return 0;
+}
+
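+/*
+ * Read all metering modes and return the name of the first one listed,
+ * which the caller treats as the default.
+ */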
+static std::tuple<int, std::string>
+readMeteringModes(std::map<std::string, AgcMeteringMode> &metering_modes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcMeteringMode meteringMode;
+ ret = meteringMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ metering_modes[key] = std::move(meteringMode);
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcExposureMode::read(const libcamera::YamlObject &params)
+{
+ auto value = params["shutter"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ std::transform(value->begin(), value->end(), std::back_inserter(exposureTime),
+ [](double v) { return v * 1us; });
+
+ value = params["gain"].getList<double>();
+ if (!value)
+ return -EINVAL;
+ gain = std::move(*value);
+
+ if (exposureTime.size() < 2 || gain.size() < 2) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: must have at least two entries in exposure profile";
+ return -EINVAL;
+ }
+
+ if (exposureTime.size() != gain.size()) {
+ LOG(RPiAgc, Error)
+ << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static std::tuple<int, std::string>
+readExposureModes(std::map<std::string, AgcExposureMode> &exposureModes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ AgcExposureMode exposureMode;
+ ret = exposureMode.read(value);
+ if (ret)
+ return { ret, {} };
+
+ exposureModes[key] = std::move(exposureMode);
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcConstraint::read(const libcamera::YamlObject &params)
+{
+ std::string boundString = params["bound"].get<std::string>("");
+	std::transform(boundString.begin(), boundString.end(),
+		       boundString.begin(), ::toupper);
+ if (boundString != "UPPER" && boundString != "LOWER") {
+ LOG(RPiAgc, Error) << "AGC constraint type should be UPPER or LOWER";
+ return -EINVAL;
+ }
+ bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER;
+
+ auto value = params["q_lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qLo = *value;
+
+ value = params["q_hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ qHi = *value;
+
+ yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{});
+ return yTarget.empty() ? -EINVAL : 0;
+}
+
+static std::tuple<int, AgcConstraintMode>
+readConstraintMode(const libcamera::YamlObject &params)
+{
+ AgcConstraintMode mode;
+ int ret;
+
+ for (const auto &p : params.asList()) {
+ AgcConstraint constraint;
+ ret = constraint.read(p);
+ if (ret)
+ return { ret, {} };
+
+ mode.push_back(std::move(constraint));
+ }
+
+ return { 0, mode };
+}
+
+static std::tuple<int, std::string>
+readConstraintModes(std::map<std::string, AgcConstraintMode> &constraintModes,
+ const libcamera::YamlObject &params)
+{
+ std::string first;
+ int ret;
+
+ for (const auto &[key, value] : params.asDict()) {
+ std::tie(ret, constraintModes[key]) = readConstraintMode(value);
+ if (ret)
+ return { ret, {} };
+
+ if (first.empty())
+ first = key;
+ }
+
+ return { 0, first };
+}
+
+int AgcChannelConstraint::read(const libcamera::YamlObject &params)
+{
+ auto channelValue = params["channel"].get<unsigned int>();
+ if (!channelValue) {
+ LOG(RPiAgc, Error) << "AGC channel constraint must have a channel";
+ return -EINVAL;
+ }
+ channel = *channelValue;
+
+ std::string boundString = params["bound"].get<std::string>("");
+	std::transform(boundString.begin(), boundString.end(),
+		       boundString.begin(), ::toupper);
+ if (boundString != "UPPER" && boundString != "LOWER") {
+ LOG(RPiAgc, Error) << "AGC channel constraint type should be UPPER or LOWER";
+ return -EINVAL;
+ }
+ bound = boundString == "UPPER" ? Bound::UPPER : Bound::LOWER;
+
+ auto factorValue = params["factor"].get<double>();
+ if (!factorValue) {
+ LOG(RPiAgc, Error) << "AGC channel constraint must have a factor";
+ return -EINVAL;
+ }
+ factor = *factorValue;
+
+ return 0;
+}
+
+static int readChannelConstraints(std::vector<AgcChannelConstraint> &channelConstraints,
+ const libcamera::YamlObject &params)
+{
+ for (const auto &p : params.asList()) {
+ AgcChannelConstraint constraint;
+ int ret = constraint.read(p);
+ if (ret)
+ return ret;
+
+ channelConstraints.push_back(constraint);
+ }
+
+ return 0;
+}
+
+int AgcConfig::read(const libcamera::YamlObject &params)
+{
+ LOG(RPiAgc, Debug) << "AgcConfig";
+ int ret;
+
+ std::tie(ret, defaultMeteringMode) =
+ readMeteringModes(meteringModes, params["metering_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultExposureMode) =
+ readExposureModes(exposureModes, params["exposure_modes"]);
+ if (ret)
+ return ret;
+ std::tie(ret, defaultConstraintMode) =
+ readConstraintModes(constraintModes, params["constraint_modes"]);
+ if (ret)
+ return ret;
+
+ if (params.contains("channel_constraints")) {
+ ret = readChannelConstraints(channelConstraints, params["channel_constraints"]);
+ if (ret)
+ return ret;
+ }
+
+ yTarget = params["y_target"].get<ipa::Pwl>(ipa::Pwl{});
+ if (yTarget.empty())
+ return -EINVAL;
+
+ speed = params["speed"].get<double>(0.2);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(6);
+ fastReduceThreshold = params["fast_reduce_threshold"].get<double>(0.4);
+ baseEv = params["base_ev"].get<double>(1.0);
+
+ /* Start with quite a low value as ramping up is easier than ramping down. */
+ defaultExposureTime = params["default_exposure_time"].get<double>(1000) * 1us;
+ defaultAnalogueGain = params["default_analogue_gain"].get<double>(1.0);
+
+ stableRegion = params["stable_region"].get<double>(0.02);
+
+ desaturate = params["desaturate"].get<int>(1);
+
+ return 0;
+}
+
+AgcChannel::ExposureValues::ExposureValues()
+ : exposureTime(0s), analogueGain(0),
+ totalExposure(0s), totalExposureNoDG(0s)
+{
+}
+
+AgcChannel::AgcChannel()
+ : meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr),
+ frameCount_(0), lockCount_(0),
+ lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s),
+ maxExposureTime_(0s), fixedExposureTime_(0s), fixedAnalogueGain_(0.0)
+{
+ /* Set AWB default values in case early frames have no updates in metadata. */
+ awb_.gainR = 1.0;
+ awb_.gainG = 1.0;
+ awb_.gainB = 1.0;
+
+ /*
+ * Setting status_.totalExposureValue_ to zero initially tells us
+ * it's not been calculated yet (i.e. Process hasn't yet run).
+ */
+ status_ = {};
+ status_.ev = ev_;
+}
+
+int AgcChannel::read(const libcamera::YamlObject &params,
+ const Controller::HardwareConfig &hardwareConfig)
+{
+ int ret = config_.read(params);
+ if (ret)
+ return ret;
+
+ const Size &size = hardwareConfig.agcZoneWeights;
+ for (auto const &modes : config_.meteringModes) {
+ if (modes.second.weights.size() != size.width * size.height) {
+ LOG(RPiAgc, Error) << "AgcMeteringMode: Incorrect number of weights";
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Set the config's defaults (which are the first ones it read) as our
+ * current modes, until someone changes them. (they're all known to
+ * exist at this point)
+ */
+ meteringModeName_ = config_.defaultMeteringMode;
+ meteringMode_ = &config_.meteringModes[meteringModeName_];
+ exposureModeName_ = config_.defaultExposureMode;
+ exposureMode_ = &config_.exposureModes[exposureModeName_];
+ constraintModeName_ = config_.defaultConstraintMode;
+ constraintMode_ = &config_.constraintModes[constraintModeName_];
+ /* Set up the "last exposure time/gain" values, in case AGC starts "disabled". */
+ status_.exposureTime = config_.defaultExposureTime;
+ status_.analogueGain = config_.defaultAnalogueGain;
+ return 0;
+}
+
+void AgcChannel::disableAuto()
+{
+ fixedExposureTime_ = status_.exposureTime;
+ fixedAnalogueGain_ = status_.analogueGain;
+}
+
+void AgcChannel::enableAuto()
+{
+ fixedExposureTime_ = 0s;
+ fixedAnalogueGain_ = 0;
+}
+
+unsigned int AgcChannel::getConvergenceFrames() const
+{
+ /*
+ * If exposure time and gain have been explicitly set, there is no
+ * convergence to happen, so no need to drop any frames - return zero.
+ */
+ if (fixedExposureTime_ && fixedAnalogueGain_)
+ return 0;
+ else
+ return config_.convergenceFrames;
+}
+
+std::vector<double> const &AgcChannel::getWeights() const
+{
+ /*
+ * In case someone calls setMeteringMode and then this before the
+ * algorithm has run and updated the meteringMode_ pointer.
+ */
+ auto it = config_.meteringModes.find(meteringModeName_);
+ if (it == config_.meteringModes.end())
+ return meteringMode_->weights;
+ return it->second.weights;
+}
+
+void AgcChannel::setEv(double ev)
+{
+ ev_ = ev;
+}
+
+void AgcChannel::setFlickerPeriod(Duration flickerPeriod)
+{
+ flickerPeriod_ = flickerPeriod;
+}
+
+void AgcChannel::setMaxExposureTime(Duration maxExposureTime)
+{
+ maxExposureTime_ = maxExposureTime;
+}
+
+void AgcChannel::setFixedExposureTime(Duration fixedExposureTime)
+{
+ fixedExposureTime_ = fixedExposureTime;
+ /* Set this in case someone calls disableAuto() straight after. */
+ status_.exposureTime = limitExposureTime(fixedExposureTime_);
+}
+
+void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain)
+{
+ fixedAnalogueGain_ = fixedAnalogueGain;
+ /* Set this in case someone calls disableAuto() straight after. */
+ status_.analogueGain = limitGain(fixedAnalogueGain);
+}
+
+void AgcChannel::setMeteringMode(std::string const &meteringModeName)
+{
+ meteringModeName_ = meteringModeName;
+}
+
+void AgcChannel::setExposureMode(std::string const &exposureModeName)
+{
+ exposureModeName_ = exposureModeName;
+}
+
+void AgcChannel::setConstraintMode(std::string const &constraintModeName)
+{
+ constraintModeName_ = constraintModeName;
+}
+
+void AgcChannel::switchMode(CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /* AGC expects the mode sensitivity always to be non-zero. */
+ ASSERT(cameraMode.sensitivity);
+
+ housekeepConfig();
+
+ /*
+	 * Store the mode in the local state. We must cache the sensitivity
+	 * of the previous mode for the calculations below.
+ */
+ double lastSensitivity = mode_.sensitivity;
+ mode_ = cameraMode;
+
+ Duration fixedExposureTime = limitExposureTime(fixedExposureTime_);
+ if (fixedExposureTime && fixedAnalogueGain_) {
+ /* We're going to reset the algorithm here with these fixed values. */
+ fetchAwbStatus(metadata);
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+
+ /* This is the equivalent of computeTargetExposure and applyDigitalGain. */
+ target_.totalExposureNoDG = fixedExposureTime_ * fixedAnalogueGain_;
+ target_.totalExposure = target_.totalExposureNoDG / minColourGain;
+
+ /* Equivalent of filterExposure. This resets any "history". */
+ filtered_ = target_;
+
+ /* Equivalent of divideUpExposure. */
+ filtered_.exposureTime = fixedExposureTime;
+ filtered_.analogueGain = fixedAnalogueGain_;
+ } else if (status_.totalExposureValue) {
+ /*
+ * On a mode switch, various things could happen:
+ * - the exposure profile might change
+ * - a fixed exposure or gain might be set
+ * - the new mode's sensitivity might be different
+ * We cope with the last of these by scaling the target values. After
+ * that we just need to re-divide the exposure/gain according to the
+ * current exposure profile, which takes care of everything else.
+ */
+
+ double ratio = lastSensitivity / cameraMode.sensitivity;
+ target_.totalExposureNoDG *= ratio;
+ target_.totalExposure *= ratio;
+ filtered_.totalExposureNoDG *= ratio;
+ filtered_.totalExposure *= ratio;
+
+ divideUpExposure();
+ } else {
+ /*
+ * We come through here on startup, when at least one of the
+ * exposure time or gain has not been fixed. We must still
+ * write those values out so that they will be applied
+ * immediately. We supply some arbitrary defaults for any that
+ * weren't set.
+ */
+
+ /* Equivalent of divideUpExposure. */
+ filtered_.exposureTime = fixedExposureTime ? fixedExposureTime : config_.defaultExposureTime;
+ filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain;
+ }
+
+ writeAndFinish(metadata, false);
+}
+
+void AgcChannel::prepare(Metadata *imageMetadata)
+{
+ Duration totalExposureValue = status_.totalExposureValue;
+ AgcStatus delayedStatus;
+ AgcPrepareStatus prepareStatus;
+
+ /* Fetch the AWB status now because AWB also sets it in the prepare method. */
+ fetchAwbStatus(imageMetadata);
+
+ if (!imageMetadata->get("agc.delayed_status", delayedStatus))
+ totalExposureValue = delayedStatus.totalExposureValue;
+
+ prepareStatus.digitalGain = 1.0;
+ prepareStatus.locked = false;
+
+ if (status_.totalExposureValue) {
+ /* Process has run, so we have meaningful values. */
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ Duration actualExposure = deviceStatus.exposureTime *
+ deviceStatus.analogueGain;
+ if (actualExposure) {
+ double digitalGain = totalExposureValue / actualExposure;
+ LOG(RPiAgc, Debug) << "Want total exposure " << totalExposureValue;
+ /*
+ * Never ask for a gain < 1.0, and also impose
+ * some upper limit. Make it customisable?
+ */
+ prepareStatus.digitalGain = std::max(1.0, std::min(digitalGain, 4.0));
+ LOG(RPiAgc, Debug) << "Actual exposure " << actualExposure;
+ LOG(RPiAgc, Debug) << "Use digitalGain " << prepareStatus.digitalGain;
+ LOG(RPiAgc, Debug) << "Effective exposure "
+ << actualExposure * prepareStatus.digitalGain;
+ /* Decide whether AEC/AGC has converged. */
+ prepareStatus.locked = updateLockStatus(deviceStatus);
+ }
+ } else
+ LOG(RPiAgc, Warning) << "AgcChannel: no device metadata";
+ imageMetadata->set("agc.prepare_status", prepareStatus);
+ }
+}
+
+void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus,
+ Metadata *imageMetadata,
+ const AgcChannelTotalExposures &channelTotalExposures)
+{
+ frameCount_++;
+ /*
+ * First a little bit of housekeeping, fetching up-to-date settings and
+ * configuration, that kind of thing.
+ */
+ housekeepConfig();
+ /* Get the current exposure values for the frame that's just arrived. */
+ fetchCurrentExposure(deviceStatus);
+ /* Compute the total gain we require relative to the current exposure. */
+ double gain, targetY;
+ computeGain(stats, imageMetadata, gain, targetY);
+ /* Now compute the target (final) exposure which we think we want. */
+ computeTargetExposure(gain);
+ /* The results have to be filtered so as not to change too rapidly. */
+ filterExposure();
+ /*
+ * We may be asked to limit the exposure using other channels. If another channel
+ * determines our upper bound we may want to know this later.
+ */
+ bool channelBound = applyChannelConstraints(channelTotalExposures);
+ /*
+ * Some of the exposure has to be applied as digital gain, so work out
+ * what that is. It also tells us whether it's trying to desaturate the image
+ * more quickly, which can only happen when another channel is not limiting us.
+ */
+ bool desaturate = applyDigitalGain(gain, targetY, channelBound);
+ /*
+	 * The last thing is to divide up the exposure value into an exposure time
+ * and analogue gain, according to the current exposure mode.
+ */
+ divideUpExposure();
+ /* Finally advertise what we've done. */
+ writeAndFinish(imageMetadata, desaturate);
+}
+
+bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus)
+{
+ const double errorFactor = 0.10; /* make these customisable? */
+ const int maxLockCount = 5;
+ /* Reset "lock count" when we exceed this multiple of errorFactor */
+ const double resetMargin = 1.5;
+
+ /* Add 200us to the exposure time error to allow for line quantisation. */
+ Duration exposureError = lastDeviceStatus_.exposureTime * errorFactor + 200us;
+ double gainError = lastDeviceStatus_.analogueGain * errorFactor;
+ Duration targetError = lastTargetExposure_ * errorFactor;
+
+ /*
+ * Note that we don't know the exposure/gain limits of the sensor, so
+ * the values we keep requesting may be unachievable. For this reason
+ * we only insist that we're close to values in the past few frames.
+ */
+ if (deviceStatus.exposureTime > lastDeviceStatus_.exposureTime - exposureError &&
+ deviceStatus.exposureTime < lastDeviceStatus_.exposureTime + exposureError &&
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError &&
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError &&
+ status_.targetExposureValue > lastTargetExposure_ - targetError &&
+ status_.targetExposureValue < lastTargetExposure_ + targetError)
+ lockCount_ = std::min(lockCount_ + 1, maxLockCount);
+ else if (deviceStatus.exposureTime < lastDeviceStatus_.exposureTime - resetMargin * exposureError ||
+ deviceStatus.exposureTime > lastDeviceStatus_.exposureTime + resetMargin * exposureError ||
+ deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError ||
+ deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError ||
+ status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError ||
+ status_.targetExposureValue > lastTargetExposure_ + resetMargin * targetError)
+ lockCount_ = 0;
+
+ lastDeviceStatus_ = deviceStatus;
+ lastTargetExposure_ = status_.targetExposureValue;
+
+ LOG(RPiAgc, Debug) << "Lock count updated to " << lockCount_;
+ return lockCount_ == maxLockCount;
+}
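+
+/*
+ * Illustrative behaviour of the hysteresis above (values invented): with
+ * errorFactor = 0.10, a delivered exposure time of 10ms tolerates an error
+ * of 1ms + 200us. Staying inside that band (and the matching gain and
+ * target bands) for maxLockCount = 5 consecutive frames reports the AGC
+ * as converged; drifting beyond resetMargin = 1.5 times the band resets
+ * the count to zero, while the region in between leaves the count
+ * unchanged, so borderline frames neither lock nor unlock the state.
+ */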
+
+void AgcChannel::housekeepConfig()
+{
+ /* First fetch all the up-to-date settings, so no one else has to do it. */
+ status_.ev = ev_;
+ status_.fixedExposureTime = limitExposureTime(fixedExposureTime_);
+ status_.fixedAnalogueGain = fixedAnalogueGain_;
+ status_.flickerPeriod = flickerPeriod_;
+ LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedExposureTime "
+ << status_.fixedExposureTime << " fixedAnalogueGain "
+ << status_.fixedAnalogueGain;
+ /*
+ * Make sure the "mode" pointers point to the up-to-date things, if
+ * they've changed.
+ */
+ if (meteringModeName_ != status_.meteringMode) {
+ auto it = config_.meteringModes.find(meteringModeName_);
+ if (it == config_.meteringModes.end()) {
+ LOG(RPiAgc, Warning) << "No metering mode " << meteringModeName_;
+ meteringModeName_ = status_.meteringMode;
+ } else {
+ meteringMode_ = &it->second;
+ status_.meteringMode = meteringModeName_;
+ }
+ }
+ if (exposureModeName_ != status_.exposureMode) {
+ auto it = config_.exposureModes.find(exposureModeName_);
+ if (it == config_.exposureModes.end()) {
+ LOG(RPiAgc, Warning) << "No exposure profile " << exposureModeName_;
+ exposureModeName_ = status_.exposureMode;
+ } else {
+ exposureMode_ = &it->second;
+ status_.exposureMode = exposureModeName_;
+ }
+ }
+ if (constraintModeName_ != status_.constraintMode) {
+ auto it = config_.constraintModes.find(constraintModeName_);
+ if (it == config_.constraintModes.end()) {
+ LOG(RPiAgc, Warning) << "No constraint list " << constraintModeName_;
+ constraintModeName_ = status_.constraintMode;
+ } else {
+ constraintMode_ = &it->second;
+ status_.constraintMode = constraintModeName_;
+ }
+ }
+ LOG(RPiAgc, Debug) << "exposureMode "
+ << exposureModeName_ << " constraintMode "
+ << constraintModeName_ << " meteringMode "
+ << meteringModeName_;
+}
+
+void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus)
+{
+ current_.exposureTime = deviceStatus.exposureTime;
+ current_.analogueGain = deviceStatus.analogueGain;
+ current_.totalExposure = 0s; /* this value is unused */
+ current_.totalExposureNoDG = current_.exposureTime * current_.analogueGain;
+}
+
+void AgcChannel::fetchAwbStatus(Metadata *imageMetadata)
+{
+ if (imageMetadata->get("awb.status", awb_) != 0)
+ LOG(RPiAgc, Debug) << "No AWB status found";
+}
+
+static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb,
+ std::vector<double> &weights, double gain)
+{
+ constexpr uint64_t maxVal = 1 << Statistics::NormalisationFactorPow2;
+
+ /*
+ * If we have no AGC region stats, but do have a Y histogram, use that
+ * directly to calculate the mean Y value of the image.
+ */
+ if (!stats->agcRegions.numRegions() && stats->yHist.bins()) {
+ /*
+ * When the gain is applied to the histogram, anything below minBin
+ * will scale up directly with the gain, but anything above that
+ * will saturate into the top bin.
+ */
+ auto &hist = stats->yHist;
+ double minBin = std::min(1.0, 1.0 / gain) * hist.bins();
+ double binMean = hist.interBinMean(0.0, minBin);
+ double numUnsaturated = hist.cumulativeFreq(minBin);
+ /* This term is from all the pixels that won't saturate. */
+ double ySum = binMean * gain * numUnsaturated;
+ /* And add the ones that will saturate. */
+ ySum += (hist.total() - numUnsaturated) * hist.bins();
+ return ySum / hist.total() / hist.bins();
+ }
+
+ ASSERT(weights.size() == stats->agcRegions.numRegions());
+
+ /*
+ * Note that the weights are applied by the IPA to the statistics directly,
+ * before they are given to us here.
+ */
+ ipa::RGB<double> sum{ 0.0 };
+ double pixelSum = 0;
+ for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) {
+ auto &region = stats->agcRegions.get(i);
+ sum.r() += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted);
+ sum.g() += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted);
+ sum.b() += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted);
+ pixelSum += region.counted;
+ }
+ if (pixelSum == 0.0) {
+ LOG(RPiAgc, Warning) << "computeInitialY: pixelSum is zero";
+ return 0;
+ }
+
+ /* Factor in the AWB correction if needed. */
+ if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb)
+ sum *= ipa::RGB<double>{{ awb.gainR, awb.gainG, awb.gainB }};
+
+ double ySum = ipa::rec601LuminanceFromRGB(sum);
+
+ return ySum / pixelSum / (1 << 16);
+}
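+
+/*
+ * A numeric sketch of the histogram branch above (hypothetical values):
+ * with gain = 2.0 the unsaturated region is the bottom half of the
+ * histogram, minBin = 0.5 * bins. If that half holds 80% of the pixels
+ * with a mean bin of 0.2 * bins, then
+ *
+ *     ySum = 0.2 * bins * 2.0 * (0.8 * total)    [pixels that scale]
+ *          + (0.2 * total) * bins                [pixels that saturate]
+ *
+ * and the returned mean Y is ySum / total / bins = 0.52.
+ */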
+
+/*
+ * We handle extra gain through EV by adjusting our Y targets. However, you
+ * simply can't monitor histograms once they get very close to (or beyond!)
+ * saturation, so we clamp the Y targets to this value. It does mean that EV
+ * increases don't necessarily do quite what you might expect in certain
+ * (contrived) cases.
+ */
+
+static constexpr double EvGainYTargetLimit = 0.9;
+
+static double constraintComputeGain(AgcConstraint &c, const Histogram &h, double lux,
+ double evGain, double &targetY)
+{
+ targetY = c.yTarget.eval(c.yTarget.domain().clamp(lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+ double iqm = h.interQuantileMean(c.qLo, c.qHi);
+ return (targetY * h.bins()) / iqm;
+}
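+
+/*
+ * For example (hypothetical numbers): a LOWER constraint with qLo = 0.95
+ * and qHi = 1.0 meters the brightest 5% of pixels. If their inter-quantile
+ * mean is 0.4 * bins and the EV-scaled target is 0.8, the constraint asks
+ * for a gain of 0.8 * bins / (0.4 * bins) = 2.0, which computeGain() below
+ * adopts only if it exceeds the gain metered from the regions.
+ */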
+
+void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
+ double &gain, double &targetY)
+{
+ struct LuxStatus lux = {};
+ lux.lux = 400; /* default lux level to 400 in case no metadata found */
+ if (imageMetadata->get("lux.status", lux) != 0)
+ LOG(RPiAgc, Warning) << "No lux level found";
+ const Histogram &h = statistics->yHist;
+ double evGain = status_.ev * config_.baseEv;
+ /*
+ * The initial gain and target_Y come from some of the regions. After
+ * that we consider the histogram constraints.
+ */
+ targetY = config_.yTarget.eval(config_.yTarget.domain().clamp(lux.lux));
+ targetY = std::min(EvGainYTargetLimit, targetY * evGain);
+
+ /*
+ * Do this calculation a few times, as the brightness increase can be
+ * non-linear when there are saturated regions.
+ */
+ gain = 1.0;
+ for (int i = 0; i < 8; i++) {
+ double initialY = computeInitialY(statistics, awb_, meteringMode_->weights, gain);
+ double extraGain = std::min(10.0, targetY / (initialY + .001));
+ gain *= extraGain;
+ LOG(RPiAgc, Debug) << "Initial Y " << initialY << " target " << targetY
+ << " gives gain " << gain;
+ if (extraGain < 1.01) /* close enough */
+ break;
+ }
+
+ for (auto &c : *constraintMode_) {
+ double newTargetY;
+ double newGain = constraintComputeGain(c, h, lux.lux, evGain, newTargetY);
+ LOG(RPiAgc, Debug) << "Constraint has target_Y "
+ << newTargetY << " giving gain " << newGain;
+ if (c.bound == AgcConstraint::Bound::LOWER && newGain > gain) {
+ LOG(RPiAgc, Debug) << "Lower bound constraint adopted";
+ gain = newGain;
+ targetY = newTargetY;
+ } else if (c.bound == AgcConstraint::Bound::UPPER && newGain < gain) {
+ LOG(RPiAgc, Debug) << "Upper bound constraint adopted";
+ gain = newGain;
+ targetY = newTargetY;
+ }
+ }
+ LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << targetY << " ev "
+ << status_.ev << " base_ev " << config_.baseEv
+ << ")";
+}
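+
+/*
+ * Sketch of the iteration above (hypothetical measurements): if targetY is
+ * 0.16 and the first pass measures initialY = 0.04, the first extra gain
+ * is roughly 4x. Re-evaluating with gain = 4 may only reach initialY =
+ * 0.15, because bright regions saturate rather than scale, giving a
+ * further ~1.06x. Once the extra gain falls below 1.01 the loop stops, so
+ * the final gain accounts for this non-linearity instead of overshooting.
+ */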
+
+void AgcChannel::computeTargetExposure(double gain)
+{
+ if (status_.fixedExposureTime && status_.fixedAnalogueGain) {
+ /*
+ * When analogue gain and exposure time are both fixed, we need
+ * to drive the total exposure so that we end up with a digital
+ * gain of at least 1/minColourGain. Otherwise we'd desaturate
+ * channels causing white to go cyan or magenta.
+ */
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ target_.totalExposure =
+ status_.fixedExposureTime * status_.fixedAnalogueGain / minColourGain;
+ } else {
+ /*
+ * The statistics reflect the image without digital gain, so the final
+ * total exposure we're aiming for is:
+ */
+ target_.totalExposure = current_.totalExposureNoDG * gain;
+ /* The final target exposure is also limited to what the exposure mode allows. */
+ Duration maxExposureTime = status_.fixedExposureTime
+ ? status_.fixedExposureTime
+ : exposureMode_->exposureTime.back();
+ maxExposureTime = limitExposureTime(maxExposureTime);
+ Duration maxTotalExposure =
+ maxExposureTime *
+ (status_.fixedAnalogueGain != 0.0
+ ? status_.fixedAnalogueGain
+ : exposureMode_->gain.back());
+ target_.totalExposure = std::min(target_.totalExposure, maxTotalExposure);
+ }
+ LOG(RPiAgc, Debug) << "Target totalExposure " << target_.totalExposure;
+}
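+
+/*
+ * To illustrate the fixed case (invented AWB gains): with colour gains
+ * { 1.8, 1.0, 2.2 }, minColourGain = 1.0 and the target is simply exposure
+ * time x gain. With gains { 0.8, 1.0, 1.3 }, minColourGain = 0.8 and the
+ * target is raised by 1 / 0.8 = 1.25x, so the later digital gain of at
+ * least 1.25 keeps every colour channel's overall gain at or above 1.
+ */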
+
+bool AgcChannel::applyChannelConstraints(const AgcChannelTotalExposures &channelTotalExposures)
+{
+ bool channelBound = false;
+ LOG(RPiAgc, Debug)
+ << "Total exposure before channel constraints " << filtered_.totalExposure;
+
+ for (const auto &constraint : config_.channelConstraints) {
+ LOG(RPiAgc, Debug)
+ << "Check constraint: channel " << constraint.channel << " bound "
+ << (constraint.bound == AgcChannelConstraint::Bound::UPPER ? "UPPER" : "LOWER")
+ << " factor " << constraint.factor;
+ if (constraint.channel >= channelTotalExposures.size() ||
+ !channelTotalExposures[constraint.channel]) {
+ LOG(RPiAgc, Debug) << "no such channel or no exposure available- skipped";
+ continue;
+ }
+
+ libcamera::utils::Duration limitExposure =
+ channelTotalExposures[constraint.channel] * constraint.factor;
+ LOG(RPiAgc, Debug) << "Limit exposure " << limitExposure;
+ if ((constraint.bound == AgcChannelConstraint::Bound::UPPER &&
+ filtered_.totalExposure > limitExposure) ||
+ (constraint.bound == AgcChannelConstraint::Bound::LOWER &&
+ filtered_.totalExposure < limitExposure)) {
+ filtered_.totalExposure = limitExposure;
+ LOG(RPiAgc, Debug) << "Constraint applies";
+ channelBound = true;
+ } else
+ LOG(RPiAgc, Debug) << "Constraint does not apply";
+ }
+
+ LOG(RPiAgc, Debug)
+ << "Total exposure after channel constraints " << filtered_.totalExposure;
+
+ return channelBound;
+}
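+
+/*
+ * Example (hypothetical configuration): a constraint of { channel: 0,
+ * bound: UPPER, factor: 0.5 } caps this channel at half of channel 0's
+ * total exposure. If channel 0 settled at 20ms and our filtered target is
+ * 14ms, the 10ms limit is imposed and channelBound becomes true, which
+ * later prevents the desaturation fast path from kicking in.
+ */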
+
+bool AgcChannel::applyDigitalGain(double gain, double targetY, bool channelBound)
+{
+ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 });
+ ASSERT(minColourGain != 0.0);
+ double dg = 1.0 / minColourGain;
+ /*
+ * I think this pipeline subtracts black level and rescales before we
+ * get the stats, so no need to worry about it.
+ */
+ LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
+ << " target_Y " << targetY;
+ /*
+ * Finally, if we're trying to reduce exposure but the target_Y is
+ * "close" to 1.0, then the gain computed for that constraint will be
+ * only slightly less than one, because the measured Y can never be
+ * larger than 1.0. When this happens, demand a large digital gain so
+ * that the exposure can be reduced, de-saturating the image much more
+ * quickly (and we then approach the correct value more quickly from
+ * below).
+ */
+ bool desaturate = false;
+ if (config_.desaturate)
+ desaturate = !channelBound &&
+ targetY > config_.fastReduceThreshold && gain < sqrt(targetY);
+ if (desaturate)
+ dg /= config_.fastReduceThreshold;
+ LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate;
+ filtered_.totalExposureNoDG = filtered_.totalExposure / dg;
+ LOG(RPiAgc, Debug) << "Target totalExposureNoDG " << filtered_.totalExposureNoDG;
+ return desaturate;
+}
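+
+/*
+ * Sketch of the desaturation path (illustrative values): with all AWB
+ * gains >= 1.0, dg starts at 1.0. Suppose targetY = 0.98, a
+ * fastReduceThreshold of 0.8 and a computed gain of 0.95 < sqrt(0.98):
+ * the image is nearly saturated yet the requested reduction is tiny, so
+ * dg becomes 1.0 / 0.8 = 1.25 and totalExposureNoDG drops by 20%. The
+ * sensor exposure falls quickly while the digital gain covers the gap.
+ */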
+
+void AgcChannel::filterExposure()
+{
+ double speed = config_.speed;
+ double stableRegion = config_.stableRegion;
+
+ /*
+ * AGC adapts instantly if both exposure time and gain are directly
+ * specified or we're in the startup phase. Also disable the stable
+ * region, because we want to reflect any user exposure/gain updates,
+ * however small.
+ */
+ if ((status_.fixedExposureTime && status_.fixedAnalogueGain) ||
+ frameCount_ <= config_.startupFrames) {
+ speed = 1.0;
+ stableRegion = 0.0;
+ }
+ if (!filtered_.totalExposure) {
+ filtered_.totalExposure = target_.totalExposure;
+ } else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure &&
+ filtered_.totalExposure * (1.0 + stableRegion) > target_.totalExposure) {
+ /* Total exposure must change by more than this or we leave it alone. */
+ } else {
+ /*
+ * If close to the result go faster, to save making so many
+ * micro-adjustments on the way. (Make this customisable?)
+ */
+ if (filtered_.totalExposure < 1.2 * target_.totalExposure &&
+ filtered_.totalExposure > 0.8 * target_.totalExposure)
+ speed = sqrt(speed);
+ filtered_.totalExposure = speed * target_.totalExposure +
+ filtered_.totalExposure * (1.0 - speed);
+ }
+ LOG(RPiAgc, Debug) << "After filtering, totalExposure " << filtered_.totalExposure
+ << " no dg " << filtered_.totalExposureNoDG;
+}
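+
+/*
+ * The update above is a plain IIR step. A minimal standalone sketch with
+ * invented values:
+ *
+ *     double speed = 0.2;
+ *     double filtered = 10000, target = 20000;  /* microseconds */
+ *     if (filtered < 1.2 * target && filtered > 0.8 * target)
+ *         speed = sqrt(speed);                  /* accelerate near target */
+ *     filtered = speed * target + filtered * (1.0 - speed);
+ *
+ * Here the band check fails (10000 < 0.8 * 20000), so speed stays 0.2 and
+ * filtered moves to 12000, i.e. 20% of the way towards the target.
+ */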
+
+void AgcChannel::divideUpExposure()
+{
+ /*
+ * Sending the fixed exposure time/gain cases through the same code may
+ * seem unnecessary, but it will make more sense when we extend this to
+ * cover variable aperture.
+ */
+ Duration exposureValue = filtered_.totalExposureNoDG;
+ Duration exposureTime;
+ double analogueGain;
+ exposureTime = status_.fixedExposureTime ? status_.fixedExposureTime
+ : exposureMode_->exposureTime[0];
+ exposureTime = limitExposureTime(exposureTime);
+ analogueGain = status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain
+ : exposureMode_->gain[0];
+ analogueGain = limitGain(analogueGain);
+ if (exposureTime * analogueGain < exposureValue) {
+ for (unsigned int stage = 1;
+ stage < exposureMode_->gain.size(); stage++) {
+ if (!status_.fixedExposureTime) {
+ Duration stageExposureTime =
+ limitExposureTime(exposureMode_->exposureTime[stage]);
+ if (stageExposureTime * analogueGain >= exposureValue) {
+ exposureTime = exposureValue / analogueGain;
+ break;
+ }
+ exposureTime = stageExposureTime;
+ }
+ if (status_.fixedAnalogueGain == 0.0) {
+ if (exposureMode_->gain[stage] * exposureTime >= exposureValue) {
+ analogueGain = exposureValue / exposureTime;
+ break;
+ }
+ analogueGain = exposureMode_->gain[stage];
+ analogueGain = limitGain(analogueGain);
+ }
+ }
+ }
+ LOG(RPiAgc, Debug)
+ << "Divided up exposure time and gain are " << exposureTime
+ << " and " << analogueGain;
+ /*
+ * Finally adjust exposure time for flicker avoidance (require both
+ * exposure time and gain not to be fixed).
+ */
+ if (!status_.fixedExposureTime && !status_.fixedAnalogueGain &&
+ status_.flickerPeriod) {
+ int flickerPeriods = exposureTime / status_.flickerPeriod;
+ if (flickerPeriods) {
+ Duration newExposureTime = flickerPeriods * status_.flickerPeriod;
+ analogueGain *= exposureTime / newExposureTime;
+ /*
+ * We should still not allow the ag to go over the
+ * largest value in the exposure mode. Note that this
+ * may force more of the total exposure into the digital
+ * gain as a side-effect.
+ */
+ analogueGain = std::min(analogueGain, exposureMode_->gain.back());
+ analogueGain = limitGain(analogueGain);
+ exposureTime = newExposureTime;
+ }
+ LOG(RPiAgc, Debug) << "After flicker avoidance, exposure time "
+ << exposureTime << " gain " << analogueGain;
+ }
+ filtered_.exposureTime = exposureTime;
+ filtered_.analogueGain = analogueGain;
+}
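+
+/*
+ * A worked flicker example (values invented): with a 50Hz mains flicker
+ * period of 10ms and a divided-up exposure time of 24ms, flickerPeriods
+ * = 2, so the exposure time is quantised down to 20ms and the analogue
+ * gain scaled up by 24 / 20 = 1.2 to preserve the total exposure, subject
+ * to the exposure mode's maximum gain and the sensor's own limits.
+ */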
+
+void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate)
+{
+ status_.totalExposureValue = filtered_.totalExposure;
+ status_.targetExposureValue = desaturate ? 0s : target_.totalExposure;
+ status_.exposureTime = filtered_.exposureTime;
+ status_.analogueGain = filtered_.analogueGain;
+ /*
+ * Write to metadata as well, in case anyone wants to update the camera
+ * immediately.
+ */
+ imageMetadata->set("agc.status", status_);
+ LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
+ << filtered_.totalExposure;
+ LOG(RPiAgc, Debug) << "Camera exposure update: exposure time " << filtered_.exposureTime
+ << " analogue gain " << filtered_.analogueGain;
+}
+
+Duration AgcChannel::limitExposureTime(Duration exposureTime)
+{
+ /*
+ * exposureTime == 0 is a special case for fixed exposure time values,
+ * and must pass through unchanged.
+ */
+ if (!exposureTime)
+ return exposureTime;
+
+ exposureTime = std::clamp(exposureTime, mode_.minExposureTime, maxExposureTime_);
+ return exposureTime;
+}
+
+double AgcChannel::limitGain(double gain) const
+{
+ /*
+ * Only limit the lower bounds of the gain value to what the sensor
+ * limits. The upper bound on analogue gain will be made up with
+ * additional digital gain applied by the ISP.
+ *
+ * gain == 0.0 is a special case for fixed exposure time values, and
+ * must pass through unchanged.
+ */
+ if (!gain)
+ return gain;
+
+ gain = std::max(gain, mode_.minAnalogueGain);
+ return gain;
+}
diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h
new file mode 100644
index 00000000..734e5efd
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/agc_channel.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * AGC/AEC control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include <libipa/pwl.h>
+
+#include "../agc_status.h"
+#include "../awb_status.h"
+#include "../controller.h"
+
+/* This is our implementation of AGC. */
+
+namespace RPiController {
+
+using AgcChannelTotalExposures = std::vector<libcamera::utils::Duration>;
+
+struct AgcMeteringMode {
+ std::vector<double> weights;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcExposureMode {
+ std::vector<libcamera::utils::Duration> exposureTime;
+ std::vector<double> gain;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcConstraint {
+ enum class Bound { LOWER = 0,
+ UPPER = 1 };
+ Bound bound;
+ double qLo;
+ double qHi;
+ libcamera::ipa::Pwl yTarget;
+ int read(const libcamera::YamlObject &params);
+};
+
+typedef std::vector<AgcConstraint> AgcConstraintMode;
+
+struct AgcChannelConstraint {
+ enum class Bound { LOWER = 0,
+ UPPER = 1 };
+ Bound bound;
+ unsigned int channel;
+ double factor;
+ int read(const libcamera::YamlObject &params);
+};
+
+struct AgcConfig {
+ int read(const libcamera::YamlObject &params);
+ std::map<std::string, AgcMeteringMode> meteringModes;
+ std::map<std::string, AgcExposureMode> exposureModes;
+ std::map<std::string, AgcConstraintMode> constraintModes;
+ std::vector<AgcChannelConstraint> channelConstraints;
+ libcamera::ipa::Pwl yTarget;
+ double speed;
+ uint16_t startupFrames;
+ unsigned int convergenceFrames;
+ double maxChange;
+ double minChange;
+ double fastReduceThreshold;
+ double speedUpThreshold;
+ std::string defaultMeteringMode;
+ std::string defaultExposureMode;
+ std::string defaultConstraintMode;
+ double baseEv;
+ libcamera::utils::Duration defaultExposureTime;
+ double defaultAnalogueGain;
+ double stableRegion;
+ bool desaturate;
+};
+
+class AgcChannel
+{
+public:
+ AgcChannel();
+ int read(const libcamera::YamlObject &params,
+ const Controller::HardwareConfig &hardwareConfig);
+ unsigned int getConvergenceFrames() const;
+ std::vector<double> const &getWeights() const;
+ void setEv(double ev);
+ void setFlickerPeriod(libcamera::utils::Duration flickerPeriod);
+ void setMaxExposureTime(libcamera::utils::Duration maxExposureTime);
+ void setFixedExposureTime(libcamera::utils::Duration fixedExposureTime);
+ void setFixedAnalogueGain(double fixedAnalogueGain);
+ void setMeteringMode(std::string const &meteringModeName);
+ void setExposureMode(std::string const &exposureModeName);
+ void setConstraintMode(std::string const &constraintModeName);
+ void enableAuto();
+ void disableAuto();
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata);
+ void prepare(Metadata *imageMetadata);
+ void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata,
+ const AgcChannelTotalExposures &channelTotalExposures);
+
+private:
+ bool updateLockStatus(DeviceStatus const &deviceStatus);
+ AgcConfig config_;
+ void housekeepConfig();
+ void fetchCurrentExposure(DeviceStatus const &deviceStatus);
+ void fetchAwbStatus(Metadata *imageMetadata);
+ void computeGain(StatisticsPtr &statistics, Metadata *imageMetadata,
+ double &gain, double &targetY);
+ void computeTargetExposure(double gain);
+ void filterExposure();
+ bool applyChannelConstraints(const AgcChannelTotalExposures &channelTotalExposures);
+ bool applyDigitalGain(double gain, double targetY, bool channelBound);
+ void divideUpExposure();
+ void writeAndFinish(Metadata *imageMetadata, bool desaturate);
+ libcamera::utils::Duration limitExposureTime(libcamera::utils::Duration exposureTime);
+ double limitGain(double gain) const;
+ AgcMeteringMode *meteringMode_;
+ AgcExposureMode *exposureMode_;
+ AgcConstraintMode *constraintMode_;
+ CameraMode mode_;
+ uint64_t frameCount_;
+ AwbStatus awb_;
+ struct ExposureValues {
+ ExposureValues();
+
+ libcamera::utils::Duration exposureTime;
+ double analogueGain;
+ libcamera::utils::Duration totalExposure;
+ libcamera::utils::Duration totalExposureNoDG; /* without digital gain */
+ };
+ ExposureValues current_; /* values for the current frame */
+ ExposureValues target_; /* calculate the values we want here */
+ ExposureValues filtered_; /* these values are filtered towards target */
+ AgcStatus status_;
+ int lockCount_;
+ DeviceStatus lastDeviceStatus_;
+ libcamera::utils::Duration lastTargetExposure_;
+ /* Below here are the "settings" that applications can change. */
+ std::string meteringModeName_;
+ std::string exposureModeName_;
+ std::string constraintModeName_;
+ double ev_;
+ libcamera::utils::Duration flickerPeriod_;
+ libcamera::utils::Duration maxExposureTime_;
+ libcamera::utils::Duration fixedExposureTime_;
+ double fixedAnalogueGain_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp
new file mode 100644
index 00000000..21edb819
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/alsc.cpp
@@ -0,0 +1,869 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <functional>
+#include <numeric>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
+#include "../awb_status.h"
+#include "alsc.h"
+
+/* Raspberry Pi ALSC (Auto Lens Shading Correction) algorithm. */
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAlsc)
+
+#define NAME "rpi.alsc"
+
+static const double InsufficientData = -1.0;
+
+Alsc::Alsc(Controller *controller)
+ : Algorithm(controller)
+{
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
+ asyncThread_ = std::thread(std::bind(&Alsc::asyncFunc, this));
+}
+
+Alsc::~Alsc()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncAbort_ = true;
+ }
+ asyncSignal_.notify_one();
+ asyncThread_.join();
+}
+
+char const *Alsc::name() const
+{
+ return NAME;
+}
+
+static int generateLut(Array2D<double> &lut, const libcamera::YamlObject &params)
+{
+ /* These must be signed ints for the co-ordinate calculations below. */
+ int X = lut.dimensions().width, Y = lut.dimensions().height;
+ double cstrength = params["corner_strength"].get<double>(2.0);
+ if (cstrength <= 1.0) {
+ LOG(RPiAlsc, Error) << "corner_strength must be > 1.0";
+ return -EINVAL;
+ }
+
+ double asymmetry = params["asymmetry"].get<double>(1.0);
+ if (asymmetry < 0) {
+ LOG(RPiAlsc, Error) << "asymmetry must be >= 0";
+ return -EINVAL;
+ }
+
+ double f1 = cstrength - 1, f2 = 1 + sqrt(cstrength);
+ double R2 = X * Y / 4 * (1 + asymmetry * asymmetry);
+ int num = 0;
+ for (int y = 0; y < Y; y++) {
+ for (int x = 0; x < X; x++) {
+ double dy = y - Y / 2 + 0.5,
+ dx = (x - X / 2 + 0.5) * asymmetry;
+ double r2 = (dx * dx + dy * dy) / R2;
+ lut[num++] =
+ (f1 * r2 + f2) * (f1 * r2 + f2) /
+ (f2 * f2); /* this reproduces the cos^4 rule */
+ }
+ }
+ return 0;
+}
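+
+/*
+ * The generated gain is ((f1 * r2 + f2)^2) / (f2^2), which is 1.0 at the
+ * centre (r2 = 0) and rises towards the corners to counteract cos^4
+ * vignetting. With the default corner_strength of 2.0, f1 = 1 and
+ * f2 = 1 + sqrt(2), so a cell at r2 = 1 receives ((f1 + f2) / f2)^2 = 2.0
+ * exactly; in general the gain at r2 = 1 equals corner_strength, since
+ * (f1 + f2) / f2 = sqrt(cstrength).
+ */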
+
+static int readLut(Array2D<double> &lut, const libcamera::YamlObject &params)
+{
+ if (params.size() != lut.size()) {
+ LOG(RPiAlsc, Error) << "Invalid number of entries in LSC table";
+ return -EINVAL;
+ }
+
+ int num = 0;
+ for (const auto &p : params.asList()) {
+ auto value = p.get<double>();
+ if (!value)
+ return -EINVAL;
+ lut[num++] = *value;
+ }
+
+ return 0;
+}
+
+static int readCalibrations(std::vector<AlscCalibration> &calibrations,
+ const libcamera::YamlObject &params,
+ std::string const &name, const Size &size)
+{
+ if (params.contains(name)) {
+ double lastCt = 0;
+ for (const auto &p : params[name].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+ if (ct <= lastCt) {
+ LOG(RPiAlsc, Error)
+ << "Entries in " << name << " must be in increasing ct order";
+ return -EINVAL;
+ }
+ AlscCalibration calibration;
+ calibration.ct = lastCt = ct;
+
+ const libcamera::YamlObject &table = p["table"];
+ if (table.size() != size.width * size.height) {
+ LOG(RPiAlsc, Error)
+ << "Incorrect number of values for ct "
+ << ct << " in " << name;
+ return -EINVAL;
+ }
+
+ int num = 0;
+ calibration.table.resize(size);
+ for (const auto &elem : table.asList()) {
+ value = elem.get<double>();
+ if (!value)
+ return -EINVAL;
+ calibration.table[num++] = *value;
+ }
+
+ calibrations.push_back(std::move(calibration));
+ LOG(RPiAlsc, Debug)
+ << "Read " << name << " calibration for ct " << ct;
+ }
+ }
+ return 0;
+}
+
+int Alsc::read(const libcamera::YamlObject &params)
+{
+ config_.tableSize = getHardwareConfig().awbRegions;
+ config_.framePeriod = params["frame_period"].get<uint16_t>(12);
+ config_.startupFrames = params["startup_frames"].get<uint16_t>(10);
+ config_.speed = params["speed"].get<double>(0.05);
+ double sigma = params["sigma"].get<double>(0.01);
+ config_.sigmaCr = params["sigma_Cr"].get<double>(sigma);
+ config_.sigmaCb = params["sigma_Cb"].get<double>(sigma);
+ config_.minCount = params["min_count"].get<double>(10.0);
+ config_.minG = params["min_G"].get<uint16_t>(50);
+ config_.omega = params["omega"].get<double>(1.3);
+ config_.nIter = params["n_iter"].get<uint32_t>(config_.tableSize.width + config_.tableSize.height);
+ config_.luminanceStrength =
+ params["luminance_strength"].get<double>(1.0);
+
+ config_.luminanceLut.resize(config_.tableSize, 1.0);
+ int ret = 0;
+
+ if (params.contains("corner_strength"))
+ ret = generateLut(config_.luminanceLut, params);
+ else if (params.contains("luminance_lut"))
+ ret = readLut(config_.luminanceLut, params["luminance_lut"]);
+ else
+ LOG(RPiAlsc, Warning)
+ << "no luminance table - assume unity everywhere";
+ if (ret)
+ return ret;
+
+ ret = readCalibrations(config_.calibrationsCr, params, "calibrations_Cr",
+ config_.tableSize);
+ if (ret)
+ return ret;
+ ret = readCalibrations(config_.calibrationsCb, params, "calibrations_Cb",
+ config_.tableSize);
+ if (ret)
+ return ret;
+
+ config_.defaultCt = params["default_ct"].get<double>(4500.0);
+ config_.threshold = params["threshold"].get<double>(1e-3);
+ config_.lambdaBound = params["lambda_bound"].get<double>(0.05);
+
+ return 0;
+}
+
+static double getCt(Metadata *metadata, double defaultCt);
+static void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ Array2D<double> &calTable);
+static void resampleCalTable(const Array2D<double> &calTableIn, CameraMode const &cameraMode,
+ Array2D<double> &calTableOut);
+static void compensateLambdasForCal(const Array2D<double> &calTable,
+ const Array2D<double> &oldLambdas,
+ Array2D<double> &newLambdas);
+static void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
+ const Array2D<double> &lambdaR, double lambdaG,
+ const Array2D<double> &lambdaB,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength);
+
+void Alsc::initialise()
+{
+ frameCount2_ = frameCount_ = framePhase_ = 0;
+ firstTime_ = true;
+ ct_ = config_.defaultCt;
+
+ const size_t XY = config_.tableSize.width * config_.tableSize.height;
+
+ for (auto &r : syncResults_)
+ r.resize(config_.tableSize);
+ for (auto &r : prevSyncResults_)
+ r.resize(config_.tableSize);
+ for (auto &r : asyncResults_)
+ r.resize(config_.tableSize);
+
+ luminanceTable_.resize(config_.tableSize);
+ asyncLambdaR_.resize(config_.tableSize);
+ asyncLambdaB_.resize(config_.tableSize);
+ /* The lambdas are initialised in the SwitchMode. */
+ lambdaR_.resize(config_.tableSize);
+ lambdaB_.resize(config_.tableSize);
+
+ /* Temporaries for the computations, but sensible to allocate these up-front! */
+ for (auto &c : tmpC_)
+ c.resize(config_.tableSize);
+ for (auto &m : tmpM_)
+ m.resize(XY);
+}
+
+void Alsc::waitForAsyncThread()
+{
+ if (asyncStarted_) {
+ asyncStarted_ = false;
+ std::unique_lock<std::mutex> lock(mutex_);
+ syncSignal_.wait(lock, [&] {
+ return asyncFinished_;
+ });
+ asyncFinished_ = false;
+ }
+}
+
+static bool compareModes(CameraMode const &cm0, CameraMode const &cm1)
+{
+ /*
+ * Return true if the modes crop from the sensor significantly differently,
+ * or if the user transform has changed.
+ */
+ if (cm0.transform != cm1.transform)
+ return true;
+ int leftDiff = std::abs(cm0.cropX - cm1.cropX);
+ int topDiff = std::abs(cm0.cropY - cm1.cropY);
+ int rightDiff = std::abs(cm0.cropX + cm0.scaleX * cm0.width -
+ cm1.cropX - cm1.scaleX * cm1.width);
+ int bottomDiff = std::abs(cm0.cropY + cm0.scaleY * cm0.height -
+ cm1.cropY - cm1.scaleY * cm1.height);
+ /*
+ * These thresholds are a rather arbitrary amount chosen to trigger
+ * when carrying on with the previously calculated tables might be
+ * worse than regenerating them (but without the adaptive algorithm).
+ */
+ int thresholdX = cm0.sensorWidth >> 4;
+ int thresholdY = cm0.sensorHeight >> 4;
+ return leftDiff > thresholdX || rightDiff > thresholdX ||
+ topDiff > thresholdY || bottomDiff > thresholdY;
+}
+
+void Alsc::switchMode(CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /*
+ * We're going to start over with the tables if there's any "significant"
+ * change.
+ */
+ bool resetTables = firstTime_ || compareModes(cameraMode_, cameraMode);
+
+ /* Believe the colour temperature from the AWB, if there is one. */
+ ct_ = getCt(metadata, ct_);
+
+ /* Ensure the other thread isn't running while we do this. */
+ waitForAsyncThread();
+
+ cameraMode_ = cameraMode;
+
+ /*
+ * We must resample the luminance table like we do the others, but it's
+ * fixed so we can simply do it up front here.
+ */
+ resampleCalTable(config_.luminanceLut, cameraMode_, luminanceTable_);
+
+ if (resetTables) {
+ /*
+ * Upon every "table reset", arrange for something sensible to be
+ * generated. Construct the tables for the previously recorded colour
+ * temperature. In order to start over from scratch we initialise
+ * the lambdas, but the rest of this code then echoes the code in
+ * doAlsc, without the adaptive algorithm.
+ */
+ std::fill(lambdaR_.begin(), lambdaR_.end(), 1.0);
+ std::fill(lambdaB_.begin(), lambdaB_.end(), 1.0);
+ Array2D<double> &calTableR = tmpC_[0], &calTableB = tmpC_[1], &calTableTmp = tmpC_[2];
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ addLuminanceToTables(syncResults_, asyncLambdaR_, 1.0, asyncLambdaB_,
+ luminanceTable_, config_.luminanceStrength);
+ prevSyncResults_ = syncResults_;
+ framePhase_ = config_.framePeriod; /* run the algo again asap */
+ firstTime_ = false;
+ }
+}
+
+void Alsc::fetchAsyncResults()
+{
+ LOG(RPiAlsc, Debug) << "Fetch ALSC results";
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ syncResults_ = asyncResults_;
+}
+
+double getCt(Metadata *metadata, double defaultCt)
+{
+ AwbStatus awbStatus;
+ awbStatus.temperatureK = defaultCt; /* in case nothing found */
+ if (metadata->get("awb.status", awbStatus) != 0)
+ LOG(RPiAlsc, Debug) << "no AWB results found, using "
+ << awbStatus.temperatureK;
+ else
+ LOG(RPiAlsc, Debug) << "AWB results found, using "
+ << awbStatus.temperatureK;
+ return awbStatus.temperatureK;
+}
+
+static void copyStats(RgbyRegions &regions, StatisticsPtr &stats,
+ std::array<Array2D<double>, 3> &prevSyncResults)
+{
+ if (!regions.numRegions())
+ regions.init(stats->awbRegions.size());
+
+ const std::vector<double> &rTable = prevSyncResults[0].data(); /* status.r */
+ const std::vector<double> &gTable = prevSyncResults[1].data(); /* status.g */
+ const std::vector<double> &bTable = prevSyncResults[2].data(); /* status.b */
+ for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
+ auto r = stats->awbRegions.get(i);
+ if (stats->colourStatsPos == Statistics::ColourStatsPos::PostLsc) {
+ r.val.rSum = static_cast<uint64_t>(r.val.rSum / rTable[i]);
+ r.val.gSum = static_cast<uint64_t>(r.val.gSum / gTable[i]);
+ r.val.bSum = static_cast<uint64_t>(r.val.bSum / bTable[i]);
+ }
+ regions.set(i, r);
+ }
+}
+
+void Alsc::restartAsync(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ LOG(RPiAlsc, Debug) << "Starting ALSC calculation";
+ /*
+ * Get the current colour temperature. It's all we need from the
+ * metadata. Default to the last CT value (which could be the default).
+ */
+ ct_ = getCt(imageMetadata, ct_);
+ /*
+ * We have to copy the statistics here, dividing out our best guess of
+ * the LSC table that the pipeline applied to them which we get from
+ * prevSyncResults_.
+ */
+ copyStats(statistics_, stats, prevSyncResults_);
+ framePhase_ = 0;
+ asyncStarted_ = true;
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncStart_ = true;
+ }
+ asyncSignal_.notify_one();
+}
+
+void Alsc::prepare(Metadata *imageMetadata)
+{
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
+ ? 1.0
+ : config_.speed;
+ LOG(RPiAlsc, Debug)
+ << "frame count " << frameCount_ << " speed " << speed;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (asyncStarted_ && asyncFinished_)
+ fetchAsyncResults();
+ }
+ /* Apply IIR filter to results and program into the pipeline. */
+ for (unsigned int j = 0; j < syncResults_.size(); j++) {
+ for (unsigned int i = 0; i < syncResults_[j].size(); i++)
+ prevSyncResults_[j][i] = speed * syncResults_[j][i] + (1.0 - speed) * prevSyncResults_[j][i];
+ }
+ /* Put output values into status metadata. */
+ AlscStatus status;
+ status.r = prevSyncResults_[0].data();
+ status.g = prevSyncResults_[1].data();
+ status.b = prevSyncResults_[2].data();
+ imageMetadata->set("alsc.status", status);
+ /*
+ * Put the results in the global metadata as well. This will be used by
+ * AWB to factor in the colour shading correction.
+ */
+ getGlobalMetadata().set("alsc.status", status);
+}
+
+void Alsc::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /*
+ * Count frames since we started, and since we last poked the async
+ * thread.
+ */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ if (frameCount2_ < (int)config_.startupFrames)
+ frameCount2_++;
+ LOG(RPiAlsc, Debug) << "frame_phase " << framePhase_;
+ if (framePhase_ >= (int)config_.framePeriod ||
+ frameCount2_ < (int)config_.startupFrames) {
+ if (!asyncStarted_)
+ restartAsync(stats, imageMetadata);
+ }
+}
+
+void Alsc::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
+ });
+ asyncStart_ = false;
+ if (asyncAbort_)
+ break;
+ }
+ doAlsc();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncFinished_ = true;
+ }
+ syncSignal_.notify_one();
+ }
+}
+
+void getCalTable(double ct, std::vector<AlscCalibration> const &calibrations,
+ Array2D<double> &calTable)
+{
+ if (calibrations.empty()) {
+ std::fill(calTable.begin(), calTable.end(), 1.0);
+ LOG(RPiAlsc, Debug) << "no calibrations found";
+ } else if (ct <= calibrations.front().ct) {
+ calTable = calibrations.front().table;
+ LOG(RPiAlsc, Debug) << "using calibration for "
+ << calibrations.front().ct;
+ } else if (ct >= calibrations.back().ct) {
+ calTable = calibrations.back().table;
+ LOG(RPiAlsc, Debug) << "using calibration for "
+ << calibrations.back().ct;
+ } else {
+ int idx = 0;
+ while (ct > calibrations[idx + 1].ct)
+ idx++;
+ double ct0 = calibrations[idx].ct, ct1 = calibrations[idx + 1].ct;
+ LOG(RPiAlsc, Debug)
+ << "ct is " << ct << ", interpolating between "
+ << ct0 << " and " << ct1;
+ for (unsigned int i = 0; i < calTable.size(); i++)
+ calTable[i] =
+ (calibrations[idx].table[i] * (ct1 - ct) +
+ calibrations[idx + 1].table[i] * (ct - ct0)) /
+ (ct1 - ct0);
+ }
+}
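+
+/*
+ * For example (hypothetical calibrations): with tables at ct = 3000 and
+ * ct = 5000, a current estimate of ct = 4500 yields, per cell,
+ *
+ *     cal = (table3000 * (5000 - 4500) + table5000 * (4500 - 3000)) / 2000
+ *
+ * i.e. a 25% / 75% blend, while any ct outside [3000, 5000] clamps to the
+ * nearest calibrated table.
+ */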
+
+void resampleCalTable(const Array2D<double> &calTableIn,
+ CameraMode const &cameraMode,
+ Array2D<double> &calTableOut)
+{
+ int X = calTableIn.dimensions().width;
+ int Y = calTableIn.dimensions().height;
+
+ /*
+ * Precalculate and cache the x sampling locations and phases to save
+ * recomputing them on every row.
+ */
+ std::vector<int> xLo(X);
+ std::vector<int> xHi(X);
+ std::vector<double> xf(X);
+ double scaleX = cameraMode.sensorWidth /
+ (cameraMode.width * cameraMode.scaleX);
+ double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth;
+ double x = .5 / scaleX + xOff * X - .5;
+ double xInc = 1 / scaleX;
+ for (int i = 0; i < X; i++, x += xInc) {
+ xLo[i] = floor(x);
+ xf[i] = x - xLo[i];
+ xHi[i] = std::min(xLo[i] + 1, X - 1);
+ xLo[i] = std::max(xLo[i], 0);
+ if (!!(cameraMode.transform & libcamera::Transform::HFlip)) {
+ xLo[i] = X - 1 - xLo[i];
+ xHi[i] = X - 1 - xHi[i];
+ }
+ }
+ /* Now march over the output table generating the new values. */
+ double scaleY = cameraMode.sensorHeight /
+ (cameraMode.height * cameraMode.scaleY);
+ double yOff = cameraMode.cropY / (double)cameraMode.sensorHeight;
+ double y = .5 / scaleY + yOff * Y - .5;
+ double yInc = 1 / scaleY;
+ for (int j = 0; j < Y; j++, y += yInc) {
+ int yLo = floor(y);
+ double yf = y - yLo;
+ int yHi = std::min(yLo + 1, Y - 1);
+ yLo = std::max(yLo, 0);
+ if (!!(cameraMode.transform & libcamera::Transform::VFlip)) {
+ yLo = Y - 1 - yLo;
+ yHi = Y - 1 - yHi;
+ }
+ double const *rowAbove = calTableIn.ptr() + X * yLo;
+ double const *rowBelow = calTableIn.ptr() + X * yHi;
+ double *out = calTableOut.ptr() + X * j;
+ for (int i = 0; i < X; i++) {
+ double above = rowAbove[xLo[i]] * (1 - xf[i]) +
+ rowAbove[xHi[i]] * xf[i];
+ double below = rowBelow[xLo[i]] * (1 - xf[i]) +
+ rowBelow[xHi[i]] * xf[i];
+ *(out++) = above * (1 - yf) + below * yf;
+ }
+ }
+}
+
+/* Calculate chrominance statistics (R/G and B/G) for each region. */
+static void calculateCrCb(const RgbyRegions &awbRegion, Array2D<double> &cr,
+ Array2D<double> &cb, uint32_t minCount, uint16_t minG)
+{
+ for (unsigned int i = 0; i < cr.size(); i++) {
+ auto s = awbRegion.get(i);
+
+ /* Do not return unreliable, or zero, colour ratio statistics. */
+ if (s.counted <= minCount || s.val.gSum / s.counted <= minG ||
+ s.val.rSum / s.counted <= minG || s.val.bSum / s.counted <= minG) {
+ cr[i] = cb[i] = InsufficientData;
+ continue;
+ }
+
+ cr[i] = s.val.rSum / (double)s.val.gSum;
+ cb[i] = s.val.bSum / (double)s.val.gSum;
+ }
+}
+
+static void applyCalTable(const Array2D<double> &calTable, Array2D<double> &C)
+{
+ for (unsigned int i = 0; i < C.size(); i++)
+ if (C[i] != InsufficientData)
+ C[i] *= calTable[i];
+}
+
+void compensateLambdasForCal(const Array2D<double> &calTable,
+ const Array2D<double> &oldLambdas,
+ Array2D<double> &newLambdas)
+{
+ double minNewLambda = std::numeric_limits<double>::max();
+ for (unsigned int i = 0; i < newLambdas.size(); i++) {
+ newLambdas[i] = oldLambdas[i] * calTable[i];
+ minNewLambda = std::min(minNewLambda, newLambdas[i]);
+ }
+ for (unsigned int i = 0; i < newLambdas.size(); i++)
+ newLambdas[i] /= minNewLambda;
+}
+
+[[maybe_unused]] static void printCalTable(const Array2D<double> &C)
+{
+ const Size &size = C.dimensions();
+ printf("table: [\n");
+ for (unsigned int j = 0; j < size.height; j++) {
+ for (unsigned int i = 0; i < size.width; i++) {
+ printf("%5.3f", 1.0 / C[j * size.width + i]);
+ if (i != size.width - 1 || j != size.height - 1)
+ printf(",");
+ }
+ printf("\n");
+ }
+ printf("]\n");
+}
+
+/*
+ * Compute weight out of 1.0 which reflects how similar we wish to make the
+ * colours of these two regions.
+ */
+static double computeWeight(double Ci, double Cj, double sigma)
+{
+ if (Ci == InsufficientData || Cj == InsufficientData)
+ return 0;
+ double diff = (Ci - Cj) / sigma;
+ return exp(-diff * diff / 2);
+}
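+
+/*
+ * This is a Gaussian similarity kernel on the colour-ratio difference.
+ * With the default sigma of 0.01, two neighbouring regions whose ratios
+ * differ by 0.01 get a weight of exp(-0.5), about 0.61, while a difference
+ * of 0.03 falls to exp(-4.5), about 0.01, so the smoothing effectively
+ * stops across genuine colour edges.
+ */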
+
+/* Compute all weights. */
+static void computeW(const Array2D<double> &C, double sigma,
+ SparseArray<double> &W)
+{
+ size_t XY = C.size();
+ size_t X = C.dimensions().width;
+
+ for (unsigned int i = 0; i < XY; i++) {
+ /* Start with neighbour above and go clockwise. */
+ W[i][0] = i >= X ? computeWeight(C[i], C[i - X], sigma) : 0;
+ W[i][1] = i % X < X - 1 ? computeWeight(C[i], C[i + 1], sigma) : 0;
+ W[i][2] = i < XY - X ? computeWeight(C[i], C[i + X], sigma) : 0;
+ W[i][3] = i % X ? computeWeight(C[i], C[i - 1], sigma) : 0;
+ }
+}
+
+/* Compute M, the large but sparse matrix such that M * lambdas = 0. */
+static void constructM(const Array2D<double> &C,
+ const SparseArray<double> &W,
+ SparseArray<double> &M)
+{
+ size_t XY = C.size();
+ size_t X = C.dimensions().width;
+
+ double epsilon = 0.001;
+ for (unsigned int i = 0; i < XY; i++) {
+ /*
+ * Note how, if C[i] == INSUFFICIENT_DATA, the weights will all
+ * be zero so the equation is still set up correctly.
+ */
+ int m = !!(i >= X) + !!(i % X < X - 1) + !!(i < XY - X) +
+ !!(i % X); /* total number of neighbours */
+ /* we'll divide the diagonal out straight away */
+ double diagonal = (epsilon + W[i][0] + W[i][1] + W[i][2] + W[i][3]) * C[i];
+ M[i][0] = i >= X ? (W[i][0] * C[i - X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][1] = i % X < X - 1 ? (W[i][1] * C[i + 1] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][2] = i < XY - X ? (W[i][2] * C[i + X] + epsilon / m * C[i]) / diagonal : 0;
+ M[i][3] = i % X ? (W[i][3] * C[i - 1] + epsilon / m * C[i]) / diagonal : 0;
+ }
+}
+
+/*
+ * In the compute_lambda_ functions, note that the matrix coefficients for the
+ * left/right neighbours are zero down the left/right edges, so we don't need
+ * need to test the i value to exclude them.
+ */
+static double computeLambdaBottom(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width] +
+ M[i][3] * lambda[i - 1];
+}
+static double computeLambdaBottomStart(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][1] * lambda[i + 1] + M[i][2] * lambda[i + lambda.dimensions().width];
+}
+static double computeLambdaInterior(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] +
+ M[i][2] * lambda[i + lambda.dimensions().width] + M[i][3] * lambda[i - 1];
+}
+static double computeLambdaTop(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][1] * lambda[i + 1] +
+ M[i][3] * lambda[i - 1];
+}
+static double computeLambdaTopEnd(int i, const SparseArray<double> &M,
+ Array2D<double> &lambda)
+{
+ return M[i][0] * lambda[i - lambda.dimensions().width] + M[i][3] * lambda[i - 1];
+}
+
+/* Gauss-Seidel iteration with over-relaxation. */
+static double gaussSeidel2Sor(const SparseArray<double> &M, double omega,
+ Array2D<double> &lambda, double lambdaBound)
+{
+ int XY = lambda.size();
+ int X = lambda.dimensions().width;
+ const double min = 1 - lambdaBound, max = 1 + lambdaBound;
+ Array2D<double> oldLambda = lambda;
+ int i;
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
+ lambda[0] = std::clamp(lambda[0], min, max);
+ for (i = 1; i < X; i++) {
+ lambda[i] = computeLambdaBottom(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i < XY - X; i++) {
+ lambda[i] = computeLambdaInterior(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i < XY - 1; i++) {
+ lambda[i] = computeLambdaTop(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ /*
+ * Also solve the system from bottom to top, to help spread the updates
+ * better.
+ */
+ lambda[i] = computeLambdaTopEnd(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ for (i = XY - 2; i >= XY - X; i--) {
+ lambda[i] = computeLambdaTop(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i >= X; i--) {
+ lambda[i] = computeLambdaInterior(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ for (; i >= 1; i--) {
+ lambda[i] = computeLambdaBottom(i, M, lambda);
+ lambda[i] = std::clamp(lambda[i], min, max);
+ }
+ lambda[0] = computeLambdaBottomStart(0, M, lambda);
+ lambda[0] = std::clamp(lambda[0], min, max);
+ double maxDiff = 0;
+ for (i = 0; i < XY; i++) {
+ lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega;
+ if (std::abs(lambda[i] - oldLambda[i]) > std::abs(maxDiff))
+ maxDiff = lambda[i] - oldLambda[i];
+ }
+ return maxDiff;
+}
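+
+/*
+ * Each sweep above is Gauss-Seidel with successive over-relaxation: every
+ * lambda is first replaced by the neighbour average encoded in M, then
+ * pushed past that value by the factor omega. With the config default of
+ * omega = 1.3, the final loop computes
+ *
+ *     lambda_new = lambda_old + 1.3 * (lambda_gs - lambda_old)
+ *
+ * which typically converges faster than plain iteration for omega between
+ * 1 and 2. Solving forwards and then backwards in the same call helps the
+ * updates propagate from both ends of the table.
+ */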
+
+/* Normalise the values so that the smallest value is 1. */
+static void normalise(Array2D<double> &results)
+{
+ double minval = *std::min_element(results.begin(), results.end());
+ std::for_each(results.begin(), results.end(),
+ [minval](double &val) { val /= minval; });
+}
+
+/* Rescale the values so that the average value is 1. */
+static void reaverage(Array2D<double> &data)
+{
+ double sum = std::accumulate(data.begin(), data.end(), 0.0);
+ double ratio = 1 / (sum / data.size());
+ std::for_each(data.begin(), data.end(),
+ [ratio](double &val) { val *= ratio; });
+}
+
+static void runMatrixIterations(const Array2D<double> &C,
+ Array2D<double> &lambda,
+ const SparseArray<double> &W,
+ SparseArray<double> &M, double omega,
+ unsigned int nIter, double threshold, double lambdaBound)
+{
+ constructM(C, W, M);
+ double lastMaxDiff = std::numeric_limits<double>::max();
+ for (unsigned int i = 0; i < nIter; i++) {
+ double maxDiff = std::abs(gaussSeidel2Sor(M, omega, lambda, lambdaBound));
+ if (maxDiff < threshold) {
+ LOG(RPiAlsc, Debug)
+ << "Stop after " << i + 1 << " iterations";
+ break;
+ }
+ /*
+ * This happens very occasionally (so make a note), though it
+ * doesn't seem to matter.
+ */
+ if (maxDiff > lastMaxDiff)
+ LOG(RPiAlsc, Debug)
+ << "Iteration " << i << ": maxDiff gone up "
+ << lastMaxDiff << " to " << maxDiff;
+ lastMaxDiff = maxDiff;
+ }
+ /* We're going to normalise the lambdas so the total average is 1. */
+ reaverage(lambda);
+}
+
+static void addLuminanceRb(Array2D<double> &result, const Array2D<double> &lambda,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ for (unsigned int i = 0; i < result.size(); i++)
+ result[i] = lambda[i] * ((luminanceLut[i] - 1) * luminanceStrength + 1);
+}
+
+static void addLuminanceG(Array2D<double> &result, double lambda,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ for (unsigned int i = 0; i < result.size(); i++)
+ result[i] = lambda * ((luminanceLut[i] - 1) * luminanceStrength + 1);
+}
+
+void addLuminanceToTables(std::array<Array2D<double>, 3> &results,
+ const Array2D<double> &lambdaR,
+ double lambdaG, const Array2D<double> &lambdaB,
+ const Array2D<double> &luminanceLut,
+ double luminanceStrength)
+{
+ addLuminanceRb(results[0], lambdaR, luminanceLut, luminanceStrength);
+ addLuminanceG(results[1], lambdaG, luminanceLut, luminanceStrength);
+ addLuminanceRb(results[2], lambdaB, luminanceLut, luminanceStrength);
+ for (auto &r : results)
+ normalise(r);
+}
+
+void Alsc::doAlsc()
+{
+ Array2D<double> &cr = tmpC_[0], &cb = tmpC_[1], &calTableR = tmpC_[2],
+ &calTableB = tmpC_[3], &calTableTmp = tmpC_[4];
+ SparseArray<double> &wr = tmpM_[0], &wb = tmpM_[1], &M = tmpM_[2];
+
+ /*
+ * Calculate our R/B ("Cr"/"Cb") colour statistics, and assess which are
+ * usable.
+ */
+ calculateCrCb(statistics_, cr, cb, config_.minCount, config_.minG);
+ /*
+ * Fetch the new calibrations (if any) for this CT. Resample them in
+ * case the camera mode is not full-frame.
+ */
+ getCalTable(ct_, config_.calibrationsCr, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableR);
+ getCalTable(ct_, config_.calibrationsCb, calTableTmp);
+ resampleCalTable(calTableTmp, cameraMode_, calTableB);
+ /*
+ * You could print out the cal tables for this image here, if you're
+ * tuning the algorithm...
+ * Apply any calibration to the statistics, so the adaptive algorithm
+ * makes only the extra adjustments.
+ */
+ applyCalTable(calTableR, cr);
+ applyCalTable(calTableB, cb);
+ /* Compute weights between zones. */
+ computeW(cr, config_.sigmaCr, wr);
+ computeW(cb, config_.sigmaCb, wb);
+ /* Run Gauss-Seidel iterations over the resulting matrix, for R and B. */
+ runMatrixIterations(cr, lambdaR_, wr, M, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ runMatrixIterations(cb, lambdaB_, wb, M, config_.omega, config_.nIter,
+ config_.threshold, config_.lambdaBound);
+ /*
+ * Fold the calibrated gains into our final lambda values. (Note that on
+ * the next run, we re-start with the lambda values that don't have the
+ * calibration gains included.)
+ */
+ compensateLambdasForCal(calTableR, lambdaR_, asyncLambdaR_);
+ compensateLambdasForCal(calTableB, lambdaB_, asyncLambdaB_);
+ /* Fold in the luminance table at the appropriate strength. */
+ addLuminanceToTables(asyncResults_, asyncLambdaR_, 1.0,
+ asyncLambdaB_, luminanceTable_,
+ config_.luminanceStrength);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Alsc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/alsc.h b/src/ipa/rpi/controller/rpi/alsc.h
new file mode 100644
index 00000000..31087982
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/alsc.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * ALSC (auto lens shading correction) control algorithm
+ */
+#pragma once
+
+#include <array>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+#include "../algorithm.h"
+#include "../alsc_status.h"
+#include "../statistics.h"
+
+namespace RPiController {
+
+/* Algorithm to generate automagic LSC (Lens Shading Correction) tables. */
+
+/*
+ * The Array2D class is a very thin wrapper round std::vector so that it can
+ * be used in exactly the same way in the code but carries its correct width
+ * and height ("dimensions") with it.
+ */
+
+template<typename T>
+class Array2D
+{
+public:
+ using Size = libcamera::Size;
+
+ const Size &dimensions() const { return dimensions_; }
+
+ size_t size() const { return data_.size(); }
+
+ const std::vector<T> &data() const { return data_; }
+
+ void resize(const Size &dims)
+ {
+ dimensions_ = dims;
+ data_.resize(dims.width * dims.height);
+ }
+
+ void resize(const Size &dims, const T &value)
+ {
+ resize(dims);
+ std::fill(data_.begin(), data_.end(), value);
+ }
+
+ T &operator[](int index) { return data_[index]; }
+
+ const T &operator[](int index) const { return data_[index]; }
+
+ T *ptr() { return data_.data(); }
+
+ const T *ptr() const { return data_.data(); }
+
+ auto begin() { return data_.begin(); }
+ auto end() { return data_.end(); }
+
+private:
+ Size dimensions_;
+ std::vector<T> data_;
+};
+
+/*
+ * We'll use the term SparseArray for the large sparse matrices that are
+ * XY tall but have only 4 non-zero elements on each row.
+ */
+
+template<typename T>
+using SparseArray = std::vector<std::array<T, 4>>;
+
+struct AlscCalibration {
+ double ct;
+ Array2D<double> table;
+};
+
+struct AlscConfig {
+ /* Only repeat the ALSC calculation every "this many" frames */
+ uint16_t framePeriod;
+ /* number of initial frames for which speed is taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ /* IIR filter speed applied to algorithm results */
+ double speed;
+ double sigmaCr;
+ double sigmaCb;
+ double minCount;
+ uint16_t minG;
+ double omega;
+ uint32_t nIter;
+ Array2D<double> luminanceLut;
+ double luminanceStrength;
+ std::vector<AlscCalibration> calibrationsCr;
+ std::vector<AlscCalibration> calibrationsCb;
+ double defaultCt; /* colour temperature if no metadata found */
+ double threshold; /* iteration termination threshold */
+ double lambdaBound; /* upper/lower bound for lambda from a value of 1 */
+ libcamera::Size tableSize;
+};
+
+class Alsc : public Algorithm
+{
+public:
+ Alsc(Controller *controller = NULL);
+ ~Alsc();
+ char const *name() const override;
+ void initialise() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ /* configuration is read-only, and available to both threads */
+ AlscConfig config_;
+ bool firstTime_;
+ CameraMode cameraMode_;
+ Array2D<double> luminanceTable_;
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+ /*
+ * The following are only for the synchronous thread to use:
+ * for sync thread to note it has asked async thread to run
+ */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ /* counts up to startupFrames */
+ int frameCount_;
+ /* counts up to startupFrames for Process function */
+ int frameCount2_;
+ std::array<Array2D<double>, 3> syncResults_;
+ std::array<Array2D<double>, 3> prevSyncResults_;
+ void waitForAsyncThread();
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, Metadata *imageMetadata);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ double ct_;
+ RgbyRegions statistics_;
+ std::array<Array2D<double>, 3> asyncResults_;
+ Array2D<double> asyncLambdaR_;
+ Array2D<double> asyncLambdaB_;
+ void doAlsc();
+ Array2D<double> lambdaR_;
+ Array2D<double> lambdaB_;
+
+ /* Temporaries for the computations */
+ std::array<Array2D<double>, 5> tmpC_;
+ std::array<SparseArray<double>, 3> tmpM_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp
new file mode 100644
index 00000000..8479ae40
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/awb.cpp
@@ -0,0 +1,797 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm
+ */
+
+#include <assert.h>
+#include <cmath>
+#include <functional>
+
+#include <libcamera/base/log.h>
+
+#include "../lux_status.h"
+
+#include "alsc_status.h"
+#include "awb.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiAwb)
+
+constexpr double kDefaultCT = 4500.0;
+
+#define NAME "rpi.awb"
+
+/*
+ * todo - the locking in this algorithm needs some tidying up as has been done
+ * elsewhere (ALSC and AGC).
+ */
+
+int AwbMode::read(const libcamera::YamlObject &params)
+{
+ auto value = params["lo"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctLo = *value;
+
+ value = params["hi"].get<double>();
+ if (!value)
+ return -EINVAL;
+ ctHi = *value;
+
+ return 0;
+}
+
+int AwbPrior::read(const libcamera::YamlObject &params)
+{
+ auto value = params["lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ lux = *value;
+
+ prior = params["prior"].get<ipa::Pwl>(ipa::Pwl{});
+ return prior.empty() ? -EINVAL : 0;
+}
+
+static int readCtCurve(ipa::Pwl &ctR, ipa::Pwl &ctB, const libcamera::YamlObject &params)
+{
+ if (params.size() % 3) {
+ LOG(RPiAwb, Error) << "AwbConfig: incomplete CT curve entry";
+ return -EINVAL;
+ }
+
+ if (params.size() < 6) {
+ LOG(RPiAwb, Error) << "AwbConfig: insufficient points in CT curve";
+ return -EINVAL;
+ }
+
+ const auto &list = params.asList();
+
+ for (auto it = list.begin(); it != list.end(); it++) {
+ auto value = it->get<double>();
+ if (!value)
+ return -EINVAL;
+ double ct = *value;
+
+ assert(it == list.begin() || ct != ctR.domain().end);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctR.append(ct, *value);
+
+ value = (++it)->get<double>();
+ if (!value)
+ return -EINVAL;
+ ctB.append(ct, *value);
+ }
+
+ return 0;
+}
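+
+/*
+ * For illustration (numbers invented, not from a real tuning file), a
+ * "ct_curve" list such as
+ *
+ *     ct_curve: [ 2850, 0.9, 0.4, 5500, 0.5, 0.8 ]
+ *
+ * appends the points (2850, 0.9) and (5500, 0.5) to ctR, and the points
+ * (2850, 0.4) and (5500, 0.8) to ctB.
+ */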
+
+int AwbConfig::read(const libcamera::YamlObject &params)
+{
+ int ret;
+
+ bayes = params["bayes"].get<int>(1);
+ framePeriod = params["frame_period"].get<uint16_t>(10);
+ startupFrames = params["startup_frames"].get<uint16_t>(10);
+ convergenceFrames = params["convergence_frames"].get<unsigned int>(3);
+ speed = params["speed"].get<double>(0.05);
+
+ if (params.contains("ct_curve")) {
+ ret = readCtCurve(ctR, ctB, params["ct_curve"]);
+ if (ret)
+ return ret;
+ /* We will want the inverse functions of these too. */
+ ctRInverse = ctR.inverse().first;
+ ctBInverse = ctB.inverse().first;
+ }
+
+ if (params.contains("priors")) {
+ for (const auto &p : params["priors"].asList()) {
+ AwbPrior prior;
+ ret = prior.read(p);
+ if (ret)
+ return ret;
+ if (!priors.empty() && prior.lux <= priors.back().lux) {
+				LOG(RPiAwb, Error) << "AwbConfig: Priors must be ordered in increasing lux value";
+ return -EINVAL;
+ }
+ priors.push_back(prior);
+ }
+ if (priors.empty()) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
+ return -EINVAL;
+ }
+ }
+ if (params.contains("modes")) {
+ for (const auto &[key, value] : params["modes"].asDict()) {
+ ret = modes[key].read(value);
+ if (ret)
+ return ret;
+ if (defaultMode == nullptr)
+ defaultMode = &modes[key];
+ }
+ if (defaultMode == nullptr) {
+ LOG(RPiAwb, Error) << "AwbConfig: no AWB modes configured";
+ return -EINVAL;
+ }
+ }
+
+ minPixels = params["min_pixels"].get<double>(16.0);
+ minG = params["min_G"].get<uint16_t>(32);
+ minRegions = params["min_regions"].get<uint32_t>(10);
+ deltaLimit = params["delta_limit"].get<double>(0.2);
+ coarseStep = params["coarse_step"].get<double>(0.2);
+ transversePos = params["transverse_pos"].get<double>(0.01);
+ transverseNeg = params["transverse_neg"].get<double>(0.01);
+ if (transversePos <= 0 || transverseNeg <= 0) {
+ LOG(RPiAwb, Error) << "AwbConfig: transverse_pos/neg must be > 0";
+ return -EINVAL;
+ }
+
+ sensitivityR = params["sensitivity_r"].get<double>(1.0);
+ sensitivityB = params["sensitivity_b"].get<double>(1.0);
+
+ if (bayes) {
+ if (ctR.empty() || ctB.empty() || priors.empty() ||
+ defaultMode == nullptr) {
+ LOG(RPiAwb, Warning)
+ << "Bayesian AWB mis-configured - switch to Grey method";
+ bayes = false;
+ }
+ }
+	fast = params["fast"].get<int>(bayes); /* default to fast for Bayesian, otherwise slow */
+ whitepointR = params["whitepoint_r"].get<double>(0.0);
+ whitepointB = params["whitepoint_b"].get<double>(0.0);
+	if (bayes == false)
+		sensitivityR = sensitivityB = 1.0; /* sensitivities make no sense without the Bayesian algorithm */
+ /*
+ * The biasProportion parameter adds a small proportion of the counted
+	 * pixels to a region biased to the biasCT colour temperature.
+ *
+ * A typical value for biasProportion would be between 0.05 to 0.1.
+ */
+ biasProportion = params["bias_proportion"].get<double>(0.0);
+ biasCT = params["bias_ct"].get<double>(kDefaultCT);
+ return 0;
+}
+
+Awb::Awb(Controller *controller)
+ : AwbAlgorithm(controller)
+{
+ asyncAbort_ = asyncStart_ = asyncStarted_ = asyncFinished_ = false;
+ mode_ = nullptr;
+ manualR_ = manualB_ = 0.0;
+ asyncThread_ = std::thread(std::bind(&Awb::asyncFunc, this));
+}
+
+Awb::~Awb()
+{
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncAbort_ = true;
+ }
+ asyncSignal_.notify_one();
+ asyncThread_.join();
+}
+
+char const *Awb::name() const
+{
+ return NAME;
+}
+
+int Awb::read(const libcamera::YamlObject &params)
+{
+ return config_.read(params);
+}
+
+void Awb::initialise()
+{
+ frameCount_ = framePhase_ = 0;
+ /*
+ * Put something sane into the status that we are filtering towards,
+ * just in case the first few frames don't have anything meaningful in
+ * them.
+ */
+ if (!config_.ctR.empty() && !config_.ctB.empty()) {
+ syncResults_.temperatureK = config_.ctR.domain().clamp(4000);
+ syncResults_.gainR = 1.0 / config_.ctR.eval(syncResults_.temperatureK);
+ syncResults_.gainG = 1.0;
+ syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK);
+ } else {
+ /* random values just to stop the world blowing up */
+ syncResults_.temperatureK = kDefaultCT;
+ syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0;
+ }
+ prevSyncResults_ = syncResults_;
+ asyncResults_ = syncResults_;
+}
+
+void Awb::initialValues(double &gainR, double &gainB)
+{
+ gainR = syncResults_.gainR;
+ gainB = syncResults_.gainB;
+}
+
+void Awb::disableAuto()
+{
+ /* Freeze the most recent values, and treat them as manual gains */
+ manualR_ = syncResults_.gainR = prevSyncResults_.gainR;
+ manualB_ = syncResults_.gainB = prevSyncResults_.gainB;
+ syncResults_.gainG = prevSyncResults_.gainG;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
+}
+
+void Awb::enableAuto()
+{
+ manualR_ = 0.0;
+ manualB_ = 0.0;
+}
+
+unsigned int Awb::getConvergenceFrames() const
+{
+ /*
+ * If not in auto mode, there is no convergence
+ * to happen, so no need to drop any frames - return zero.
+ */
+ if (!isAutoEnabled())
+ return 0;
+ else
+ return config_.convergenceFrames;
+}
+
+void Awb::setMode(std::string const &modeName)
+{
+ modeName_ = modeName;
+}
+
+void Awb::setManualGains(double manualR, double manualB)
+{
+	/* If either of these is 0.0, we switch back to auto. */
+ manualR_ = manualR;
+ manualB_ = manualB;
+ /*
+ * If not in auto mode, set these values into the syncResults which
+ * means that Prepare() will adopt them immediately.
+ */
+ if (!isAutoEnabled()) {
+ syncResults_.gainR = prevSyncResults_.gainR = manualR_;
+ syncResults_.gainG = prevSyncResults_.gainG = 1.0;
+ syncResults_.gainB = prevSyncResults_.gainB = manualB_;
+ if (config_.bayes) {
+ /* Also estimate the best corresponding colour temperature from the curves. */
+ double ctR = config_.ctRInverse.eval(config_.ctRInverse.domain().clamp(1 / manualR_));
+ double ctB = config_.ctBInverse.eval(config_.ctBInverse.domain().clamp(1 / manualB_));
+ prevSyncResults_.temperatureK = (ctR + ctB) / 2;
+ syncResults_.temperatureK = prevSyncResults_.temperatureK;
+ }
+ }
+}
+
+void Awb::setColourTemperature(double temperatureK)
+{
+ if (!config_.bayes) {
+ LOG(RPiAwb, Warning) << "AWB uncalibrated - cannot set colour temperature";
+ return;
+ }
+
+ temperatureK = config_.ctR.domain().clamp(temperatureK);
+ manualR_ = 1 / config_.ctR.eval(temperatureK);
+ manualB_ = 1 / config_.ctB.eval(temperatureK);
+
+ syncResults_.temperatureK = temperatureK;
+ syncResults_.gainR = manualR_;
+ syncResults_.gainG = 1.0;
+ syncResults_.gainB = manualB_;
+ prevSyncResults_ = syncResults_;
+}
+
+void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ Metadata *metadata)
+{
+ /* Let other algorithms know the current white balance values. */
+ metadata->set("awb.status", prevSyncResults_);
+}
+
+bool Awb::isAutoEnabled() const
+{
+ return manualR_ == 0.0 || manualB_ == 0.0;
+}
+
+void Awb::fetchAsyncResults()
+{
+ LOG(RPiAwb, Debug) << "Fetch AWB results";
+ asyncFinished_ = false;
+ asyncStarted_ = false;
+ /*
+ * It's possible manual gains could be set even while the async
+ * thread was running, so only copy the results if still in auto mode.
+ */
+ if (isAutoEnabled())
+ syncResults_ = asyncResults_;
+}
+
+void Awb::restartAsync(StatisticsPtr &stats, double lux)
+{
+ LOG(RPiAwb, Debug) << "Starting AWB calculation";
+ /* this makes a new reference which belongs to the asynchronous thread */
+ statistics_ = stats;
+ /* store the mode as it could technically change */
+ auto m = config_.modes.find(modeName_);
+ mode_ = m != config_.modes.end()
+ ? &m->second
+ : (mode_ == nullptr ? config_.defaultMode : mode_);
+ lux_ = lux;
+ framePhase_ = 0;
+ asyncStarted_ = true;
+ size_t len = modeName_.copy(asyncResults_.mode,
+ sizeof(asyncResults_.mode) - 1);
+ asyncResults_.mode[len] = '\0';
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncStart_ = true;
+ }
+ asyncSignal_.notify_one();
+}
+
+void Awb::prepare(Metadata *imageMetadata)
+{
+ if (frameCount_ < (int)config_.startupFrames)
+ frameCount_++;
+ double speed = frameCount_ < (int)config_.startupFrames
+ ? 1.0
+ : config_.speed;
+ LOG(RPiAwb, Debug)
+ << "frame_count " << frameCount_ << " speed " << speed;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ if (asyncStarted_ && asyncFinished_)
+ fetchAsyncResults();
+ }
+ /* Finally apply IIR filter to results and put into metadata. */
+ memcpy(prevSyncResults_.mode, syncResults_.mode,
+ sizeof(prevSyncResults_.mode));
+ prevSyncResults_.temperatureK = speed * syncResults_.temperatureK +
+ (1.0 - speed) * prevSyncResults_.temperatureK;
+ prevSyncResults_.gainR = speed * syncResults_.gainR +
+ (1.0 - speed) * prevSyncResults_.gainR;
+ prevSyncResults_.gainG = speed * syncResults_.gainG +
+ (1.0 - speed) * prevSyncResults_.gainG;
+ prevSyncResults_.gainB = speed * syncResults_.gainB +
+ (1.0 - speed) * prevSyncResults_.gainB;
+ imageMetadata->set("awb.status", prevSyncResults_);
+ LOG(RPiAwb, Debug)
+ << "Using AWB gains r " << prevSyncResults_.gainR << " g "
+ << prevSyncResults_.gainG << " b "
+ << prevSyncResults_.gainB;
+}
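+
+/*
+ * A sketch of the IIR step above with invented numbers: if speed is 0.05,
+ * the previous filtered gainR is 2.0 and the latest computed gainR is 3.0,
+ * the new filtered value is 0.05 * 3.0 + 0.95 * 2.0 = 2.05, so each fresh
+ * AWB result only nudges the applied gains, avoiding visible jumps.
+ */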
+
+void Awb::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /* Count frames since we last poked the async thread. */
+ if (framePhase_ < (int)config_.framePeriod)
+ framePhase_++;
+ LOG(RPiAwb, Debug) << "frame_phase " << framePhase_;
+ /* We do not restart the async thread if we're not in auto mode. */
+ if (isAutoEnabled() &&
+ (framePhase_ >= (int)config_.framePeriod ||
+ frameCount_ < (int)config_.startupFrames)) {
+ /* Update any settings and any image metadata that we need. */
+ struct LuxStatus luxStatus = {};
+ luxStatus.lux = 400; /* in case no metadata */
+ if (imageMetadata->get("lux.status", luxStatus) != 0)
+ LOG(RPiAwb, Debug) << "No lux metadata found";
+ LOG(RPiAwb, Debug) << "Awb lux value is " << luxStatus.lux;
+
+ if (asyncStarted_ == false)
+ restartAsync(stats, luxStatus.lux);
+ }
+}
+
+void Awb::asyncFunc()
+{
+ while (true) {
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ asyncSignal_.wait(lock, [&] {
+ return asyncStart_ || asyncAbort_;
+ });
+ asyncStart_ = false;
+ if (asyncAbort_)
+ break;
+ }
+ doAwb();
+ {
+ std::lock_guard<std::mutex> lock(mutex_);
+ asyncFinished_ = true;
+ }
+ syncSignal_.notify_one();
+ }
+}
+
+static void generateStats(std::vector<Awb::RGB> &zones,
+ StatisticsPtr &stats, double minPixels,
+ double minG, Metadata &globalMetadata,
+ double biasProportion, double biasCtR, double biasCtB)
+{
+ std::scoped_lock<RPiController::Metadata> l(globalMetadata);
+
+ for (unsigned int i = 0; i < stats->awbRegions.numRegions(); i++) {
+ Awb::RGB zone;
+ auto &region = stats->awbRegions.get(i);
+ if (region.counted >= minPixels) {
+ zone.G = region.val.gSum / region.counted;
+ if (zone.G < minG)
+ continue;
+ zone.R = region.val.rSum / region.counted;
+ zone.B = region.val.bSum / region.counted;
+ /*
+ * Add some bias samples to allow the search to tend to a
+ * bias CT in failure cases.
+ */
+ const unsigned int proportion = biasProportion * region.counted;
+ zone.R += proportion * biasCtR;
+ zone.B += proportion * biasCtB;
+ zone.G += proportion * 1.0;
+ /* Factor in the ALSC applied colour shading correction if required. */
+ const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status");
+ if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) {
+ zone.R *= alscStatus->r[i];
+ zone.G *= alscStatus->g[i];
+ zone.B *= alscStatus->b[i];
+ }
+ zones.push_back(zone);
+ }
+ }
+}
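+
+/*
+ * To illustrate the bias samples above with invented numbers: with
+ * biasProportion 0.05 and a region of 1000 counted pixels, a weight of 50
+ * is added to the zone's G (and 50 scaled by the bias CT's r and b ratios
+ * to R and B), gently pulling the search towards biasCT when the scene
+ * itself provides little evidence.
+ */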
+
+void Awb::prepareStats()
+{
+ zones_.clear();
+ /*
+ * LSC has already been applied to the stats in this pipeline, so stop
+ * any LSC compensation. We also ignore config_.fast in this version.
+ */
+ const double biasCtR = config_.bayes ? config_.ctR.eval(config_.biasCT) : 0;
+ const double biasCtB = config_.bayes ? config_.ctB.eval(config_.biasCT) : 0;
+ generateStats(zones_, statistics_, config_.minPixels,
+ config_.minG, getGlobalMetadata(),
+ config_.biasProportion, biasCtR, biasCtB);
+ /*
+ * apply sensitivities, so values appear to come from our "canonical"
+ * sensor.
+ */
+ for (auto &zone : zones_) {
+ zone.R *= config_.sensitivityR;
+ zone.B *= config_.sensitivityB;
+ }
+}
+
+double Awb::computeDelta2Sum(double gainR, double gainB)
+{
+ /*
+ * Compute the sum of the squared colour error (non-greyness) as it
+ * appears in the log likelihood equation.
+ */
+ double delta2Sum = 0;
+ for (auto &z : zones_) {
+ double deltaR = gainR * z.R - 1 - config_.whitepointR;
+ double deltaB = gainB * z.B - 1 - config_.whitepointB;
+ double delta2 = deltaR * deltaR + deltaB * deltaB;
+ /* LOG(RPiAwb, Debug) << "deltaR " << deltaR << " deltaB " << deltaB << " delta2 " << delta2; */
+ delta2 = std::min(delta2, config_.deltaLimit);
+ delta2Sum += delta2;
+ }
+ return delta2Sum;
+}
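+
+/*
+ * For intuition: each zone (whose R and B have already been divided by G)
+ * contributes (gainR * R - 1 - whitepointR)^2 + (gainB * B - 1 - whitepointB)^2,
+ * which is zero when the candidate gains render the zone perfectly grey at
+ * the whitepoint, and is clamped at deltaLimit so that a few strongly
+ * coloured zones cannot dominate the total.
+ */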
+
+ipa::Pwl Awb::interpolatePrior()
+{
+ /*
+ * Interpolate the prior log likelihood function for our current lux
+ * value.
+ */
+ if (lux_ <= config_.priors.front().lux)
+ return config_.priors.front().prior;
+ else if (lux_ >= config_.priors.back().lux)
+ return config_.priors.back().prior;
+ else {
+ int idx = 0;
+ /* find which two we lie between */
+ while (config_.priors[idx + 1].lux < lux_)
+ idx++;
+ double lux0 = config_.priors[idx].lux,
+ lux1 = config_.priors[idx + 1].lux;
+ return ipa::Pwl::combine(config_.priors[idx].prior,
+ config_.priors[idx + 1].prior,
+ [&](double /*x*/, double y0, double y1) {
+ return y0 + (y1 - y0) *
+ (lux_ - lux0) / (lux1 - lux0);
+ });
+ }
+}
+
+static double interpolateQuadratic(ipa::Pwl::Point const &a, ipa::Pwl::Point const &b,
+ ipa::Pwl::Point const &c)
+{
+ /*
+ * Given 3 points on a curve, find the extremum of the function in that
+ * interval by fitting a quadratic.
+ */
+ const double eps = 1e-3;
+ ipa::Pwl::Point ca = c - a, ba = b - a;
+ double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x());
+ if (std::abs(denominator) > eps) {
+ double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x();
+ double result = numerator / denominator + a.x();
+ return std::max(a.x(), std::min(c.x(), result));
+ }
+ /* has degenerated to straight line segment */
+ return a.y() < c.y() - eps ? a.x() : (c.y() < a.y() - eps ? c.x() : b.x());
+}
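+
+/*
+ * Sketch of the maths above: fit y = A(x - a.x)^2 + B(x - a.x) + C through
+ * the three points and set dy/dx = 0, giving an extremum at a.x - B / (2A),
+ * which works out to the numerator/denominator expression in the code; the
+ * result is then clamped to the interval [a.x, c.x].
+ */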
+
+double Awb::coarseSearch(ipa::Pwl const &prior)
+{
+ points_.clear(); /* assume doesn't deallocate memory */
+ size_t bestPoint = 0;
+ double t = mode_->ctLo;
+ int spanR = 0, spanB = 0;
+ /* Step down the CT curve evaluating log likelihood. */
+ while (true) {
+ double r = config_.ctR.eval(t, &spanR);
+ double b = config_.ctB.eval(t, &spanB);
+ double gainR = 1 / r, gainB = 1 / b;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double priorLogLikelihood = prior.eval(prior.domain().clamp(t));
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "t: " << t << " gain R " << gainR << " gain B "
+ << gainB << " delta2_sum " << delta2Sum
+ << " prior " << priorLogLikelihood << " final "
+ << finalLogLikelihood;
+ points_.push_back(ipa::Pwl::Point({ t, finalLogLikelihood }));
+ if (points_.back().y() < points_[bestPoint].y())
+ bestPoint = points_.size() - 1;
+ if (t == mode_->ctHi)
+ break;
+ /* for even steps along the r/b curve scale them by the current t */
+ t = std::min(t + t / 10 * config_.coarseStep, mode_->ctHi);
+ }
+ t = points_[bestPoint].x();
+ LOG(RPiAwb, Debug) << "Coarse search found CT " << t;
+ /*
+ * We have the best point of the search, but refine it with a quadratic
+ * interpolation around its neighbours.
+ */
+ if (points_.size() > 2) {
+ unsigned long bp = std::min(bestPoint, points_.size() - 2);
+ bestPoint = std::max(1UL, bp);
+		t = interpolateQuadratic(points_[bestPoint - 1],
+ points_[bestPoint],
+ points_[bestPoint + 1]);
+ LOG(RPiAwb, Debug)
+ << "After quadratic refinement, coarse search has CT "
+ << t;
+ }
+ return t;
+}
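+
+/*
+ * To illustrate the stepping above: with the default coarseStep of 0.2 each
+ * iteration advances t by t / 10 * 0.2, i.e. by 2% of the current CT, so a
+ * search starting at 3000K would visit roughly 3000, 3060, 3121, ... until
+ * it reaches the mode's ctHi.
+ */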
+
+void Awb::fineSearch(double &t, double &r, double &b, ipa::Pwl const &prior)
+{
+ int spanR = -1, spanB = -1;
+ config_.ctR.eval(t, &spanR);
+ config_.ctB.eval(t, &spanB);
+ double step = t / 10 * config_.coarseStep * 0.1;
+ int nsteps = 5;
+ double rDiff = config_.ctR.eval(t + nsteps * step, &spanR) -
+ config_.ctR.eval(t - nsteps * step, &spanR);
+ double bDiff = config_.ctB.eval(t + nsteps * step, &spanB) -
+ config_.ctB.eval(t - nsteps * step, &spanB);
+ ipa::Pwl::Point transverse({ bDiff, -rDiff });
+ if (transverse.length2() < 1e-6)
+ return;
+ /*
+ * unit vector orthogonal to the b vs. r function (pointing outwards
+ * with r and b increasing)
+ */
+ transverse = transverse / transverse.length();
+ double bestLogLikelihood = 0, bestT = 0, bestR = 0, bestB = 0;
+ double transverseRange = config_.transverseNeg + config_.transversePos;
+ const int maxNumDeltas = 12;
+ /* a transverse step approximately every 0.01 r/b units */
+ int numDeltas = floor(transverseRange * 100 + 0.5) + 1;
+ numDeltas = numDeltas < 3 ? 3 : (numDeltas > maxNumDeltas ? maxNumDeltas : numDeltas);
+ /*
+ * Step down CT curve. March a bit further if the transverse range is
+ * large.
+ */
+ nsteps += numDeltas;
+ for (int i = -nsteps; i <= nsteps; i++) {
+ double tTest = t + i * step;
+ double priorLogLikelihood =
+ prior.eval(prior.domain().clamp(tTest));
+ double rCurve = config_.ctR.eval(tTest, &spanR);
+ double bCurve = config_.ctB.eval(tTest, &spanB);
+ /* x will be distance off the curve, y the log likelihood there */
+ ipa::Pwl::Point points[maxNumDeltas];
+ int bestPoint = 0;
+ /* Take some measurements transversely *off* the CT curve. */
+ for (int j = 0; j < numDeltas; j++) {
+ points[j][0] = -config_.transverseNeg +
+ (transverseRange * j) / (numDeltas - 1);
+ ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
+ transverse * points[j].x();
+ double rTest = rbTest.x(), bTest = rbTest.y();
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ points[j][1] = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "At t " << tTest << " r " << rTest << " b "
+ << bTest << ": " << points[j].y();
+ if (points[j].y() < points[bestPoint].y())
+ bestPoint = j;
+ }
+ /*
+ * We have NUM_DELTAS points transversely across the CT curve,
+ * now let's do a quadratic interpolation for the best result.
+ */
+ bestPoint = std::max(1, std::min(bestPoint, numDeltas - 2));
+ ipa::Pwl::Point rbTest = ipa::Pwl::Point({ rCurve, bCurve }) +
+			transverse * interpolateQuadratic(points[bestPoint - 1],
+ points[bestPoint],
+ points[bestPoint + 1]);
+ double rTest = rbTest.x(), bTest = rbTest.y();
+ double gainR = 1 / rTest, gainB = 1 / bTest;
+ double delta2Sum = computeDelta2Sum(gainR, gainB);
+ double finalLogLikelihood = delta2Sum - priorLogLikelihood;
+ LOG(RPiAwb, Debug)
+ << "Finally "
+ << tTest << " r " << rTest << " b " << bTest << ": "
+ << finalLogLikelihood
+ << (finalLogLikelihood < bestLogLikelihood ? " BEST" : "");
+ if (bestT == 0 || finalLogLikelihood < bestLogLikelihood)
+ bestLogLikelihood = finalLogLikelihood,
+ bestT = tTest, bestR = rTest, bestB = bTest;
+ }
+ t = bestT, r = bestR, b = bestB;
+ LOG(RPiAwb, Debug)
+ << "Fine search found t " << t << " r " << r << " b " << b;
+}
+
+void Awb::awbBayes()
+{
+ /*
+ * May as well divide out G to save computeDelta2Sum from doing it over
+ * and over.
+ */
+ for (auto &z : zones_)
+ z.R = z.R / (z.G + 1), z.B = z.B / (z.G + 1);
+ /*
+ * Get the current prior, and scale according to how many zones are
+ * valid... not entirely sure about this.
+ */
+ ipa::Pwl prior = interpolatePrior();
+ prior *= zones_.size() / (double)(statistics_->awbRegions.numRegions());
+ prior.map([](double x, double y) {
+ LOG(RPiAwb, Debug) << "(" << x << "," << y << ")";
+ });
+ double t = coarseSearch(prior);
+ double r = config_.ctR.eval(t);
+ double b = config_.ctB.eval(t);
+ LOG(RPiAwb, Debug)
+ << "After coarse search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+ /*
+ * Not entirely sure how to handle the fine search yet. Mostly the
+ * estimated CT is already good enough, but the fine search allows us to
+	 * wander transversely off the CT curve. Under some illuminants, where
+ * there may be more or less green light, this may prove beneficial,
+ * though I probably need more real datasets before deciding exactly how
+ * this should be controlled and tuned.
+ */
+ fineSearch(t, r, b, prior);
+ LOG(RPiAwb, Debug)
+ << "After fine search: r " << r << " b " << b << " (gains r "
+ << 1 / r << " b " << 1 / b << ")";
+ /*
+ * Write results out for the main thread to pick up. Remember to adjust
+ * the gains from the ones that the "canonical sensor" would require to
+ * the ones needed by *this* sensor.
+ */
+ asyncResults_.temperatureK = t;
+ asyncResults_.gainR = 1.0 / r * config_.sensitivityR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = 1.0 / b * config_.sensitivityB;
+}
+
+void Awb::awbGrey()
+{
+ LOG(RPiAwb, Debug) << "Grey world AWB";
+ /*
+ * Make a separate list of the derivatives for each of red and blue, so
+ * that we can sort them to exclude the extreme gains. We could
+ * consider some variations, such as normalising all the zones first, or
+ * doing an L2 average etc.
+ */
+ std::vector<RGB> &derivsR(zones_);
+ std::vector<RGB> derivsB(derivsR);
+ std::sort(derivsR.begin(), derivsR.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.R < b.G * a.R;
+ });
+ std::sort(derivsB.begin(), derivsB.end(),
+ [](RGB const &a, RGB const &b) {
+ return a.G * b.B < b.G * a.B;
+ });
+ /* Average the middle half of the values. */
+ int discard = derivsR.size() / 4;
+ RGB sumR(0, 0, 0), sumB(0, 0, 0);
+ for (auto ri = derivsR.begin() + discard,
+ bi = derivsB.begin() + discard;
+ ri != derivsR.end() - discard; ri++, bi++)
+ sumR += *ri, sumB += *bi;
+ double gainR = sumR.G / (sumR.R + 1),
+ gainB = sumB.G / (sumB.B + 1);
+ /*
+	 * The grey world model can't estimate the colour temperature, so use
+	 * a default value.
+ */
+ asyncResults_.temperatureK = kDefaultCT;
+ asyncResults_.gainR = gainR;
+ asyncResults_.gainG = 1.0;
+ asyncResults_.gainB = gainB;
+}
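+
+/*
+ * A sketch with invented numbers: if the averaged middle half gives
+ * sumR = (R 4000, G 6000) and sumB = (B 3000, G 6000), the gains become
+ * gainR = 6000 / 4001 ~= 1.5 and gainB = 6000 / 3001 ~= 2.0, i.e. whatever
+ * scales the average red and blue up to match the average green.
+ */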
+
+void Awb::doAwb()
+{
+ prepareStats();
+ LOG(RPiAwb, Debug) << "Valid zones: " << zones_.size();
+ if (zones_.size() > config_.minRegions) {
+ if (config_.bayes)
+ awbBayes();
+ else
+ awbGrey();
+ LOG(RPiAwb, Debug)
+ << "CT found is "
+ << asyncResults_.temperatureK
+ << " with gains r " << asyncResults_.gainR
+ << " and b " << asyncResults_.gainB;
+ }
+ /*
+ * we're done with these; we may as well relinquish our hold on the
+ * pointer.
+ */
+ statistics_.reset();
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Awb(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h
new file mode 100644
index 00000000..86640f8f
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/awb.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * AWB control algorithm
+ */
+#pragma once
+
+#include <mutex>
+#include <condition_variable>
+#include <thread>
+
+#include <libcamera/geometry.h>
+
+#include "../awb_algorithm.h"
+#include "../awb_status.h"
+#include "../statistics.h"
+
+#include "libipa/pwl.h"
+
+namespace RPiController {
+
+/* Control algorithm to perform AWB calculations. */
+
+struct AwbMode {
+ int read(const libcamera::YamlObject &params);
+ double ctLo; /* low CT value for search */
+ double ctHi; /* high CT value for search */
+};
+
+struct AwbPrior {
+ int read(const libcamera::YamlObject &params);
+ double lux; /* lux level */
+ libcamera::ipa::Pwl prior; /* maps CT to prior log likelihood for this lux level */
+};
+
+struct AwbConfig {
+ AwbConfig() : defaultMode(nullptr) {}
+ int read(const libcamera::YamlObject &params);
+ /* Only repeat the AWB calculation every "this many" frames */
+ uint16_t framePeriod;
+	/* number of initial frames for which speed is taken as 1.0 (maximum) */
+ uint16_t startupFrames;
+ unsigned int convergenceFrames; /* approx number of frames to converge */
+ double speed; /* IIR filter speed applied to algorithm results */
+ bool fast; /* "fast" mode uses a 16x16 rather than 32x32 grid */
+ libcamera::ipa::Pwl ctR; /* function maps CT to r (= R/G) */
+ libcamera::ipa::Pwl ctB; /* function maps CT to b (= B/G) */
+ libcamera::ipa::Pwl ctRInverse; /* inverse of ctR */
+ libcamera::ipa::Pwl ctBInverse; /* inverse of ctB */
+ /* table of illuminant priors at different lux levels */
+ std::vector<AwbPrior> priors;
+ /* AWB "modes" (determines the search range) */
+ std::map<std::string, AwbMode> modes;
+ AwbMode *defaultMode; /* mode used if no mode selected */
+ /*
+ * minimum proportion of pixels counted within AWB region for it to be
+ * "useful"
+ */
+ double minPixels;
+	/* minimum G value of those pixels, to be regarded as "useful" */
+ uint16_t minG;
+ /*
+ * number of AWB regions that must be "useful" in order to do the AWB
+ * calculation
+ */
+ uint32_t minRegions;
+ /* clamp on colour error term (so as not to penalise non-grey excessively) */
+ double deltaLimit;
+ /* step size control in coarse search */
+ double coarseStep;
+ /* how far to wander off CT curve towards "more purple" */
+ double transversePos;
+ /* how far to wander off CT curve towards "more green" */
+ double transverseNeg;
+ /*
+ * red sensitivity ratio (set to canonical sensor's R/G divided by this
+ * sensor's R/G)
+ */
+ double sensitivityR;
+ /*
+ * blue sensitivity ratio (set to canonical sensor's B/G divided by this
+ * sensor's B/G)
+ */
+ double sensitivityB;
+ /* The whitepoint (which we normally "aim" for) can be moved. */
+ double whitepointR;
+ double whitepointB;
+ bool bayes; /* use Bayesian algorithm */
+ /* proportion of counted samples to add for the search bias */
+ double biasProportion;
+ /* CT target for the search bias */
+ double biasCT;
+};
+
+class Awb : public AwbAlgorithm
+{
+public:
+ Awb(Controller *controller = NULL);
+ ~Awb();
+ char const *name() const override;
+ void initialise() override;
+ int read(const libcamera::YamlObject &params) override;
+ unsigned int getConvergenceFrames() const override;
+ void initialValues(double &gainR, double &gainB) override;
+ void setMode(std::string const &name) override;
+ void setManualGains(double manualR, double manualB) override;
+ void setColourTemperature(double temperatureK) override;
+ void enableAuto() override;
+ void disableAuto() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ struct RGB {
+ RGB(double r = 0, double g = 0, double b = 0)
+ : R(r), G(g), B(b)
+ {
+ }
+ double R, G, B;
+ RGB &operator+=(RGB const &other)
+ {
+ R += other.R, G += other.G, B += other.B;
+ return *this;
+ }
+ };
+
+private:
+ bool isAutoEnabled() const;
+ /* configuration is read-only, and available to both threads */
+ AwbConfig config_;
+ std::thread asyncThread_;
+ void asyncFunc(); /* asynchronous thread function */
+ std::mutex mutex_;
+ /* condvar for async thread to wait on */
+ std::condition_variable asyncSignal_;
+ /* condvar for synchronous thread to wait on */
+ std::condition_variable syncSignal_;
+ /* for sync thread to check if async thread finished (requires mutex) */
+ bool asyncFinished_;
+ /* for async thread to check if it's been told to run (requires mutex) */
+ bool asyncStart_;
+ /* for async thread to check if it's been told to quit (requires mutex) */
+ bool asyncAbort_;
+
+	/*
+	 * The following are only for the synchronous thread to use:
+	 * for sync thread to note it has asked async thread to run
+	 */
+ bool asyncStarted_;
+ /* counts up to framePeriod before restarting the async thread */
+ int framePhase_;
+ int frameCount_; /* counts up to startup_frames */
+ AwbStatus syncResults_;
+ AwbStatus prevSyncResults_;
+ std::string modeName_;
+ /*
+ * The following are for the asynchronous thread to use, though the main
+ * thread can set/reset them if the async thread is known to be idle:
+ */
+ void restartAsync(StatisticsPtr &stats, double lux);
+ /* copy out the results from the async thread so that it can be restarted */
+ void fetchAsyncResults();
+ StatisticsPtr statistics_;
+ AwbMode *mode_;
+ double lux_;
+ AwbStatus asyncResults_;
+ void doAwb();
+ void awbBayes();
+ void awbGrey();
+ void prepareStats();
+ double computeDelta2Sum(double gainR, double gainB);
+ libcamera::ipa::Pwl interpolatePrior();
+ double coarseSearch(libcamera::ipa::Pwl const &prior);
+ void fineSearch(double &t, double &r, double &b, libcamera::ipa::Pwl const &prior);
+ std::vector<RGB> zones_;
+ std::vector<libcamera::ipa::Pwl::Point> points_;
+ /* manual r setting */
+ double manualR_;
+ /* manual b setting */
+ double manualB_;
+};
+
+static inline Awb::RGB operator+(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R + b.R, a.G + b.G, a.B + b.B);
+}
+static inline Awb::RGB operator-(Awb::RGB const &a, Awb::RGB const &b)
+{
+ return Awb::RGB(a.R - b.R, a.G - b.G, a.B - b.B);
+}
+static inline Awb::RGB operator*(double d, Awb::RGB const &rgb)
+{
+ return Awb::RGB(d * rgb.R, d * rgb.G, d * rgb.B);
+}
+static inline Awb::RGB operator*(Awb::RGB const &rgb, double d)
+{
+ return d * rgb;
+}
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp
new file mode 100644
index 00000000..4c968f14
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/black_level.cpp
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm
+ */
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "../black_level_status.h"
+
+#include "black_level.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiBlackLevel)
+
+#define NAME "rpi.black_level"
+
+BlackLevel::BlackLevel(Controller *controller)
+ : BlackLevelAlgorithm(controller)
+{
+}
+
+char const *BlackLevel::name() const
+{
+ return NAME;
+}
+
+int BlackLevel::read(const libcamera::YamlObject &params)
+{
+ /* 64 in 10 bits scaled to 16 bits */
+ uint16_t blackLevel = params["black_level"].get<uint16_t>(4096);
+ blackLevelR_ = params["black_level_r"].get<uint16_t>(blackLevel);
+ blackLevelG_ = params["black_level_g"].get<uint16_t>(blackLevel);
+ blackLevelB_ = params["black_level_b"].get<uint16_t>(blackLevel);
+ LOG(RPiBlackLevel, Debug)
+ << " Read black levels red " << blackLevelR_
+ << " green " << blackLevelG_
+ << " blue " << blackLevelB_;
+ return 0;
+}
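+
+/*
+ * A note on the scaling above: black levels are expressed on a 16-bit
+ * scale, so the default of 64 on a 10-bit sensor becomes 64 * 2^6 = 4096.
+ * An overriding tuning entry might look like this (illustrative only):
+ *
+ *     "rpi.black_level": { "black_level": 4096 }
+ */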
+
+void BlackLevel::initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB)
+{
+ blackLevelR = blackLevelR_;
+ blackLevelG = blackLevelG_;
+ blackLevelB = blackLevelB_;
+}
+
+void BlackLevel::prepare(Metadata *imageMetadata)
+{
+ /*
+ * Possibly we should think about doing this in a switchMode or
+ * something?
+ */
+ struct BlackLevelStatus status;
+ status.blackLevelR = blackLevelR_;
+ status.blackLevelG = blackLevelG_;
+ status.blackLevelB = blackLevelB_;
+ imageMetadata->set("black_level.status", status);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new BlackLevel(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/black_level.h b/src/ipa/rpi/controller/rpi/black_level.h
new file mode 100644
index 00000000..f50729db
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/black_level.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * black level control algorithm
+ */
+#pragma once
+
+#include "../black_level_algorithm.h"
+#include "../black_level_status.h"
+
+/* This is our implementation of the "black level algorithm". */
+
+namespace RPiController {
+
+class BlackLevel : public BlackLevelAlgorithm
+{
+public:
+ BlackLevel(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialValues(uint16_t &blackLevelR, uint16_t &blackLevelG,
+ uint16_t &blackLevelB) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double blackLevelR_;
+ double blackLevelG_;
+ double blackLevelB_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/cac.cpp b/src/ipa/rpi/controller/rpi/cac.cpp
new file mode 100644
index 00000000..17779ad5
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/cac.cpp
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * Chromatic Aberration Correction algorithm
+ */
+#include "cac.h"
+
+#include <libcamera/base/log.h>
+
+#include "cac_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiCac)
+
+#define NAME "rpi.cac"
+
+Cac::Cac(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Cac::name() const
+{
+ return NAME;
+}
+
+static bool arrayToSet(const libcamera::YamlObject &params, std::vector<double> &inputArray, const Size &size)
+{
+	int num = 0;
+	int maxNum = (size.width + 1) * (size.height + 1);
+	inputArray.resize(maxNum);
+
+	for (const auto &p : params.asList()) {
+		if (num == maxNum)
+			return false;
+		inputArray[num++] = p.get<double>(0);
+	}
+
+	return num == maxNum;
+}
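+
+/*
+ * For illustration: if the hardware reported a cacRegions grid of 8x8, each
+ * LUT would have to supply exactly (8 + 1) * (8 + 1) = 81 values; shorter
+ * or longer lists are rejected.
+ */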
+
+static void setStrength(std::vector<double> &inputArray, std::vector<double> &outputArray,
+ double strengthFactor)
+{
+ int num = 0;
+ for (const auto &p : inputArray) {
+ outputArray[num++] = p * strengthFactor;
+ }
+}
+
+int Cac::read(const libcamera::YamlObject &params)
+{
+ config_.enabled = params.contains("lut_rx") && params.contains("lut_ry") &&
+ params.contains("lut_bx") && params.contains("lut_by");
+ if (!config_.enabled)
+ return 0;
+
+ const Size &size = getHardwareConfig().cacRegions;
+
+ if (!arrayToSet(params["lut_rx"], config_.lutRx, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_rx table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_ry"], config_.lutRy, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_ry table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_bx"], config_.lutBx, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_bx table";
+ return -EINVAL;
+ }
+
+ if (!arrayToSet(params["lut_by"], config_.lutBy, size)) {
+ LOG(RPiCac, Error) << "Bad CAC lut_by table";
+ return -EINVAL;
+ }
+
+ double strength = params["strength"].get<double>(1);
+ cacStatus_.lutRx = config_.lutRx;
+ cacStatus_.lutRy = config_.lutRy;
+ cacStatus_.lutBx = config_.lutBx;
+ cacStatus_.lutBy = config_.lutBy;
+ setStrength(config_.lutRx, cacStatus_.lutRx, strength);
+ setStrength(config_.lutBx, cacStatus_.lutBx, strength);
+ setStrength(config_.lutRy, cacStatus_.lutRy, strength);
+ setStrength(config_.lutBy, cacStatus_.lutBy, strength);
+
+ return 0;
+}
+
+void Cac::prepare(Metadata *imageMetadata)
+{
+ if (config_.enabled)
+ imageMetadata->set("cac.status", cacStatus_);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Cac(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/cac.h b/src/ipa/rpi/controller/rpi/cac.h
new file mode 100644
index 00000000..a7b14c00
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/cac.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * CAC control algorithm
+ */
+#pragma once
+
+#include "algorithm.h"
+#include "cac_status.h"
+
+namespace RPiController {
+
+struct CacConfig {
+ bool enabled;
+ std::vector<double> lutRx;
+ std::vector<double> lutRy;
+ std::vector<double> lutBx;
+ std::vector<double> lutBy;
+};
+
+class Cac : public Algorithm
+{
+public:
+ Cac(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ CacConfig config_;
+ CacStatus cacStatus_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp
new file mode 100644
index 00000000..8607f152
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/ccm.cpp
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "../awb_status.h"
+#include "../ccm_status.h"
+#include "../lux_status.h"
+#include "../metadata.h"
+
+#include "ccm.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiCcm)
+
+/*
+ * This algorithm selects a CCM (Colour Correction Matrix) according to the
+ * colour temperature estimated by AWB (interpolating between known matrices as
+ * necessary). Additionally the amount of colour saturation can be controlled
+ * both according to the current estimated lux level and according to a
+ * saturation setting that is exposed to applications.
+ */
+
+#define NAME "rpi.ccm"
+
+using Matrix3x3 = Matrix<double, 3, 3>;
+
+Ccm::Ccm(Controller *controller)
+ : CcmAlgorithm(controller), saturation_(1.0) {}
+
+char const *Ccm::name() const
+{
+ return NAME;
+}
+
+int Ccm::read(const libcamera::YamlObject &params)
+{
+ if (params.contains("saturation")) {
+ config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{});
+ if (config_.saturation.empty())
+ return -EINVAL;
+ }
+
+ for (auto &p : params["ccms"].asList()) {
+ auto value = p["ct"].get<double>();
+ if (!value)
+ return -EINVAL;
+
+ CtCcm ctCcm;
+ ctCcm.ct = *value;
+
+ auto ccm = p["ccm"].get<Matrix3x3>();
+ if (!ccm)
+ return -EINVAL;
+
+ ctCcm.ccm = *ccm;
+
+ if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) {
+ LOG(RPiCcm, Error)
+ << "CCM not in increasing colour temperature order";
+ return -EINVAL;
+ }
+
+ config_.ccms.push_back(std::move(ctCcm));
+ }
+
+ if (config_.ccms.empty()) {
+ LOG(RPiCcm, Error) << "No CCMs specified";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void Ccm::setSaturation(double saturation)
+{
+ saturation_ = saturation;
+}
+
+void Ccm::initialise()
+{
+}
+
+namespace {
+
+template<typename T>
+bool getLocked(Metadata *metadata, std::string const &tag, T &value)
+{
+ T *ptr = metadata->getLocked<T>(tag);
+ if (ptr == nullptr)
+ return false;
+ value = *ptr;
+ return true;
+}
+
+Matrix3x3 calculateCcm(std::vector<CtCcm> const &ccms, double ct)
+{
+ if (ct <= ccms.front().ct)
+ return ccms.front().ccm;
+ else if (ct >= ccms.back().ct)
+ return ccms.back().ccm;
+ else {
+ int i = 0;
+ for (; ct > ccms[i].ct; i++)
+ ;
+ double lambda =
+ (ct - ccms[i - 1].ct) / (ccms[i].ct - ccms[i - 1].ct);
+ return lambda * ccms[i].ccm + (1.0 - lambda) * ccms[i - 1].ccm;
+ }
+}
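+
+/*
+ * e.g. (invented numbers) with CCMs defined at 2800K and 5600K, a request
+ * for 4200K gives lambda = (4200 - 2800) / (5600 - 2800) = 0.5, i.e. the
+ * element-wise midpoint of the two matrices.
+ */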
+
+Matrix3x3 applySaturation(Matrix3x3 const &ccm, double saturation)
+{
+ static const Matrix3x3 RGB2Y({ 0.299, 0.587, 0.114,
+ -0.169, -0.331, 0.500,
+ 0.500, -0.419, -0.081 });
+
+ static const Matrix3x3 Y2RGB({ 1.000, 0.000, 1.402,
+ 1.000, -0.345, -0.714,
+ 1.000, 1.771, 0.000 });
+
+ Matrix3x3 S({ 1, 0, 0,
+ 0, saturation, 0,
+ 0, 0, saturation });
+
+ return Y2RGB * S * RGB2Y * ccm;
+}
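+
+/*
+ * For intuition: RGB2Y and Y2RGB are approximate RGB <-> YCbCr conversion
+ * matrices and S scales only the two chroma components, so saturation = 0.0
+ * collapses the matrix to monochrome output, while saturation = 1.0 leaves
+ * the CCM essentially unchanged (Y2RGB * S * RGB2Y ~= identity).
+ */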
+
+} /* namespace */
+
+void Ccm::prepare(Metadata *imageMetadata)
+{
+ bool awbOk = false, luxOk = false;
+ struct AwbStatus awb = {};
+ awb.temperatureK = 4000; /* in case no metadata */
+ struct LuxStatus lux = {};
+ lux.lux = 400; /* in case no metadata */
+ {
+ /* grab mutex just once to get everything */
+ std::lock_guard<Metadata> lock(*imageMetadata);
+ awbOk = getLocked(imageMetadata, "awb.status", awb);
+ luxOk = getLocked(imageMetadata, "lux.status", lux);
+ }
+ if (!awbOk)
+ LOG(RPiCcm, Warning) << "no colour temperature found";
+ if (!luxOk)
+ LOG(RPiCcm, Warning) << "no lux value found";
+ Matrix3x3 ccm = calculateCcm(config_.ccms, awb.temperatureK);
+ double saturation = saturation_;
+ struct CcmStatus ccmStatus;
+ ccmStatus.saturation = saturation;
+ if (!config_.saturation.empty())
+ saturation *= config_.saturation.eval(
+ config_.saturation.domain().clamp(lux.lux));
+ ccm = applySaturation(ccm, saturation);
+ for (int j = 0; j < 3; j++)
+ for (int i = 0; i < 3; i++)
+ ccmStatus.matrix[j * 3 + i] =
+ std::max(-8.0, std::min(7.9999, ccm[j][i]));
+ LOG(RPiCcm, Debug)
+ << "colour temperature " << awb.temperatureK << "K";
+ LOG(RPiCcm, Debug)
+ << "CCM: " << ccmStatus.matrix[0] << " " << ccmStatus.matrix[1]
+ << " " << ccmStatus.matrix[2] << " "
+ << ccmStatus.matrix[3] << " " << ccmStatus.matrix[4]
+ << " " << ccmStatus.matrix[5] << " "
+ << ccmStatus.matrix[6] << " " << ccmStatus.matrix[7]
+ << " " << ccmStatus.matrix[8];
+ imageMetadata->set("ccm.status", ccmStatus);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+	return (Algorithm *)new Ccm(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/ccm.h b/src/ipa/rpi/controller/rpi/ccm.h
new file mode 100644
index 00000000..c05dbb17
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/ccm.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * CCM (colour correction matrix) control algorithm
+ */
+#pragma once
+
+#include <vector>
+
+#include "libcamera/internal/matrix.h"
+#include <libipa/pwl.h>
+
+#include "../ccm_algorithm.h"
+
+namespace RPiController {
+
+/* Algorithm to calculate colour matrix. Should be placed after AWB. */
+
+struct CtCcm {
+ double ct;
+ libcamera::Matrix<double, 3, 3> ccm;
+};
+
+struct CcmConfig {
+ std::vector<CtCcm> ccms;
+ libcamera::ipa::Pwl saturation;
+};
+
+class Ccm : public CcmAlgorithm
+{
+public:
+ Ccm(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setSaturation(double saturation) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ CcmConfig config_;
+ double saturation_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp
new file mode 100644
index 00000000..fe866a54
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/contrast.cpp
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm
+ */
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "../contrast_status.h"
+#include "../histogram.h"
+
+#include "contrast.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiContrast)
+
+/*
+ * This is a simple control algorithm which uses the image histogram to adjust
+ * the gamma curve: it can pull the bottom of the histogram down (and push the
+ * top up) for some adaptive contrast enhancement, and then applies any
+ * externally requested brightness and contrast adjustments on top.
+ */
+
+#define NAME "rpi.contrast"
+
+Contrast::Contrast(Controller *controller)
+ : ContrastAlgorithm(controller), brightness_(0.0), contrast_(1.0)
+{
+}
+
+char const *Contrast::name() const
+{
+ return NAME;
+}
+
+int Contrast::read(const libcamera::YamlObject &params)
+{
+	/* enable adaptive enhancement by default */
+	config_.ceEnable = params["ce_enable"].get<int>(1);
+	ceEnable_ = config_.ceEnable;
+	/* the point near the bottom of the histogram to move */
+	config_.loHistogram = params["lo_histogram"].get<double>(0.01);
+	/* where in the range to try and move it to */
+	config_.loLevel = params["lo_level"].get<double>(0.015);
+	/* but don't move by more than this */
+	config_.loMax = params["lo_max"].get<double>(500);
+	/* equivalent values for the top of the histogram... */
+	config_.hiHistogram = params["hi_histogram"].get<double>(0.95);
+	config_.hiLevel = params["hi_level"].get<double>(0.95);
+	config_.hiMax = params["hi_max"].get<double>(2000);
+
+ config_.gammaCurve = params["gamma_curve"].get<ipa::Pwl>(ipa::Pwl{});
+ return config_.gammaCurve.empty() ? -EINVAL : 0;
+}
+
+void Contrast::setBrightness(double brightness)
+{
+ brightness_ = brightness;
+}
+
+void Contrast::setContrast(double contrast)
+{
+ contrast_ = contrast;
+}
+
+void Contrast::enableCe(bool enable)
+{
+ ceEnable_ = enable;
+}
+
+void Contrast::restoreCe()
+{
+ ceEnable_ = config_.ceEnable;
+}
+
+void Contrast::initialise()
+{
+ /*
+ * Fill in some default values as Prepare will run before Process gets
+ * called.
+ */
+ status_.brightness = brightness_;
+ status_.contrast = contrast_;
+ status_.gammaCurve = config_.gammaCurve;
+}
+
+void Contrast::prepare(Metadata *imageMetadata)
+{
+ imageMetadata->set("contrast.status", status_);
+}
+
+namespace {
+
+ipa::Pwl computeStretchCurve(Histogram const &histogram,
+ ContrastConfig const &config)
+{
+ ipa::Pwl enhance;
+ enhance.append(0, 0);
+ /*
+ * If the start of the histogram is rather empty, try to pull it down a
+ * bit.
+ */
+ double histLo = histogram.quantile(config.loHistogram) *
+ (65536 / histogram.bins());
+ double levelLo = config.loLevel * 65536;
+ LOG(RPiContrast, Debug)
+ << "Move histogram point " << histLo << " to " << levelLo;
+ histLo = std::max(levelLo,
+ std::min(65535.0, std::min(histLo, levelLo + config.loMax)));
+ LOG(RPiContrast, Debug)
+ << "Final values " << histLo << " -> " << levelLo;
+ enhance.append(histLo, levelLo);
+ /*
+ * Keep the mid-point (median) in the same place, though, to limit the
+ * apparent amount of global brightness shift.
+ */
+ double mid = histogram.quantile(0.5) * (65536 / histogram.bins());
+ enhance.append(mid, mid);
+
+ /*
+	 * If the top of the histogram is empty, try to pull the pixel values
+ * there up.
+ */
+ double histHi = histogram.quantile(config.hiHistogram) *
+ (65536 / histogram.bins());
+ double levelHi = config.hiLevel * 65536;
+ LOG(RPiContrast, Debug)
+ << "Move histogram point " << histHi << " to " << levelHi;
+ histHi = std::min(levelHi,
+ std::max(0.0, std::max(histHi, levelHi - config.hiMax)));
+ LOG(RPiContrast, Debug)
+ << "Final values " << histHi << " -> " << levelHi;
+ enhance.append(histHi, levelHi);
+ enhance.append(65535, 65535);
+ return enhance;
+}
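+
+/*
+ * A worked example with invented numbers: suppose the 1% quantile of the
+ * histogram sits at 3000 on the 16-bit scale and loLevel is 0.015, giving a
+ * target of 0.015 * 65536 ~= 983. Moving 3000 all the way down would exceed
+ * loMax (500), so the mapped point is clamped to 983 + 500 = 1483 and the
+ * curve maps 1483 -> 983, limiting how hard the histogram is stretched.
+ */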
+
+ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness,
+ double contrast)
+{
+ ipa::Pwl newGammaCurve;
+ LOG(RPiContrast, Debug)
+ << "Manual brightness " << brightness << " contrast " << contrast;
+ gammaCurve.map([&](double x, double y) {
+ newGammaCurve.append(
+ x, std::max(0.0, std::min(65535.0,
+ (y - 32768) * contrast +
+ 32768 + brightness)));
+ });
+ return newGammaCurve;
+}
+
+} /* namespace */
+
+void Contrast::process(StatisticsPtr &stats,
+ [[maybe_unused]] Metadata *imageMetadata)
+{
+ Histogram &histogram = stats->yHist;
+ /*
+ * We look at the histogram and adjust the gamma curve in the following
+ * ways: 1. Adjust the gamma curve so as to pull the start of the
+ * histogram down, and possibly push the end up.
+ */
+ ipa::Pwl gammaCurve = config_.gammaCurve;
+ if (ceEnable_) {
+ if (config_.loMax != 0 || config_.hiMax != 0)
+ gammaCurve = computeStretchCurve(histogram, config_).compose(gammaCurve);
+ /*
+ * We could apply other adjustments (e.g. partial equalisation)
+ * based on the histogram...?
+ */
+ }
+ /*
+ * 2. Finally apply any manually selected brightness/contrast
+ * adjustment.
+ */
+ if (brightness_ != 0 || contrast_ != 1.0)
+ gammaCurve = applyManualContrast(gammaCurve, brightness_, contrast_);
+	/* And fill in the status for output. */
+ status_.brightness = brightness_;
+ status_.contrast = contrast_;
+ status_.gammaCurve = std::move(gammaCurve);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Contrast(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/contrast.h b/src/ipa/rpi/controller/rpi/contrast.h
new file mode 100644
index 00000000..c0f7db98
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/contrast.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * contrast (gamma) control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include <libipa/pwl.h>
+
+#include "../contrast_algorithm.h"
+
+namespace RPiController {
+
+/*
+ * Back End algorithm to apply the desired gamma curve and contrast
+ * enhancement. Should be placed after Back End AWB.
+ */
+
+struct ContrastConfig {
+ bool ceEnable;
+ double loHistogram;
+ double loLevel;
+ double loMax;
+ double hiHistogram;
+ double hiLevel;
+ double hiMax;
+ libcamera::ipa::Pwl gammaCurve;
+};
+
+class Contrast : public ContrastAlgorithm
+{
+public:
+ Contrast(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void setBrightness(double brightness) override;
+ void setContrast(double contrast) override;
+ void enableCe(bool enable) override;
+ void restoreCe() override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+
+private:
+ ContrastConfig config_;
+ double brightness_;
+ double contrast_;
+ ContrastStatus status_;
+	bool ceEnable_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/denoise.cpp b/src/ipa/rpi/controller/rpi/denoise.cpp
new file mode 100644
index 00000000..ba851658
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/denoise.cpp
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Denoise (spatial, colour, temporal) control algorithm
+ */
+#include "denoise.h"
+
+#include <libcamera/base/log.h>
+
+#include "denoise_status.h"
+#include "noise_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiDenoise)
+
+// Calculate settings for the denoise blocks using the noise profile in
+// the image metadata.
+
+#define NAME "rpi.denoise"
+
+int DenoiseConfig::read(const libcamera::YamlObject &params)
+{
+ sdnEnable = params.contains("sdn");
+ if (sdnEnable) {
+ auto &sdnParams = params["sdn"];
+ sdnDeviation = sdnParams["deviation"].get<double>(3.2);
+ sdnStrength = sdnParams["strength"].get<double>(0.25);
+ sdnDeviation2 = sdnParams["deviation2"].get<double>(sdnDeviation);
+ sdnDeviationNoTdn = sdnParams["deviation_no_tdn"].get<double>(sdnDeviation);
+ sdnStrengthNoTdn = sdnParams["strength_no_tdn"].get<double>(sdnStrength);
+ sdnTdnBackoff = sdnParams["backoff"].get<double>(0.75);
+ }
+
+ cdnEnable = params.contains("cdn");
+ if (cdnEnable) {
+ auto &cdnParams = params["cdn"];
+ cdnDeviation = cdnParams["deviation"].get<double>(120);
+ cdnStrength = cdnParams["strength"].get<double>(0.2);
+ }
+
+ tdnEnable = params.contains("tdn");
+ if (tdnEnable) {
+ auto &tdnParams = params["tdn"];
+ tdnDeviation = tdnParams["deviation"].get<double>(0.5);
+ tdnThreshold = tdnParams["threshold"].get<double>(0.75);
+ } else if (sdnEnable) {
+ /*
+ * If SDN is enabled but TDN isn't, overwrite all the SDN settings
+ * with the "no TDN" versions. This makes it easier to enable or
+ * disable TDN in the tuning file without editing all the other
+ * parameters.
+ */
+ sdnDeviation = sdnDeviation2 = sdnDeviationNoTdn;
+ sdnStrength = sdnStrengthNoTdn;
+ }
+
+ return 0;
+}
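+
+/*
+ * A sketch of the expected tuning structure (field values invented):
+ *
+ *     "rpi.denoise": {
+ *         "normal": {
+ *             "sdn": { "deviation": 1.6, "strength": 0.5 },
+ *             "cdn": { "deviation": 200, "strength": 0.3 },
+ *             "tdn": { "deviation": 0.8, "threshold": 0.5 }
+ *         }
+ *     }
+ */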
+
+Denoise::Denoise(Controller *controller)
+ : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourHighQuality)
+{
+}
+
+char const *Denoise::name() const
+{
+ return NAME;
+}
+
+int Denoise::read(const libcamera::YamlObject &params)
+{
+ if (!params.contains("normal")) {
+ configs_["normal"].read(params);
+ currentConfig_ = &configs_["normal"];
+
+ return 0;
+ }
+
+ for (const auto &[key, value] : params.asDict()) {
+ if (configs_[key].read(value)) {
+ LOG(RPiDenoise, Error) << "Failed to read denoise config " << key;
+ return -EINVAL;
+ }
+ }
+
+ auto it = configs_.find("normal");
+ if (it == configs_.end()) {
+ LOG(RPiDenoise, Error) << "No normal denoise settings found";
+ return -EINVAL;
+ }
+ currentConfig_ = &it->second;
+
+ return 0;
+}
+
+void Denoise::initialise()
+{
+}
+
+void Denoise::switchMode([[maybe_unused]] CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /* A mode switch effectively resets temporal denoise and it has to start over. */
+ currentSdnDeviation_ = currentConfig_->sdnDeviationNoTdn;
+ currentSdnStrength_ = currentConfig_->sdnStrengthNoTdn;
+ currentSdnDeviation2_ = currentConfig_->sdnDeviationNoTdn;
+}
+
+void Denoise::prepare(Metadata *imageMetadata)
+{
+ struct NoiseStatus noiseStatus = {};
+ noiseStatus.noiseSlope = 3.0; // in case no metadata
+ if (imageMetadata->get("noise.status", noiseStatus) != 0)
+ LOG(RPiDenoise, Warning) << "no noise profile found";
+
+ LOG(RPiDenoise, Debug)
+ << "Noise profile: constant " << noiseStatus.noiseConstant
+ << " slope " << noiseStatus.noiseSlope;
+
+ if (mode_ == DenoiseMode::Off)
+ return;
+
+ if (currentConfig_->sdnEnable) {
+ struct SdnStatus sdn;
+ sdn.noiseConstant = noiseStatus.noiseConstant * currentSdnDeviation_;
+ sdn.noiseSlope = noiseStatus.noiseSlope * currentSdnDeviation_;
+ sdn.noiseConstant2 = noiseStatus.noiseConstant * currentConfig_->sdnDeviation2;
+ sdn.noiseSlope2 = noiseStatus.noiseSlope * currentSdnDeviation2_;
+ sdn.strength = currentSdnStrength_;
+ imageMetadata->set("sdn.status", sdn);
+ LOG(RPiDenoise, Debug)
+ << "const " << sdn.noiseConstant
+ << " slope " << sdn.noiseSlope
+ << " str " << sdn.strength
+ << " const2 " << sdn.noiseConstant2
+ << " slope2 " << sdn.noiseSlope2;
+
+ /* For the next frame, we back off the SDN parameters as TDN ramps up. */
+ double f = currentConfig_->sdnTdnBackoff;
+ currentSdnDeviation_ = f * currentSdnDeviation_ + (1 - f) * currentConfig_->sdnDeviation;
+ currentSdnStrength_ = f * currentSdnStrength_ + (1 - f) * currentConfig_->sdnStrength;
+ currentSdnDeviation2_ = f * currentSdnDeviation2_ + (1 - f) * currentConfig_->sdnDeviation2;
+ }
+
+ if (currentConfig_->tdnEnable) {
+ struct TdnStatus tdn;
+ tdn.noiseConstant = noiseStatus.noiseConstant * currentConfig_->tdnDeviation;
+ tdn.noiseSlope = noiseStatus.noiseSlope * currentConfig_->tdnDeviation;
+ tdn.threshold = currentConfig_->tdnThreshold;
+ imageMetadata->set("tdn.status", tdn);
+ LOG(RPiDenoise, Debug)
+ << "programmed tdn threshold " << tdn.threshold
+ << " constant " << tdn.noiseConstant
+ << " slope " << tdn.noiseSlope;
+ }
+
+ if (currentConfig_->cdnEnable && mode_ != DenoiseMode::ColourOff) {
+ struct CdnStatus cdn;
+ cdn.threshold = currentConfig_->cdnDeviation * noiseStatus.noiseSlope + noiseStatus.noiseConstant;
+ cdn.strength = currentConfig_->cdnStrength;
+ imageMetadata->set("cdn.status", cdn);
+ LOG(RPiDenoise, Debug)
+ << "programmed cdn threshold " << cdn.threshold
+ << " strength " << cdn.strength;
+ }
+}
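+
+/*
+ * Illustrating the backoff above: with the default backoff factor of 0.75,
+ * each frame moves the live SDN deviation and strength 25% of the way from
+ * their "no TDN" starting points towards their steady-state values, so
+ * spatial denoise relaxes smoothly as temporal denoise accumulates frames.
+ */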
+
+void Denoise::setMode(DenoiseMode mode)
+{
+ // We only distinguish between off and all other modes.
+ mode_ = mode;
+}
+
+void Denoise::setConfig(std::string const &name)
+{
+ auto it = configs_.find(name);
+ if (it == configs_.end()) {
+ /*
+ * Some platforms may have no need for different denoise settings, so we only issue
+ * a warning if there clearly are several configurations.
+ */
+ if (configs_.size() > 1)
+ LOG(RPiDenoise, Warning) << "No denoise config found for " << name;
+ else
+ LOG(RPiDenoise, Debug) << "No denoise config found for " << name;
+ } else
+ currentConfig_ = &it->second;
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Denoise(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/denoise.h b/src/ipa/rpi/controller/rpi/denoise.h
new file mode 100644
index 00000000..92ff4f93
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/denoise.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Denoise (spatial, colour, temporal) control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "algorithm.h"
+#include "denoise_algorithm.h"
+
+namespace RPiController {
+
+// Algorithm to calculate correct denoise settings.
+
+struct DenoiseConfig {
+ double sdnDeviation;
+ double sdnStrength;
+ double sdnDeviation2;
+ double sdnDeviationNoTdn;
+ double sdnStrengthNoTdn;
+ double sdnTdnBackoff;
+ double cdnDeviation;
+ double cdnStrength;
+ double tdnDeviation;
+ double tdnThreshold;
+ bool tdnEnable;
+ bool sdnEnable;
+ bool cdnEnable;
+ int read(const libcamera::YamlObject &params);
+};
+
+class Denoise : public DenoiseAlgorithm
+{
+public:
+ Denoise(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ void prepare(Metadata *imageMetadata) override;
+ void setMode(DenoiseMode mode) override;
+ void setConfig(std::string const &name) override;
+
+private:
+ std::map<std::string, DenoiseConfig> configs_;
+ DenoiseConfig *currentConfig_;
+ DenoiseMode mode_;
+
+ /* SDN parameters attenuate over time if TDN is running. */
+ double currentSdnDeviation_;
+ double currentSdnStrength_;
+ double currentSdnDeviation2_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/dpc.cpp b/src/ipa/rpi/controller/rpi/dpc.cpp
new file mode 100644
index 00000000..8aac03f7
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/dpc.cpp
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "dpc.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiDpc)
+
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
+
+#define NAME "rpi.dpc"
+
+Dpc::Dpc(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Dpc::name() const
+{
+ return NAME;
+}
+
+int Dpc::read(const libcamera::YamlObject &params)
+{
+ config_.strength = params["strength"].get<int>(1);
+ if (config_.strength < 0 || config_.strength > 2) {
+ LOG(RPiDpc, Error) << "Bad strength value";
+ return -EINVAL;
+ }
+
+ return 0;
+}
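+
+/*
+ * A minimal tuning-file fragment exercising the above (the strength value is
+ * hypothetical, shown only for illustration):
+ *
+ *     {
+ *         "rpi.dpc": { "strength": 2 }
+ *     }
+ *
+ * Omitting "strength" selects the default of 1; values outside 0-2 make
+ * read() fail with -EINVAL.
+ */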
+
+void Dpc::prepare(Metadata *imageMetadata)
+{
+ DpcStatus dpcStatus = {};
+ /* Should we vary this with lux level or analogue gain? TBD. */
+ dpcStatus.strength = config_.strength;
+ LOG(RPiDpc, Debug) << "strength " << dpcStatus.strength;
+ imageMetadata->set("dpc.status", dpcStatus);
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Dpc(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/dpc.h b/src/ipa/rpi/controller/rpi/dpc.h
new file mode 100644
index 00000000..9cefb06d
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/dpc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * DPC (defective pixel correction) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../dpc_status.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate DPC (defective pixel correction) settings. */
+
+struct DpcConfig {
+ int strength;
+};
+
+class Dpc : public Algorithm
+{
+public:
+ Dpc(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ DpcConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/focus.h b/src/ipa/rpi/controller/rpi/focus.h
new file mode 100644
index 00000000..ee014be9
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/focus.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * focus algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../metadata.h"
+
+/*
+ * The "focus" algorithm. All it does is print out a version of the
+ * focus contrast measure; there is no actual auto-focus mechanism to
+ * control.
+ */
+
+namespace RPiController {
+
+class Focus : public Algorithm
+{
+public:
+ Focus(Controller *controller);
+ char const *name() const override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/geq.cpp b/src/ipa/rpi/controller/rpi/geq.cpp
new file mode 100644
index 00000000..40e7191b
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/geq.cpp
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+#include "../lux_status.h"
+
+#include "geq.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiGeq)
+
+/*
+ * We use the lux status so that we can apply stronger settings in darkness (if
+ * necessary).
+ */
+
+#define NAME "rpi.geq"
+
+Geq::Geq(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Geq::name() const
+{
+ return NAME;
+}
+
+int Geq::read(const libcamera::YamlObject &params)
+{
+ config_.offset = params["offset"].get<uint16_t>(0);
+ config_.slope = params["slope"].get<double>(0.0);
+ if (config_.slope < 0.0 || config_.slope >= 1.0) {
+ LOG(RPiGeq, Error) << "Bad slope value";
+ return -EINVAL;
+ }
+
+ if (params.contains("strength")) {
+ config_.strength = params["strength"].get<ipa::Pwl>(ipa::Pwl{});
+ if (config_.strength.empty())
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void Geq::prepare(Metadata *imageMetadata)
+{
+ LuxStatus luxStatus = {};
+ luxStatus.lux = 400;
+ if (imageMetadata->get("lux.status", luxStatus))
+ LOG(RPiGeq, Warning) << "no lux data found";
+ DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* in case not found */
+ if (imageMetadata->get("device.status", deviceStatus))
+ LOG(RPiGeq, Warning)
+ << "no device metadata - use analogue gain of 1x";
+ GeqStatus geqStatus = {};
+ double strength = config_.strength.empty()
+ ? 1.0
+ : config_.strength.eval(config_.strength.domain().clamp(luxStatus.lux));
+ strength *= deviceStatus.analogueGain;
+ double offset = config_.offset * strength;
+ double slope = config_.slope * strength;
+ geqStatus.offset = std::min(65535.0, std::max(0.0, offset));
+ geqStatus.slope = std::min(.99999, std::max(0.0, slope));
+ LOG(RPiGeq, Debug)
+ << "offset " << geqStatus.offset << " slope "
+ << geqStatus.slope << " (analogue gain "
+ << deviceStatus.analogueGain << " lux "
+ << luxStatus.lux << ")";
+ imageMetadata->set("geq.status", geqStatus);
+}
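+
+/*
+ * A worked example using the imx219 tuning values from this series (offset
+ * 204, slope 0.01633, no "strength" curve, so strength starts at 1.0): a
+ * frame captured at 4x analogue gain gets
+ *
+ *     offset = 204 * 4.0 = 816
+ *     slope  = 0.01633 * 4.0 = 0.06532
+ *
+ * before the clamps to [0, 65535] and [0, 0.99999] respectively. A
+ * "strength" Pwl would additionally scale both by a lux-dependent factor.
+ */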
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Geq(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/geq.h b/src/ipa/rpi/controller/rpi/geq.h
new file mode 100644
index 00000000..e8b9f427
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/geq.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * GEQ (green equalisation) control algorithm
+ */
+#pragma once
+
+#include <libipa/pwl.h>
+
+#include "../algorithm.h"
+#include "../geq_status.h"
+
+namespace RPiController {
+
+/* Back End algorithm to apply appropriate GEQ settings. */
+
+struct GeqConfig {
+ uint16_t offset;
+ double slope;
+ libcamera::ipa::Pwl strength; /* lux to strength factor */
+};
+
+class Geq : public Algorithm
+{
+public:
+ Geq(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ GeqConfig config_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/hdr.cpp b/src/ipa/rpi/controller/rpi/hdr.cpp
new file mode 100644
index 00000000..f3da8291
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/hdr.cpp
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * HDR control algorithm
+ */
+
+#include "hdr.h"
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include "../agc_status.h"
+#include "../alsc_status.h"
+#include "../stitch_status.h"
+#include "../tonemap_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiHdr)
+
+#define NAME "rpi.hdr"
+
+void HdrConfig::read(const libcamera::YamlObject &params, const std::string &modeName)
+{
+ name = modeName;
+
+ if (!params.contains("cadence"))
+ LOG(RPiHdr, Fatal) << "No cadence for HDR mode " << name;
+ cadence = params["cadence"].getList<unsigned int>().value();
+ if (cadence.empty())
+ LOG(RPiHdr, Fatal) << "Empty cadence in HDR mode " << name;
+
+ /*
+ * In the JSON file it's easier to use the channel name as the key, but
+ * for us it's convenient to swap them over.
+ */
+ for (const auto &[k, v] : params["channel_map"].asDict())
+ channelMap[v.get<unsigned int>().value()] = k;
+
+ /* Lens shading related parameters. */
+ if (params.contains("spatial_gain_curve")) {
+ spatialGainCurve = params["spatial_gain_curve"].get<ipa::Pwl>(ipa::Pwl{});
+ } else if (params.contains("spatial_gain")) {
+ double spatialGain = params["spatial_gain"].get<double>(2.0);
+ spatialGainCurve.append(0.0, spatialGain);
+ spatialGainCurve.append(0.01, spatialGain);
+ spatialGainCurve.append(0.06, 1.0); /* maybe make this programmable? */
+ spatialGainCurve.append(1.0, 1.0);
+ }
+
+ diffusion = params["diffusion"].get<unsigned int>(3);
+ /* Clip to an arbitrary limit just to stop typos from killing the system! */
+ const unsigned int MAX_DIFFUSION = 15;
+ if (diffusion > MAX_DIFFUSION) {
+ diffusion = MAX_DIFFUSION;
+ LOG(RPiHdr, Warning) << "Diffusion value clipped to " << MAX_DIFFUSION;
+ }
+
+ /* Read any tonemap parameters. */
+ tonemapEnable = params["tonemap_enable"].get<int>(0);
+ detailConstant = params["detail_constant"].get<uint16_t>(0);
+ detailSlope = params["detail_slope"].get<double>(0.0);
+ iirStrength = params["iir_strength"].get<double>(8.0);
+ strength = params["strength"].get<double>(1.5);
+ if (tonemapEnable)
+ tonemap = params["tonemap"].get<ipa::Pwl>(ipa::Pwl{});
+ speed = params["speed"].get<double>(1.0);
+ if (params.contains("hi_quantile_targets")) {
+ hiQuantileTargets = params["hi_quantile_targets"].getList<double>().value();
+ if (hiQuantileTargets.empty() || hiQuantileTargets.size() % 2)
+			LOG(RPiHdr, Fatal) << "hi_quantile_targets must be even and non-empty";
+ } else
+ hiQuantileTargets = { 0.95, 0.65, 0.5, 0.28, 0.3, 0.25 };
+ hiQuantileMaxGain = params["hi_quantile_max_gain"].get<double>(1.6);
+ if (params.contains("quantile_targets")) {
+ quantileTargets = params["quantile_targets"].getList<double>().value();
+ if (quantileTargets.empty() || quantileTargets.size() % 2)
+			LOG(RPiHdr, Fatal) << "quantile_targets must be even and non-empty";
+ } else
+ quantileTargets = { 0.2, 0.03, 1.0, 0.15 };
+ powerMin = params["power_min"].get<double>(0.65);
+ powerMax = params["power_max"].get<double>(1.0);
+ if (params.contains("contrast_adjustments")) {
+ contrastAdjustments = params["contrast_adjustments"].getList<double>().value();
+ } else
+ contrastAdjustments = { 0.5, 0.75 };
+
+ /* Read any stitch parameters. */
+ stitchEnable = params["stitch_enable"].get<int>(0);
+ thresholdLo = params["threshold_lo"].get<uint16_t>(50000);
+ motionThreshold = params["motion_threshold"].get<double>(0.005);
+ diffPower = params["diff_power"].get<uint8_t>(13);
+ if (diffPower > 15)
+ LOG(RPiHdr, Fatal) << "Bad diff_power value in HDR mode " << name;
+}
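+
+/*
+ * For illustration, a hypothetical tuning-file entry that HdrConfig::read()
+ * above would accept (the mode name, cadence and channel numbers are made
+ * up):
+ *
+ *     "MultiExposure":
+ *     {
+ *         "cadence": [ 1, 2 ],
+ *         "channel_map": { "short": 1, "long": 2 },
+ *         "stitch_enable": 1,
+ *         "tonemap_enable": 1
+ *     }
+ *
+ * The resulting channelMap is { 1: "short", 2: "long" }, the inverse of the
+ * JSON dictionary, so the channel name for a frame's AGC channel number can
+ * be looked up directly.
+ */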
+
+Hdr::Hdr(Controller *controller)
+ : HdrAlgorithm(controller)
+{
+ regions_ = controller->getHardwareConfig().awbRegions;
+ numRegions_ = regions_.width * regions_.height;
+ gains_[0].resize(numRegions_, 1.0);
+ gains_[1].resize(numRegions_, 1.0);
+}
+
+char const *Hdr::name() const
+{
+ return NAME;
+}
+
+int Hdr::read(const libcamera::YamlObject &params)
+{
+ /* Make an "HDR off" mode by default so that tuning files don't have to. */
+ HdrConfig &offMode = config_["Off"];
+ offMode.name = "Off";
+ offMode.cadence = { 0 };
+ offMode.channelMap[0] = "None";
+ status_.mode = offMode.name;
+ delayedStatus_.mode = offMode.name;
+
+ /*
+ * But we still allow the tuning file to override the "Off" mode if it wants.
+ * For example, maybe an application will make channel 0 be the "short"
+ * channel, in order to apply other AGC controls to it.
+ */
+ for (const auto &[key, value] : params.asDict())
+ config_[key].read(value, key);
+
+ return 0;
+}
+
+int Hdr::setMode(std::string const &mode)
+{
+ /* Always validate the mode, so it can be used later without checking. */
+ auto it = config_.find(mode);
+ if (it == config_.end()) {
+ LOG(RPiHdr, Warning) << "No such HDR mode " << mode;
+ return -1;
+ }
+
+ status_.mode = it->second.name;
+
+ return 0;
+}
+
+std::vector<unsigned int> Hdr::getChannels() const
+{
+ return config_.at(status_.mode).cadence;
+}
+
+void Hdr::updateAgcStatus(Metadata *metadata)
+{
+ std::scoped_lock lock(*metadata);
+ AgcStatus *agcStatus = metadata->getLocked<AgcStatus>("agc.status");
+ if (agcStatus) {
+ HdrConfig &hdrConfig = config_[status_.mode];
+ auto it = hdrConfig.channelMap.find(agcStatus->channel);
+ if (it != hdrConfig.channelMap.end()) {
+ status_.channel = it->second;
+ agcStatus->hdr = status_;
+ } else
+ LOG(RPiHdr, Warning) << "Channel " << agcStatus->channel
+ << " not found in mode " << status_.mode;
+ } else
+ LOG(RPiHdr, Warning) << "No agc.status found";
+}
+
+void Hdr::switchMode([[maybe_unused]] CameraMode const &cameraMode, Metadata *metadata)
+{
+ updateAgcStatus(metadata);
+ delayedStatus_ = status_;
+}
+
+void Hdr::prepare(Metadata *imageMetadata)
+{
+ AgcStatus agcStatus;
+ if (!imageMetadata->get<AgcStatus>("agc.delayed_status", agcStatus))
+ delayedStatus_ = agcStatus.hdr;
+
+ auto it = config_.find(delayedStatus_.mode);
+ if (it == config_.end()) {
+ /* Shouldn't be possible. There would be nothing we could do. */
+ LOG(RPiHdr, Warning) << "Unexpected HDR mode " << delayedStatus_.mode;
+ return;
+ }
+
+ HdrConfig &config = it->second;
+ if (config.spatialGainCurve.empty())
+ return;
+
+ AlscStatus alscStatus{}; /* some compilers seem to require the braces */
+ if (imageMetadata->get<AlscStatus>("alsc.status", alscStatus)) {
+ LOG(RPiHdr, Warning) << "No ALSC status";
+ return;
+ }
+
+ /* The final gains ended up in the odd or even array, according to diffusion. */
+ std::vector<double> &gains = gains_[config.diffusion & 1];
+ for (unsigned int i = 0; i < numRegions_; i++) {
+ alscStatus.r[i] *= gains[i];
+ alscStatus.g[i] *= gains[i];
+ alscStatus.b[i] *= gains[i];
+ }
+ imageMetadata->set("alsc.status", alscStatus);
+}
+
+bool Hdr::updateTonemap([[maybe_unused]] StatisticsPtr &stats, HdrConfig &config)
+{
+ /* When there's a change of HDR mode we start over with a new tonemap curve. */
+ if (delayedStatus_.mode != previousMode_) {
+ previousMode_ = delayedStatus_.mode;
+ tonemap_ = ipa::Pwl();
+ }
+
+ /* No tonemapping. No need to output a tonemap.status. */
+ if (!config.tonemapEnable)
+ return false;
+
+ /* If an explicit tonemap was given, use it. */
+ if (!config.tonemap.empty()) {
+ tonemap_ = config.tonemap;
+ return true;
+ }
+
+ /*
+	 * We don't update the tonemap on short frames when in multi-exposure mode, but
+ * we still need to output the most recent tonemap. Possibly we should make the
+ * config indicate the channels for which we should update the tonemap?
+ */
+ if (delayedStatus_.mode == "MultiExposure" && delayedStatus_.channel != "short")
+ return true;
+
+ /*
+ * Create a tonemap dynamically. We have three ingredients.
+ *
+ * 1. We have a list of "hi quantiles" and "targets". We use these to judge if
+ * the image does seem to be reasonably saturated. If it isn't, we calculate
+ * a gain that we will feed as a linear factor into the tonemap generation.
+	 * This prevents unsaturated images from becoming quite so "flat".
+ *
+ * 2. We have a list of quantile/target pairs for the bottom of the histogram.
+ * We use these to calculate how much gain we must apply to the bottom of the
+ * tonemap. We apply this gain as a power curve so as not to blow out the top
+ * end.
+ *
+ * 3. Finally, when we generate the tonemap, we have some contrast adjustments
+ * for the bottom because we know that power curves can start quite steeply and
+ * cause a washed-out look.
+ */
+
+ /* Compute the linear gain from the headroom for saturation at the top. */
+ double gain = 10; /* arbitrary, but hiQuantileMaxGain will clamp it later */
+ for (unsigned int i = 0; i < config.hiQuantileTargets.size(); i += 2) {
+ double quantile = config.hiQuantileTargets[i];
+ double target = config.hiQuantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(quantile, 1.0) / 1024.0;
+ double newGain = target / (value + 0.01);
+ gain = std::min(gain, newGain);
+ }
+ gain = std::clamp(gain, 1.0, config.hiQuantileMaxGain);
+
+ /* Compute the power curve from the amount of gain needed at the bottom. */
+	double minPower = 2; /* arbitrary, but config.powerMax will clamp it later */
+ for (unsigned int i = 0; i < config.quantileTargets.size(); i += 2) {
+ double quantile = config.quantileTargets[i];
+ double target = config.quantileTargets[i + 1];
+ double value = stats->yHist.interQuantileMean(0, quantile) / 1024.0;
+ value = std::min(value * gain, 1.0);
+ double power = log(target + 1e-6) / log(value + 1e-6);
+		minPower = std::min(minPower, power);
+ }
+	double power = std::clamp(minPower, config.powerMin, config.powerMax);
+
+ /* Generate the tonemap, including the contrast adjustment factors. */
+ libcamera::ipa::Pwl tonemap;
+ tonemap.append(0, 0);
+ for (unsigned int i = 0; i <= 6; i++) {
+ double x = 1 << (i + 9); /* x loops from 512 to 32768 inclusive */
+ double y = pow(std::min(x * gain, 65535.0) / 65536.0, power) * 65536;
+ if (i < config.contrastAdjustments.size())
+ y *= config.contrastAdjustments[i];
+ if (!tonemap_.empty())
+ y = y * config.speed + tonemap_.eval(x) * (1 - config.speed);
+ tonemap.append(x, y);
+ }
+ tonemap.append(65535, 65535);
+ tonemap_ = tonemap;
+
+ return true;
+}
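+
+/*
+ * A worked example of the dynamic tonemap, with hypothetical histogram
+ * values: take quantileTargets = { 0.2, 0.03 }, powerMin = 0.65 and
+ * powerMax = 1.0. If the mean of the bottom 20% of the luminance histogram
+ * is 0.005 (after the linear gain), then
+ *
+ *     power = log(0.03) / log(0.005) ~= 0.66
+ *
+ * which lies inside [powerMin, powerMax] and is used as-is; the curve
+ * y = x^0.66 lifts those shadows exactly to the 0.03 target. Had the bottom
+ * of the histogram already been at or above the target, the computed power
+ * would exceed 1.0 and be clamped, applying no extra lift.
+ */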
+
+static void averageGains(std::vector<double> &src, std::vector<double> &dst, const Size &size)
+{
+#define IDX(y, x) ((y)*size.width + (x))
+ unsigned int lastCol = size.width - 1; /* index of last column */
+ unsigned int preLastCol = lastCol - 1; /* and the column before that */
+ unsigned int lastRow = size.height - 1; /* index of last row */
+ unsigned int preLastRow = lastRow - 1; /* and the row before that */
+
+ /* Corners first. */
+ dst[IDX(0, 0)] = (src[IDX(0, 0)] + src[IDX(0, 1)] + src[IDX(1, 0)]) / 3;
+ dst[IDX(0, lastCol)] = (src[IDX(0, lastCol)] + src[IDX(0, preLastCol)] + src[IDX(1, lastCol)]) / 3;
+ dst[IDX(lastRow, 0)] = (src[IDX(lastRow, 0)] + src[IDX(lastRow, 1)] + src[IDX(preLastRow, 0)]) / 3;
+ dst[IDX(lastRow, lastCol)] = (src[IDX(lastRow, lastCol)] + src[IDX(lastRow, preLastCol)] +
+ src[IDX(preLastRow, lastCol)]) /
+ 3;
+
+ /* Now the edges. */
+ for (unsigned int i = 1; i < lastCol; i++) {
+ dst[IDX(0, i)] = (src[IDX(0, i - 1)] + src[IDX(0, i)] + src[IDX(0, i + 1)] + src[IDX(1, i)]) / 4;
+ dst[IDX(lastRow, i)] = (src[IDX(lastRow, i - 1)] + src[IDX(lastRow, i)] +
+ src[IDX(lastRow, i + 1)] + src[IDX(preLastRow, i)]) /
+ 4;
+ }
+
+ for (unsigned int i = 1; i < lastRow; i++) {
+ dst[IDX(i, 0)] = (src[IDX(i - 1, 0)] + src[IDX(i, 0)] + src[IDX(i + 1, 0)] + src[IDX(i, 1)]) / 4;
+		dst[IDX(i, lastCol)] = (src[IDX(i - 1, lastCol)] + src[IDX(i, lastCol)] +
+ src[IDX(i + 1, lastCol)] + src[IDX(i, preLastCol)]) /
+ 4;
+ }
+
+ /* Finally the interior. */
+ for (unsigned int j = 1; j < lastRow; j++) {
+ for (unsigned int i = 1; i < lastCol; i++) {
+ dst[IDX(j, i)] = (src[IDX(j - 1, i)] + src[IDX(j, i - 1)] + src[IDX(j, i)] +
+ src[IDX(j, i + 1)] + src[IDX(j + 1, i)]) /
+ 5;
+ }
+ }
+}
+
+void Hdr::updateGains(StatisticsPtr &stats, HdrConfig &config)
+{
+ if (config.spatialGainCurve.empty())
+ return;
+
+ /* When alternating exposures, only compute these gains for the short frame. */
+ if (delayedStatus_.mode == "MultiExposure" && delayedStatus_.channel != "short")
+ return;
+
+ for (unsigned int i = 0; i < numRegions_; i++) {
+ auto &region = stats->awbRegions.get(i);
+ unsigned int counted = region.counted;
+ counted += (counted == 0); /* avoid div by zero */
+ double r = region.val.rSum / counted;
+ double g = region.val.gSum / counted;
+ double b = region.val.bSum / counted;
+ double brightness = std::max({ r, g, b }) / 65535;
+ gains_[0][i] = config.spatialGainCurve.eval(brightness);
+ }
+
+ /* Ping-pong between the two gains_ buffers. */
+ for (unsigned int i = 0; i < config.diffusion; i++)
+ averageGains(gains_[i & 1], gains_[(i & 1) ^ 1], regions_);
+}
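+
+/*
+ * A note on the ping-pong above: iteration i reads gains_[i & 1] and writes
+ * gains_[(i & 1) ^ 1], so with, say, diffusion = 3 the data flows
+ * gains_[0] -> gains_[1] -> gains_[0] -> gains_[1], leaving the final
+ * result in gains_[diffusion & 1] = gains_[1], which is exactly the buffer
+ * that prepare() reads when scaling the ALSC tables.
+ */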
+
+void Hdr::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ /* Note what HDR channel this frame will be once it comes back to us. */
+ updateAgcStatus(imageMetadata);
+
+ /*
+ * Now figure out what HDR channel this frame is. It should be available in the
+ * agc.delayed_status, unless this is an early frame after a mode switch, in which
+ * case delayedStatus_ should be right.
+ */
+ AgcStatus agcStatus;
+ if (!imageMetadata->get<AgcStatus>("agc.delayed_status", agcStatus))
+ delayedStatus_ = agcStatus.hdr;
+
+ auto it = config_.find(delayedStatus_.mode);
+ if (it == config_.end()) {
+ /* Shouldn't be possible. There would be nothing we could do. */
+ LOG(RPiHdr, Warning) << "Unexpected HDR mode " << delayedStatus_.mode;
+ return;
+ }
+
+ HdrConfig &config = it->second;
+
+ /* Update the spatially varying gains. They get written in prepare(). */
+ updateGains(stats, config);
+
+ if (updateTonemap(stats, config)) {
+ /* Add tonemap.status metadata. */
+ TonemapStatus tonemapStatus;
+
+ tonemapStatus.detailConstant = config.detailConstant;
+ tonemapStatus.detailSlope = config.detailSlope;
+ tonemapStatus.iirStrength = config.iirStrength;
+ tonemapStatus.strength = config.strength;
+ tonemapStatus.tonemap = tonemap_;
+
+ imageMetadata->set("tonemap.status", tonemapStatus);
+ }
+
+ if (config.stitchEnable) {
+ /* Add stitch.status metadata. */
+ StitchStatus stitchStatus;
+
+ stitchStatus.diffPower = config.diffPower;
+ stitchStatus.motionThreshold = config.motionThreshold;
+ stitchStatus.thresholdLo = config.thresholdLo;
+
+ imageMetadata->set("stitch.status", stitchStatus);
+ }
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Hdr(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/hdr.h b/src/ipa/rpi/controller/rpi/hdr.h
new file mode 100644
index 00000000..5c2f3988
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/hdr.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Raspberry Pi Ltd
+ *
+ * HDR control algorithm
+ */
+#pragma once
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include <libcamera/geometry.h>
+
+#include <libipa/pwl.h>
+
+#include "../hdr_algorithm.h"
+#include "../hdr_status.h"
+
+/* This is our implementation of an HDR algorithm. */
+
+namespace RPiController {
+
+struct HdrConfig {
+ std::string name;
+ std::vector<unsigned int> cadence;
+ std::map<unsigned int, std::string> channelMap;
+
+ /* Lens shading related parameters. */
+ libcamera::ipa::Pwl spatialGainCurve; /* Brightness to gain curve for different image regions. */
+ unsigned int diffusion; /* How much to diffuse the gain spatially. */
+
+ /* Tonemap related parameters. */
+ bool tonemapEnable;
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ libcamera::ipa::Pwl tonemap;
+ /* These relate to adaptive tonemap calculation. */
+ double speed;
+ std::vector<double> hiQuantileTargets; /* quantiles to check for unsaturated images */
+ double hiQuantileMaxGain; /* the max gain we'll apply when unsaturated */
+ std::vector<double> quantileTargets; /* target values for histogram quantiles */
+ double powerMin; /* minimum tonemap power */
+ double powerMax; /* maximum tonemap power */
+ std::vector<double> contrastAdjustments; /* any contrast adjustment factors */
+
+ /* Stitch related parameters. */
+ bool stitchEnable;
+ uint16_t thresholdLo;
+ uint8_t diffPower;
+ double motionThreshold;
+
+ void read(const libcamera::YamlObject &params, const std::string &name);
+};
+
+class Hdr : public HdrAlgorithm
+{
+public:
+ Hdr(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ int setMode(std::string const &mode) override;
+ std::vector<unsigned int> getChannels() const override;
+
+private:
+ void updateAgcStatus(Metadata *metadata);
+ void updateGains(StatisticsPtr &stats, HdrConfig &config);
+ bool updateTonemap(StatisticsPtr &stats, HdrConfig &config);
+
+ std::map<std::string, HdrConfig> config_;
+ HdrStatus status_; /* track the current HDR mode and channel */
+ HdrStatus delayedStatus_; /* track the delayed HDR mode and channel */
+ std::string previousMode_;
+ libcamera::ipa::Pwl tonemap_;
+ libcamera::Size regions_; /* stats regions */
+ unsigned int numRegions_; /* total number of stats regions */
+ std::vector<double> gains_[2];
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp
new file mode 100644
index 00000000..27b89a8f
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/lux.cpp
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm
+ */
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+
+#include "lux.h"
+
+using namespace RPiController;
+using namespace libcamera;
+using namespace std::literals::chrono_literals;
+
+LOG_DEFINE_CATEGORY(RPiLux)
+
+#define NAME "rpi.lux"
+
+Lux::Lux(Controller *controller)
+ : Algorithm(controller)
+{
+ /*
+ * Put in some defaults as there will be no meaningful values until
+	 * process() has run.
+ */
+ status_.aperture = 1.0;
+ status_.lux = 400;
+}
+
+char const *Lux::name() const
+{
+ return NAME;
+}
+
+int Lux::read(const libcamera::YamlObject &params)
+{
+ auto value = params["reference_shutter_speed"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceExposureTime_ = *value * 1.0us;
+
+ value = params["reference_gain"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceGain_ = *value;
+
+ referenceAperture_ = params["reference_aperture"].get<double>(1.0);
+
+ value = params["reference_Y"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceY_ = *value;
+
+ value = params["reference_lux"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceLux_ = *value;
+
+ currentAperture_ = referenceAperture_;
+ return 0;
+}
+
+void Lux::setCurrentAperture(double aperture)
+{
+ currentAperture_ = aperture;
+}
+
+void Lux::prepare(Metadata *imageMetadata)
+{
+ std::unique_lock<std::mutex> lock(mutex_);
+ imageMetadata->set("lux.status", status_);
+}
+
+void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata)
+{
+ DeviceStatus deviceStatus;
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ double currentGain = deviceStatus.analogueGain;
+ double currentAperture = deviceStatus.aperture.value_or(currentAperture_);
+ double currentY = stats->yHist.interQuantileMean(0, 1);
+ double gainRatio = referenceGain_ / currentGain;
+ double exposureTimeRatio =
+ referenceExposureTime_ / deviceStatus.exposureTime;
+ double apertureRatio = referenceAperture_ / currentAperture;
+ double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_;
+ double estimatedLux = exposureTimeRatio * gainRatio *
+ apertureRatio * apertureRatio *
+ yRatio * referenceLux_;
+ LuxStatus status;
+ status.lux = estimatedLux;
+ status.aperture = currentAperture;
+		LOG(RPiLux, Debug) << "estimated lux " << estimatedLux;
+ {
+ std::unique_lock<std::mutex> lock(mutex_);
+ status_ = status;
+ }
+ /*
+ * Overwrite the metadata here as well, so that downstream
+ * algorithms get the latest value.
+ */
+ imageMetadata->set("lux.status", status);
+ } else
+		LOG(RPiLux, Warning) << "no device metadata";
+}
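+
+/*
+ * A worked example using the imx219 tuning values from this series
+ * (reference_shutter_speed 27685us, reference_gain 1.0, reference_Y 12744,
+ * reference_lux 998) and a hypothetical frame: exposure time 13842.5us,
+ * analogue gain 1.0, reference aperture, and a luminance histogram whose
+ * scaled mean equals reference_Y. Then
+ *
+ *     exposureTimeRatio = 27685 / 13842.5 = 2.0
+ *     gainRatio = apertureRatio = yRatio = 1.0
+ *     estimatedLux = 2.0 * 998 = 1996
+ *
+ * i.e. needing half the exposure for the same image brightness implies a
+ * scene twice as bright as the reference.
+ */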
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Lux(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h
new file mode 100644
index 00000000..da007fe9
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/lux.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Lux control algorithm
+ */
+#pragma once
+
+#include <mutex>
+
+#include <libcamera/base/utils.h>
+
+#include "../lux_status.h"
+#include "../algorithm.h"
+
+/* This is our implementation of the "lux control algorithm". */
+
+namespace RPiController {
+
+class Lux : public Algorithm
+{
+public:
+ Lux(Controller *controller);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+ void process(StatisticsPtr &stats, Metadata *imageMetadata) override;
+ void setCurrentAperture(double aperture);
+
+private:
+ /*
+ * These values define the conditions of the reference image, against
+ * which we compare the new image.
+ */
+ libcamera::utils::Duration referenceExposureTime_;
+ double referenceGain_;
+ double referenceAperture_; /* units of 1/f */
+ double referenceY_; /* out of 65536 */
+ double referenceLux_;
+ double currentAperture_;
+ LuxStatus status_;
+ std::mutex mutex_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp
new file mode 100644
index 00000000..145175fb
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/noise.cpp
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm
+ */
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include "../device_status.h"
+#include "../noise_status.h"
+
+#include "noise.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiNoise)
+
+#define NAME "rpi.noise"
+
+Noise::Noise(Controller *controller)
+ : Algorithm(controller), modeFactor_(1.0)
+{
+}
+
+char const *Noise::name() const
+{
+ return NAME;
+}
+
+void Noise::switchMode(CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /*
+ * For example, we would expect a 2x2 binned mode to have a "noise
+ * factor" of sqrt(2x2) = 2. (can't be less than one, right?)
+ */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
+}
+
+int Noise::read(const libcamera::YamlObject &params)
+{
+ auto value = params["reference_constant"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceConstant_ = *value;
+
+ value = params["reference_slope"].get<double>();
+ if (!value)
+ return -EINVAL;
+ referenceSlope_ = *value;
+
+ return 0;
+}
+
+void Noise::prepare(Metadata *imageMetadata)
+{
+ struct DeviceStatus deviceStatus;
+ deviceStatus.analogueGain = 1.0; /* keep compiler calm */
+ if (imageMetadata->get("device.status", deviceStatus) == 0) {
+ /*
+ * There is a slight question as to exactly how the noise
+ * profile, specifically the constant part of it, scales. For
+ * now we assume it all scales the same, and we'll revisit this
+ * if it proves substantially wrong. NOTE: we may also want to
+ * make some adjustments based on the camera mode (such as
+ * binning), if we knew how to discover it...
+ */
+ double factor = std::sqrt(deviceStatus.analogueGain) / modeFactor_;
+ struct NoiseStatus status;
+ status.noiseConstant = referenceConstant_ * factor;
+ status.noiseSlope = referenceSlope_ * factor;
+ imageMetadata->set("noise.status", status);
+ LOG(RPiNoise, Debug)
+ << "constant " << status.noiseConstant
+ << " slope " << status.noiseSlope;
+ } else
+		LOG(RPiNoise, Warning) << "no device metadata";
+}
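+
+/*
+ * A worked example with the imx219 tuning values from this series
+ * (reference_constant 0, reference_slope 3.67): at 4x analogue gain in a
+ * 2x2 binned mode (modeFactor_ = 2),
+ *
+ *     factor = sqrt(4.0) / 2 = 1.0
+ *
+ * so the reported noise slope stays at 3.67; the extra shot noise from the
+ * gain is offset by the averaging of the binned pixels.
+ */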
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new Noise(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/noise.h b/src/ipa/rpi/controller/rpi/noise.h
new file mode 100644
index 00000000..6deae1f0
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/noise.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Noise control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../noise_status.h"
+
+/* This is our implementation of the "noise algorithm". */
+
+namespace RPiController {
+
+class Noise : public Algorithm
+{
+public:
+ Noise(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ /* the noise profile for analogue gain of 1.0 */
+ double referenceConstant_;
+ double referenceSlope_;
+ double modeFactor_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/saturation.cpp b/src/ipa/rpi/controller/rpi/saturation.cpp
new file mode 100644
index 00000000..b83c5887
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/saturation.cpp
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Saturation control algorithm
+ */
+#include "saturation.h"
+
+#include <libcamera/base/log.h>
+
+#include "saturation_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSaturation)
+
+#define NAME "rpi.saturation"
+
+Saturation::Saturation(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Saturation::name() const
+{
+ return NAME;
+}
+
+int Saturation::read(const libcamera::YamlObject &params)
+{
+ config_.shiftR = params["shift_r"].get<uint8_t>(0);
+ config_.shiftG = params["shift_g"].get<uint8_t>(0);
+ config_.shiftB = params["shift_b"].get<uint8_t>(0);
+ return 0;
+}
+
+void Saturation::initialise()
+{
+}
+
+void Saturation::prepare(Metadata *imageMetadata)
+{
+ SaturationStatus saturation;
+
+ saturation.shiftR = config_.shiftR;
+ saturation.shiftG = config_.shiftG;
+ saturation.shiftB = config_.shiftB;
+ imageMetadata->set("saturation.status", saturation);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Saturation(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/saturation.h b/src/ipa/rpi/controller/rpi/saturation.h
new file mode 100644
index 00000000..97da412a
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/saturation.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Saturation control algorithm
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+struct SaturationConfig {
+ uint8_t shiftR;
+ uint8_t shiftG;
+ uint8_t shiftB;
+};
+
+class Saturation : public Algorithm
+{
+public:
+ Saturation(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ SaturationConfig config_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/rpi/sdn.cpp b/src/ipa/rpi/controller/rpi/sdn.cpp
new file mode 100644
index 00000000..619178a8
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sdn.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * SDN (spatial denoise) control algorithm
+ */
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "../denoise_status.h"
+#include "../noise_status.h"
+
+#include "sdn.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSdn)
+
+/*
+ * Calculate settings for the spatial denoise block using the noise profile in
+ * the image metadata.
+ */
+
+#define NAME "rpi.sdn"
+
+Sdn::Sdn(Controller *controller)
+ : DenoiseAlgorithm(controller), mode_(DenoiseMode::ColourOff)
+{
+}
+
+char const *Sdn::name() const
+{
+ return NAME;
+}
+
+int Sdn::read(const libcamera::YamlObject &params)
+{
+ LOG(RPiSdn, Warning)
+ << "Using legacy SDN tuning - please consider moving SDN inside rpi.denoise";
+ deviation_ = params["deviation"].get<double>(3.2);
+ strength_ = params["strength"].get<double>(0.75);
+ return 0;
+}
+
+void Sdn::initialise()
+{
+}
+
+void Sdn::prepare(Metadata *imageMetadata)
+{
+ struct NoiseStatus noiseStatus = {};
+ noiseStatus.noiseSlope = 3.0; /* in case no metadata */
+ if (imageMetadata->get("noise.status", noiseStatus) != 0)
+ LOG(RPiSdn, Warning) << "no noise profile found";
+ LOG(RPiSdn, Debug)
+ << "Noise profile: constant " << noiseStatus.noiseConstant
+ << " slope " << noiseStatus.noiseSlope;
+ struct DenoiseStatus status;
+ status.noiseConstant = noiseStatus.noiseConstant * deviation_;
+ status.noiseSlope = noiseStatus.noiseSlope * deviation_;
+ status.strength = strength_;
+ status.mode = utils::to_underlying(mode_);
+ imageMetadata->set("denoise.status", status);
+ LOG(RPiSdn, Debug)
+ << "programmed constant " << status.noiseConstant
+ << " slope " << status.noiseSlope
+ << " strength " << status.strength;
+}
+
+void Sdn::setMode(DenoiseMode mode)
+{
+ /* We only distinguish between off and all other modes. */
+ mode_ = mode;
+}
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return (Algorithm *)new Sdn(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/sdn.h b/src/ipa/rpi/controller/rpi/sdn.h
new file mode 100644
index 00000000..cb226de8
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sdn.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * SDN (spatial denoise) control algorithm
+ */
+#pragma once
+
+#include "../algorithm.h"
+#include "../denoise_algorithm.h"
+
+namespace RPiController {
+
+/* Algorithm to calculate correct spatial denoise (SDN) settings. */
+
+class Sdn : public DenoiseAlgorithm
+{
+public:
+ Sdn(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+ void setMode(DenoiseMode mode) override;
+
+private:
+ double deviation_;
+ double strength_;
+ DenoiseMode mode_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp
new file mode 100644
index 00000000..1d143ff5
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sharpen.cpp
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sharpening control algorithm
+ */
+
+#include <cmath>
+
+#include <libcamera/base/log.h>
+
+#include "../sharpen_status.h"
+
+#include "sharpen.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiSharpen)
+
+#define NAME "rpi.sharpen"
+
+Sharpen::Sharpen(Controller *controller)
+ : SharpenAlgorithm(controller), userStrength_(1.0)
+{
+}
+
+char const *Sharpen::name() const
+{
+ return NAME;
+}
+
+void Sharpen::switchMode(CameraMode const &cameraMode,
+ [[maybe_unused]] Metadata *metadata)
+{
+ /* can't be less than one, right? */
+ modeFactor_ = std::max(1.0, cameraMode.noiseFactor);
+}
+
+int Sharpen::read(const libcamera::YamlObject &params)
+{
+ threshold_ = params["threshold"].get<double>(1.0);
+ strength_ = params["strength"].get<double>(1.0);
+ limit_ = params["limit"].get<double>(1.0);
+ LOG(RPiSharpen, Debug)
+ << "Read threshold " << threshold_
+ << " strength " << strength_
+ << " limit " << limit_;
+ return 0;
+}
+
+void Sharpen::setStrength(double strength)
+{
+ /*
+ * Note that this function is how an application sets the overall
+ * sharpening "strength". We call this the "user strength" field
+ * as there already is a strength_ field - being an internal gain
+ * parameter that gets passed to the ISP control code. Negative
+ * values are not allowed - coerce them to zero (no sharpening).
+ */
+ userStrength_ = std::max(0.0, strength);
+}
+
+void Sharpen::prepare(Metadata *imageMetadata)
+{
+ /*
+ * The userStrength_ affects the algorithm's internal gain directly, but
+ * we adjust the limit and threshold less aggressively. Using a sqrt
+ * function is an arbitrary but gentle way of accomplishing this.
+ */
+ double userStrengthSqrt = std::sqrt(userStrength_);
+ struct SharpenStatus status;
+ /*
+ * Binned modes seem to need the sharpening toned down with this
+ * pipeline, thus we use the modeFactor_ here. Also avoid
+ * divide-by-zero with the userStrengthSqrt.
+ */
+ status.threshold = threshold_ * modeFactor_ /
+ std::max(0.01, userStrengthSqrt);
+ status.strength = strength_ / modeFactor_ * userStrength_;
+ status.limit = limit_ / modeFactor_ * userStrengthSqrt;
+ /* Finally, report any application-supplied parameters that were used. */
+ status.userStrength = userStrength_;
+ imageMetadata->set("sharpen.status", status);
+}
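+
+/*
+ * A worked example with hypothetical values: userStrength_ = 0.25 (so
+ * userStrengthSqrt = 0.5) in an unbinned mode (modeFactor_ = 1), with the
+ * default threshold_ = strength_ = limit_ = 1.0, gives
+ *
+ *     threshold = 1.0 / 0.5  = 2.0   (sharpening kicks in later)
+ *     strength  = 1.0 * 0.25 = 0.25  (a quarter of the gain)
+ *     limit     = 1.0 * 0.5  = 0.5   (half the maximum response)
+ *
+ * so a reduced user strength softens all three controls, but the internal
+ * gain most aggressively.
+ */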
+
+/* Register algorithm with the system. */
+static Algorithm *create(Controller *controller)
+{
+ return new Sharpen(controller);
+}
+static RegisterAlgorithm reg(NAME, &create);
diff --git a/src/ipa/rpi/controller/rpi/sharpen.h b/src/ipa/rpi/controller/rpi/sharpen.h
new file mode 100644
index 00000000..96ccd609
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/sharpen.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * sharpening control algorithm
+ */
+#pragma once
+
+#include "../sharpen_algorithm.h"
+#include "../sharpen_status.h"
+
+/* This is our implementation of the "sharpen algorithm". */
+
+namespace RPiController {
+
+class Sharpen : public SharpenAlgorithm
+{
+public:
+ Sharpen(Controller *controller);
+ char const *name() const override;
+ void switchMode(CameraMode const &cameraMode, Metadata *metadata) override;
+ int read(const libcamera::YamlObject &params) override;
+ void setStrength(double strength) override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ double threshold_;
+ double strength_;
+ double limit_;
+ double modeFactor_;
+ double userStrength_;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/rpi/tonemap.cpp b/src/ipa/rpi/controller/rpi/tonemap.cpp
new file mode 100644
index 00000000..3422adfe
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/tonemap.cpp
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm
+ */
+#include "tonemap.h"
+
+#include <libcamera/base/log.h>
+
+#include "tonemap_status.h"
+
+using namespace RPiController;
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(RPiTonemap)
+
+#define NAME "rpi.tonemap"
+
+Tonemap::Tonemap(Controller *controller)
+ : Algorithm(controller)
+{
+}
+
+char const *Tonemap::name() const
+{
+ return NAME;
+}
+
+int Tonemap::read(const libcamera::YamlObject &params)
+{
+ config_.detailConstant = params["detail_constant"].get<uint16_t>(0);
+ config_.detailSlope = params["detail_slope"].get<double>(0.1);
+ config_.iirStrength = params["iir_strength"].get<double>(1.0);
+ config_.strength = params["strength"].get<double>(1.0);
+ config_.tonemap = params["tone_curve"].get<ipa::Pwl>(ipa::Pwl{});
+ return 0;
+}
+
+void Tonemap::initialise()
+{
+}
+
+void Tonemap::prepare(Metadata *imageMetadata)
+{
+ TonemapStatus tonemapStatus;
+
+ tonemapStatus.detailConstant = config_.detailConstant;
+ tonemapStatus.detailSlope = config_.detailSlope;
+ tonemapStatus.iirStrength = config_.iirStrength;
+ tonemapStatus.strength = config_.strength;
+ tonemapStatus.tonemap = config_.tonemap;
+ imageMetadata->set("tonemap.status", tonemapStatus);
+}
+
+// Register algorithm with the system.
+static Algorithm *Create(Controller *controller)
+{
+ return (Algorithm *)new Tonemap(controller);
+}
+static RegisterAlgorithm reg(NAME, &Create);
diff --git a/src/ipa/rpi/controller/rpi/tonemap.h b/src/ipa/rpi/controller/rpi/tonemap.h
new file mode 100644
index 00000000..ba0cf5c4
--- /dev/null
+++ b/src/ipa/rpi/controller/rpi/tonemap.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm
+ */
+#pragma once
+
+#include <libipa/pwl.h>
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+struct TonemapConfig {
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ libcamera::ipa::Pwl tonemap;
+};
+
+class Tonemap : public Algorithm
+{
+public:
+ Tonemap(Controller *controller = NULL);
+ char const *name() const override;
+ int read(const libcamera::YamlObject &params) override;
+ void initialise() override;
+ void prepare(Metadata *imageMetadata) override;
+
+private:
+ TonemapConfig config_;
+};
+
+} // namespace RPiController
diff --git a/src/ipa/rpi/controller/saturation_status.h b/src/ipa/rpi/controller/saturation_status.h
new file mode 100644
index 00000000..c7fadc99
--- /dev/null
+++ b/src/ipa/rpi/controller/saturation_status.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Saturation control algorithm status
+ */
+#pragma once
+
+struct SaturationStatus {
+ uint8_t shiftR;
+ uint8_t shiftG;
+ uint8_t shiftB;
+};
diff --git a/src/ipa/rpi/controller/sharpen_algorithm.h b/src/ipa/rpi/controller/sharpen_algorithm.h
new file mode 100644
index 00000000..abd82cb2
--- /dev/null
+++ b/src/ipa/rpi/controller/sharpen_algorithm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * sharpness control algorithm interface
+ */
+#pragma once
+
+#include "algorithm.h"
+
+namespace RPiController {
+
+class SharpenAlgorithm : public Algorithm
+{
+public:
+ SharpenAlgorithm(Controller *controller) : Algorithm(controller) {}
+ /* A sharpness control algorithm must provide the following: */
+ virtual void setStrength(double strength) = 0;
+};
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/sharpen_status.h b/src/ipa/rpi/controller/sharpen_status.h
new file mode 100644
index 00000000..74910199
--- /dev/null
+++ b/src/ipa/rpi/controller/sharpen_status.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019, Raspberry Pi Ltd
+ *
+ * Sharpen control algorithm status
+ */
+#pragma once
+
+/* The "sharpen" algorithm stores the strength to use. */
+
+struct SharpenStatus {
+ /* controls the smallest level of detail (or noise!) that sharpening will pick up */
+ double threshold;
+ /* the rate at which the sharpening response ramps once above the threshold */
+ double strength;
+ /* upper limit of the allowed sharpening response */
+ double limit;
+ /* The sharpening strength requested by the user or application. */
+ double userStrength;
+};
diff --git a/src/ipa/rpi/controller/statistics.h b/src/ipa/rpi/controller/statistics.h
new file mode 100644
index 00000000..cbd81161
--- /dev/null
+++ b/src/ipa/rpi/controller/statistics.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022, Raspberry Pi Ltd
+ *
+ * Raspberry Pi generic statistics structure
+ */
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include "histogram.h"
+#include "region_stats.h"
+
+namespace RPiController {
+
+struct RgbySums {
+ RgbySums(uint64_t _rSum = 0, uint64_t _gSum = 0, uint64_t _bSum = 0, uint64_t _ySum = 0)
+ : rSum(_rSum), gSum(_gSum), bSum(_bSum), ySum(_ySum)
+ {
+ }
+ uint64_t rSum;
+ uint64_t gSum;
+ uint64_t bSum;
+ uint64_t ySum;
+};
+
+using RgbyRegions = RegionStats<RgbySums>;
+using FocusRegions = RegionStats<uint64_t>;
+
+struct Statistics {
+ /*
+ * All region based statistics are normalised to 16-bits, giving a
+ * maximum value of (1 << NormalisationFactorPow2) - 1.
+ */
+ static constexpr unsigned int NormalisationFactorPow2 = 16;
+
+ /*
+ * Positioning of the AGC statistics gathering in the pipeline:
+ * Pre-WB correction or post-WB correction.
+ * Assume this is post-LSC.
+ */
+ enum class AgcStatsPos { PreWb, PostWb };
+ const AgcStatsPos agcStatsPos;
+
+ /*
+ * Positioning of the AWB/ALSC statistics gathering in the pipeline:
+ * Pre-LSC or post-LSC.
+ */
+ enum class ColourStatsPos { PreLsc, PostLsc };
+ const ColourStatsPos colourStatsPos;
+
+ Statistics(AgcStatsPos a, ColourStatsPos c)
+ : agcStatsPos(a), colourStatsPos(c)
+ {
+ }
+
+ /* Histogram statistics. Not all histograms may be populated! */
+ Histogram rHist;
+ Histogram gHist;
+ Histogram bHist;
+ Histogram yHist;
+
+ /* Row sums for flicker avoidance. */
+ std::vector<RgbySums> rowSums;
+
+ /* Region based colour sums. */
+ RgbyRegions agcRegions;
+ RgbyRegions awbRegions;
+
+ /* Region based focus FoM. */
+ FocusRegions focusRegions;
+};
+
+using StatisticsPtr = std::shared_ptr<Statistics>;
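+
+/*
+ * A minimal usage sketch (hypothetical values): a statistics producer fills
+ * in the structure and hands out a shared pointer, e.g.
+ *
+ *     StatisticsPtr stats = std::make_shared<Statistics>(
+ *             Statistics::AgcStatsPos::PostWb,
+ *             Statistics::ColourStatsPos::PreLsc);
+ *     stats->rowSums.resize(numRows); // numRows as the hardware provides
+ *
+ * after which consumers such as the algorithms above can query histograms
+ * and region sums, e.g. stats->yHist.interQuantileMean(0, 1).
+ */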
+
+} /* namespace RPiController */
diff --git a/src/ipa/rpi/controller/stitch_status.h b/src/ipa/rpi/controller/stitch_status.h
new file mode 100644
index 00000000..7812f3e3
--- /dev/null
+++ b/src/ipa/rpi/controller/stitch_status.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ *
+ * stitch control algorithm status
+ */
+#pragma once
+
+/*
+ * Parameters for the stitch block.
+ */
+
+struct StitchStatus {
+ uint16_t thresholdLo;
+ uint8_t diffPower;
+ double motionThreshold;
+};
diff --git a/src/ipa/rpi/controller/tonemap_status.h b/src/ipa/rpi/controller/tonemap_status.h
new file mode 100644
index 00000000..0364ff66
--- /dev/null
+++ b/src/ipa/rpi/controller/tonemap_status.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2022 Raspberry Pi Ltd
+ *
+ * Tonemap control algorithm status
+ */
+#pragma once
+
+#include <libipa/pwl.h>
+
+struct TonemapStatus {
+ uint16_t detailConstant;
+ double detailSlope;
+ double iirStrength;
+ double strength;
+ libcamera::ipa::Pwl tonemap;
+};
diff --git a/src/ipa/rpi/meson.build b/src/ipa/rpi/meson.build
new file mode 100644
index 00000000..4811c76f
--- /dev/null
+++ b/src/ipa/rpi/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('cam_helper')
+subdir('common')
+subdir('controller')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
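+
+# For example, configuring with -Dpipelines=rpi/vc4 makes the loop above
+# descend into the vc4 subdirectory here; pipeline entries that do not start
+# with rpi/ are skipped.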
diff --git a/src/ipa/rpi/vc4/data/imx219.json b/src/ipa/rpi/vc4/data/imx219.json
new file mode 100644
index 00000000..a020b12f
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx219.json
@@ -0,0 +1,695 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 3.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2498.0, 0.9309, 0.3599,
+ 2911.0, 0.8682, 0.4283,
+ 2919.0, 0.8358, 0.4621,
+ 3627.0, 0.7646, 0.5327,
+ 4600.0, 0.6079, 0.6721,
+ 5716.0, 0.5712, 0.7017,
+ 8575.0, 0.4331, 0.8037
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.04791,
+ "transverse_neg": 0.04881
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2860,
+ "ccm":
+ [
+ 2.12089, -0.52461, -0.59629,
+ -0.85342, 2.80445, -0.95103,
+ -0.26897, -1.14788, 2.41685
+ ]
+ },
+ {
+ "ct": 2960,
+ "ccm":
+ [
+ 2.26962, -0.54174, -0.72789,
+ -0.77008, 2.60271, -0.83262,
+ -0.26036, -1.51254, 2.77289
+ ]
+ },
+ {
+ "ct": 3603,
+ "ccm":
+ [
+ 2.18644, -0.66148, -0.52496,
+ -0.77828, 2.69474, -0.91645,
+ -0.25239, -0.83059, 2.08298
+ ]
+ },
+ {
+ "ct": 4650,
+ "ccm":
+ [
+ 2.18174, -0.70887, -0.47287,
+ -0.70196, 2.76426, -1.06231,
+ -0.25157, -0.71978, 1.97135
+ ]
+ },
+ {
+ "ct": 5858,
+ "ccm":
+ [
+ 2.32392, -0.88421, -0.43971,
+ -0.63821, 2.58348, -0.94527,
+ -0.28541, -0.54112, 1.82653
+ ]
+ },
+ {
+ "ct": 7580,
+ "ccm":
+ [
+ 2.21175, -0.53242, -0.67933,
+ -0.57875, 3.07922, -1.50047,
+ -0.27709, -0.73338, 2.01048
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
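An aside for readers of these tuning files: lists such as "y_target", "gamma_curve" and "ct_curve" are flattened sequences of (x, y) breakpoints that the tuning format evaluates by piecewise-linear interpolation (lux level or pixel code in, target value out). Below is a minimal sketch of that evaluation; the helper name `pwl` is hypothetical, not a libcamera API.

```python
from bisect import bisect_right

def pwl(flat, x):
    """Evaluate a flattened [x0, y0, x1, y1, ...] piecewise-linear curve at x."""
    pts = list(zip(flat[::2], flat[1::2]))
    if x <= pts[0][0]:
        return pts[0][1]          # clamp below the first breakpoint
    if x >= pts[-1][0]:
        return pts[-1][1]         # clamp above the last breakpoint
    i = bisect_right([p[0] for p in pts], x) - 1
    (x0, y0), (x1, y1) = pts[i], pts[i + 1]
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

# The AGC "y_target" curve used throughout these files, indexed by lux:
y_target = [0, 0.16, 1000, 0.165, 10000, 0.17]
print(pwl(y_target, 500))  # ~0.1625, halfway between the first two breakpoints
```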
diff --git a/src/ipa/rpi/vc4/data/imx219_noir.json b/src/ipa/rpi/vc4/data/imx219_noir.json
new file mode 100644
index 00000000..d8bc9639
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx219_noir.json
@@ -0,0 +1,629 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27685,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 998,
+ "reference_Y": 12744
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 3.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01633
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.487, 1.481, 1.481, 1.445, 1.389, 1.327, 1.307, 1.307, 1.307, 1.309, 1.341, 1.405, 1.458, 1.494, 1.494, 1.497,
+ 1.491, 1.481, 1.448, 1.397, 1.331, 1.275, 1.243, 1.229, 1.229, 1.249, 1.287, 1.349, 1.409, 1.463, 1.494, 1.497,
+ 1.491, 1.469, 1.405, 1.331, 1.275, 1.217, 1.183, 1.172, 1.172, 1.191, 1.231, 1.287, 1.349, 1.424, 1.484, 1.499,
+ 1.487, 1.444, 1.363, 1.283, 1.217, 1.183, 1.148, 1.138, 1.138, 1.159, 1.191, 1.231, 1.302, 1.385, 1.461, 1.492,
+ 1.481, 1.423, 1.334, 1.253, 1.189, 1.148, 1.135, 1.119, 1.123, 1.137, 1.159, 1.203, 1.272, 1.358, 1.442, 1.488,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.118, 1.114, 1.116, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.413, 1.321, 1.236, 1.176, 1.139, 1.116, 1.114, 1.115, 1.123, 1.149, 1.192, 1.258, 1.344, 1.432, 1.487,
+ 1.479, 1.425, 1.336, 1.251, 1.189, 1.149, 1.136, 1.118, 1.121, 1.138, 1.158, 1.206, 1.275, 1.358, 1.443, 1.488,
+ 1.488, 1.448, 1.368, 1.285, 1.219, 1.189, 1.149, 1.139, 1.139, 1.158, 1.195, 1.235, 1.307, 1.387, 1.462, 1.493,
+ 1.496, 1.475, 1.411, 1.337, 1.284, 1.219, 1.189, 1.176, 1.176, 1.195, 1.235, 1.296, 1.356, 1.429, 1.487, 1.501,
+ 1.495, 1.489, 1.458, 1.407, 1.337, 1.287, 1.253, 1.239, 1.239, 1.259, 1.296, 1.356, 1.419, 1.472, 1.499, 1.499,
+ 1.494, 1.489, 1.489, 1.453, 1.398, 1.336, 1.317, 1.317, 1.317, 1.321, 1.351, 1.416, 1.467, 1.501, 1.501, 1.499
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.694, 1.688, 1.688, 1.649, 1.588, 1.518, 1.495, 1.495, 1.495, 1.497, 1.532, 1.602, 1.659, 1.698, 1.698, 1.703,
+ 1.698, 1.688, 1.653, 1.597, 1.525, 1.464, 1.429, 1.413, 1.413, 1.437, 1.476, 1.542, 1.606, 1.665, 1.698, 1.703,
+ 1.697, 1.673, 1.605, 1.525, 1.464, 1.401, 1.369, 1.354, 1.354, 1.377, 1.417, 1.476, 1.542, 1.623, 1.687, 1.705,
+ 1.692, 1.646, 1.561, 1.472, 1.401, 1.368, 1.337, 1.323, 1.324, 1.348, 1.377, 1.417, 1.492, 1.583, 1.661, 1.697,
+ 1.686, 1.625, 1.528, 1.439, 1.372, 1.337, 1.321, 1.311, 1.316, 1.324, 1.348, 1.389, 1.461, 1.553, 1.642, 1.694,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.306, 1.306, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.684, 1.613, 1.514, 1.423, 1.359, 1.328, 1.311, 1.305, 1.305, 1.316, 1.339, 1.378, 1.446, 1.541, 1.633, 1.693,
+ 1.685, 1.624, 1.529, 1.438, 1.372, 1.336, 1.324, 1.309, 1.314, 1.323, 1.348, 1.392, 1.462, 1.555, 1.646, 1.694,
+ 1.692, 1.648, 1.561, 1.473, 1.403, 1.372, 1.336, 1.324, 1.324, 1.348, 1.378, 1.423, 1.495, 1.585, 1.667, 1.701,
+ 1.701, 1.677, 1.608, 1.527, 1.471, 1.403, 1.375, 1.359, 1.359, 1.378, 1.423, 1.488, 1.549, 1.631, 1.694, 1.709,
+ 1.702, 1.694, 1.656, 1.601, 1.527, 1.473, 1.441, 1.424, 1.424, 1.443, 1.488, 1.549, 1.621, 1.678, 1.706, 1.707,
+ 1.699, 1.694, 1.694, 1.654, 1.593, 1.525, 1.508, 1.508, 1.508, 1.509, 1.546, 1.614, 1.674, 1.708, 1.708, 1.707
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.179, 2.176, 2.176, 2.125, 2.048, 1.975, 1.955, 1.954, 1.954, 1.956, 1.993, 2.071, 2.141, 2.184, 2.185, 2.188,
+ 2.189, 2.176, 2.128, 2.063, 1.973, 1.908, 1.872, 1.856, 1.856, 1.876, 1.922, 1.999, 2.081, 2.144, 2.184, 2.192,
+ 2.187, 2.152, 2.068, 1.973, 1.907, 1.831, 1.797, 1.786, 1.786, 1.804, 1.853, 1.922, 1.999, 2.089, 2.166, 2.191,
+ 2.173, 2.117, 2.013, 1.908, 1.831, 1.791, 1.755, 1.749, 1.749, 1.767, 1.804, 1.853, 1.939, 2.041, 2.135, 2.181,
+ 2.166, 2.089, 1.975, 1.869, 1.792, 1.755, 1.741, 1.731, 1.734, 1.749, 1.767, 1.818, 1.903, 2.005, 2.111, 2.173,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.729, 1.725, 1.729, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.165, 2.074, 1.956, 1.849, 1.777, 1.742, 1.727, 1.724, 1.725, 1.734, 1.758, 1.804, 1.884, 1.991, 2.099, 2.172,
+ 2.166, 2.085, 1.975, 1.869, 1.791, 1.755, 1.741, 1.729, 1.733, 1.749, 1.769, 1.819, 1.904, 2.009, 2.114, 2.174,
+ 2.174, 2.118, 2.015, 1.913, 1.831, 1.791, 1.755, 1.749, 1.749, 1.769, 1.811, 1.855, 1.943, 2.047, 2.139, 2.183,
+ 2.187, 2.151, 2.072, 1.979, 1.911, 1.831, 1.801, 1.791, 1.791, 1.811, 1.855, 1.933, 2.006, 2.101, 2.173, 2.197,
+ 2.189, 2.178, 2.132, 2.069, 1.979, 1.913, 1.879, 1.867, 1.867, 1.891, 1.933, 2.006, 2.091, 2.156, 2.195, 2.197,
+ 2.181, 2.179, 2.178, 2.131, 2.057, 1.981, 1.965, 1.965, 1.965, 1.969, 1.999, 2.083, 2.153, 2.197, 2.197, 2.196
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.967, 1.961, 1.955, 1.953, 1.954, 1.957, 1.961, 1.963, 1.963, 1.961, 1.959, 1.957, 1.954, 1.951, 1.951, 1.955,
+ 1.961, 1.959, 1.957, 1.956, 1.962, 1.967, 1.975, 1.979, 1.979, 1.975, 1.971, 1.967, 1.957, 1.952, 1.951, 1.951,
+ 1.959, 1.959, 1.959, 1.966, 1.976, 1.989, 1.999, 2.004, 2.003, 1.997, 1.991, 1.981, 1.967, 1.956, 1.951, 1.951,
+ 1.959, 1.962, 1.967, 1.978, 1.993, 2.009, 2.021, 2.028, 2.026, 2.021, 2.011, 1.995, 1.981, 1.964, 1.953, 1.951,
+ 1.961, 1.965, 1.977, 1.993, 2.009, 2.023, 2.041, 2.047, 2.047, 2.037, 2.024, 2.011, 1.995, 1.975, 1.958, 1.953,
+ 1.963, 1.968, 1.981, 2.001, 2.019, 2.039, 2.046, 2.052, 2.052, 2.051, 2.035, 2.021, 2.001, 1.978, 1.959, 1.955,
+ 1.961, 1.966, 1.981, 2.001, 2.019, 2.038, 2.043, 2.051, 2.052, 2.042, 2.034, 2.019, 2.001, 1.978, 1.959, 1.954,
+ 1.957, 1.961, 1.972, 1.989, 2.003, 2.021, 2.038, 2.039, 2.039, 2.034, 2.019, 2.004, 1.988, 1.971, 1.954, 1.949,
+ 1.952, 1.953, 1.959, 1.972, 1.989, 2.003, 2.016, 2.019, 2.019, 2.014, 2.003, 1.988, 1.971, 1.955, 1.948, 1.947,
+ 1.949, 1.948, 1.949, 1.957, 1.971, 1.978, 1.991, 1.994, 1.994, 1.989, 1.979, 1.967, 1.954, 1.946, 1.947, 1.947,
+ 1.949, 1.946, 1.944, 1.946, 1.949, 1.954, 1.962, 1.967, 1.967, 1.963, 1.956, 1.948, 1.943, 1.943, 1.946, 1.949,
+ 1.951, 1.946, 1.944, 1.942, 1.943, 1.943, 1.947, 1.948, 1.949, 1.947, 1.945, 1.941, 1.938, 1.939, 1.948, 1.952
+ ]
+ },
+ {
+ "ct": 3850,
+ "table":
+ [
+ 1.726, 1.724, 1.722, 1.723, 1.731, 1.735, 1.743, 1.746, 1.746, 1.741, 1.735, 1.729, 1.725, 1.721, 1.721, 1.721,
+ 1.724, 1.723, 1.723, 1.727, 1.735, 1.744, 1.749, 1.756, 1.756, 1.749, 1.744, 1.735, 1.727, 1.719, 1.719, 1.719,
+ 1.723, 1.723, 1.724, 1.735, 1.746, 1.759, 1.767, 1.775, 1.775, 1.766, 1.758, 1.746, 1.735, 1.723, 1.718, 1.716,
+ 1.723, 1.725, 1.732, 1.746, 1.759, 1.775, 1.782, 1.792, 1.792, 1.782, 1.772, 1.759, 1.745, 1.729, 1.718, 1.716,
+ 1.725, 1.729, 1.738, 1.756, 1.775, 1.785, 1.796, 1.803, 1.804, 1.794, 1.783, 1.772, 1.757, 1.736, 1.722, 1.718,
+ 1.728, 1.731, 1.741, 1.759, 1.781, 1.795, 1.803, 1.806, 1.808, 1.805, 1.791, 1.779, 1.762, 1.739, 1.722, 1.721,
+ 1.727, 1.731, 1.741, 1.759, 1.781, 1.791, 1.799, 1.804, 1.806, 1.801, 1.791, 1.779, 1.762, 1.739, 1.722, 1.717,
+ 1.722, 1.724, 1.733, 1.751, 1.768, 1.781, 1.791, 1.796, 1.799, 1.791, 1.781, 1.766, 1.754, 1.731, 1.717, 1.714,
+ 1.718, 1.718, 1.724, 1.737, 1.752, 1.768, 1.776, 1.782, 1.784, 1.781, 1.766, 1.754, 1.737, 1.724, 1.713, 1.709,
+ 1.716, 1.715, 1.716, 1.725, 1.737, 1.749, 1.756, 1.763, 1.764, 1.762, 1.749, 1.737, 1.724, 1.717, 1.709, 1.708,
+ 1.715, 1.714, 1.712, 1.715, 1.722, 1.729, 1.736, 1.741, 1.742, 1.739, 1.731, 1.723, 1.717, 1.712, 1.711, 1.709,
+ 1.716, 1.714, 1.711, 1.712, 1.715, 1.719, 1.723, 1.728, 1.731, 1.729, 1.723, 1.718, 1.711, 1.711, 1.713, 1.713
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.374, 1.372, 1.373, 1.374, 1.375, 1.378, 1.378, 1.381, 1.382, 1.382, 1.378, 1.373, 1.372, 1.369, 1.365, 1.365,
+ 1.371, 1.371, 1.372, 1.374, 1.378, 1.381, 1.384, 1.386, 1.388, 1.387, 1.384, 1.377, 1.372, 1.368, 1.364, 1.362,
+ 1.369, 1.371, 1.372, 1.377, 1.383, 1.391, 1.394, 1.396, 1.397, 1.395, 1.391, 1.382, 1.374, 1.369, 1.362, 1.361,
+ 1.369, 1.371, 1.375, 1.383, 1.391, 1.399, 1.402, 1.404, 1.405, 1.403, 1.398, 1.391, 1.379, 1.371, 1.363, 1.361,
+ 1.371, 1.373, 1.378, 1.388, 1.399, 1.407, 1.411, 1.413, 1.413, 1.411, 1.405, 1.397, 1.385, 1.374, 1.366, 1.362,
+ 1.371, 1.374, 1.379, 1.389, 1.405, 1.411, 1.414, 1.414, 1.415, 1.415, 1.411, 1.401, 1.388, 1.376, 1.367, 1.363,
+ 1.371, 1.373, 1.379, 1.389, 1.405, 1.408, 1.413, 1.414, 1.414, 1.413, 1.409, 1.401, 1.388, 1.376, 1.367, 1.362,
+ 1.366, 1.369, 1.374, 1.384, 1.396, 1.404, 1.407, 1.408, 1.408, 1.408, 1.401, 1.395, 1.382, 1.371, 1.363, 1.359,
+ 1.364, 1.365, 1.368, 1.375, 1.386, 1.396, 1.399, 1.401, 1.399, 1.399, 1.395, 1.385, 1.374, 1.365, 1.359, 1.357,
+ 1.361, 1.363, 1.365, 1.368, 1.377, 1.384, 1.388, 1.391, 1.391, 1.388, 1.385, 1.375, 1.366, 1.361, 1.358, 1.356,
+ 1.361, 1.362, 1.362, 1.364, 1.367, 1.373, 1.376, 1.377, 1.377, 1.375, 1.373, 1.366, 1.362, 1.358, 1.358, 1.358,
+ 1.361, 1.362, 1.362, 1.362, 1.363, 1.367, 1.369, 1.368, 1.367, 1.367, 1.367, 1.364, 1.358, 1.357, 1.358, 1.359
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.716, 2.568, 2.299, 2.065, 1.845, 1.693, 1.605, 1.597, 1.596, 1.634, 1.738, 1.914, 2.145, 2.394, 2.719, 2.901,
+ 2.593, 2.357, 2.093, 1.876, 1.672, 1.528, 1.438, 1.393, 1.394, 1.459, 1.569, 1.731, 1.948, 2.169, 2.481, 2.756,
+ 2.439, 2.197, 1.922, 1.691, 1.521, 1.365, 1.266, 1.222, 1.224, 1.286, 1.395, 1.573, 1.747, 1.988, 2.299, 2.563,
+ 2.363, 2.081, 1.797, 1.563, 1.376, 1.244, 1.152, 1.099, 1.101, 1.158, 1.276, 1.421, 1.607, 1.851, 2.163, 2.455,
+ 2.342, 2.003, 1.715, 1.477, 1.282, 1.152, 1.074, 1.033, 1.035, 1.083, 1.163, 1.319, 1.516, 1.759, 2.064, 2.398,
+ 2.342, 1.985, 1.691, 1.446, 1.249, 1.111, 1.034, 1.004, 1.004, 1.028, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.342, 1.991, 1.691, 1.446, 1.249, 1.112, 1.034, 1.011, 1.005, 1.035, 1.114, 1.274, 1.472, 1.716, 2.019, 2.389,
+ 2.365, 2.052, 1.751, 1.499, 1.299, 1.171, 1.089, 1.039, 1.042, 1.084, 1.162, 1.312, 1.516, 1.761, 2.059, 2.393,
+ 2.434, 2.159, 1.856, 1.601, 1.403, 1.278, 1.166, 1.114, 1.114, 1.162, 1.266, 1.402, 1.608, 1.847, 2.146, 2.435,
+ 2.554, 2.306, 2.002, 1.748, 1.563, 1.396, 1.299, 1.247, 1.243, 1.279, 1.386, 1.551, 1.746, 1.977, 2.272, 2.518,
+ 2.756, 2.493, 2.195, 1.947, 1.739, 1.574, 1.481, 1.429, 1.421, 1.457, 1.559, 1.704, 1.929, 2.159, 2.442, 2.681,
+ 2.935, 2.739, 2.411, 2.151, 1.922, 1.749, 1.663, 1.628, 1.625, 1.635, 1.716, 1.872, 2.113, 2.368, 2.663, 2.824
+ ],
+ "sigma": 0.00381,
+ "sigma_Cb": 0.00216
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.58731, -0.18011, -0.40721,
+ -0.60639, 2.03422, -0.42782,
+ -0.19612, -1.69203, 2.88815
+ ]
+ },
+ {
+ "ct": 2811,
+ "ccm":
+ [
+ 1.61593, -0.33164, -0.28429,
+ -0.55048, 1.97779, -0.42731,
+ -0.12042, -1.42847, 2.54889
+ ]
+ },
+ {
+ "ct": 2911,
+ "ccm":
+ [
+ 1.62771, -0.41282, -0.21489,
+ -0.57991, 2.04176, -0.46186,
+ -0.07613, -1.13359, 2.20972
+ ]
+ },
+ {
+ "ct": 2919,
+ "ccm":
+ [
+ 1.62661, -0.37736, -0.24925,
+ -0.52519, 1.95233, -0.42714,
+ -0.10842, -1.34929, 2.45771
+ ]
+ },
+ {
+ "ct": 3627,
+ "ccm":
+ [
+ 1.70385, -0.57231, -0.13154,
+ -0.47763, 1.85998, -0.38235,
+ -0.07467, -0.82678, 1.90145
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.68486, -0.61085, -0.07402,
+ -0.41927, 2.04016, -0.62089,
+ -0.08633, -0.67672, 1.76305
+ ]
+ },
+ {
+ "ct": 5716,
+ "ccm":
+ [
+ 1.80439, -0.73699, -0.06739,
+ -0.36073, 1.83327, -0.47255,
+ -0.08378, -0.56403, 1.64781
+ ]
+ },
+ {
+ "ct": 8575,
+ "ccm":
+ [
+ 1.89357, -0.76427, -0.12931,
+ -0.27399, 2.15605, -0.88206,
+ -0.12035, -0.68256, 1.80292
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
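The "rpi.ccm" entries above pair a correlated colour temperature with a 3x3 matrix stored row-major; note that each row sums to roughly 1.0, so grey inputs stay grey. A rough illustration of blending two neighbouring matrices and applying the result to an RGB triple follows. Linear blending between the bracketing CTs is an assumption of this sketch, not something the JSON itself specifies.

```python
import numpy as np

# Two of the matrices from the imx219_noir.json "rpi.ccm" list above.
ccms = {
    2498: [1.58731, -0.18011, -0.40721,
           -0.60639, 2.03422, -0.42782,
           -0.19612, -1.69203, 2.88815],
    2811: [1.61593, -0.33164, -0.28429,
           -0.55048, 1.97779, -0.42731,
           -0.12042, -1.42847, 2.54889],
}

def ccm_at(ct, lo=2498, hi=2811):
    """Linearly blend the two bracketing matrices for a colour temperature."""
    t = (ct - lo) / (hi - lo)
    m = (1 - t) * np.asarray(ccms[lo]) + t * np.asarray(ccms[hi])
    return m.reshape(3, 3)  # row-major: each row maps [R, G, B] to one output channel

rgb = np.array([0.5, 0.4, 0.3])
print(ccm_at(2650) @ rgb)  # colour-corrected pixel at an intermediate CT
```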
diff --git a/src/ipa/rpi/vc4/data/imx283.json b/src/ipa/rpi/vc4/data/imx283.json
new file mode 100644
index 00000000..bfacecc8
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx283.json
@@ -0,0 +1,313 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3200
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2461,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1148,
+ "reference_Y": 13314
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.204
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 199,
+ "slope": 0.01947
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2213.0, 0.9607, 0.2593,
+ 5313.0, 0.4822, 0.5909,
+ 6237.0, 0.4739, 0.6308
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0144,
+ "transverse_neg": 0.01
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2213,
+ "ccm":
+ [
+ 1.91264, -0.27609, -0.63655,
+ -0.65708, 2.11718, -0.46009,
+ 0.03629, -1.38441, 2.34811
+ ]
+ },
+ {
+ "ct": 2255,
+ "ccm":
+ [
+ 1.90369, -0.29309, -0.61059,
+ -0.64693, 2.08169, -0.43476,
+ 0.04086, -1.29999, 2.25914
+ ]
+ },
+ {
+ "ct": 2259,
+ "ccm":
+ [
+ 1.92762, -0.35134, -0.57628,
+ -0.63523, 2.08481, -0.44958,
+ 0.06754, -1.32953, 2.26199
+ ]
+ },
+ {
+ "ct": 5313,
+ "ccm":
+ [
+ 1.75924, -0.54053, -0.21871,
+ -0.38159, 1.88671, -0.50511,
+ -0.00747, -0.53492, 1.54239
+ ]
+ },
+ {
+ "ct": 6237,
+ "ccm":
+ [
+ 2.19299, -0.74764, -0.44536,
+ -0.51678, 2.27651, -0.75972,
+ -0.06498, -0.74269, 1.80767
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
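The "rpi.alsc" calibration tables in the fuller tunings (imx219_noir.json above, for example) are 192-entry lists laid out as a 16x12 grid of per-zone lens-shading gains for one colour temperature; sparser tunings like imx283.json omit the tables and keep only the solver parameters. A small sketch that reads one of these files and reshapes the tables; the single-key-object layout of the "algorithms" list is taken directly from the files themselves.

```python
import json

import numpy as np

# Any of the fuller tunings added in this series, e.g. imx219_noir.json above.
with open("imx219_noir.json") as f:
    tuning = json.load(f)

# "algorithms" is a list of single-key objects; flatten it into one dict.
algos = {name: cfg for entry in tuning["algorithms"] for name, cfg in entry.items()}

for cal in algos["rpi.alsc"]["calibrations_Cr"]:
    table = np.asarray(cal["table"]).reshape(12, 16)  # 12 rows of 16 zones
    print(f'{cal["ct"]} K: centre gain {table[6, 8]:.3f}, corner gain {table[0, 0]:.3f}')
```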
diff --git a/src/ipa/rpi/vc4/data/imx290.json b/src/ipa/rpi/vc4/data/imx290.json
new file mode 100644
index 00000000..8f41bf51
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx290.json
@@ -0,0 +1,214 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx296.json b/src/ipa/rpi/vc4/data/imx296.json
new file mode 100644
index 00000000..8f24ce5b
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx296.json
@@ -0,0 +1,443 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 7598,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 14028
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.671
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.01058
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 7600
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 7600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2500.0, 0.5386, 0.2458,
+ 2800.0, 0.4883, 0.3303,
+ 2900.0, 0.4855, 0.3349,
+ 3620.0, 0.4203, 0.4367,
+ 4560.0, 0.3455, 0.5444,
+ 5600.0, 0.2948, 0.6124,
+ 7400.0, 0.2336, 0.6894
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03093,
+ "transverse_neg": 0.02374
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 30000, 45000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 12.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 8.0, 16.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.726, 2.736, 2.737, 2.739, 2.741, 2.741, 2.742, 2.742, 2.743, 2.743, 2.742, 2.742, 2.742, 2.742, 2.741, 2.739,
+ 2.728, 2.736, 2.739, 2.741, 2.742, 2.743, 2.744, 2.745, 2.746, 2.746, 2.745, 2.743, 2.742, 2.742, 2.742, 2.741,
+ 2.729, 2.737, 2.741, 2.744, 2.746, 2.747, 2.748, 2.749, 2.751, 2.751, 2.749, 2.746, 2.744, 2.743, 2.743, 2.743,
+ 2.729, 2.738, 2.743, 2.746, 2.749, 2.749, 2.751, 2.752, 2.753, 2.753, 2.752, 2.751, 2.746, 2.744, 2.744, 2.746,
+ 2.728, 2.737, 2.742, 2.746, 2.749, 2.751, 2.754, 2.755, 2.754, 2.755, 2.754, 2.751, 2.748, 2.746, 2.747, 2.748,
+ 2.724, 2.738, 2.742, 2.746, 2.749, 2.752, 2.755, 2.755, 2.755, 2.755, 2.754, 2.752, 2.749, 2.749, 2.748, 2.748,
+ 2.726, 2.738, 2.741, 2.745, 2.749, 2.753, 2.754, 2.755, 2.755, 2.755, 2.754, 2.753, 2.749, 2.748, 2.748, 2.748,
+ 2.726, 2.738, 2.741, 2.745, 2.746, 2.752, 2.753, 2.753, 2.753, 2.753, 2.754, 2.751, 2.748, 2.748, 2.746, 2.745,
+ 2.726, 2.736, 2.738, 2.742, 2.745, 2.749, 2.752, 2.753, 2.752, 2.752, 2.751, 2.749, 2.747, 2.745, 2.744, 2.742,
+ 2.724, 2.733, 2.736, 2.739, 2.742, 2.745, 2.748, 2.749, 2.749, 2.748, 2.748, 2.747, 2.744, 2.743, 2.742, 2.741,
+ 2.722, 2.726, 2.733, 2.735, 2.737, 2.741, 2.743, 2.744, 2.744, 2.744, 2.744, 2.742, 2.741, 2.741, 2.739, 2.737,
+ 2.719, 2.722, 2.727, 2.729, 2.731, 2.732, 2.734, 2.734, 2.735, 2.735, 2.735, 2.734, 2.733, 2.732, 2.732, 2.732
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 3.507, 3.522, 3.525, 3.527, 3.531, 3.533, 3.534, 3.535, 3.535, 3.536, 3.536, 3.537, 3.537, 3.538, 3.537, 3.536,
+ 3.511, 3.524, 3.528, 3.532, 3.533, 3.535, 3.537, 3.538, 3.538, 3.541, 3.539, 3.539, 3.539, 3.539, 3.538, 3.538,
+ 3.513, 3.528, 3.532, 3.535, 3.538, 3.542, 3.543, 3.546, 3.548, 3.551, 3.547, 3.543, 3.541, 3.541, 3.541, 3.541,
+ 3.513, 3.528, 3.533, 3.539, 3.544, 3.546, 3.548, 3.552, 3.553, 3.553, 3.552, 3.548, 3.543, 3.542, 3.542, 3.545,
+ 3.513, 3.528, 3.534, 3.541, 3.547, 3.549, 3.552, 3.553, 3.554, 3.554, 3.553, 3.549, 3.546, 3.544, 3.547, 3.549,
+ 3.508, 3.528, 3.533, 3.541, 3.548, 3.551, 3.553, 3.554, 3.555, 3.555, 3.555, 3.551, 3.548, 3.547, 3.549, 3.551,
+ 3.511, 3.529, 3.534, 3.541, 3.548, 3.551, 3.553, 3.555, 3.555, 3.555, 3.556, 3.554, 3.549, 3.548, 3.548, 3.548,
+ 3.511, 3.528, 3.533, 3.539, 3.546, 3.549, 3.553, 3.554, 3.554, 3.554, 3.554, 3.553, 3.549, 3.547, 3.547, 3.547,
+ 3.511, 3.527, 3.533, 3.536, 3.541, 3.547, 3.551, 3.553, 3.553, 3.552, 3.551, 3.551, 3.548, 3.544, 3.542, 3.543,
+ 3.507, 3.523, 3.528, 3.533, 3.538, 3.541, 3.546, 3.548, 3.549, 3.548, 3.548, 3.546, 3.542, 3.541, 3.541, 3.541,
+ 3.505, 3.514, 3.523, 3.527, 3.532, 3.537, 3.538, 3.544, 3.544, 3.544, 3.542, 3.541, 3.537, 3.537, 3.536, 3.535,
+ 3.503, 3.508, 3.515, 3.519, 3.521, 3.523, 3.524, 3.525, 3.526, 3.526, 3.527, 3.526, 3.524, 3.526, 3.527, 3.527
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.032, 2.037, 2.039, 2.041, 2.041, 2.042, 2.043, 2.044, 2.045, 2.045, 2.044, 2.043, 2.042, 2.041, 2.041, 2.034,
+ 2.032, 2.036, 2.039, 2.041, 2.042, 2.042, 2.043, 2.044, 2.045, 2.046, 2.045, 2.044, 2.042, 2.041, 2.039, 2.035,
+ 2.032, 2.036, 2.038, 2.041, 2.043, 2.044, 2.044, 2.045, 2.046, 2.047, 2.047, 2.045, 2.043, 2.042, 2.041, 2.037,
+ 2.032, 2.035, 2.039, 2.042, 2.043, 2.044, 2.045, 2.046, 2.048, 2.048, 2.047, 2.046, 2.045, 2.044, 2.042, 2.039,
+ 2.031, 2.034, 2.037, 2.039, 2.043, 2.045, 2.045, 2.046, 2.047, 2.047, 2.047, 2.046, 2.045, 2.044, 2.043, 2.039,
+ 2.029, 2.033, 2.036, 2.039, 2.042, 2.043, 2.045, 2.046, 2.046, 2.046, 2.046, 2.046, 2.046, 2.045, 2.044, 2.041,
+ 2.028, 2.032, 2.035, 2.039, 2.041, 2.043, 2.044, 2.045, 2.045, 2.046, 2.046, 2.046, 2.046, 2.045, 2.044, 2.039,
+ 2.027, 2.032, 2.035, 2.038, 2.039, 2.041, 2.044, 2.044, 2.044, 2.045, 2.046, 2.046, 2.046, 2.045, 2.044, 2.039,
+ 2.027, 2.031, 2.034, 2.035, 2.037, 2.039, 2.042, 2.043, 2.044, 2.045, 2.045, 2.046, 2.045, 2.044, 2.043, 2.038,
+ 2.025, 2.028, 2.032, 2.034, 2.036, 2.037, 2.041, 2.042, 2.043, 2.044, 2.044, 2.044, 2.044, 2.043, 2.041, 2.036,
+ 2.024, 2.026, 2.029, 2.032, 2.034, 2.036, 2.038, 2.041, 2.041, 2.042, 2.043, 2.042, 2.041, 2.041, 2.037, 2.036,
+ 2.022, 2.024, 2.027, 2.029, 2.032, 2.034, 2.036, 2.039, 2.039, 2.039, 2.041, 2.039, 2.039, 2.038, 2.036, 2.034
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.585, 1.587, 1.589, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.588, 1.588, 1.587, 1.581,
+ 1.585, 1.587, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.588, 1.588, 1.587, 1.582,
+ 1.585, 1.586, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.592, 1.592, 1.591, 1.591, 1.589, 1.588, 1.587, 1.584,
+ 1.585, 1.586, 1.588, 1.589, 1.591, 1.592, 1.592, 1.592, 1.593, 1.593, 1.592, 1.591, 1.589, 1.589, 1.588, 1.586,
+ 1.584, 1.586, 1.587, 1.589, 1.591, 1.591, 1.592, 1.592, 1.592, 1.592, 1.591, 1.591, 1.591, 1.589, 1.589, 1.586,
+ 1.583, 1.585, 1.587, 1.588, 1.589, 1.591, 1.591, 1.592, 1.592, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.586,
+ 1.583, 1.584, 1.586, 1.588, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.585,
+ 1.581, 1.584, 1.586, 1.587, 1.588, 1.588, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.585,
+ 1.581, 1.583, 1.584, 1.586, 1.587, 1.588, 1.589, 1.589, 1.591, 1.591, 1.591, 1.591, 1.591, 1.589, 1.589, 1.585,
+ 1.579, 1.581, 1.583, 1.584, 1.586, 1.586, 1.588, 1.589, 1.589, 1.589, 1.589, 1.589, 1.589, 1.589, 1.587, 1.584,
+ 1.578, 1.579, 1.581, 1.583, 1.584, 1.585, 1.586, 1.587, 1.588, 1.588, 1.588, 1.588, 1.588, 1.587, 1.585, 1.583,
+ 1.577, 1.578, 1.579, 1.582, 1.583, 1.584, 1.585, 1.586, 1.586, 1.587, 1.587, 1.587, 1.586, 1.586, 1.584, 1.583
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.112, 1.098, 1.078, 1.062, 1.049, 1.039, 1.031, 1.027, 1.026, 1.027, 1.034, 1.043, 1.054, 1.069, 1.087, 1.096,
+ 1.106, 1.091, 1.073, 1.056, 1.042, 1.032, 1.025, 1.021, 1.021, 1.022, 1.027, 1.036, 1.047, 1.061, 1.077, 1.088,
+ 1.101, 1.085, 1.066, 1.049, 1.035, 1.026, 1.019, 1.013, 1.013, 1.015, 1.021, 1.028, 1.039, 1.052, 1.069, 1.083,
+ 1.098, 1.081, 1.059, 1.045, 1.031, 1.021, 1.013, 1.007, 1.007, 1.009, 1.014, 1.021, 1.033, 1.046, 1.063, 1.081,
+ 1.097, 1.076, 1.057, 1.041, 1.027, 1.016, 1.007, 1.004, 1.002, 1.005, 1.009, 1.017, 1.028, 1.043, 1.061, 1.077,
+ 1.096, 1.075, 1.054, 1.039, 1.025, 1.014, 1.005, 1.001, 1.001, 1.002, 1.006, 1.015, 1.027, 1.041, 1.058, 1.076,
+ 1.096, 1.074, 1.054, 1.039, 1.025, 1.013, 1.005, 1.001, 1.001, 1.001, 1.006, 1.015, 1.026, 1.041, 1.058, 1.076,
+ 1.096, 1.075, 1.056, 1.041, 1.026, 1.014, 1.007, 1.003, 1.002, 1.004, 1.008, 1.016, 1.028, 1.041, 1.059, 1.076,
+ 1.096, 1.079, 1.059, 1.044, 1.029, 1.018, 1.011, 1.007, 1.005, 1.008, 1.012, 1.019, 1.031, 1.044, 1.061, 1.077,
+ 1.101, 1.084, 1.065, 1.049, 1.035, 1.024, 1.017, 1.011, 1.011, 1.012, 1.018, 1.025, 1.036, 1.051, 1.068, 1.081,
+ 1.106, 1.092, 1.072, 1.055, 1.042, 1.033, 1.024, 1.019, 1.018, 1.019, 1.025, 1.032, 1.044, 1.058, 1.076, 1.088,
+ 1.113, 1.097, 1.079, 1.063, 1.049, 1.039, 1.031, 1.025, 1.025, 1.025, 1.031, 1.039, 1.051, 1.065, 1.083, 1.094
+ ],
+ "sigma": 0.00047,
+ "sigma_Cb": 0.00056
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2500,
+ "ccm":
+ [
+ 1.95054, -0.57435, -0.37619,
+ -0.46945, 1.86661, -0.39716,
+ 0.07977, -1.14072, 2.06095
+ ]
+ },
+ {
+ "ct": 2800,
+ "ccm":
+ [
+ 1.94104, -0.60261, -0.33844,
+ -0.43162, 1.85422, -0.42261,
+ 0.03799, -0.95022, 1.91222
+ ]
+ },
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.91828, -0.59569, -0.32258,
+ -0.51902, 2.09091, -0.57189,
+ -0.03324, -0.73462, 1.76785
+ ]
+ },
+ {
+ "ct": 3620,
+ "ccm":
+ [
+ 1.97199, -0.66403, -0.30797,
+ -0.46411, 2.02612, -0.56201,
+ -0.07764, -0.61178, 1.68942
+ ]
+ },
+ {
+ "ct": 4560,
+ "ccm":
+ [
+ 2.15256, -0.84787, -0.30469,
+ -0.48422, 2.28962, -0.80541,
+ -0.15113, -0.53014, 1.68127
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 2.04576, -0.74771, -0.29805,
+ -0.36332, 1.98993, -0.62662,
+ -0.09328, -0.46543, 1.55871
+ ]
+ },
+ {
+ "ct": 7400,
+ "ccm":
+ [
+ 2.37532, -0.83069, -0.54462,
+ -0.48279, 2.84309, -1.36031,
+ -0.21178, -0.66532, 1.87709
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 0.1,
+ "strength": 1.0,
+ "limit": 0.18
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx296_mono.json b/src/ipa/rpi/vc4/data/imx296_mono.json
new file mode 100644
index 00000000..fe331569
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx296_mono.json
@@ -0,0 +1,240 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9998,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 823,
+ "reference_Y": 12396
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.753
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 0,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.308, 1.293, 1.228, 1.175, 1.139, 1.108, 1.092, 1.082, 1.082, 1.086, 1.097, 1.114, 1.149, 1.199, 1.279, 1.303,
+ 1.293, 1.249, 1.199, 1.162, 1.136, 1.109, 1.087, 1.077, 1.072, 1.081, 1.095, 1.103, 1.133, 1.172, 1.225, 1.282,
+ 1.251, 1.212, 1.186, 1.159, 1.129, 1.114, 1.102, 1.088, 1.088, 1.088, 1.095, 1.117, 1.123, 1.158, 1.198, 1.249,
+ 1.223, 1.192, 1.177, 1.163, 1.147, 1.139, 1.132, 1.112, 1.111, 1.107, 1.113, 1.118, 1.139, 1.155, 1.186, 1.232,
+ 1.207, 1.186, 1.171, 1.162, 1.168, 1.163, 1.153, 1.138, 1.129, 1.128, 1.132, 1.136, 1.149, 1.167, 1.189, 1.216,
+ 1.198, 1.186, 1.176, 1.176, 1.177, 1.185, 1.171, 1.157, 1.146, 1.144, 1.146, 1.149, 1.161, 1.181, 1.201, 1.221,
+ 1.203, 1.181, 1.176, 1.178, 1.191, 1.189, 1.188, 1.174, 1.159, 1.153, 1.158, 1.161, 1.169, 1.185, 1.211, 1.227,
+ 1.211, 1.179, 1.177, 1.187, 1.194, 1.196, 1.194, 1.187, 1.176, 1.169, 1.171, 1.171, 1.175, 1.189, 1.214, 1.226,
+ 1.219, 1.182, 1.184, 1.191, 1.195, 1.199, 1.197, 1.194, 1.188, 1.185, 1.179, 1.179, 1.182, 1.194, 1.212, 1.227,
+ 1.237, 1.192, 1.194, 1.194, 1.198, 1.199, 1.198, 1.197, 1.196, 1.193, 1.189, 1.189, 1.192, 1.203, 1.214, 1.231,
+ 1.282, 1.199, 1.199, 1.197, 1.199, 1.199, 1.192, 1.193, 1.193, 1.194, 1.196, 1.197, 1.206, 1.216, 1.228, 1.244,
+ 1.309, 1.236, 1.204, 1.203, 1.202, 1.194, 1.194, 1.188, 1.192, 1.192, 1.199, 1.201, 1.212, 1.221, 1.235, 1.247
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 0.1,
+ "strength": 1.0,
+ "limit": 0.18
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/imx327.json b/src/ipa/rpi/vc4/data/imx327.json
new file mode 100644
index 00000000..40a56842
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx327.json
@@ -0,0 +1,215 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "description": "This is an interim tuning only. Please consider doing a more formal tuning for your application.",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+} \ No newline at end of file
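
Editorial note: every tuning file in this series shares the same top-level shape, visible in the imx327.json hunk above: a "version", a "target", and an "algorithms" list whose entries are single-key objects ("rpi.black_level", "rpi.agc", ...) in the order the IPA runs them. A minimal sketch for inspecting that layout, assuming only the JSON structure shown above (the helper name and CLI wrapper are hypothetical):

```python
#!/usr/bin/env python3
# List the algorithms in a Raspberry Pi tuning file in listed order.
# Assumes only the layout visible in the diff above: "version",
# "target", and "algorithms" as a list of single-key objects.
import json
import sys

def list_algorithms(path):
    with open(path) as f:
        tuning = json.load(f)
    print(f"{path}: version {tuning['version']}, target {tuning['target']}")
    for entry in tuning["algorithms"]:
        # Each list entry is an object with exactly one "rpi.*" key.
        (name, params), = entry.items()
        print(f"  {name}: {len(params)} parameter(s)")

if __name__ == "__main__":
    for path in sys.argv[1:]:
        list_algorithms(path)
```
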
diff --git a/src/ipa/rpi/vc4/data/imx378.json b/src/ipa/rpi/vc4/data/imx378.json
new file mode 100644
index 00000000..363b47e1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx378.json
@@ -0,0 +1,427 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9999,
+ "reference_gain": 1.95,
+ "reference_aperture": 1.0,
+ "reference_lux": 1000,
+ "reference_Y": 12996
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.641
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 235,
+ "slope": 0.00902
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8100
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2850.0, 0.6361, 0.3911,
+ 3550.0, 0.5386, 0.5077,
+ 4500.0, 0.4472, 0.6171,
+ 5600.0, 0.3906, 0.6848,
+ 8000.0, 0.3412, 0.7441
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.01667,
+ "transverse_neg": 0.01195
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2800,
+ "table":
+ [
+ 1.604, 1.601, 1.593, 1.581, 1.568, 1.561, 1.561, 1.561, 1.561, 1.567, 1.582, 1.596, 1.609, 1.622, 1.632, 1.636,
+ 1.601, 1.594, 1.586, 1.571, 1.555, 1.546, 1.543, 1.543, 1.547, 1.555, 1.572, 1.584, 1.599, 1.614, 1.625, 1.632,
+ 1.599, 1.586, 1.571, 1.555, 1.542, 1.528, 1.518, 1.518, 1.523, 1.537, 1.555, 1.572, 1.589, 1.607, 1.622, 1.629,
+ 1.597, 1.579, 1.561, 1.542, 1.528, 1.512, 1.493, 1.493, 1.499, 1.523, 1.537, 1.563, 1.582, 1.601, 1.619, 1.629,
+ 1.597, 1.577, 1.557, 1.535, 1.512, 1.493, 1.481, 1.479, 1.492, 1.499, 1.524, 1.555, 1.578, 1.599, 1.619, 1.629,
+ 1.597, 1.577, 1.557, 1.534, 1.508, 1.483, 1.476, 1.476, 1.481, 1.496, 1.522, 1.554, 1.578, 1.599, 1.619, 1.629,
+ 1.597, 1.578, 1.557, 1.534, 1.508, 1.483, 1.481, 1.479, 1.481, 1.496, 1.522, 1.554, 1.579, 1.601, 1.619, 1.631,
+ 1.597, 1.581, 1.562, 1.539, 1.517, 1.504, 1.483, 1.481, 1.496, 1.511, 1.531, 1.561, 1.585, 1.607, 1.623, 1.632,
+ 1.601, 1.589, 1.569, 1.554, 1.539, 1.517, 1.504, 1.504, 1.511, 1.531, 1.553, 1.573, 1.596, 1.614, 1.629, 1.636,
+ 1.609, 1.601, 1.586, 1.569, 1.554, 1.542, 1.535, 1.535, 1.541, 1.553, 1.573, 1.592, 1.608, 1.625, 1.637, 1.645,
+ 1.617, 1.611, 1.601, 1.586, 1.574, 1.565, 1.564, 1.564, 1.571, 1.579, 1.592, 1.608, 1.622, 1.637, 1.646, 1.654,
+ 1.619, 1.617, 1.611, 1.601, 1.588, 1.585, 1.585, 1.585, 1.588, 1.592, 1.607, 1.622, 1.637, 1.645, 1.654, 1.655
+ ]
+ },
+ {
+ "ct": 5500,
+ "table":
+ [
+ 2.664, 2.658, 2.645, 2.629, 2.602, 2.602, 2.602, 2.606, 2.617, 2.628, 2.649, 2.677, 2.699, 2.722, 2.736, 2.747,
+ 2.658, 2.653, 2.629, 2.605, 2.576, 2.575, 2.577, 2.592, 2.606, 2.618, 2.629, 2.651, 2.678, 2.707, 2.727, 2.741,
+ 2.649, 2.631, 2.605, 2.576, 2.563, 2.552, 2.552, 2.557, 2.577, 2.604, 2.619, 2.641, 2.669, 2.698, 2.721, 2.741,
+ 2.643, 2.613, 2.583, 2.563, 2.552, 2.531, 2.527, 2.527, 2.551, 2.577, 2.604, 2.638, 2.665, 2.694, 2.721, 2.741,
+ 2.643, 2.606, 2.575, 2.558, 2.531, 2.516, 2.504, 2.516, 2.527, 2.551, 2.596, 2.635, 2.665, 2.694, 2.721, 2.741,
+ 2.643, 2.606, 2.575, 2.558, 2.531, 2.503, 2.501, 2.502, 2.522, 2.551, 2.592, 2.635, 2.669, 2.696, 2.727, 2.744,
+ 2.648, 2.611, 2.579, 2.558, 2.532, 2.511, 2.502, 2.511, 2.522, 2.552, 2.592, 2.642, 2.673, 2.702, 2.731, 2.752,
+ 2.648, 2.619, 2.589, 2.571, 2.556, 2.532, 2.519, 2.522, 2.552, 2.568, 2.605, 2.648, 2.683, 2.715, 2.743, 2.758,
+ 2.659, 2.637, 2.613, 2.589, 2.571, 2.556, 2.555, 2.555, 2.568, 2.605, 2.641, 2.671, 2.699, 2.729, 2.758, 2.776,
+ 2.679, 2.665, 2.637, 2.613, 2.602, 2.599, 2.599, 2.606, 2.619, 2.641, 2.671, 2.698, 2.723, 2.754, 2.776, 2.787,
+ 2.695, 2.684, 2.671, 2.646, 2.636, 2.636, 2.641, 2.648, 2.661, 2.681, 2.698, 2.723, 2.751, 2.776, 2.788, 2.803,
+ 2.702, 2.699, 2.684, 2.671, 2.664, 2.664, 2.664, 2.668, 2.681, 2.698, 2.723, 2.751, 2.773, 2.788, 2.803, 2.805
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2800,
+ "table":
+ [
+ 2.876, 2.868, 2.863, 2.851, 2.846, 2.846, 2.847, 2.851, 2.851, 2.857, 2.867, 2.875, 2.889, 2.899, 2.913, 2.926,
+ 2.863, 2.861, 2.856, 2.846, 2.846, 2.847, 2.848, 2.851, 2.857, 2.859, 2.875, 2.882, 2.886, 2.896, 2.909, 2.917,
+ 2.861, 2.856, 2.846, 2.841, 2.841, 2.855, 2.867, 2.875, 2.888, 2.888, 2.885, 2.883, 2.886, 2.889, 2.901, 2.913,
+ 2.858, 2.851, 2.846, 2.846, 2.855, 2.867, 2.884, 2.895, 2.902, 2.902, 2.901, 2.891, 2.891, 2.894, 2.901, 2.909,
+ 2.858, 2.851, 2.846, 2.846, 2.867, 2.884, 2.895, 2.902, 2.909, 2.915, 2.911, 2.901, 2.895, 2.898, 2.904, 2.909,
+ 2.858, 2.851, 2.849, 2.853, 2.874, 2.888, 2.901, 2.909, 2.917, 2.922, 2.917, 2.911, 2.901, 2.899, 2.905, 2.908,
+ 2.861, 2.855, 2.853, 2.855, 2.874, 2.888, 2.901, 2.913, 2.918, 2.922, 2.921, 2.911, 2.901, 2.901, 2.907, 2.908,
+ 2.862, 2.859, 2.855, 2.856, 2.872, 2.885, 2.899, 2.906, 2.915, 2.917, 2.911, 2.907, 2.907, 2.907, 2.908, 2.909,
+ 2.863, 2.863, 2.859, 2.864, 2.871, 2.881, 2.885, 2.899, 2.905, 2.905, 2.904, 2.904, 2.907, 2.909, 2.913, 2.913,
+ 2.866, 2.865, 2.865, 2.867, 2.868, 2.872, 2.881, 2.885, 2.889, 2.894, 2.895, 2.902, 2.906, 2.913, 2.914, 2.917,
+ 2.875, 2.875, 2.871, 2.871, 2.871, 2.871, 2.869, 2.869, 2.878, 2.889, 2.894, 2.895, 2.906, 2.914, 2.917, 2.921,
+ 2.882, 2.879, 2.876, 2.874, 2.871, 2.871, 2.869, 2.869, 2.869, 2.878, 2.891, 2.894, 2.905, 2.914, 2.919, 2.921
+ ]
+ },
+ {
+ "ct": 5500,
+ "table":
+ [
+ 1.488, 1.488, 1.488, 1.488, 1.491, 1.492, 1.492, 1.491, 1.491, 1.491, 1.492, 1.495, 1.497, 1.499, 1.499, 1.503,
+ 1.482, 1.485, 1.485, 1.487, 1.489, 1.492, 1.492, 1.492, 1.492, 1.492, 1.494, 1.494, 1.492, 1.491, 1.493, 1.494,
+ 1.482, 1.482, 1.484, 1.485, 1.487, 1.492, 1.496, 1.498, 1.499, 1.498, 1.494, 1.492, 1.491, 1.491, 1.491, 1.491,
+ 1.481, 1.481, 1.482, 1.485, 1.491, 1.496, 1.498, 1.499, 1.501, 1.499, 1.498, 1.493, 1.491, 1.488, 1.488, 1.488,
+ 1.481, 1.481, 1.481, 1.483, 1.491, 1.497, 1.498, 1.499, 1.501, 1.499, 1.498, 1.492, 1.488, 1.485, 1.483, 1.483,
+ 1.479, 1.479, 1.481, 1.482, 1.489, 1.495, 1.497, 1.498, 1.499, 1.499, 1.495, 1.492, 1.485, 1.482, 1.482, 1.481,
+ 1.479, 1.479, 1.479, 1.481, 1.489, 1.494, 1.496, 1.497, 1.497, 1.496, 1.495, 1.489, 1.482, 1.481, 1.479, 1.477,
+ 1.478, 1.478, 1.479, 1.481, 1.487, 1.491, 1.494, 1.496, 1.496, 1.495, 1.492, 1.487, 1.482, 1.479, 1.478, 1.476,
+ 1.478, 1.478, 1.479, 1.482, 1.486, 1.488, 1.491, 1.493, 1.493, 1.492, 1.487, 1.484, 1.481, 1.479, 1.476, 1.476,
+ 1.477, 1.479, 1.481, 1.483, 1.485, 1.486, 1.488, 1.488, 1.487, 1.487, 1.484, 1.483, 1.481, 1.479, 1.476, 1.476,
+ 1.477, 1.479, 1.482, 1.483, 1.484, 1.485, 1.484, 1.482, 1.482, 1.484, 1.483, 1.482, 1.481, 1.479, 1.477, 1.476,
+ 1.477, 1.479, 1.482, 1.483, 1.484, 1.484, 1.482, 1.482, 1.482, 1.482, 1.482, 1.481, 1.479, 1.479, 1.479, 1.479
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.764, 2.654, 2.321, 2.043, 1.768, 1.594, 1.558, 1.558, 1.558, 1.568, 1.661, 1.904, 2.193, 2.497, 2.888, 3.043,
+ 2.654, 2.373, 2.049, 1.819, 1.569, 1.446, 1.381, 1.356, 1.356, 1.403, 1.501, 1.679, 1.939, 2.218, 2.586, 2.888,
+ 2.376, 2.154, 1.819, 1.569, 1.438, 1.301, 1.246, 1.224, 1.224, 1.263, 1.349, 1.501, 1.679, 1.985, 2.359, 2.609,
+ 2.267, 1.987, 1.662, 1.438, 1.301, 1.235, 1.132, 1.105, 1.105, 1.164, 1.263, 1.349, 1.528, 1.808, 2.184, 2.491,
+ 2.218, 1.876, 1.568, 1.367, 1.235, 1.132, 1.087, 1.022, 1.023, 1.104, 1.164, 1.278, 1.439, 1.695, 2.066, 2.429,
+ 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.013, 1.002, 1.013, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
+ 2.218, 1.832, 1.533, 1.341, 1.206, 1.089, 1.011, 1.001, 1.009, 1.026, 1.122, 1.246, 1.399, 1.642, 2.004, 2.426,
+ 2.224, 1.896, 1.584, 1.382, 1.248, 1.147, 1.088, 1.016, 1.026, 1.118, 1.168, 1.283, 1.444, 1.697, 2.066, 2.428,
+ 2.292, 2.019, 1.689, 1.462, 1.322, 1.247, 1.147, 1.118, 1.118, 1.168, 1.275, 1.358, 1.532, 1.809, 2.189, 2.491,
+ 2.444, 2.204, 1.856, 1.606, 1.462, 1.322, 1.257, 1.234, 1.234, 1.275, 1.358, 1.516, 1.686, 1.993, 2.371, 2.622,
+ 2.748, 2.444, 2.108, 1.856, 1.606, 1.476, 1.399, 1.376, 1.376, 1.422, 1.516, 1.686, 1.968, 2.238, 2.611, 2.935,
+ 2.862, 2.748, 2.395, 2.099, 1.811, 1.621, 1.582, 1.582, 1.582, 1.592, 1.677, 1.919, 2.223, 2.534, 2.935, 3.078
+ ],
+ "sigma": 0.00428,
+ "sigma_Cb": 0.00363
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2850,
+ "ccm":
+ [
+ 1.42601, -0.20537, -0.22063,
+ -0.47682, 1.81987, -0.34305,
+ 0.01854, -0.86036, 1.84181
+ ]
+ },
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.29755, 0.04602, -0.34356,
+ -0.41491, 1.73477, -0.31987,
+ -0.01345, -0.97115, 1.98459
+ ]
+ },
+ {
+ "ct": 3550,
+ "ccm":
+ [
+ 1.49811, -0.33412, -0.16398,
+ -0.40869, 1.72995, -0.32127,
+ -0.01924, -0.62181, 1.64105
+ ]
+ },
+ {
+ "ct": 4500,
+ "ccm":
+ [
+ 1.47015, -0.29229, -0.17786,
+ -0.36561, 1.88919, -0.52358,
+ -0.03552, -0.56717, 1.60269
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 1.60962, -0.47434, -0.13528,
+ -0.32701, 1.73797, -0.41096,
+ -0.07626, -0.40171, 1.47796
+ ]
+ },
+ {
+ "ct": 8000,
+ "ccm":
+ [
+ 1.54642, -0.20396, -0.34246,
+ -0.31748, 2.22559, -0.90811,
+ -0.10035, -0.65877, 1.75912
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+} \ No newline at end of file
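
Editorial note: the imx378.json "rpi.ccm" block above lists one 3x3 matrix per calibrated colour temperature ("ct"). A sketch of how such a table could be evaluated at an arbitrary colour temperature, clamping outside the calibrated range and interpolating linearly between neighbours; the interpolation scheme is an assumption for illustration, not a statement of what the rpi.ccm implementation does:

```python
import bisect

def ccm_at(ccms, ct):
    """ccms: list of {"ct": kelvin, "ccm": [9 floats]} sorted by "ct"."""
    cts = [e["ct"] for e in ccms]
    if ct <= cts[0]:
        return ccms[0]["ccm"]
    if ct >= cts[-1]:
        return ccms[-1]["ccm"]
    # Find the bracketing entries and blend element-wise.
    i = bisect.bisect_left(cts, ct)
    lo, hi = ccms[i - 1], ccms[i]
    t = (ct - lo["ct"]) / (hi["ct"] - lo["ct"])
    return [(1 - t) * a + t * b for a, b in zip(lo["ccm"], hi["ccm"])]
```
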
diff --git a/src/ipa/rpi/vc4/data/imx415.json b/src/ipa/rpi/vc4/data/imx415.json
new file mode 100755
index 00000000..6ed16b17
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx415.json
@@ -0,0 +1,413 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 19230,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1198,
+ "reference_Y": 14876
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 17,
+ "reference_slope": 3.439
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 193,
+ "slope": 0.00902
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2698.0, 0.7681, 0.2026,
+ 2930.0, 0.7515, 0.2116,
+ 3643.0, 0.6355, 0.2858,
+ 4605.0, 0.4992, 0.4041,
+ 5658.0, 0.4498, 0.4574
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0112,
+ "transverse_neg": 0.01424
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.025, 1.016, 1.013, 1.011, 1.008, 1.005, 1.003, 1.001, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.027, 1.035,
+ 1.025, 1.017, 1.013, 1.011, 1.008, 1.005, 1.003, 1.003, 1.004, 1.005, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.022, 1.017, 1.013, 1.009, 1.007, 1.005, 1.003, 1.003, 1.004, 1.006, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.019, 1.015, 1.011, 1.007, 1.005, 1.003, 1.001, 1.001, 1.003, 1.004, 1.007, 1.009, 1.015, 1.022, 1.028, 1.035,
+ 1.018, 1.014, 1.009, 1.006, 1.004, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.021, 1.028, 1.035,
+ 1.018, 1.013, 1.011, 1.006, 1.003, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.022, 1.028, 1.036,
+ 1.018, 1.014, 1.011, 1.007, 1.004, 1.002, 1.001, 1.001, 1.001, 1.004, 1.007, 1.009, 1.015, 1.023, 1.029, 1.036,
+ 1.019, 1.014, 1.012, 1.008, 1.005, 1.003, 1.002, 1.001, 1.003, 1.005, 1.008, 1.012, 1.016, 1.024, 1.031, 1.037,
+ 1.021, 1.016, 1.013, 1.009, 1.008, 1.005, 1.003, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.026, 1.033, 1.039,
+ 1.025, 1.021, 1.016, 1.013, 1.009, 1.008, 1.006, 1.006, 1.008, 1.011, 1.014, 1.019, 1.024, 1.031, 1.038, 1.046,
+ 1.029, 1.025, 1.021, 1.018, 1.014, 1.013, 1.011, 1.011, 1.012, 1.015, 1.019, 1.023, 1.028, 1.035, 1.046, 1.051,
+ 1.032, 1.029, 1.023, 1.021, 1.018, 1.015, 1.014, 1.014, 1.015, 1.018, 1.022, 1.027, 1.033, 1.041, 1.051, 1.054
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.025, 1.011, 1.009, 1.005, 1.004, 1.003, 1.001, 1.001, 1.002, 1.006, 1.009, 1.012, 1.016, 1.021, 1.031, 1.041,
+ 1.025, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.004, 1.007, 1.009, 1.013, 1.021, 1.028, 1.037, 1.041,
+ 1.023, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.005, 1.007, 1.011, 1.014, 1.021, 1.028, 1.037, 1.048,
+ 1.022, 1.012, 1.007, 1.005, 1.002, 1.001, 1.001, 1.001, 1.003, 1.005, 1.009, 1.014, 1.019, 1.028, 1.039, 1.048,
+ 1.022, 1.011, 1.006, 1.003, 1.001, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.014, 1.021, 1.029, 1.039, 1.051,
+ 1.022, 1.012, 1.007, 1.003, 1.002, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.015, 1.021, 1.031, 1.041, 1.053,
+ 1.023, 1.013, 1.009, 1.005, 1.003, 1.003, 1.001, 1.002, 1.004, 1.006, 1.011, 1.015, 1.022, 1.031, 1.042, 1.056,
+ 1.024, 1.015, 1.012, 1.008, 1.005, 1.004, 1.004, 1.004, 1.006, 1.009, 1.013, 1.018, 1.024, 1.034, 1.045, 1.057,
+ 1.027, 1.017, 1.015, 1.012, 1.009, 1.007, 1.007, 1.008, 1.009, 1.013, 1.018, 1.023, 1.029, 1.038, 1.051, 1.061,
+ 1.029, 1.023, 1.017, 1.015, 1.014, 1.012, 1.011, 1.011, 1.014, 1.018, 1.024, 1.029, 1.036, 1.044, 1.056, 1.066,
+ 1.034, 1.028, 1.023, 1.022, 1.019, 1.019, 1.018, 1.018, 1.021, 1.025, 1.031, 1.035, 1.042, 1.053, 1.066, 1.074,
+ 1.041, 1.034, 1.027, 1.025, 1.025, 1.023, 1.023, 1.023, 1.025, 1.031, 1.035, 1.041, 1.049, 1.059, 1.074, 1.079
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.001, 1.001, 1.007, 1.015, 1.027, 1.034, 1.038, 1.041, 1.042, 1.043, 1.043, 1.043, 1.041, 1.039, 1.049, 1.054,
+ 1.011, 1.011, 1.013, 1.023, 1.032, 1.039, 1.044, 1.047, 1.052, 1.056, 1.059, 1.059, 1.055, 1.051, 1.054, 1.056,
+ 1.015, 1.015, 1.019, 1.032, 1.039, 1.044, 1.047, 1.052, 1.055, 1.059, 1.061, 1.066, 1.063, 1.058, 1.061, 1.064,
+ 1.016, 1.017, 1.023, 1.032, 1.041, 1.045, 1.048, 1.053, 1.056, 1.061, 1.066, 1.069, 1.067, 1.064, 1.065, 1.068,
+ 1.018, 1.019, 1.025, 1.033, 1.042, 1.045, 1.049, 1.054, 1.058, 1.063, 1.071, 1.072, 1.071, 1.068, 1.069, 1.071,
+ 1.023, 1.024, 1.029, 1.035, 1.043, 1.048, 1.052, 1.057, 1.061, 1.065, 1.074, 1.075, 1.075, 1.072, 1.072, 1.075,
+ 1.027, 1.028, 1.031, 1.038, 1.045, 1.051, 1.054, 1.059, 1.064, 1.068, 1.075, 1.079, 1.078, 1.075, 1.076, 1.081,
+ 1.029, 1.031, 1.033, 1.044, 1.048, 1.054, 1.059, 1.064, 1.067, 1.073, 1.079, 1.082, 1.082, 1.079, 1.081, 1.085,
+ 1.033, 1.033, 1.035, 1.047, 1.053, 1.058, 1.064, 1.067, 1.073, 1.079, 1.084, 1.086, 1.086, 1.084, 1.089, 1.091,
+ 1.037, 1.037, 1.038, 1.049, 1.057, 1.062, 1.068, 1.073, 1.079, 1.084, 1.089, 1.092, 1.092, 1.092, 1.096, 1.104,
+ 1.041, 1.041, 1.043, 1.051, 1.061, 1.068, 1.073, 1.079, 1.083, 1.089, 1.092, 1.094, 1.097, 1.099, 1.105, 1.115,
+ 1.048, 1.044, 1.044, 1.051, 1.063, 1.071, 1.076, 1.082, 1.088, 1.091, 1.094, 1.097, 1.099, 1.104, 1.115, 1.126
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.001, 1.001, 1.005, 1.011, 1.014, 1.018, 1.019, 1.019, 1.019, 1.021, 1.021, 1.021, 1.019, 1.017, 1.014, 1.014,
+ 1.009, 1.009, 1.011, 1.014, 1.019, 1.024, 1.026, 1.029, 1.031, 1.032, 1.032, 1.031, 1.027, 1.023, 1.022, 1.022,
+ 1.011, 1.012, 1.015, 1.018, 1.024, 1.026, 1.029, 1.032, 1.035, 1.036, 1.036, 1.034, 1.031, 1.027, 1.025, 1.025,
+ 1.012, 1.013, 1.015, 1.019, 1.025, 1.029, 1.032, 1.035, 1.036, 1.038, 1.038, 1.036, 1.034, 1.029, 1.026, 1.026,
+ 1.013, 1.014, 1.016, 1.019, 1.027, 1.031, 1.034, 1.037, 1.039, 1.039, 1.041, 1.039, 1.036, 1.031, 1.028, 1.027,
+ 1.014, 1.014, 1.017, 1.021, 1.027, 1.033, 1.037, 1.039, 1.041, 1.041, 1.042, 1.042, 1.039, 1.033, 1.029, 1.028,
+ 1.015, 1.015, 1.018, 1.021, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.042, 1.042, 1.039, 1.034, 1.029, 1.029,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.043, 1.043, 1.041, 1.035, 1.031, 1.031,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.032, 1.037, 1.041, 1.042, 1.042, 1.044, 1.043, 1.041, 1.036, 1.034, 1.033,
+ 1.016, 1.017, 1.017, 1.022, 1.027, 1.032, 1.036, 1.039, 1.042, 1.042, 1.043, 1.043, 1.041, 1.039, 1.036, 1.034,
+ 1.017, 1.017, 1.018, 1.022, 1.027, 1.031, 1.035, 1.039, 1.041, 1.042, 1.042, 1.042, 1.042, 1.039, 1.039, 1.039,
+ 1.018, 1.017, 1.017, 1.021, 1.027, 1.031, 1.033, 1.038, 1.041, 1.041, 1.042, 1.042, 1.041, 1.041, 1.041, 1.041
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.102, 1.903, 1.658, 1.483, 1.358, 1.267, 1.202, 1.202, 1.202, 1.242, 1.323, 1.431, 1.585, 1.797, 2.096, 2.351,
+ 1.996, 1.776, 1.549, 1.385, 1.273, 1.204, 1.138, 1.133, 1.133, 1.185, 1.252, 1.343, 1.484, 1.679, 1.954, 2.228,
+ 1.923, 1.689, 1.474, 1.318, 1.204, 1.138, 1.079, 1.071, 1.071, 1.133, 1.185, 1.284, 1.415, 1.597, 1.854, 2.146,
+ 1.881, 1.631, 1.423, 1.272, 1.159, 1.079, 1.051, 1.026, 1.046, 1.071, 1.144, 1.245, 1.369, 1.543, 1.801, 2.095,
+ 1.867, 1.595, 1.391, 1.242, 1.131, 1.051, 1.013, 1.002, 1.013, 1.046, 1.121, 1.219, 1.343, 1.511, 1.752, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.001, 1.001, 1.003, 1.045, 1.118, 1.217, 1.342, 1.511, 1.746, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.011, 1.003, 1.011, 1.046, 1.118, 1.217, 1.343, 1.511, 1.746, 2.079,
+ 1.884, 1.621, 1.411, 1.261, 1.149, 1.071, 1.048, 1.024, 1.046, 1.069, 1.141, 1.239, 1.369, 1.541, 1.781, 2.093,
+ 1.913, 1.675, 1.459, 1.304, 1.191, 1.125, 1.071, 1.065, 1.069, 1.124, 1.181, 1.278, 1.413, 1.592, 1.842, 2.133,
+ 1.981, 1.755, 1.529, 1.368, 1.251, 1.191, 1.125, 1.124, 1.124, 1.181, 1.242, 1.337, 1.479, 1.669, 1.935, 2.207,
+ 2.078, 1.867, 1.625, 1.453, 1.344, 1.251, 1.202, 1.201, 1.201, 1.242, 1.333, 1.418, 1.571, 1.776, 2.063, 2.321,
+ 2.217, 2.011, 1.747, 1.562, 1.431, 1.331, 1.278, 1.278, 1.278, 1.313, 1.407, 1.523, 1.686, 1.911, 2.226, 2.484
+ ],
+ "sigma": 0.00135,
+ "sigma_Cb": 0.00279
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2698,
+ "ccm":
+ [
+ 1.57227, -0.32596, -0.24631,
+ -0.61264, 1.70791, -0.09526,
+ -0.43254, 0.48489, 0.94765
+ ]
+ },
+ {
+ "ct": 2930,
+ "ccm":
+ [
+ 1.69455, -0.52724, -0.16731,
+ -0.67131, 1.78468, -0.11338,
+ -0.41609, 0.54693, 0.86916
+ ]
+ },
+ {
+ "ct": 3643,
+ "ccm":
+ [
+ 1.74041, -0.77553, 0.03512,
+ -0.44073, 1.34131, 0.09943,
+ -0.11035, -0.93919, 2.04954
+ ]
+ },
+ {
+ "ct": 4605,
+ "ccm":
+ [
+ 1.49865, -0.41638, -0.08227,
+ -0.39445, 1.70114, -0.30669,
+ 0.01319, -0.88009, 1.86689
+ ]
+ },
+ {
+ "ct": 5658,
+ "ccm":
+ [
+ 1.38601, -0.23128, -0.15472,
+ -0.37641, 1.70444, -0.32803,
+ -0.01575, -0.71466, 1.73041
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
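
Editorial note: each "rpi.alsc" calibration table in these files is written as 12 lines of 16 gain values, i.e. a flattened 16x12 grid across the image. A small sanity-check sketch under that assumption (the helper name is hypothetical):

```python
def check_alsc_table(table, cols=16, rows=12):
    """Re-fold a flat ALSC table into its rows x cols grid and report range."""
    assert len(table) == cols * rows, \
        f"expected {cols * rows} entries, got {len(table)}"
    grid = [table[r * cols:(r + 1) * cols] for r in range(rows)]
    lo, hi = min(table), max(table)
    print(f"gain range {lo:.3f} .. {hi:.3f} (max/min ratio {hi / lo:.2f})")
    return grid
```

In the tables shown here every gain is >= 1.0, i.e. shading correction only ever boosts, with the largest gains towards the corners.
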
diff --git a/src/ipa/rpi/vc4/data/imx462.json b/src/ipa/rpi/vc4/data/imx462.json
new file mode 100644
index 00000000..40a56842
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx462.json
@@ -0,0 +1,215 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "description": "This is an interim tuning only. Please consider doing a more formal tuning for your application.",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6813,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 890,
+ "reference_Y": 12900
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.67
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 187,
+ "slope": 0.00842
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "speed": 0.2,
+ "metering_modes":
+ {
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ },
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 10, 30000, 60000 ],
+ "gain": [ 1.0, 2.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 10, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.16,
+ 10000, 0.16
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.7,
+ "luminance_lut":
+ [
+ 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221,
+ 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067,
+ 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943,
+ 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861,
+ 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801,
+ 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766,
+ 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766,
+ 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795,
+ 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846,
+ 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933,
+ 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052,
+ 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 3900,
+ "ccm":
+ [
+ 1.54659, -0.17707, -0.36953,
+ -0.51471, 1.72733, -0.21262,
+ 0.06667, -0.92279, 1.85612
+ ]
+ }
+ ]
+ }
+ }
+ ]
+} \ No newline at end of file
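
Editorial note: imx462.json is added with the same blob (index 40a56842) as imx327.json earlier in this series, so the two interim tunings are byte-for-byte identical. Separately, every file here carries the same "gamma_curve": a flat list of (input, output) pairs on a 16-bit scale. A sketch of evaluating it, assuming linear interpolation between the listed points:

```python
import bisect

def apply_gamma(curve, x):
    """curve: flat [x0, y0, x1, y1, ...] list as in the tunings above."""
    xs, ys = curve[0::2], curve[1::2]
    x = max(xs[0], min(xs[-1], x))
    i = bisect.bisect_right(xs, x)
    if i == len(xs):
        return ys[-1]
    x0, x1, y0, y1 = xs[i - 1], xs[i], ys[i - 1], ys[i]
    return y0 + (y1 - y0) * (x - x0) / (x1 - x0)

# e.g. apply_gamma(curve, 8192) -> 25744 for the curve listed above
```
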
diff --git a/src/ipa/rpi/vc4/data/imx477.json b/src/ipa/rpi/vc4/data/imx477.json
new file mode 100644
index 00000000..fa25ee86
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477.json
@@ -0,0 +1,700 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2360.0, 0.6009, 0.3093,
+ 2848.0, 0.5071, 0.4,
+ 2903.0, 0.4905, 0.4392,
+ 3628.0, 0.4261, 0.5564,
+ 3643.0, 0.4228, 0.5623,
+ 4660.0, 0.3529, 0.68,
+ 5579.0, 0.3227, 0.7,
+ 6125.0, 0.3129, 0.71,
+ 6671.0, 0.3065, 0.72,
+ 7217.0, 0.3014, 0.73,
+ 7763.0, 0.295, 0.74,
+ 9505.0, 0.2524, 0.7856
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2850,
+ "ccm":
+ [
+ 1.97469, -0.71439, -0.26031,
+ -0.43521, 2.09769, -0.66248,
+ -0.04826, -0.84642, 1.89468
+ ]
+ },
+ {
+ "ct": 2960,
+ "ccm":
+ [
+ 2.12952, -0.91185, -0.21768,
+ -0.38018, 1.90789, -0.52771,
+ 0.03988, -1.10079, 2.06092
+ ]
+ },
+ {
+ "ct": 3580,
+ "ccm":
+ [
+ 2.03422, -0.80048, -0.23374,
+ -0.39089, 1.97221, -0.58132,
+ -0.08969, -0.61439, 1.70408
+ ]
+ },
+ {
+ "ct": 4559,
+ "ccm":
+ [
+ 2.15423, -0.98143, -0.17279,
+ -0.38131, 2.14763, -0.76632,
+ -0.10069, -0.54383, 1.64452
+ ]
+ },
+ {
+ "ct": 5881,
+ "ccm":
+ [
+ 2.18464, -0.95493, -0.22971,
+ -0.36826, 2.00298, -0.63471,
+ -0.15219, -0.38055, 1.53274
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.30687, -0.97295, -0.33392,
+ -0.30872, 2.32779, -1.01908,
+ -0.17761, -0.55891, 1.73651
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
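
Editorial note: imx477.json above is the first file in this series whose "rpi.agc" defines multiple "channels" (a default channel plus base_ev 0.125 and base_ev 1.5 variants), and whose "rpi.hdr" MultiExposureUnmerged block names two of them via "channel_map" and alternates them via "cadence": [ 1, 2 ]. A hedged illustration of that frame-to-channel relationship; the round-robin mapping is an assumption for illustration, not a statement of the rpi.hdr implementation:

```python
# Values copied from the imx477.json hunk above.
hdr = {
    "cadence": [1, 2],
    "channel_map": {"short": 1, "long": 2},
}

# Invert the map so channel indices can be printed by name;
# channel 0 (the first AGC channel) is unreferenced here.
names = {v: k for k, v in hdr["channel_map"].items()}

for frame in range(6):
    channel = hdr["cadence"][frame % len(hdr["cadence"])]
    print(f"frame {frame}: AGC channel {channel} ({names.get(channel, 'default')})")
```

Under this reading, frames alternate between the "short" (base_ev 0.125) and "long" (base_ev 1.5) AGC channels, which is consistent with the unmerged multi-exposure HDR mode the block is named after.
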
diff --git a/src/ipa/rpi/vc4/data/imx477_noir.json b/src/ipa/rpi/vc4/data/imx477_noir.json
new file mode 100644
index 00000000..472f33fe
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_noir.json
@@ -0,0 +1,656 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2360,
+ "ccm":
+ [
+ 1.66078, -0.23588, -0.42491,
+ -0.47456, 1.82763, -0.35307,
+ -0.00545, -1.44729, 2.45273
+ ]
+ },
+ {
+ "ct": 2870,
+ "ccm":
+ [
+ 1.78373, -0.55344, -0.23029,
+ -0.39951, 1.69701, -0.29751,
+ 0.01986, -1.06525, 2.04539
+ ]
+ },
+ {
+ "ct": 2970,
+ "ccm":
+ [
+ 1.73511, -0.56973, -0.16537,
+ -0.36338, 1.69878, -0.33539,
+ -0.02354, -0.76813, 1.79168
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 2.06374, -0.92218, -0.14156,
+ -0.41721, 1.69289, -0.27568,
+ -0.00554, -0.92741, 1.93295
+ ]
+ },
+ {
+ "ct": 3700,
+ "ccm":
+ [
+ 2.13792, -1.08136, -0.05655,
+ -0.34739, 1.58989, -0.24249,
+ -0.00349, -0.76789, 1.77138
+ ]
+ },
+ {
+ "ct": 3870,
+ "ccm":
+ [
+ 1.83834, -0.70528, -0.13307,
+ -0.30499, 1.60523, -0.30024,
+ -0.05701, -0.58313, 1.64014
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.15741, -1.10295, -0.05447,
+ -0.34631, 1.61158, -0.26528,
+ -0.02723, -0.70288, 1.73011
+ ]
+ },
+ {
+ "ct": 4400,
+ "ccm":
+ [
+ 2.05729, -0.95007, -0.10723,
+ -0.41712, 1.78606, -0.36894,
+ -0.11899, -0.55727, 1.67626
+ ]
+ },
+ {
+ "ct": 4715,
+ "ccm":
+ [
+ 1.90255, -0.77478, -0.12777,
+ -0.31338, 1.88197, -0.56858,
+ -0.06001, -0.61785, 1.67786
+ ]
+ },
+ {
+ "ct": 5920,
+ "ccm":
+ [
+ 1.98691, -0.84671, -0.14019,
+ -0.26581, 1.70615, -0.44035,
+ -0.09532, -0.47332, 1.56864
+ ]
+ },
+ {
+ "ct": 9050,
+ "ccm":
+ [
+ 2.09255, -0.76541, -0.32714,
+ -0.28973, 2.27462, -0.98489,
+ -0.17299, -0.61275, 1.78574
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
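As an aside for readers of these tuning files: each is a JSON document with a "version", a "target" and an ordered "algorithms" array whose entries are single-key objects (the key names the algorithm, e.g. "rpi.agc", and the value holds its parameters). The following sketch is illustrative only (the file name is a placeholder, and this is not libcamera code), but it shows how the structure above can be walked:

    import json

    # Load one of the tuning files added by this series (path is an example).
    with open("tuning.json") as f:
        tuning = json.load(f)

    print("target:", tuning["target"])          # e.g. "bcm2835"
    # "algorithms" is an ordered list of single-key objects.
    for entry in tuning["algorithms"]:
        (name, params), = entry.items()         # one algorithm per entry
        print(name, "->", sorted(params) if params else "(defaults)")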
diff --git a/src/ipa/rpi/vc4/data/imx477_scientific.json b/src/ipa/rpi/vc4/data/imx477_scientific.json
new file mode 100644
index 00000000..9dc32eb1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_scientific.json
@@ -0,0 +1,488 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2000.0, 0.6331025775790707, 0.27424225990946915,
+ 2200.0, 0.5696117366212947, 0.3116091368689487,
+ 2400.0, 0.5204264653110015, 0.34892179554105873,
+ 2600.0, 0.48148675531667223, 0.38565229719076793,
+ 2800.0, 0.450085403501908, 0.42145684622485047,
+ 3000.0, 0.42436130159169017, 0.45611835670028816,
+ 3200.0, 0.40300023695527337, 0.48950766215198593,
+ 3400.0, 0.3850520052612984, 0.5215567075837261,
+ 3600.0, 0.36981508088230314, 0.5522397906415475,
+ 4100.0, 0.333468007836758, 0.5909770465167908,
+ 4600.0, 0.31196097364221376, 0.6515706327327178,
+ 5100.0, 0.2961860409294588, 0.7068178946570284,
+ 5600.0, 0.2842607232745885, 0.7564837749584288,
+ 6100.0, 0.2750265787051251, 0.8006183524920533,
+ 6600.0, 0.2677057225584924, 0.8398879225373039,
+ 7100.0, 0.2617955199757274, 0.8746456080032436,
+ 7600.0, 0.25693714288250125, 0.905569559506562,
+ 8100.0, 0.25287531441063316, 0.9331696750390895,
+ 8600.0, 0.24946601483331993, 0.9576820904825795
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429,
+ "coarse_step": 0.1
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 512, 2304,
+ 1024, 4608,
+ 1536, 6573,
+ 2048, 8401,
+ 2560, 9992,
+ 3072, 11418,
+ 3584, 12719,
+ 4096, 13922,
+ 4608, 15045,
+ 5120, 16103,
+ 5632, 17104,
+ 6144, 18056,
+ 6656, 18967,
+ 7168, 19839,
+ 7680, 20679,
+ 8192, 21488,
+ 9216, 23028,
+ 10240, 24477,
+ 11264, 25849,
+ 12288, 27154,
+ 13312, 28401,
+ 14336, 29597,
+ 15360, 30747,
+ 16384, 31856,
+ 17408, 32928,
+ 18432, 33966,
+ 19456, 34973,
+ 20480, 35952,
+ 22528, 37832,
+ 24576, 39621,
+ 26624, 41330,
+ 28672, 42969,
+ 30720, 44545,
+ 32768, 46065,
+ 34816, 47534,
+ 36864, 48956,
+ 38912, 50336,
+ 40960, 51677,
+ 43008, 52982,
+ 45056, 54253,
+ 47104, 55493,
+ 49152, 56704,
+ 51200, 57888,
+ 53248, 59046,
+ 55296, 60181,
+ 57344, 61292,
+ 59392, 62382,
+ 61440, 63452,
+ 63488, 64503,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2000,
+ "ccm":
+ [
+ 1.5813882365848004, -0.35293683714581114, -0.27378771561617715,
+ -0.4347297185453639, 1.5792631087746074, -0.12102601986382337,
+ 0.2322290578987574, -1.4382672640468128, 2.1386425781770755
+ ]
+ },
+ {
+ "ct": 2200,
+ "ccm":
+ [
+ 1.6322048484088305, -0.45932286857238486, -0.21373542690252198,
+ -0.3970719209901105, 1.5877868651467202, -0.17249380832122455,
+ 0.20753774825903412, -1.2660673594740142, 2.005654261091916
+ ]
+ },
+ {
+ "ct": 2400,
+ "ccm":
+ [
+ 1.6766610071470398, -0.5447101051688111, -0.16838641107407676,
+ -0.3659845183388154, 1.592223692670396, -0.2127091997471162,
+ 0.1833964516767549, -1.1339155942419321, 1.9089342978542396
+ ]
+ },
+ {
+ "ct": 2600,
+ "ccm":
+ [
+ 1.7161984340622154, -0.6152585785678794, -0.1331100845092582,
+ -0.33972082628066275, 1.5944888273736966, -0.2453979465898787,
+ 0.1615577497676328, -1.0298684958833109, 1.8357854177422053
+ ]
+ },
+ {
+ "ct": 2800,
+ "ccm":
+ [
+ 1.7519307259815728, -0.6748682080165339, -0.10515169074540848,
+ -0.3171703484479931, 1.5955820297498486, -0.2727395854813966,
+ 0.14230870739974305, -0.9460976023551511, 1.778709391659538
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.7846716625128374, -0.7261240476375332, -0.08274697420358428,
+ -0.2975654035173307, 1.5960425637021738, -0.2961043416505157,
+ 0.12546426281675097, -0.8773434727076518, 1.7330356805246685
+ ]
+ },
+ {
+ "ct": 3200,
+ "ccm":
+ [
+ 1.8150085872943436, -0.7708109672515514, -0.06469468211419174,
+ -0.2803468940646277, 1.596168842967451, -0.3164044170681625,
+ 0.11071494533513807, -0.8199772290209191, 1.69572135046367
+ ]
+ },
+ {
+ "ct": 3400,
+ "ccm":
+ [
+ 1.8433668304932087, -0.8102060605062592, -0.05013485852801454,
+ -0.2650934036324084, 1.5961288492969294, -0.33427554893845535,
+ 0.0977478941863518, -0.7714303112098978, 1.6647070820146963
+ ]
+ },
+ {
+ "ct": 3600,
+ "ccm":
+ [
+ 1.8700575831917468, -0.8452518300291346, -0.03842644337477299,
+ -0.2514794528347016, 1.5960178299141876, -0.3501774949366156,
+ 0.08628520830733245, -0.729841503339915, 1.638553343939267
+ ]
+ },
+ {
+ "ct": 4100,
+ "ccm":
+ [
+ 1.8988700903560716, -0.8911278803351247, -0.018848644425650693,
+ -0.21487101487384094, 1.599236541382614, -0.39405450457918206,
+ 0.08251488056482173, -0.7178919368326191, 1.6267009056502704
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.960355191764125, -0.9624344812121991, -0.0017122408632169205,
+ -0.19444620905212898, 1.5978493736948447, -0.416727638296156,
+ 0.06310261513271084, -0.6483790952487849, 1.5834605477213093
+ ]
+ },
+ {
+ "ct": 5100,
+ "ccm":
+ [
+ 2.014680536961399, -1.0195930302148566, 0.007728256612638915,
+ -0.17751999660735496, 1.5977081555831, -0.4366085498741474,
+ 0.04741267583041334, -0.5950327902073489, 1.5512919847321853
+ ]
+ },
+ {
+ "ct": 5600,
+ "ccm":
+ [
+ 2.062652337917251, -1.0658386679125478, 0.011886354256281267,
+ -0.16319197721451495, 1.598363237584736, -0.45422061523742235,
+ 0.03465810928795378, -0.5535454108047286, 1.5269025836946852
+ ]
+ },
+ {
+ "ct": 6100,
+ "ccm":
+ [
+ 2.104985902038069, -1.103597868736314, 0.012503517136539277,
+ -0.15090797064906178, 1.5994703078166095, -0.4698414300864995,
+ 0.02421766063474242, -0.5208922818196823, 1.5081270847783788
+ ]
+ },
+ {
+ "ct": 6600,
+ "ccm":
+ [
+ 2.1424988751299714, -1.134760232367728, 0.010730356010435522,
+ -0.14021846798466234, 1.600822462230719, -0.48379204794526487,
+ 0.015521315410496622, -0.49463630325832275, 1.4933313534840327
+ ]
+ },
+ {
+ "ct": 7100,
+ "ccm":
+ [
+ 2.1758034100130925, -1.1607558481037359, 0.007452724895469076,
+ -0.13085694672641826, 1.6022648614493245, -0.4962330524084075,
+ 0.008226943206113427, -0.4733077192319791, 1.4815336120437468
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.205529206931895, -1.1826662383072108, 0.0032019529917605167,
+ -0.122572009780486, 1.6037258133595753, -0.5073973734282445,
+ 0.0020132587619863425, -0.4556590236414181, 1.471939788496745
+ ]
+ },
+ {
+ "ct": 8100,
+ "ccm":
+ [
+ 2.232224969223067, -1.2013672897252885, -0.0016234598095482985,
+ -0.11518026734442414, 1.6051544769439803, -0.5174558699422255,
+ -0.0033378143542219835, -0.4408590373867774, 1.4640252230667452
+ ]
+ },
+ {
+ "ct": 8600,
+ "ccm":
+ [
+ 2.256082295891265, -1.2173210549996634, -0.0067231350481711675,
+ -0.10860272839843167, 1.6065150139140594, -0.5264728573611493,
+ -0.007952618707984149, -0.4284003574050791, 1.4574646927117558
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
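The "ct_curve" arrays in the rpi.awb blocks above are flat lists of (colour temperature, r, b) triples. A minimal sketch, assuming simple linear interpolation between neighbouring triples (libcamera's own piecewise-linear handling may differ), of how the two per-temperature values could be looked up:

    def interp_ct_curve(curve, ct):
        # Regroup the flat list into (ct, r, b) triples.
        pts = [tuple(curve[i:i + 3]) for i in range(0, len(curve), 3)]
        if ct <= pts[0][0]:
            return pts[0][1], pts[0][2]
        if ct >= pts[-1][0]:
            return pts[-1][1], pts[-1][2]
        for (c0, r0, b0), (c1, r1, b1) in zip(pts, pts[1:]):
            if c0 <= ct <= c1:
                t = (ct - c0) / (c1 - c0)
                return r0 + t * (r1 - r0), b0 + t * (b1 - b0)

    # First two triples of the imx477_scientific.json curve above, rounded:
    curve = [2000.0, 0.6331, 0.2742, 2200.0, 0.5696, 0.3116]
    print(interp_ct_curve(curve, 2100.0))   # roughly (0.6014, 0.2929)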
diff --git a/src/ipa/rpi/vc4/data/imx477_v1.json b/src/ipa/rpi/vc4/data/imx477_v1.json
new file mode 100644
index 00000000..55e4adc1
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx477_v1.json
@@ -0,0 +1,525 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 27242,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 830,
+ "reference_Y": 17755
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.767
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 204,
+ "slope": 0.01078
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2360.0, 0.6009, 0.3093,
+ 2870.0, 0.5047, 0.3936,
+ 2970.0, 0.4782, 0.4221,
+ 3700.0, 0.4212, 0.4923,
+ 3870.0, 0.4037, 0.5166,
+ 4000.0, 0.3965, 0.5271,
+ 4400.0, 0.3703, 0.5666,
+ 4715.0, 0.3411, 0.6147,
+ 5920.0, 0.3108, 0.6687,
+ 9050.0, 0.2524, 0.7856
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0238,
+ "transverse_neg": 0.04429
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.3,
+ 1000, 0.3
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.088, 2.086, 2.082, 2.081, 2.077, 2.071, 2.068, 2.068, 2.072, 2.073, 2.075, 2.078, 2.084, 2.092, 2.095, 2.098,
+ 2.086, 2.084, 2.079, 2.078, 2.075, 2.068, 2.064, 2.063, 2.068, 2.071, 2.072, 2.075, 2.081, 2.089, 2.092, 2.094,
+ 2.083, 2.081, 2.077, 2.072, 2.069, 2.062, 2.059, 2.059, 2.063, 2.067, 2.069, 2.072, 2.079, 2.088, 2.089, 2.089,
+ 2.081, 2.077, 2.072, 2.068, 2.065, 2.058, 2.055, 2.054, 2.057, 2.062, 2.066, 2.069, 2.077, 2.084, 2.086, 2.086,
+ 2.078, 2.075, 2.069, 2.065, 2.061, 2.055, 2.052, 2.049, 2.051, 2.056, 2.062, 2.065, 2.072, 2.079, 2.081, 2.079,
+ 2.079, 2.075, 2.069, 2.064, 2.061, 2.053, 2.049, 2.046, 2.049, 2.051, 2.057, 2.062, 2.069, 2.075, 2.077, 2.075,
+ 2.082, 2.079, 2.072, 2.065, 2.061, 2.054, 2.049, 2.047, 2.049, 2.051, 2.056, 2.061, 2.066, 2.073, 2.073, 2.069,
+ 2.086, 2.082, 2.075, 2.068, 2.062, 2.054, 2.051, 2.049, 2.051, 2.052, 2.056, 2.061, 2.066, 2.073, 2.073, 2.072,
+ 2.088, 2.086, 2.079, 2.074, 2.066, 2.057, 2.051, 2.051, 2.054, 2.055, 2.056, 2.061, 2.067, 2.072, 2.073, 2.072,
+ 2.091, 2.087, 2.079, 2.075, 2.068, 2.057, 2.052, 2.052, 2.056, 2.055, 2.055, 2.059, 2.066, 2.072, 2.072, 2.072,
+ 2.093, 2.088, 2.081, 2.077, 2.069, 2.059, 2.054, 2.054, 2.057, 2.056, 2.056, 2.058, 2.066, 2.072, 2.073, 2.073,
+ 2.095, 2.091, 2.084, 2.078, 2.075, 2.067, 2.057, 2.057, 2.059, 2.059, 2.058, 2.059, 2.068, 2.073, 2.075, 2.078
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 2.973, 2.968, 2.956, 2.943, 2.941, 2.932, 2.923, 2.921, 2.924, 2.929, 2.931, 2.939, 2.953, 2.965, 2.966, 2.976,
+ 2.969, 2.962, 2.951, 2.941, 2.934, 2.928, 2.919, 2.918, 2.919, 2.923, 2.927, 2.933, 2.945, 2.957, 2.962, 2.962,
+ 2.964, 2.956, 2.944, 2.932, 2.929, 2.924, 2.915, 2.914, 2.915, 2.919, 2.924, 2.928, 2.941, 2.952, 2.958, 2.959,
+ 2.957, 2.951, 2.939, 2.928, 2.924, 2.919, 2.913, 2.911, 2.911, 2.915, 2.919, 2.925, 2.936, 2.947, 2.952, 2.953,
+ 2.954, 2.947, 2.935, 2.924, 2.919, 2.915, 2.908, 2.906, 2.906, 2.907, 2.914, 2.921, 2.932, 2.941, 2.943, 2.942,
+ 2.953, 2.946, 2.932, 2.921, 2.916, 2.911, 2.904, 2.902, 2.901, 2.904, 2.909, 2.919, 2.926, 2.937, 2.939, 2.939,
+ 2.953, 2.947, 2.932, 2.918, 2.915, 2.909, 2.903, 2.901, 2.901, 2.906, 2.911, 2.918, 2.924, 2.936, 2.936, 2.932,
+ 2.956, 2.948, 2.934, 2.919, 2.916, 2.908, 2.903, 2.901, 2.902, 2.907, 2.909, 2.917, 2.926, 2.936, 2.939, 2.939,
+ 2.957, 2.951, 2.936, 2.923, 2.917, 2.907, 2.904, 2.901, 2.902, 2.908, 2.911, 2.919, 2.929, 2.939, 2.942, 2.942,
+ 2.961, 2.951, 2.936, 2.922, 2.918, 2.906, 2.904, 2.901, 2.901, 2.907, 2.911, 2.921, 2.931, 2.941, 2.942, 2.944,
+ 2.964, 2.954, 2.936, 2.924, 2.918, 2.909, 2.905, 2.905, 2.905, 2.907, 2.912, 2.923, 2.933, 2.942, 2.944, 2.944,
+ 2.964, 2.958, 2.943, 2.927, 2.921, 2.914, 2.909, 2.907, 2.907, 2.912, 2.916, 2.928, 2.936, 2.944, 2.947, 2.952
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 3.312, 3.308, 3.301, 3.294, 3.288, 3.277, 3.268, 3.261, 3.259, 3.261, 3.267, 3.273, 3.285, 3.301, 3.303, 3.312,
+ 3.308, 3.304, 3.294, 3.291, 3.283, 3.271, 3.263, 3.259, 3.257, 3.258, 3.261, 3.268, 3.278, 3.293, 3.299, 3.299,
+ 3.302, 3.296, 3.288, 3.282, 3.276, 3.267, 3.259, 3.254, 3.252, 3.253, 3.256, 3.261, 3.273, 3.289, 3.292, 3.292,
+ 3.296, 3.289, 3.282, 3.276, 3.269, 3.263, 3.256, 3.251, 3.248, 3.249, 3.251, 3.257, 3.268, 3.279, 3.284, 3.284,
+ 3.292, 3.285, 3.279, 3.271, 3.264, 3.257, 3.249, 3.243, 3.241, 3.241, 3.246, 3.252, 3.261, 3.274, 3.275, 3.273,
+ 3.291, 3.285, 3.276, 3.268, 3.259, 3.251, 3.242, 3.239, 3.236, 3.238, 3.244, 3.248, 3.258, 3.268, 3.269, 3.265,
+ 3.294, 3.288, 3.275, 3.266, 3.257, 3.248, 3.239, 3.238, 3.237, 3.238, 3.243, 3.246, 3.255, 3.264, 3.264, 3.257,
+ 3.297, 3.293, 3.279, 3.268, 3.258, 3.249, 3.238, 3.237, 3.239, 3.239, 3.243, 3.245, 3.255, 3.264, 3.264, 3.263,
+ 3.301, 3.295, 3.281, 3.271, 3.259, 3.248, 3.237, 3.237, 3.239, 3.241, 3.243, 3.246, 3.257, 3.265, 3.266, 3.264,
+ 3.306, 3.295, 3.279, 3.271, 3.261, 3.247, 3.235, 3.234, 3.239, 3.239, 3.243, 3.247, 3.258, 3.265, 3.265, 3.264,
+ 3.308, 3.297, 3.279, 3.272, 3.261, 3.249, 3.239, 3.239, 3.241, 3.243, 3.245, 3.248, 3.261, 3.265, 3.266, 3.265,
+ 3.309, 3.301, 3.286, 3.276, 3.267, 3.256, 3.246, 3.242, 3.244, 3.244, 3.249, 3.253, 3.263, 3.267, 3.271, 3.274
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 2960,
+ "table":
+ [
+ 2.133, 2.134, 2.139, 2.143, 2.148, 2.155, 2.158, 2.158, 2.158, 2.161, 2.161, 2.162, 2.159, 2.156, 2.152, 2.151,
+ 2.132, 2.133, 2.135, 2.142, 2.147, 2.153, 2.158, 2.158, 2.158, 2.158, 2.159, 2.159, 2.157, 2.154, 2.151, 2.148,
+ 2.133, 2.133, 2.135, 2.142, 2.149, 2.154, 2.158, 2.158, 2.157, 2.156, 2.158, 2.157, 2.155, 2.153, 2.148, 2.146,
+ 2.133, 2.133, 2.138, 2.145, 2.149, 2.154, 2.158, 2.159, 2.158, 2.155, 2.157, 2.156, 2.153, 2.149, 2.146, 2.144,
+ 2.133, 2.134, 2.139, 2.146, 2.149, 2.154, 2.158, 2.159, 2.159, 2.156, 2.154, 2.154, 2.149, 2.145, 2.143, 2.139,
+ 2.135, 2.135, 2.139, 2.146, 2.151, 2.155, 2.158, 2.159, 2.158, 2.156, 2.153, 2.151, 2.146, 2.143, 2.139, 2.136,
+ 2.135, 2.135, 2.138, 2.145, 2.151, 2.154, 2.157, 2.158, 2.157, 2.156, 2.153, 2.151, 2.147, 2.143, 2.141, 2.137,
+ 2.135, 2.134, 2.135, 2.141, 2.149, 2.154, 2.157, 2.157, 2.157, 2.157, 2.157, 2.153, 2.149, 2.146, 2.142, 2.139,
+ 2.132, 2.133, 2.135, 2.139, 2.148, 2.153, 2.158, 2.159, 2.159, 2.161, 2.161, 2.157, 2.154, 2.149, 2.144, 2.141,
+ 2.132, 2.133, 2.135, 2.141, 2.149, 2.155, 2.161, 2.161, 2.162, 2.162, 2.163, 2.159, 2.154, 2.149, 2.144, 2.138,
+ 2.136, 2.136, 2.137, 2.143, 2.149, 2.156, 2.162, 2.163, 2.162, 2.163, 2.164, 2.161, 2.157, 2.152, 2.146, 2.138,
+ 2.137, 2.137, 2.141, 2.147, 2.152, 2.157, 2.162, 2.162, 2.159, 2.161, 2.162, 2.162, 2.157, 2.152, 2.148, 2.148
+ ]
+ },
+ {
+ "ct": 4850,
+ "table":
+ [
+ 1.463, 1.464, 1.471, 1.478, 1.479, 1.483, 1.484, 1.486, 1.486, 1.484, 1.483, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.463, 1.463, 1.468, 1.476, 1.479, 1.482, 1.484, 1.487, 1.486, 1.484, 1.483, 1.482, 1.478, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.468, 1.476, 1.479, 1.483, 1.484, 1.486, 1.486, 1.485, 1.484, 1.482, 1.477, 1.473, 1.469, 1.468,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.483, 1.485, 1.487, 1.487, 1.485, 1.485, 1.482, 1.478, 1.474, 1.469, 1.468,
+ 1.465, 1.465, 1.471, 1.478, 1.481, 1.484, 1.486, 1.488, 1.488, 1.487, 1.485, 1.482, 1.477, 1.472, 1.468, 1.467,
+ 1.465, 1.466, 1.472, 1.479, 1.482, 1.485, 1.486, 1.488, 1.488, 1.486, 1.484, 1.479, 1.475, 1.472, 1.468, 1.466,
+ 1.466, 1.466, 1.472, 1.478, 1.482, 1.484, 1.485, 1.488, 1.487, 1.485, 1.483, 1.479, 1.475, 1.472, 1.469, 1.468,
+ 1.465, 1.466, 1.469, 1.476, 1.481, 1.485, 1.485, 1.486, 1.486, 1.485, 1.483, 1.479, 1.477, 1.474, 1.471, 1.469,
+ 1.464, 1.465, 1.469, 1.476, 1.481, 1.484, 1.485, 1.487, 1.487, 1.486, 1.485, 1.481, 1.478, 1.475, 1.471, 1.469,
+ 1.463, 1.464, 1.469, 1.477, 1.481, 1.485, 1.485, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.471, 1.468,
+ 1.464, 1.465, 1.471, 1.478, 1.482, 1.486, 1.486, 1.488, 1.488, 1.487, 1.486, 1.481, 1.478, 1.475, 1.472, 1.468,
+ 1.465, 1.466, 1.472, 1.481, 1.483, 1.487, 1.487, 1.488, 1.488, 1.486, 1.485, 1.481, 1.479, 1.476, 1.473, 1.472
+ ]
+ },
+ {
+ "ct": 5930,
+ "table":
+ [
+ 1.443, 1.444, 1.448, 1.453, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.466, 1.462, 1.457, 1.454, 1.451,
+ 1.443, 1.444, 1.445, 1.451, 1.459, 1.463, 1.465, 1.467, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.451,
+ 1.444, 1.444, 1.445, 1.451, 1.459, 1.463, 1.466, 1.468, 1.469, 1.469, 1.467, 1.465, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.444, 1.447, 1.452, 1.459, 1.464, 1.467, 1.469, 1.471, 1.469, 1.467, 1.466, 1.461, 1.456, 1.452, 1.449,
+ 1.444, 1.445, 1.448, 1.452, 1.459, 1.465, 1.469, 1.471, 1.471, 1.471, 1.468, 1.465, 1.461, 1.455, 1.451, 1.449,
+ 1.445, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.471, 1.472, 1.469, 1.467, 1.465, 1.459, 1.455, 1.451, 1.447,
+ 1.446, 1.446, 1.449, 1.453, 1.461, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.459, 1.455, 1.452, 1.449,
+ 1.446, 1.446, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.469, 1.469, 1.467, 1.465, 1.461, 1.457, 1.454, 1.451,
+ 1.444, 1.444, 1.447, 1.451, 1.459, 1.466, 1.469, 1.469, 1.471, 1.471, 1.468, 1.466, 1.462, 1.458, 1.454, 1.452,
+ 1.444, 1.444, 1.448, 1.453, 1.459, 1.466, 1.469, 1.471, 1.472, 1.472, 1.468, 1.466, 1.462, 1.458, 1.454, 1.449,
+ 1.446, 1.447, 1.449, 1.454, 1.461, 1.466, 1.471, 1.471, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.455, 1.449,
+ 1.447, 1.447, 1.452, 1.457, 1.462, 1.468, 1.472, 1.472, 1.471, 1.471, 1.468, 1.466, 1.462, 1.459, 1.456, 1.455
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.548, 1.499, 1.387, 1.289, 1.223, 1.183, 1.164, 1.154, 1.153, 1.169, 1.211, 1.265, 1.345, 1.448, 1.581, 1.619,
+ 1.513, 1.412, 1.307, 1.228, 1.169, 1.129, 1.105, 1.098, 1.103, 1.127, 1.157, 1.209, 1.272, 1.361, 1.481, 1.583,
+ 1.449, 1.365, 1.257, 1.175, 1.124, 1.085, 1.062, 1.054, 1.059, 1.079, 1.113, 1.151, 1.211, 1.293, 1.407, 1.488,
+ 1.424, 1.324, 1.222, 1.139, 1.089, 1.056, 1.034, 1.031, 1.034, 1.049, 1.075, 1.115, 1.164, 1.241, 1.351, 1.446,
+ 1.412, 1.297, 1.203, 1.119, 1.069, 1.039, 1.021, 1.016, 1.022, 1.032, 1.052, 1.086, 1.135, 1.212, 1.321, 1.439,
+ 1.406, 1.287, 1.195, 1.115, 1.059, 1.028, 1.014, 1.012, 1.015, 1.026, 1.041, 1.074, 1.125, 1.201, 1.302, 1.425,
+ 1.406, 1.294, 1.205, 1.126, 1.062, 1.031, 1.013, 1.009, 1.011, 1.019, 1.042, 1.079, 1.129, 1.203, 1.302, 1.435,
+ 1.415, 1.318, 1.229, 1.146, 1.076, 1.039, 1.019, 1.014, 1.017, 1.031, 1.053, 1.093, 1.144, 1.219, 1.314, 1.436,
+ 1.435, 1.348, 1.246, 1.164, 1.094, 1.059, 1.036, 1.032, 1.037, 1.049, 1.072, 1.114, 1.167, 1.257, 1.343, 1.462,
+ 1.471, 1.385, 1.278, 1.189, 1.124, 1.084, 1.064, 1.061, 1.069, 1.078, 1.101, 1.146, 1.207, 1.298, 1.415, 1.496,
+ 1.522, 1.436, 1.323, 1.228, 1.169, 1.118, 1.101, 1.094, 1.099, 1.113, 1.146, 1.194, 1.265, 1.353, 1.474, 1.571,
+ 1.578, 1.506, 1.378, 1.281, 1.211, 1.156, 1.135, 1.134, 1.139, 1.158, 1.194, 1.251, 1.327, 1.427, 1.559, 1.611
+ ],
+ "sigma": 0.00121,
+ "sigma_Cb": 0.00115
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2360,
+ "ccm":
+ [
+ 1.66078, -0.23588, -0.42491,
+ -0.47456, 1.82763, -0.35307,
+ -0.00545, -1.44729, 2.45273
+ ]
+ },
+ {
+ "ct": 2870,
+ "ccm":
+ [
+ 1.78373, -0.55344, -0.23029,
+ -0.39951, 1.69701, -0.29751,
+ 0.01986, -1.06525, 2.04539
+ ]
+ },
+ {
+ "ct": 2970,
+ "ccm":
+ [
+ 1.73511, -0.56973, -0.16537,
+ -0.36338, 1.69878, -0.33539,
+ -0.02354, -0.76813, 1.79168
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 2.06374, -0.92218, -0.14156,
+ -0.41721, 1.69289, -0.27568,
+ -0.00554, -0.92741, 1.93295
+ ]
+ },
+ {
+ "ct": 3700,
+ "ccm":
+ [
+ 2.13792, -1.08136, -0.05655,
+ -0.34739, 1.58989, -0.24249,
+ -0.00349, -0.76789, 1.77138
+ ]
+ },
+ {
+ "ct": 3870,
+ "ccm":
+ [
+ 1.83834, -0.70528, -0.13307,
+ -0.30499, 1.60523, -0.30024,
+ -0.05701, -0.58313, 1.64014
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.15741, -1.10295, -0.05447,
+ -0.34631, 1.61158, -0.26528,
+ -0.02723, -0.70288, 1.73011
+ ]
+ },
+ {
+ "ct": 4400,
+ "ccm":
+ [
+ 2.05729, -0.95007, -0.10723,
+ -0.41712, 1.78606, -0.36894,
+ -0.11899, -0.55727, 1.67626
+ ]
+ },
+ {
+ "ct": 4715,
+ "ccm":
+ [
+ 1.90255, -0.77478, -0.12777,
+ -0.31338, 1.88197, -0.56858,
+ -0.06001, -0.61785, 1.67786
+ ]
+ },
+ {
+ "ct": 5920,
+ "ccm":
+ [
+ 1.98691, -0.84671, -0.14019,
+ -0.26581, 1.70615, -0.44035,
+ -0.09532, -0.47332, 1.56864
+ ]
+ },
+ {
+ "ct": 9050,
+ "ccm":
+ [
+ 2.09255, -0.76541, -0.32714,
+ -0.28973, 2.27462, -0.98489,
+ -0.17299, -0.61275, 1.78574
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
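The "gamma_curve" arrays in the rpi.contrast blocks are flat (input, output) pairs on a 16-bit scale. A hedged sketch (not libcamera's implementation) that evaluates such a curve, assuming intermediate inputs fall on the straight line between neighbouring knots:

    def apply_gamma(curve, x):
        # Regroup the flat list into (input, output) knots.
        pts = list(zip(curve[0::2], curve[1::2]))
        for (x0, y0), (x1, y1) in zip(pts, pts[1:]):
            if x0 <= x <= x1:
                return y0 + (x - x0) * (y1 - y0) / (x1 - x0)
        return pts[-1][1]

    # Abbreviated knots from the curves above:
    curve = [0, 0, 1024, 5040, 2048, 9338, 65535, 65535]
    print(apply_gamma(curve, 1536))   # midway between two knots: 7189.0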
diff --git a/src/ipa/rpi/vc4/data/imx519.json b/src/ipa/rpi/vc4/data/imx519.json
new file mode 100644
index 00000000..ce194256
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx519.json
@@ -0,0 +1,427 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 13841,
+ "reference_gain": 2.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 900,
+ "reference_Y": 12064
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.776
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 189,
+ "slope": 0.01495
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 7900
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8000
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2890.0, 0.7328, 0.3734,
+ 3550.0, 0.6228, 0.4763,
+ 4500.0, 0.5208, 0.5825,
+ 5700.0, 0.4467, 0.6671,
+ 7900.0, 0.3858, 0.7411
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.02027,
+ "transverse_neg": 0.01935
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.527, 1.521, 1.508, 1.493, 1.476, 1.455, 1.442, 1.441, 1.441, 1.441, 1.448, 1.467, 1.483, 1.494, 1.503, 1.504,
+ 1.525, 1.513, 1.496, 1.477, 1.461, 1.434, 1.418, 1.409, 1.409, 1.416, 1.429, 1.449, 1.469, 1.485, 1.495, 1.503,
+ 1.517, 1.506, 1.485, 1.461, 1.434, 1.412, 1.388, 1.376, 1.376, 1.386, 1.405, 1.429, 1.449, 1.471, 1.488, 1.495,
+ 1.512, 1.496, 1.471, 1.442, 1.412, 1.388, 1.361, 1.344, 1.344, 1.358, 1.384, 1.405, 1.431, 1.456, 1.479, 1.489,
+ 1.508, 1.488, 1.458, 1.425, 1.393, 1.361, 1.343, 1.322, 1.321, 1.342, 1.358, 1.385, 1.416, 1.445, 1.471, 1.484,
+ 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.318, 1.318, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
+ 1.507, 1.482, 1.453, 1.418, 1.382, 1.349, 1.322, 1.313, 1.313, 1.321, 1.345, 1.373, 1.405, 1.437, 1.465, 1.483,
+ 1.507, 1.485, 1.455, 1.422, 1.387, 1.355, 1.333, 1.319, 1.321, 1.333, 1.351, 1.381, 1.411, 1.441, 1.467, 1.483,
+ 1.508, 1.489, 1.463, 1.432, 1.401, 1.372, 1.355, 1.333, 1.333, 1.351, 1.369, 1.393, 1.422, 1.448, 1.471, 1.484,
+ 1.511, 1.494, 1.472, 1.444, 1.416, 1.398, 1.372, 1.361, 1.361, 1.369, 1.393, 1.411, 1.436, 1.458, 1.477, 1.487,
+ 1.511, 1.496, 1.478, 1.455, 1.436, 1.416, 1.399, 1.391, 1.391, 1.397, 1.411, 1.429, 1.451, 1.466, 1.479, 1.487,
+ 1.511, 1.495, 1.478, 1.462, 1.448, 1.432, 1.419, 1.419, 1.419, 1.419, 1.429, 1.445, 1.459, 1.471, 1.482, 1.487
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 2.581, 2.573, 2.558, 2.539, 2.514, 2.487, 2.473, 2.471, 2.471, 2.471, 2.479, 2.499, 2.517, 2.532, 2.543, 2.544,
+ 2.575, 2.559, 2.539, 2.521, 2.491, 2.458, 2.435, 2.421, 2.421, 2.429, 2.449, 2.477, 2.499, 2.519, 2.534, 2.543,
+ 2.561, 2.549, 2.521, 2.491, 2.457, 2.423, 2.393, 2.375, 2.375, 2.387, 2.412, 2.444, 2.475, 2.499, 2.519, 2.532,
+ 2.552, 2.531, 2.498, 2.459, 2.423, 2.391, 2.349, 2.325, 2.325, 2.344, 2.374, 2.412, 2.444, 2.476, 2.505, 2.519,
+ 2.543, 2.518, 2.479, 2.435, 2.392, 2.349, 2.324, 2.285, 2.283, 2.313, 2.344, 2.374, 2.417, 2.457, 2.489, 2.506,
+ 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.277, 2.279, 2.283, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
+ 2.541, 2.511, 2.469, 2.421, 2.372, 2.326, 2.284, 2.267, 2.267, 2.281, 2.313, 2.357, 2.401, 2.443, 2.479, 2.504,
+ 2.541, 2.512, 2.472, 2.425, 2.381, 2.338, 2.302, 2.278, 2.279, 2.301, 2.324, 2.364, 2.407, 2.447, 2.481, 2.504,
+ 2.544, 2.519, 2.483, 2.441, 2.401, 2.363, 2.338, 2.302, 2.302, 2.324, 2.355, 2.385, 2.423, 2.459, 2.488, 2.506,
+ 2.549, 2.527, 2.497, 2.463, 2.427, 2.401, 2.363, 2.345, 2.345, 2.355, 2.385, 2.412, 2.444, 2.473, 2.497, 2.509,
+ 2.552, 2.532, 2.507, 2.481, 2.459, 2.427, 2.402, 2.389, 2.389, 2.394, 2.412, 2.444, 2.465, 2.481, 2.499, 2.511,
+ 2.553, 2.533, 2.508, 2.489, 2.475, 2.454, 2.429, 2.429, 2.429, 2.429, 2.439, 2.463, 2.481, 2.492, 2.504, 2.511
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.132, 3.126, 3.116, 3.103, 3.097, 3.091, 3.087, 3.086, 3.088, 3.091, 3.092, 3.102, 3.113, 3.121, 3.141, 3.144,
+ 3.149, 3.132, 3.123, 3.108, 3.101, 3.096, 3.091, 3.089, 3.091, 3.092, 3.101, 3.107, 3.116, 3.129, 3.144, 3.153,
+ 3.161, 3.149, 3.129, 3.121, 3.108, 3.103, 3.101, 3.101, 3.101, 3.103, 3.107, 3.116, 3.125, 3.134, 3.153, 3.159,
+ 3.176, 3.161, 3.144, 3.129, 3.124, 3.121, 3.117, 3.118, 3.118, 3.119, 3.122, 3.125, 3.134, 3.146, 3.159, 3.171,
+ 3.183, 3.176, 3.157, 3.144, 3.143, 3.143, 3.139, 3.141, 3.141, 3.141, 3.141, 3.141, 3.146, 3.161, 3.171, 3.179,
+ 3.189, 3.183, 3.165, 3.157, 3.156, 3.157, 3.159, 3.163, 3.163, 3.163, 3.163, 3.161, 3.163, 3.169, 3.179, 3.187,
+ 3.199, 3.189, 3.171, 3.165, 3.164, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.169, 3.169, 3.175, 3.187, 3.189,
+ 3.206, 3.196, 3.177, 3.171, 3.165, 3.167, 3.171, 3.173, 3.173, 3.172, 3.171, 3.171, 3.173, 3.177, 3.192, 3.194,
+ 3.209, 3.197, 3.178, 3.171, 3.164, 3.161, 3.159, 3.161, 3.162, 3.164, 3.167, 3.171, 3.173, 3.181, 3.193, 3.198,
+ 3.204, 3.194, 3.176, 3.165, 3.161, 3.156, 3.154, 3.154, 3.159, 3.161, 3.164, 3.168, 3.173, 3.182, 3.198, 3.199,
+ 3.199, 3.191, 3.176, 3.169, 3.161, 3.157, 3.153, 3.153, 3.156, 3.161, 3.164, 3.168, 3.173, 3.186, 3.196, 3.199,
+ 3.199, 3.188, 3.179, 3.173, 3.165, 3.157, 3.153, 3.154, 3.156, 3.159, 3.167, 3.171, 3.176, 3.185, 3.193, 3.198
+ ]
+ },
+ {
+ "ct": 6000,
+ "table":
+ [
+ 1.579, 1.579, 1.577, 1.574, 1.573, 1.571, 1.571, 1.571, 1.571, 1.569, 1.569, 1.571, 1.572, 1.574, 1.577, 1.578,
+ 1.584, 1.579, 1.578, 1.575, 1.573, 1.572, 1.571, 1.572, 1.572, 1.571, 1.571, 1.572, 1.573, 1.576, 1.578, 1.579,
+ 1.587, 1.584, 1.579, 1.578, 1.575, 1.573, 1.573, 1.575, 1.575, 1.574, 1.573, 1.574, 1.576, 1.578, 1.581, 1.581,
+ 1.591, 1.587, 1.584, 1.579, 1.578, 1.579, 1.579, 1.581, 1.581, 1.581, 1.578, 1.577, 1.578, 1.581, 1.585, 1.586,
+ 1.595, 1.591, 1.587, 1.585, 1.585, 1.586, 1.587, 1.587, 1.588, 1.588, 1.585, 1.584, 1.584, 1.586, 1.589, 1.589,
+ 1.597, 1.595, 1.591, 1.589, 1.591, 1.593, 1.595, 1.596, 1.597, 1.597, 1.595, 1.594, 1.592, 1.592, 1.593, 1.593,
+ 1.601, 1.597, 1.593, 1.592, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.596, 1.595, 1.596, 1.595, 1.595,
+ 1.601, 1.599, 1.594, 1.593, 1.593, 1.595, 1.598, 1.599, 1.602, 1.601, 1.598, 1.597, 1.597, 1.597, 1.597, 1.597,
+ 1.602, 1.599, 1.594, 1.593, 1.592, 1.593, 1.595, 1.597, 1.597, 1.598, 1.598, 1.597, 1.597, 1.597, 1.598, 1.598,
+ 1.599, 1.598, 1.594, 1.592, 1.591, 1.591, 1.592, 1.595, 1.596, 1.597, 1.597, 1.597, 1.597, 1.599, 1.599, 1.599,
+ 1.598, 1.596, 1.594, 1.593, 1.592, 1.592, 1.592, 1.594, 1.595, 1.597, 1.597, 1.597, 1.598, 1.599, 1.599, 1.599,
+ 1.597, 1.595, 1.594, 1.594, 1.593, 1.592, 1.593, 1.595, 1.595, 1.597, 1.598, 1.598, 1.598, 1.599, 1.599, 1.599
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.887, 2.754, 2.381, 2.105, 1.859, 1.678, 1.625, 1.623, 1.623, 1.624, 1.669, 1.849, 2.092, 2.362, 2.723, 2.838,
+ 2.754, 2.443, 2.111, 1.905, 1.678, 1.542, 1.455, 1.412, 1.412, 1.452, 1.535, 1.665, 1.893, 2.096, 2.413, 2.723,
+ 2.443, 2.216, 1.911, 1.678, 1.537, 1.372, 1.288, 1.245, 1.245, 1.283, 1.363, 1.527, 1.665, 1.895, 2.193, 2.413,
+ 2.318, 2.057, 1.764, 1.541, 1.372, 1.282, 1.159, 1.113, 1.113, 1.151, 1.269, 1.363, 1.527, 1.749, 2.034, 2.278,
+ 2.259, 1.953, 1.671, 1.452, 1.283, 1.159, 1.107, 1.018, 1.017, 1.097, 1.151, 1.269, 1.437, 1.655, 1.931, 2.222,
+ 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.019, 1.011, 1.005, 1.014, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
+ 2.257, 1.902, 1.624, 1.408, 1.239, 1.111, 1.016, 1.001, 1.001, 1.007, 1.098, 1.227, 1.395, 1.608, 1.883, 2.222,
+ 2.257, 1.946, 1.666, 1.448, 1.281, 1.153, 1.093, 1.013, 1.008, 1.089, 1.143, 1.269, 1.437, 1.654, 1.934, 2.226,
+ 2.309, 2.044, 1.756, 1.532, 1.363, 1.259, 1.153, 1.093, 1.093, 1.143, 1.264, 1.354, 1.524, 1.746, 2.035, 2.284,
+ 2.425, 2.201, 1.896, 1.662, 1.519, 1.363, 1.259, 1.214, 1.214, 1.264, 1.354, 1.519, 1.655, 1.888, 2.191, 2.413,
+ 2.724, 2.417, 2.091, 1.888, 1.662, 1.519, 1.419, 1.373, 1.373, 1.425, 1.521, 1.655, 1.885, 2.089, 2.409, 2.722,
+ 2.858, 2.724, 2.356, 2.085, 1.842, 1.658, 1.581, 1.577, 1.577, 1.579, 1.653, 1.838, 2.084, 2.359, 2.722, 2.842
+ ],
+ "sigma": 0.00372,
+ "sigma_Cb": 0.00244
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2890,
+ "ccm":
+ [
+ 1.36754, -0.18448, -0.18306,
+ -0.32356, 1.44826, -0.12471,
+ -0.00412, -0.69936, 1.70348
+ ]
+ },
+ {
+ "ct": 2920,
+ "ccm":
+ [
+ 1.26704, 0.01624, -0.28328,
+ -0.28516, 1.38934, -0.10419,
+ -0.04854, -0.82211, 1.87066
+ ]
+ },
+ {
+ "ct": 3550,
+ "ccm":
+ [
+ 1.42836, -0.27235, -0.15601,
+ -0.28751, 1.41075, -0.12325,
+ -0.01812, -0.54849, 1.56661
+ ]
+ },
+ {
+ "ct": 4500,
+ "ccm":
+ [
+ 1.36328, -0.19569, -0.16759,
+ -0.25254, 1.52248, -0.26994,
+ -0.01575, -0.53155, 1.54729
+ ]
+ },
+ {
+ "ct": 5700,
+ "ccm":
+ [
+ 1.49207, -0.37245, -0.11963,
+ -0.21493, 1.40005, -0.18512,
+ -0.03781, -0.38779, 1.42561
+ ]
+ },
+ {
+ "ct": 7900,
+ "ccm":
+ [
+ 1.34849, -0.05425, -0.29424,
+ -0.22182, 1.77684, -0.55502,
+ -0.07403, -0.55336, 1.62739
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
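Each "ccm" above is a 3x3 colour correction matrix stored row-major; under this sketch's assumption it is applied to a camera RGB triple by plain matrix multiplication. Note how every row sums to roughly 1.0, which keeps neutral greys neutral:

    def apply_ccm(ccm, rgb):
        r, g, b = rgb
        return (ccm[0] * r + ccm[1] * g + ccm[2] * b,
                ccm[3] * r + ccm[4] * g + ccm[5] * b,
                ccm[6] * r + ccm[7] * g + ccm[8] * b)

    # The 2890 K matrix from imx519.json above:
    ccm = [1.36754, -0.18448, -0.18306,
           -0.32356, 1.44826, -0.12471,
           -0.00412, -0.69936, 1.70348]
    print(apply_ccm(ccm, (0.5, 0.5, 0.5)))   # stays (0.5, 0.5, 0.5) to ~5 d.p.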
diff --git a/src/ipa/rpi/vc4/data/imx708.json b/src/ipa/rpi/vc4/data/imx708.json
new file mode 100644
index 00000000..4de6f079
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708.json
@@ -0,0 +1,671 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 10672,
+ "reference_gain": 1.12,
+ "reference_aperture": 1.0,
+ "reference_lux": 977,
+ "reference_Y": 8627
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2498.0, 0.8733, 0.2606,
+ 2821.0, 0.7707, 0.3245,
+ 2925.0, 0.7338, 0.3499,
+ 2926.0, 0.7193, 0.3603,
+ 2951.0, 0.7144, 0.3639,
+ 2954.0, 0.7111, 0.3663,
+ 3578.0, 0.6038, 0.4516,
+ 3717.0, 0.5861, 0.4669,
+ 3784.0, 0.5786, 0.4737,
+ 4485.0, 0.5113, 0.5368,
+ 4615.0, 0.4994, 0.5486,
+ 4671.0, 0.4927, 0.5554,
+ 5753.0, 0.4274, 0.6246,
+ 5773.0, 0.4265, 0.6256,
+ 7433.0, 0.3723, 0.6881
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03148,
+ "transverse_neg": 0.03061
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.562, 1.566, 1.566, 1.556, 1.533, 1.506, 1.475, 1.475, 1.475, 1.475, 1.506, 1.533, 1.555, 1.563, 1.562, 1.555,
+ 1.563, 1.564, 1.561, 1.538, 1.508, 1.482, 1.449, 1.436, 1.436, 1.449, 1.481, 1.508, 1.537, 1.557, 1.558, 1.557,
+ 1.564, 1.563, 1.554, 1.522, 1.482, 1.449, 1.421, 1.403, 1.403, 1.419, 1.449, 1.481, 1.519, 1.549, 1.557, 1.559,
+ 1.564, 1.563, 1.545, 1.506, 1.462, 1.421, 1.403, 1.378, 1.378, 1.402, 1.419, 1.459, 1.503, 1.541, 1.557, 1.559,
+ 1.564, 1.562, 1.537, 1.494, 1.447, 1.404, 1.378, 1.364, 1.364, 1.377, 1.402, 1.444, 1.491, 1.532, 1.556, 1.559,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.359, 1.359, 1.364, 1.393, 1.436, 1.484, 1.527, 1.555, 1.558,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.356, 1.356, 1.364, 1.393, 1.436, 1.484, 1.527, 1.554, 1.557,
+ 1.564, 1.561, 1.536, 1.492, 1.444, 1.402, 1.374, 1.364, 1.363, 1.373, 1.401, 1.442, 1.489, 1.531, 1.554, 1.557,
+ 1.564, 1.563, 1.544, 1.504, 1.458, 1.418, 1.397, 1.374, 1.374, 1.395, 1.416, 1.456, 1.501, 1.538, 1.556, 1.557,
+ 1.564, 1.562, 1.551, 1.518, 1.477, 1.441, 1.418, 1.397, 1.397, 1.416, 1.438, 1.474, 1.514, 1.546, 1.556, 1.556,
+ 1.562, 1.562, 1.558, 1.534, 1.499, 1.476, 1.441, 1.426, 1.426, 1.438, 1.473, 1.496, 1.531, 1.552, 1.556, 1.555,
+ 1.561, 1.564, 1.564, 1.552, 1.525, 1.497, 1.466, 1.461, 1.461, 1.464, 1.495, 1.523, 1.548, 1.556, 1.556, 1.552
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.609, 2.616, 2.617, 2.607, 2.573, 2.527, 2.483, 2.481, 2.481, 2.483, 2.529, 2.573, 2.604, 2.613, 2.613, 2.604,
+ 2.609, 2.615, 2.608, 2.576, 2.533, 2.489, 2.439, 2.418, 2.418, 2.439, 2.491, 2.532, 2.577, 2.605, 2.609, 2.607,
+ 2.611, 2.611, 2.597, 2.551, 2.489, 2.439, 2.391, 2.364, 2.364, 2.391, 2.439, 2.491, 2.551, 2.592, 2.607, 2.609,
+ 2.612, 2.608, 2.583, 2.526, 2.457, 2.391, 2.362, 2.318, 2.318, 2.362, 2.391, 2.458, 2.526, 2.581, 2.607, 2.611,
+ 2.612, 2.604, 2.571, 2.507, 2.435, 2.362, 2.317, 2.293, 2.294, 2.318, 2.363, 2.434, 2.508, 2.568, 2.604, 2.612,
+ 2.611, 2.602, 2.564, 2.496, 2.419, 2.349, 2.293, 2.284, 2.284, 2.294, 2.347, 2.421, 2.497, 2.562, 2.603, 2.611,
+ 2.609, 2.601, 2.564, 2.496, 2.419, 2.349, 2.293, 2.278, 2.278, 2.294, 2.347, 2.421, 2.497, 2.562, 2.602, 2.609,
+ 2.609, 2.602, 2.568, 2.503, 2.429, 2.361, 2.311, 2.292, 2.292, 2.309, 2.357, 2.429, 2.504, 2.567, 2.602, 2.609,
+ 2.606, 2.604, 2.579, 2.519, 2.449, 2.384, 2.348, 2.311, 2.311, 2.346, 2.383, 2.449, 2.521, 2.577, 2.604, 2.608,
+ 2.604, 2.603, 2.586, 2.537, 2.474, 2.418, 2.384, 2.348, 2.348, 2.383, 2.417, 2.476, 2.538, 2.586, 2.601, 2.603,
+ 2.603, 2.605, 2.596, 2.561, 2.508, 2.474, 2.418, 2.396, 2.396, 2.417, 2.474, 2.511, 2.562, 2.596, 2.603, 2.602,
+ 2.601, 2.607, 2.606, 2.589, 2.549, 2.507, 2.456, 2.454, 2.454, 2.458, 2.508, 2.554, 2.594, 2.605, 2.605, 2.602
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.221, 3.226, 3.231, 3.236, 3.239, 3.243, 3.245, 3.247, 3.249, 3.253, 3.255, 3.254, 3.253, 3.242, 3.235, 3.226,
+ 3.225, 3.231, 3.235, 3.238, 3.241, 3.244, 3.246, 3.247, 3.249, 3.254, 3.256, 3.255, 3.252, 3.248, 3.241, 3.232,
+ 3.226, 3.234, 3.239, 3.243, 3.243, 3.245, 3.247, 3.248, 3.251, 3.255, 3.256, 3.256, 3.254, 3.249, 3.244, 3.236,
+ 3.232, 3.238, 3.245, 3.245, 3.246, 3.247, 3.248, 3.251, 3.251, 3.256, 3.257, 3.257, 3.256, 3.254, 3.249, 3.239,
+ 3.232, 3.243, 3.246, 3.246, 3.246, 3.247, 3.248, 3.251, 3.253, 3.257, 3.258, 3.258, 3.257, 3.256, 3.254, 3.239,
+ 3.232, 3.242, 3.246, 3.247, 3.246, 3.246, 3.248, 3.251, 3.252, 3.253, 3.256, 3.255, 3.255, 3.254, 3.251, 3.239,
+ 3.233, 3.241, 3.244, 3.245, 3.244, 3.245, 3.246, 3.249, 3.251, 3.252, 3.253, 3.252, 3.252, 3.252, 3.249, 3.238,
+ 3.238, 3.241, 3.246, 3.246, 3.245, 3.245, 3.247, 3.249, 3.251, 3.252, 3.253, 3.253, 3.252, 3.252, 3.249, 3.239,
+ 3.235, 3.241, 3.245, 3.245, 3.245, 3.245, 3.246, 3.247, 3.251, 3.254, 3.253, 3.255, 3.256, 3.255, 3.251, 3.241,
+ 3.226, 3.235, 3.241, 3.241, 3.241, 3.241, 3.243, 3.245, 3.246, 3.252, 3.253, 3.254, 3.256, 3.254, 3.241, 3.237,
+ 3.205, 3.213, 3.213, 3.214, 3.214, 3.214, 3.214, 3.213, 3.213, 3.216, 3.218, 3.216, 3.214, 3.213, 3.211, 3.208,
+ 3.205, 3.205, 3.212, 3.212, 3.212, 3.213, 3.211, 3.211, 3.211, 3.213, 3.216, 3.214, 3.213, 3.211, 3.208, 3.196
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.645, 1.646, 1.649, 1.653, 1.654, 1.657, 1.659, 1.661, 1.663, 1.662, 1.661, 1.659, 1.656, 1.651, 1.645, 1.642,
+ 1.646, 1.649, 1.652, 1.654, 1.656, 1.659, 1.662, 1.663, 1.664, 1.664, 1.662, 1.661, 1.657, 1.653, 1.649, 1.644,
+ 1.648, 1.652, 1.654, 1.656, 1.658, 1.662, 1.665, 1.668, 1.668, 1.668, 1.665, 1.662, 1.658, 1.655, 1.652, 1.646,
+ 1.649, 1.653, 1.656, 1.658, 1.661, 1.665, 1.667, 1.671, 1.673, 1.671, 1.668, 1.663, 1.659, 1.656, 1.654, 1.647,
+ 1.649, 1.655, 1.657, 1.659, 1.661, 1.666, 1.671, 1.674, 1.675, 1.673, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.659, 1.661, 1.666, 1.673, 1.676, 1.676, 1.675, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.658, 1.659, 1.665, 1.672, 1.675, 1.675, 1.674, 1.668, 1.662, 1.658, 1.655, 1.654, 1.646,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.665, 1.671, 1.673, 1.673, 1.672, 1.668, 1.662, 1.658, 1.655, 1.654, 1.647,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.664, 1.667, 1.671, 1.672, 1.668, 1.666, 1.662, 1.659, 1.656, 1.654, 1.647,
+ 1.647, 1.652, 1.655, 1.656, 1.657, 1.661, 1.664, 1.665, 1.665, 1.665, 1.663, 1.661, 1.657, 1.655, 1.647, 1.647,
+ 1.639, 1.642, 1.644, 1.645, 1.646, 1.648, 1.648, 1.648, 1.649, 1.649, 1.649, 1.646, 1.645, 1.642, 1.639, 1.636,
+ 1.639, 1.641, 1.642, 1.644, 1.645, 1.646, 1.647, 1.647, 1.648, 1.648, 1.647, 1.645, 1.642, 1.639, 1.636, 1.633
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.644, 2.396, 2.077, 1.863, 1.682, 1.535, 1.392, 1.382, 1.382, 1.382, 1.515, 1.657, 1.826, 2.035, 2.351, 2.604,
+ 2.497, 2.229, 1.947, 1.733, 1.539, 1.424, 1.296, 1.249, 1.249, 1.285, 1.401, 1.519, 1.699, 1.908, 2.183, 2.456,
+ 2.389, 2.109, 1.848, 1.622, 1.424, 1.296, 1.201, 1.146, 1.146, 1.188, 1.285, 1.401, 1.591, 1.811, 2.065, 2.347,
+ 2.317, 2.026, 1.771, 1.535, 1.339, 1.201, 1.145, 1.069, 1.069, 1.134, 1.188, 1.318, 1.505, 1.734, 1.983, 2.273,
+ 2.276, 1.972, 1.715, 1.474, 1.281, 1.148, 1.069, 1.033, 1.024, 1.065, 1.134, 1.262, 1.446, 1.679, 1.929, 2.233,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.013, 1.013, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.001, 1.001, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.951, 1.694, 1.456, 1.265, 1.131, 1.044, 1.026, 1.019, 1.039, 1.118, 1.246, 1.429, 1.663, 1.912, 2.227,
+ 2.291, 1.992, 1.738, 1.505, 1.311, 1.175, 1.108, 1.044, 1.041, 1.106, 1.161, 1.292, 1.478, 1.707, 1.955, 2.252,
+ 2.347, 2.058, 1.803, 1.581, 1.384, 1.245, 1.175, 1.108, 1.108, 1.161, 1.239, 1.364, 1.551, 1.773, 2.023, 2.311,
+ 2.438, 2.156, 1.884, 1.674, 1.484, 1.373, 1.245, 1.199, 1.199, 1.239, 1.363, 1.463, 1.647, 1.858, 2.123, 2.406,
+ 2.563, 2.305, 1.998, 1.792, 1.615, 1.472, 1.339, 1.322, 1.322, 1.326, 1.456, 1.593, 1.767, 1.973, 2.273, 2.532
+ ],
+ "sigma": 0.00178,
+ "sigma_Cb": 0.00217
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2964,
+ "ccm":
+ [
+ 1.72129, -0.45961, -0.26169,
+ -0.30042, 1.56924, -0.26882,
+ 0.15133, -1.13293, 1.98161
+ ]
+ },
+ {
+ "ct": 3610,
+ "ccm":
+ [
+ 1.54474, -0.35082, -0.19391,
+ -0.36989, 1.67926, -0.30936,
+ -0.00524, -0.55197, 1.55722
+ ]
+ },
+ {
+ "ct": 4640,
+ "ccm":
+ [
+ 1.52972, -0.35168, -0.17804,
+ -0.28309, 1.67098, -0.38788,
+ 0.01695, -0.57209, 1.55515
+ ]
+ },
+ {
+ "ct": 5910,
+ "ccm":
+ [
+ 1.56879, -0.42159, -0.14719,
+ -0.27275, 1.59354, -0.32079,
+ -0.02862, -0.40662, 1.43525
+ ]
+ },
+ {
+ "ct": 7590,
+ "ccm":
+ [
+ 1.41424, -0.21092, -0.20332,
+ -0.17646, 1.71734, -0.54087,
+ 0.01297, -0.63111, 1.61814
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 445, 15.0, 925 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
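The "rpi.hdr" block above ties the HDR exposures to the multi-channel rpi.agc configuration earlier in the file: reading the data, "cadence" appears to list the AGC channel used on successive frames (cycling), and "channel_map" names which channel supplies the short and long exposures. A small illustrative sketch of that pairing, under those assumptions:

    cadence = [1, 2]
    channel_map = {"short": 1, "long": 2}

    roles = {v: k for k, v in channel_map.items()}
    for frame in range(4):
        ch = cadence[frame % len(cadence)]   # channel cycles 1, 2, 1, 2, ...
        print(f"frame {frame}: AGC channel {ch} ({roles[ch]})")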
diff --git a/src/ipa/rpi/vc4/data/imx708_noir.json b/src/ipa/rpi/vc4/data/imx708_noir.json
new file mode 100644
index 00000000..7b7ee874
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_noir.json
@@ -0,0 +1,770 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 10672,
+ "reference_gain": 1.12,
+ "reference_aperture": 1.0,
+ "reference_lux": 977,
+ "reference_Y": 8627
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 0,
+ "ct_curve":
+ [
+ 2498.0, 0.8733, 0.2606,
+ 2821.0, 0.7707, 0.3245,
+ 2925.0, 0.7338, 0.3499,
+ 2926.0, 0.7193, 0.3603,
+ 2951.0, 0.7144, 0.3639,
+ 2954.0, 0.7111, 0.3663,
+ 3578.0, 0.6038, 0.4516,
+ 3717.0, 0.5861, 0.4669,
+ 3784.0, 0.5786, 0.4737,
+ 4485.0, 0.5113, 0.5368,
+ 4615.0, 0.4994, 0.5486,
+ 4671.0, 0.4927, 0.5554,
+ 5753.0, 0.4274, 0.6246,
+ 5773.0, 0.4265, 0.6256,
+ 7433.0, 0.3723, 0.6881
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.03148,
+ "transverse_neg": 0.03061
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.562, 1.566, 1.566, 1.556, 1.533, 1.506, 1.475, 1.475, 1.475, 1.475, 1.506, 1.533, 1.555, 1.563, 1.562, 1.555,
+ 1.563, 1.564, 1.561, 1.538, 1.508, 1.482, 1.449, 1.436, 1.436, 1.449, 1.481, 1.508, 1.537, 1.557, 1.558, 1.557,
+ 1.564, 1.563, 1.554, 1.522, 1.482, 1.449, 1.421, 1.403, 1.403, 1.419, 1.449, 1.481, 1.519, 1.549, 1.557, 1.559,
+ 1.564, 1.563, 1.545, 1.506, 1.462, 1.421, 1.403, 1.378, 1.378, 1.402, 1.419, 1.459, 1.503, 1.541, 1.557, 1.559,
+ 1.564, 1.562, 1.537, 1.494, 1.447, 1.404, 1.378, 1.364, 1.364, 1.377, 1.402, 1.444, 1.491, 1.532, 1.556, 1.559,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.359, 1.359, 1.364, 1.393, 1.436, 1.484, 1.527, 1.555, 1.558,
+ 1.564, 1.559, 1.532, 1.487, 1.438, 1.395, 1.365, 1.356, 1.356, 1.364, 1.393, 1.436, 1.484, 1.527, 1.554, 1.557,
+ 1.564, 1.561, 1.536, 1.492, 1.444, 1.402, 1.374, 1.364, 1.363, 1.373, 1.401, 1.442, 1.489, 1.531, 1.554, 1.557,
+ 1.564, 1.563, 1.544, 1.504, 1.458, 1.418, 1.397, 1.374, 1.374, 1.395, 1.416, 1.456, 1.501, 1.538, 1.556, 1.557,
+ 1.564, 1.562, 1.551, 1.518, 1.477, 1.441, 1.418, 1.397, 1.397, 1.416, 1.438, 1.474, 1.514, 1.546, 1.556, 1.556,
+ 1.562, 1.562, 1.558, 1.534, 1.499, 1.476, 1.441, 1.426, 1.426, 1.438, 1.473, 1.496, 1.531, 1.552, 1.556, 1.555,
+ 1.561, 1.564, 1.564, 1.552, 1.525, 1.497, 1.466, 1.461, 1.461, 1.464, 1.495, 1.523, 1.548, 1.556, 1.556, 1.552
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.609, 2.616, 2.617, 2.607, 2.573, 2.527, 2.483, 2.481, 2.481, 2.483, 2.529, 2.573, 2.604, 2.613, 2.613, 2.604,
+ 2.609, 2.615, 2.608, 2.576, 2.533, 2.489, 2.439, 2.418, 2.418, 2.439, 2.491, 2.532, 2.577, 2.605, 2.609, 2.607,
+ 2.611, 2.611, 2.597, 2.551, 2.489, 2.439, 2.391, 2.364, 2.364, 2.391, 2.439, 2.491, 2.551, 2.592, 2.607, 2.609,
+ 2.612, 2.608, 2.583, 2.526, 2.457, 2.391, 2.362, 2.318, 2.318, 2.362, 2.391, 2.458, 2.526, 2.581, 2.607, 2.611,
+ 2.612, 2.604, 2.571, 2.507, 2.435, 2.362, 2.317, 2.293, 2.294, 2.318, 2.363, 2.434, 2.508, 2.568, 2.604, 2.612,
+ 2.611, 2.602, 2.564, 2.496, 2.419, 2.349, 2.293, 2.284, 2.284, 2.294, 2.347, 2.421, 2.497, 2.562, 2.603, 2.611,
+ 2.609, 2.601, 2.564, 2.496, 2.419, 2.349, 2.293, 2.278, 2.278, 2.294, 2.347, 2.421, 2.497, 2.562, 2.602, 2.609,
+ 2.609, 2.602, 2.568, 2.503, 2.429, 2.361, 2.311, 2.292, 2.292, 2.309, 2.357, 2.429, 2.504, 2.567, 2.602, 2.609,
+ 2.606, 2.604, 2.579, 2.519, 2.449, 2.384, 2.348, 2.311, 2.311, 2.346, 2.383, 2.449, 2.521, 2.577, 2.604, 2.608,
+ 2.604, 2.603, 2.586, 2.537, 2.474, 2.418, 2.384, 2.348, 2.348, 2.383, 2.417, 2.476, 2.538, 2.586, 2.601, 2.603,
+ 2.603, 2.605, 2.596, 2.561, 2.508, 2.474, 2.418, 2.396, 2.396, 2.417, 2.474, 2.511, 2.562, 2.596, 2.603, 2.602,
+ 2.601, 2.607, 2.606, 2.589, 2.549, 2.507, 2.456, 2.454, 2.454, 2.458, 2.508, 2.554, 2.594, 2.605, 2.605, 2.602
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.221, 3.226, 3.231, 3.236, 3.239, 3.243, 3.245, 3.247, 3.249, 3.253, 3.255, 3.254, 3.253, 3.242, 3.235, 3.226,
+ 3.225, 3.231, 3.235, 3.238, 3.241, 3.244, 3.246, 3.247, 3.249, 3.254, 3.256, 3.255, 3.252, 3.248, 3.241, 3.232,
+ 3.226, 3.234, 3.239, 3.243, 3.243, 3.245, 3.247, 3.248, 3.251, 3.255, 3.256, 3.256, 3.254, 3.249, 3.244, 3.236,
+ 3.232, 3.238, 3.245, 3.245, 3.246, 3.247, 3.248, 3.251, 3.251, 3.256, 3.257, 3.257, 3.256, 3.254, 3.249, 3.239,
+ 3.232, 3.243, 3.246, 3.246, 3.246, 3.247, 3.248, 3.251, 3.253, 3.257, 3.258, 3.258, 3.257, 3.256, 3.254, 3.239,
+ 3.232, 3.242, 3.246, 3.247, 3.246, 3.246, 3.248, 3.251, 3.252, 3.253, 3.256, 3.255, 3.255, 3.254, 3.251, 3.239,
+ 3.233, 3.241, 3.244, 3.245, 3.244, 3.245, 3.246, 3.249, 3.251, 3.252, 3.253, 3.252, 3.252, 3.252, 3.249, 3.238,
+ 3.238, 3.241, 3.246, 3.246, 3.245, 3.245, 3.247, 3.249, 3.251, 3.252, 3.253, 3.253, 3.252, 3.252, 3.249, 3.239,
+ 3.235, 3.241, 3.245, 3.245, 3.245, 3.245, 3.246, 3.247, 3.251, 3.254, 3.253, 3.255, 3.256, 3.255, 3.251, 3.241,
+ 3.226, 3.235, 3.241, 3.241, 3.241, 3.241, 3.243, 3.245, 3.246, 3.252, 3.253, 3.254, 3.256, 3.254, 3.241, 3.237,
+ 3.205, 3.213, 3.213, 3.214, 3.214, 3.214, 3.214, 3.213, 3.213, 3.216, 3.218, 3.216, 3.214, 3.213, 3.211, 3.208,
+ 3.205, 3.205, 3.212, 3.212, 3.212, 3.213, 3.211, 3.211, 3.211, 3.213, 3.216, 3.214, 3.213, 3.211, 3.208, 3.196
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.645, 1.646, 1.649, 1.653, 1.654, 1.657, 1.659, 1.661, 1.663, 1.662, 1.661, 1.659, 1.656, 1.651, 1.645, 1.642,
+ 1.646, 1.649, 1.652, 1.654, 1.656, 1.659, 1.662, 1.663, 1.664, 1.664, 1.662, 1.661, 1.657, 1.653, 1.649, 1.644,
+ 1.648, 1.652, 1.654, 1.656, 1.658, 1.662, 1.665, 1.668, 1.668, 1.668, 1.665, 1.662, 1.658, 1.655, 1.652, 1.646,
+ 1.649, 1.653, 1.656, 1.658, 1.661, 1.665, 1.667, 1.671, 1.673, 1.671, 1.668, 1.663, 1.659, 1.656, 1.654, 1.647,
+ 1.649, 1.655, 1.657, 1.659, 1.661, 1.666, 1.671, 1.674, 1.675, 1.673, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.659, 1.661, 1.666, 1.673, 1.676, 1.676, 1.675, 1.671, 1.664, 1.659, 1.656, 1.654, 1.648,
+ 1.649, 1.654, 1.656, 1.658, 1.659, 1.665, 1.672, 1.675, 1.675, 1.674, 1.668, 1.662, 1.658, 1.655, 1.654, 1.646,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.665, 1.671, 1.673, 1.673, 1.672, 1.668, 1.662, 1.658, 1.655, 1.654, 1.647,
+ 1.652, 1.655, 1.657, 1.659, 1.661, 1.664, 1.667, 1.671, 1.672, 1.668, 1.666, 1.662, 1.659, 1.656, 1.654, 1.647,
+ 1.647, 1.652, 1.655, 1.656, 1.657, 1.661, 1.664, 1.665, 1.665, 1.665, 1.663, 1.661, 1.657, 1.655, 1.647, 1.647,
+ 1.639, 1.642, 1.644, 1.645, 1.646, 1.648, 1.648, 1.648, 1.649, 1.649, 1.649, 1.646, 1.645, 1.642, 1.639, 1.636,
+ 1.639, 1.641, 1.642, 1.644, 1.645, 1.646, 1.647, 1.647, 1.648, 1.648, 1.647, 1.645, 1.642, 1.639, 1.636, 1.633
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.644, 2.396, 2.077, 1.863, 1.682, 1.535, 1.392, 1.382, 1.382, 1.382, 1.515, 1.657, 1.826, 2.035, 2.351, 2.604,
+ 2.497, 2.229, 1.947, 1.733, 1.539, 1.424, 1.296, 1.249, 1.249, 1.285, 1.401, 1.519, 1.699, 1.908, 2.183, 2.456,
+ 2.389, 2.109, 1.848, 1.622, 1.424, 1.296, 1.201, 1.146, 1.146, 1.188, 1.285, 1.401, 1.591, 1.811, 2.065, 2.347,
+ 2.317, 2.026, 1.771, 1.535, 1.339, 1.201, 1.145, 1.069, 1.069, 1.134, 1.188, 1.318, 1.505, 1.734, 1.983, 2.273,
+ 2.276, 1.972, 1.715, 1.474, 1.281, 1.148, 1.069, 1.033, 1.024, 1.065, 1.134, 1.262, 1.446, 1.679, 1.929, 2.233,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.013, 1.013, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.941, 1.682, 1.441, 1.251, 1.119, 1.033, 1.001, 1.001, 1.024, 1.105, 1.231, 1.415, 1.649, 1.898, 2.227,
+ 2.268, 1.951, 1.694, 1.456, 1.265, 1.131, 1.044, 1.026, 1.019, 1.039, 1.118, 1.246, 1.429, 1.663, 1.912, 2.227,
+ 2.291, 1.992, 1.738, 1.505, 1.311, 1.175, 1.108, 1.044, 1.041, 1.106, 1.161, 1.292, 1.478, 1.707, 1.955, 2.252,
+ 2.347, 2.058, 1.803, 1.581, 1.384, 1.245, 1.175, 1.108, 1.108, 1.161, 1.239, 1.364, 1.551, 1.773, 2.023, 2.311,
+ 2.438, 2.156, 1.884, 1.674, 1.484, 1.373, 1.245, 1.199, 1.199, 1.239, 1.363, 1.463, 1.647, 1.858, 2.123, 2.406,
+ 2.563, 2.305, 1.998, 1.792, 1.615, 1.472, 1.339, 1.322, 1.322, 1.326, 1.456, 1.593, 1.767, 1.973, 2.273, 2.532
+ ],
+ "sigma": 0.00178,
+ "sigma_Cb": 0.00217
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2498,
+ "ccm":
+ [
+ 1.14912, 0.28638, -0.43551,
+ -0.49691, 1.60391, -0.10701,
+ -0.10513, -1.09534, 2.20047
+ ]
+ },
+ {
+ "ct": 2821,
+ "ccm":
+ [
+ 1.18251, 0.15501, -0.33752,
+ -0.44304, 1.58495, -0.14191,
+ -0.05077, -0.96422, 2.01498
+ ]
+ },
+ {
+ "ct": 2925,
+ "ccm":
+ [
+ 1.18668, 0.00195, -0.18864,
+ -0.41617, 1.50514, -0.08897,
+ -0.02675, -0.91143, 1.93818
+ ]
+ },
+ {
+ "ct": 2926,
+ "ccm":
+ [
+ 1.50948, -0.44421, -0.06527,
+ -0.37241, 1.41726, -0.04486,
+ 0.07098, -0.84694, 1.77596
+ ]
+ },
+ {
+ "ct": 2951,
+ "ccm":
+ [
+ 1.52743, -0.47333, -0.05411,
+ -0.36485, 1.40764, -0.04279,
+ 0.08672, -0.90479, 1.81807
+ ]
+ },
+ {
+ "ct": 2954,
+ "ccm":
+ [
+ 1.51683, -0.46841, -0.04841,
+ -0.36288, 1.39914, -0.03625,
+ 0.06421, -0.82034, 1.75613
+ ]
+ },
+ {
+ "ct": 3578,
+ "ccm":
+ [
+ 1.59888, -0.59105, -0.00784,
+ -0.29366, 1.32037, -0.02671,
+ 0.06627, -0.76465, 1.69838
+ ]
+ },
+ {
+ "ct": 3717,
+ "ccm":
+ [
+ 1.59063, -0.58059, -0.01003,
+ -0.29583, 1.32715, -0.03132,
+ 0.03613, -0.67431, 1.63817
+ ]
+ },
+ {
+ "ct": 3784,
+ "ccm":
+ [
+ 1.59379, -0.58861, -0.00517,
+ -0.29178, 1.33292, -0.04115,
+ 0.03541, -0.66162, 1.62622
+ ]
+ },
+ {
+ "ct": 4485,
+ "ccm":
+ [
+ 1.40761, -0.34561, -0.06201,
+ -0.32388, 1.57221, -0.24832,
+ -0.01014, -0.63427, 1.64441
+ ]
+ },
+ {
+ "ct": 4615,
+ "ccm":
+ [
+ 1.41537, -0.35832, -0.05705,
+ -0.31429, 1.56019, -0.24591,
+ -0.01761, -0.61859, 1.63621
+ ]
+ },
+ {
+ "ct": 4671,
+ "ccm":
+ [
+ 1.42941, -0.38178, -0.04764,
+ -0.31421, 1.55925, -0.24504,
+ -0.01141, -0.62987, 1.64129
+ ]
+ },
+ {
+ "ct": 5753,
+ "ccm":
+ [
+ 1.64549, -0.63329, -0.01221,
+ -0.22431, 1.36423, -0.13992,
+ -0.00831, -0.55373, 1.56204
+ ]
+ },
+ {
+ "ct": 5773,
+ "ccm":
+ [
+ 1.63668, -0.63557, -0.00111,
+ -0.21919, 1.36234, -0.14315,
+ -0.00399, -0.57428, 1.57827
+ ]
+ },
+ {
+ "ct": 7433,
+ "ccm":
+ [
+ 1.36007, -0.09277, -0.26729,
+ -0.36886, 2.09249, -0.72363,
+ -0.12573, -0.76761, 1.89334
+ ]
+ },
+ {
+ "ct": 55792,
+ "ccm":
+ [
+ 1.65091, -0.63689, -0.01401,
+ -0.22277, 1.35752, -0.13475,
+ -0.00943, -0.55091, 1.56033
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 445, 15.0, 925 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
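The "rpi.lux" block is a single calibration point: shutter, gain, aperture, the lux measured at the scene, and the average Y the statistics reported. A runtime lux estimate can then be formed by scaling the reference lux by the exposure and luminance ratios. A hedged C++ sketch of that idea; the exact formula in the Raspberry Pi IPA may differ in detail, for instance in how aperture is handled:

/* Estimate scene lux from an "rpi.lux" calibration point. Illustrative
 * only: a brighter scene needs less exposure for the same measured Y,
 * hence the reference/current ratios below. */
struct LuxReference {
	double shutterSpeed; /* microseconds */
	double gain;
	double aperture;
	double lux;          /* lux measured at the reference point */
	double Y;            /* average luminance at the reference point */
};

double estimateLux(const LuxReference &ref, double shutter, double gain,
		   double aperture, double currentY)
{
	double shutterRatio = ref.shutterSpeed / shutter;
	double gainRatio = ref.gain / gain;
	double apertureRatio = ref.aperture / aperture;
	double yRatio = currentY / ref.Y;

	return ref.lux * shutterRatio * gainRatio *
	       apertureRatio * apertureRatio * yRatio;
}

With the imx708_noir reference above (10672 us, gain 1.12, 977 lux at Y = 8627), a frame captured at half the shutter time that still measures the same average Y would estimate roughly twice the scene lux.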
diff --git a/src/ipa/rpi/vc4/data/imx708_wide.json b/src/ipa/rpi/vc4/data/imx708_wide.json
new file mode 100644
index 00000000..6f45aafc
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_wide.json
@@ -0,0 +1,682 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9989,
+ "reference_gain": 1.23,
+ "reference_aperture": 1.0,
+ "reference_lux": 980,
+ "reference_Y": 8345
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2750.0, 0.7881, 0.2849,
+ 2940.0, 0.7559, 0.3103,
+ 3650.0, 0.6291, 0.4206,
+ 4625.0, 0.5336, 0.5161,
+ 5715.0, 0.4668, 0.5898
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.01165,
+ "transverse_neg": 0.01601
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.529, 1.526, 1.522, 1.506, 1.489, 1.473, 1.458, 1.456, 1.456, 1.458, 1.474, 1.493, 1.513, 1.531, 1.541, 1.544,
+ 1.527, 1.523, 1.511, 1.491, 1.474, 1.459, 1.445, 1.441, 1.441, 1.446, 1.461, 1.479, 1.499, 1.521, 1.536, 1.541,
+ 1.524, 1.515, 1.498, 1.477, 1.459, 1.444, 1.431, 1.426, 1.426, 1.435, 1.446, 1.466, 1.487, 1.507, 1.528, 1.538,
+ 1.522, 1.512, 1.491, 1.468, 1.447, 1.431, 1.423, 1.417, 1.418, 1.425, 1.435, 1.455, 1.479, 1.499, 1.523, 1.537,
+ 1.522, 1.509, 1.485, 1.463, 1.441, 1.423, 1.416, 1.413, 1.415, 1.418, 1.429, 1.449, 1.473, 1.495, 1.521, 1.538,
+ 1.522, 1.508, 1.483, 1.461, 1.438, 1.421, 1.413, 1.412, 1.412, 1.415, 1.428, 1.447, 1.471, 1.493, 1.519, 1.538,
+ 1.522, 1.509, 1.484, 1.462, 1.439, 1.421, 1.414, 1.411, 1.412, 1.416, 1.428, 1.447, 1.471, 1.493, 1.519, 1.537,
+ 1.523, 1.511, 1.487, 1.465, 1.443, 1.424, 1.417, 1.413, 1.415, 1.419, 1.429, 1.451, 1.473, 1.494, 1.519, 1.536,
+ 1.524, 1.514, 1.493, 1.471, 1.451, 1.434, 1.424, 1.419, 1.419, 1.428, 1.437, 1.457, 1.477, 1.498, 1.521, 1.538,
+ 1.527, 1.521, 1.503, 1.481, 1.462, 1.449, 1.434, 1.429, 1.429, 1.437, 1.451, 1.469, 1.488, 1.508, 1.527, 1.539,
+ 1.529, 1.527, 1.515, 1.495, 1.477, 1.462, 1.449, 1.444, 1.444, 1.451, 1.467, 1.481, 1.499, 1.519, 1.535, 1.543,
+ 1.534, 1.531, 1.527, 1.512, 1.492, 1.476, 1.463, 1.461, 1.461, 1.464, 1.479, 1.495, 1.515, 1.533, 1.543, 1.546
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.603, 2.599, 2.591, 2.567, 2.539, 2.515, 2.489, 2.489, 2.489, 2.491, 2.516, 2.543, 2.574, 2.597, 2.614, 2.617,
+ 2.596, 2.591, 2.571, 2.542, 2.516, 2.489, 2.464, 2.458, 2.458, 2.469, 2.492, 2.518, 2.547, 2.576, 2.602, 2.614,
+ 2.591, 2.576, 2.546, 2.519, 2.489, 2.464, 2.437, 2.427, 2.427, 2.441, 2.467, 2.492, 2.525, 2.553, 2.586, 2.605,
+ 2.588, 2.568, 2.534, 2.503, 2.472, 2.437, 2.423, 2.409, 2.411, 2.425, 2.441, 2.475, 2.513, 2.541, 2.577, 2.602,
+ 2.588, 2.565, 2.527, 2.494, 2.461, 2.425, 2.409, 2.399, 2.403, 2.409, 2.431, 2.466, 2.503, 2.534, 2.571, 2.601,
+ 2.586, 2.561, 2.525, 2.491, 2.454, 2.418, 2.399, 2.396, 2.395, 2.402, 2.424, 2.461, 2.501, 2.531, 2.567, 2.599,
+ 2.583, 2.559, 2.525, 2.491, 2.454, 2.418, 2.398, 2.393, 2.393, 2.401, 2.423, 2.459, 2.498, 2.531, 2.566, 2.597,
+ 2.583, 2.559, 2.526, 2.494, 2.458, 2.421, 2.404, 2.397, 2.399, 2.404, 2.426, 2.461, 2.501, 2.531, 2.566, 2.596,
+ 2.583, 2.563, 2.531, 2.501, 2.469, 2.435, 2.419, 2.405, 2.404, 2.422, 2.435, 2.471, 2.505, 2.537, 2.572, 2.596,
+ 2.585, 2.571, 2.539, 2.516, 2.486, 2.458, 2.435, 2.424, 2.424, 2.435, 2.459, 2.489, 2.521, 2.546, 2.579, 2.601,
+ 2.589, 2.578, 2.557, 2.532, 2.506, 2.483, 2.458, 2.449, 2.449, 2.459, 2.485, 2.507, 2.535, 2.563, 2.591, 2.605,
+ 2.589, 2.586, 2.575, 2.551, 2.525, 2.503, 2.481, 2.476, 2.476, 2.481, 2.504, 2.526, 2.555, 2.583, 2.604, 2.611
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.311, 3.339, 3.369, 3.374, 3.371, 3.363, 3.356, 3.353, 3.353, 3.353, 3.357, 3.362, 3.362, 3.356, 3.328, 3.311,
+ 3.321, 3.354, 3.374, 3.374, 3.368, 3.359, 3.352, 3.349, 3.347, 3.347, 3.349, 3.357, 3.361, 3.359, 3.343, 3.324,
+ 3.334, 3.368, 3.375, 3.374, 3.365, 3.356, 3.349, 3.347, 3.346, 3.346, 3.347, 3.349, 3.358, 3.361, 3.357, 3.336,
+ 3.346, 3.378, 3.378, 3.369, 3.363, 3.358, 3.351, 3.348, 3.347, 3.346, 3.347, 3.348, 3.354, 3.364, 3.363, 3.345,
+ 3.351, 3.381, 3.381, 3.368, 3.361, 3.357, 3.349, 3.347, 3.347, 3.345, 3.345, 3.347, 3.353, 3.364, 3.364, 3.347,
+ 3.353, 3.379, 3.379, 3.366, 3.359, 3.351, 3.348, 3.343, 3.342, 3.342, 3.343, 3.345, 3.351, 3.363, 3.363, 3.347,
+ 3.353, 3.376, 3.376, 3.363, 3.351, 3.347, 3.343, 3.338, 3.336, 3.338, 3.339, 3.343, 3.351, 3.361, 3.361, 3.347,
+ 3.351, 3.374, 3.374, 3.359, 3.351, 3.345, 3.338, 3.334, 3.333, 3.334, 3.336, 3.339, 3.347, 3.358, 3.358, 3.345,
+ 3.346, 3.368, 3.368, 3.359, 3.349, 3.343, 3.336, 3.332, 3.327, 3.331, 3.333, 3.337, 3.346, 3.356, 3.356, 3.341,
+ 3.336, 3.362, 3.364, 3.359, 3.351, 3.342, 3.334, 3.324, 3.324, 3.325, 3.329, 3.336, 3.346, 3.351, 3.351, 3.333,
+ 3.324, 3.349, 3.359, 3.358, 3.352, 3.341, 3.329, 3.323, 3.321, 3.322, 3.326, 3.336, 3.346, 3.347, 3.339, 3.319,
+ 3.311, 3.328, 3.352, 3.354, 3.352, 3.341, 3.329, 3.321, 3.319, 3.321, 3.324, 3.338, 3.343, 3.343, 3.319, 3.312
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.634, 1.647, 1.665, 1.668, 1.668, 1.664, 1.662, 1.662, 1.661, 1.661, 1.661, 1.663, 1.663, 1.659, 1.643, 1.636,
+ 1.639, 1.656, 1.668, 1.669, 1.668, 1.666, 1.664, 1.663, 1.663, 1.661, 1.661, 1.662, 1.663, 1.662, 1.654, 1.642,
+ 1.645, 1.663, 1.669, 1.668, 1.667, 1.667, 1.667, 1.668, 1.668, 1.665, 1.662, 1.661, 1.662, 1.664, 1.661, 1.649,
+ 1.651, 1.669, 1.669, 1.667, 1.666, 1.668, 1.669, 1.672, 1.672, 1.668, 1.665, 1.661, 1.661, 1.665, 1.665, 1.655,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.672, 1.673, 1.673, 1.671, 1.666, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.671, 1.673, 1.672, 1.669, 1.667, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.668, 1.668, 1.664, 1.663, 1.667, 1.669, 1.671, 1.669, 1.668, 1.665, 1.661, 1.661, 1.663, 1.663, 1.659,
+ 1.653, 1.665, 1.665, 1.661, 1.661, 1.664, 1.667, 1.668, 1.668, 1.665, 1.661, 1.658, 1.659, 1.662, 1.662, 1.657,
+ 1.651, 1.664, 1.664, 1.659, 1.659, 1.661, 1.663, 1.663, 1.662, 1.661, 1.658, 1.656, 1.657, 1.662, 1.662, 1.655,
+ 1.645, 1.661, 1.663, 1.661, 1.659, 1.659, 1.659, 1.657, 1.657, 1.656, 1.654, 1.655, 1.656, 1.661, 1.661, 1.649,
+ 1.641, 1.654, 1.661, 1.661, 1.659, 1.657, 1.655, 1.653, 1.652, 1.651, 1.652, 1.653, 1.657, 1.658, 1.655, 1.644,
+ 1.635, 1.645, 1.661, 1.661, 1.661, 1.655, 1.653, 1.649, 1.648, 1.647, 1.651, 1.653, 1.657, 1.657, 1.646, 1.638
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.535, 3.279, 3.049, 2.722, 2.305, 1.958, 1.657, 1.647, 1.647, 1.656, 1.953, 2.289, 2.707, 3.058, 3.325, 3.589,
+ 3.379, 3.157, 2.874, 2.421, 1.973, 1.735, 1.472, 1.388, 1.388, 1.471, 1.724, 1.963, 2.409, 2.877, 3.185, 3.416,
+ 3.288, 3.075, 2.696, 2.169, 1.735, 1.472, 1.311, 1.208, 1.208, 1.306, 1.471, 1.724, 2.159, 2.695, 3.092, 3.321,
+ 3.238, 3.001, 2.534, 1.981, 1.572, 1.311, 1.207, 1.082, 1.082, 1.204, 1.306, 1.563, 1.973, 2.529, 3.008, 3.259,
+ 3.211, 2.938, 2.414, 1.859, 1.468, 1.221, 1.082, 1.036, 1.031, 1.079, 1.217, 1.463, 1.851, 2.403, 2.931, 3.229,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.002, 1.002, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.005, 1.005, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.211, 2.936, 2.417, 1.858, 1.468, 1.222, 1.083, 1.037, 1.032, 1.083, 1.218, 1.463, 1.848, 2.403, 2.932, 3.226,
+ 3.234, 2.997, 2.536, 1.979, 1.569, 1.311, 1.206, 1.084, 1.084, 1.204, 1.305, 1.565, 1.966, 2.524, 2.996, 3.251,
+ 3.282, 3.069, 2.697, 2.166, 1.731, 1.471, 1.311, 1.207, 1.207, 1.305, 1.466, 1.729, 2.158, 2.689, 3.077, 3.304,
+ 3.369, 3.146, 2.873, 2.415, 1.964, 1.722, 1.471, 1.382, 1.382, 1.466, 1.722, 1.964, 2.408, 2.871, 3.167, 3.401,
+ 3.524, 3.253, 3.025, 2.691, 2.275, 1.939, 1.657, 1.628, 1.628, 1.654, 1.936, 2.275, 2.687, 3.029, 3.284, 3.574
+ ],
+ "sigma": 0.00195,
+ "sigma_Cb": 0.00241
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2868,
+ "ccm":
+ [
+ 1.58923, -0.36649, -0.22273,
+ -0.43591, 1.84858, -0.41268,
+ 0.02948, -0.77666, 1.74718
+ ]
+ },
+ {
+ "ct": 2965,
+ "ccm":
+ [
+ 1.73397, -0.42794, -0.30603,
+ -0.36504, 1.72431, -0.35926,
+ 0.12765, -1.10933, 1.98168
+ ]
+ },
+ {
+ "ct": 3603,
+ "ccm":
+ [
+ 1.61787, -0.42704, -0.19084,
+ -0.37819, 1.74588, -0.36769,
+ 0.00961, -0.59807, 1.58847
+ ]
+ },
+ {
+ "ct": 4620,
+ "ccm":
+ [
+ 1.55581, -0.35422, -0.20158,
+ -0.31805, 1.79309, -0.47505,
+ -0.01256, -0.54489, 1.55746
+ ]
+ },
+ {
+ "ct": 5901,
+ "ccm":
+ [
+ 1.64439, -0.48855, -0.15585,
+ -0.29149, 1.67122, -0.37972,
+ -0.03111, -0.44052, 1.47163
+ ]
+ },
+ {
+ "ct": 7610,
+ "ccm":
+ [
+ 1.48667, -0.26072, -0.22595,
+ -0.21815, 1.86724, -0.64909,
+ -0.00985, -0.64485, 1.65471
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 4.0,
+ "max": 32.0,
+ "default": 6.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.03,
+ "pdaf_squelch": 0.2,
+ "max_slew": 4.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ },
+ "fast":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.05,
+ "pdaf_squelch": 0.2,
+ "max_slew": 5.0,
+ "pdaf_frames": 16,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 12,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 420, 35.0, 920 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
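In each AGC channel the "exposure_modes" pair a shutter ramp with a gain ramp: read as stages, the controller lengthens the shutter towards the next breakpoint before raising gain, so the maximum exposure of stage i is shutter[i] * gain[i]. A sketch of that division under this staged reading; the real AGC additionally handles flicker avoidance and sensor limits:

#include <cstddef>
#include <utility>
#include <vector>

/* Split a requested total exposure (shutter in us times gain) across the
 * paired "shutter"/"gain" ramps of an exposure mode. Illustrative only. */
std::pair<double, double>
apportionExposure(const std::vector<double> &shutters,
		  const std::vector<double> &gains, double target)
{
	double shutter = shutters[0];
	double gain = gains[0];

	for (std::size_t stage = 1; stage < shutters.size(); stage++) {
		/* Prefer a longer shutter at the current gain... */
		if (shutters[stage] * gain >= target)
			return { target / gain, gain };
		shutter = shutters[stage];
		/* ...then make up the remainder with gain. */
		if (shutter * gains[stage] >= target)
			return { shutter, target / shutter };
		gain = gains[stage];
	}
	return { shutter, gain }; /* saturated at the last stage */
}

For the "normal" mode above, a target of 30000 us at gain 2.0 lands exactly on the third breakpoint, and anything beyond 120000 us at gain 6.0 saturates.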
diff --git a/src/ipa/rpi/vc4/data/imx708_wide_noir.json b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
new file mode 100644
index 00000000..b9a5227e
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx708_wide_noir.json
@@ -0,0 +1,673 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 9989,
+ "reference_gain": 1.23,
+ "reference_aperture": 1.0,
+ "reference_lux": 980,
+ "reference_Y": 8345
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 16.0,
+ "reference_slope": 4.0
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.00287
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 0,
+ "ct_curve":
+ [
+ 2750.0, 0.7881, 0.2849,
+ 2940.0, 0.7559, 0.3103,
+ 3650.0, 0.6291, 0.4206,
+ 4625.0, 0.5336, 0.5161,
+ 5715.0, 0.4668, 0.5898
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.01165,
+ "transverse_neg": 0.01601
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 0.125,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ },
+ {
+ "base_ev": 1.5,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 1.0, 2.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.2,
+ 1000, 0.2
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "startup_frames": 5,
+ "convergence_frames": 6,
+ "speed": 0.15
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.529, 1.526, 1.522, 1.506, 1.489, 1.473, 1.458, 1.456, 1.456, 1.458, 1.474, 1.493, 1.513, 1.531, 1.541, 1.544,
+ 1.527, 1.523, 1.511, 1.491, 1.474, 1.459, 1.445, 1.441, 1.441, 1.446, 1.461, 1.479, 1.499, 1.521, 1.536, 1.541,
+ 1.524, 1.515, 1.498, 1.477, 1.459, 1.444, 1.431, 1.426, 1.426, 1.435, 1.446, 1.466, 1.487, 1.507, 1.528, 1.538,
+ 1.522, 1.512, 1.491, 1.468, 1.447, 1.431, 1.423, 1.417, 1.418, 1.425, 1.435, 1.455, 1.479, 1.499, 1.523, 1.537,
+ 1.522, 1.509, 1.485, 1.463, 1.441, 1.423, 1.416, 1.413, 1.415, 1.418, 1.429, 1.449, 1.473, 1.495, 1.521, 1.538,
+ 1.522, 1.508, 1.483, 1.461, 1.438, 1.421, 1.413, 1.412, 1.412, 1.415, 1.428, 1.447, 1.471, 1.493, 1.519, 1.538,
+ 1.522, 1.509, 1.484, 1.462, 1.439, 1.421, 1.414, 1.411, 1.412, 1.416, 1.428, 1.447, 1.471, 1.493, 1.519, 1.537,
+ 1.523, 1.511, 1.487, 1.465, 1.443, 1.424, 1.417, 1.413, 1.415, 1.419, 1.429, 1.451, 1.473, 1.494, 1.519, 1.536,
+ 1.524, 1.514, 1.493, 1.471, 1.451, 1.434, 1.424, 1.419, 1.419, 1.428, 1.437, 1.457, 1.477, 1.498, 1.521, 1.538,
+ 1.527, 1.521, 1.503, 1.481, 1.462, 1.449, 1.434, 1.429, 1.429, 1.437, 1.451, 1.469, 1.488, 1.508, 1.527, 1.539,
+ 1.529, 1.527, 1.515, 1.495, 1.477, 1.462, 1.449, 1.444, 1.444, 1.451, 1.467, 1.481, 1.499, 1.519, 1.535, 1.543,
+ 1.534, 1.531, 1.527, 1.512, 1.492, 1.476, 1.463, 1.461, 1.461, 1.464, 1.479, 1.495, 1.515, 1.533, 1.543, 1.546
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 2.603, 2.599, 2.591, 2.567, 2.539, 2.515, 2.489, 2.489, 2.489, 2.491, 2.516, 2.543, 2.574, 2.597, 2.614, 2.617,
+ 2.596, 2.591, 2.571, 2.542, 2.516, 2.489, 2.464, 2.458, 2.458, 2.469, 2.492, 2.518, 2.547, 2.576, 2.602, 2.614,
+ 2.591, 2.576, 2.546, 2.519, 2.489, 2.464, 2.437, 2.427, 2.427, 2.441, 2.467, 2.492, 2.525, 2.553, 2.586, 2.605,
+ 2.588, 2.568, 2.534, 2.503, 2.472, 2.437, 2.423, 2.409, 2.411, 2.425, 2.441, 2.475, 2.513, 2.541, 2.577, 2.602,
+ 2.588, 2.565, 2.527, 2.494, 2.461, 2.425, 2.409, 2.399, 2.403, 2.409, 2.431, 2.466, 2.503, 2.534, 2.571, 2.601,
+ 2.586, 2.561, 2.525, 2.491, 2.454, 2.418, 2.399, 2.396, 2.395, 2.402, 2.424, 2.461, 2.501, 2.531, 2.567, 2.599,
+ 2.583, 2.559, 2.525, 2.491, 2.454, 2.418, 2.398, 2.393, 2.393, 2.401, 2.423, 2.459, 2.498, 2.531, 2.566, 2.597,
+ 2.583, 2.559, 2.526, 2.494, 2.458, 2.421, 2.404, 2.397, 2.399, 2.404, 2.426, 2.461, 2.501, 2.531, 2.566, 2.596,
+ 2.583, 2.563, 2.531, 2.501, 2.469, 2.435, 2.419, 2.405, 2.404, 2.422, 2.435, 2.471, 2.505, 2.537, 2.572, 2.596,
+ 2.585, 2.571, 2.539, 2.516, 2.486, 2.458, 2.435, 2.424, 2.424, 2.435, 2.459, 2.489, 2.521, 2.546, 2.579, 2.601,
+ 2.589, 2.578, 2.557, 2.532, 2.506, 2.483, 2.458, 2.449, 2.449, 2.459, 2.485, 2.507, 2.535, 2.563, 2.591, 2.605,
+ 2.589, 2.586, 2.575, 2.551, 2.525, 2.503, 2.481, 2.476, 2.476, 2.481, 2.504, 2.526, 2.555, 2.583, 2.604, 2.611
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 3.311, 3.339, 3.369, 3.374, 3.371, 3.363, 3.356, 3.353, 3.353, 3.353, 3.357, 3.362, 3.362, 3.356, 3.328, 3.311,
+ 3.321, 3.354, 3.374, 3.374, 3.368, 3.359, 3.352, 3.349, 3.347, 3.347, 3.349, 3.357, 3.361, 3.359, 3.343, 3.324,
+ 3.334, 3.368, 3.375, 3.374, 3.365, 3.356, 3.349, 3.347, 3.346, 3.346, 3.347, 3.349, 3.358, 3.361, 3.357, 3.336,
+ 3.346, 3.378, 3.378, 3.369, 3.363, 3.358, 3.351, 3.348, 3.347, 3.346, 3.347, 3.348, 3.354, 3.364, 3.363, 3.345,
+ 3.351, 3.381, 3.381, 3.368, 3.361, 3.357, 3.349, 3.347, 3.347, 3.345, 3.345, 3.347, 3.353, 3.364, 3.364, 3.347,
+ 3.353, 3.379, 3.379, 3.366, 3.359, 3.351, 3.348, 3.343, 3.342, 3.342, 3.343, 3.345, 3.351, 3.363, 3.363, 3.347,
+ 3.353, 3.376, 3.376, 3.363, 3.351, 3.347, 3.343, 3.338, 3.336, 3.338, 3.339, 3.343, 3.351, 3.361, 3.361, 3.347,
+ 3.351, 3.374, 3.374, 3.359, 3.351, 3.345, 3.338, 3.334, 3.333, 3.334, 3.336, 3.339, 3.347, 3.358, 3.358, 3.345,
+ 3.346, 3.368, 3.368, 3.359, 3.349, 3.343, 3.336, 3.332, 3.327, 3.331, 3.333, 3.337, 3.346, 3.356, 3.356, 3.341,
+ 3.336, 3.362, 3.364, 3.359, 3.351, 3.342, 3.334, 3.324, 3.324, 3.325, 3.329, 3.336, 3.346, 3.351, 3.351, 3.333,
+ 3.324, 3.349, 3.359, 3.358, 3.352, 3.341, 3.329, 3.323, 3.321, 3.322, 3.326, 3.336, 3.346, 3.347, 3.339, 3.319,
+ 3.311, 3.328, 3.352, 3.354, 3.352, 3.341, 3.329, 3.321, 3.319, 3.321, 3.324, 3.338, 3.343, 3.343, 3.319, 3.312
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.634, 1.647, 1.665, 1.668, 1.668, 1.664, 1.662, 1.662, 1.661, 1.661, 1.661, 1.663, 1.663, 1.659, 1.643, 1.636,
+ 1.639, 1.656, 1.668, 1.669, 1.668, 1.666, 1.664, 1.663, 1.663, 1.661, 1.661, 1.662, 1.663, 1.662, 1.654, 1.642,
+ 1.645, 1.663, 1.669, 1.668, 1.667, 1.667, 1.667, 1.668, 1.668, 1.665, 1.662, 1.661, 1.662, 1.664, 1.661, 1.649,
+ 1.651, 1.669, 1.669, 1.667, 1.666, 1.668, 1.669, 1.672, 1.672, 1.668, 1.665, 1.661, 1.661, 1.665, 1.665, 1.655,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.672, 1.673, 1.673, 1.671, 1.666, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.669, 1.669, 1.666, 1.666, 1.669, 1.671, 1.673, 1.672, 1.669, 1.667, 1.661, 1.661, 1.665, 1.665, 1.659,
+ 1.654, 1.668, 1.668, 1.664, 1.663, 1.667, 1.669, 1.671, 1.669, 1.668, 1.665, 1.661, 1.661, 1.663, 1.663, 1.659,
+ 1.653, 1.665, 1.665, 1.661, 1.661, 1.664, 1.667, 1.668, 1.668, 1.665, 1.661, 1.658, 1.659, 1.662, 1.662, 1.657,
+ 1.651, 1.664, 1.664, 1.659, 1.659, 1.661, 1.663, 1.663, 1.662, 1.661, 1.658, 1.656, 1.657, 1.662, 1.662, 1.655,
+ 1.645, 1.661, 1.663, 1.661, 1.659, 1.659, 1.659, 1.657, 1.657, 1.656, 1.654, 1.655, 1.656, 1.661, 1.661, 1.649,
+ 1.641, 1.654, 1.661, 1.661, 1.659, 1.657, 1.655, 1.653, 1.652, 1.651, 1.652, 1.653, 1.657, 1.658, 1.655, 1.644,
+ 1.635, 1.645, 1.661, 1.661, 1.661, 1.655, 1.653, 1.649, 1.648, 1.647, 1.651, 1.653, 1.657, 1.657, 1.646, 1.638
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.535, 3.279, 3.049, 2.722, 2.305, 1.958, 1.657, 1.647, 1.647, 1.656, 1.953, 2.289, 2.707, 3.058, 3.325, 3.589,
+ 3.379, 3.157, 2.874, 2.421, 1.973, 1.735, 1.472, 1.388, 1.388, 1.471, 1.724, 1.963, 2.409, 2.877, 3.185, 3.416,
+ 3.288, 3.075, 2.696, 2.169, 1.735, 1.472, 1.311, 1.208, 1.208, 1.306, 1.471, 1.724, 2.159, 2.695, 3.092, 3.321,
+ 3.238, 3.001, 2.534, 1.981, 1.572, 1.311, 1.207, 1.082, 1.082, 1.204, 1.306, 1.563, 1.973, 2.529, 3.008, 3.259,
+ 3.211, 2.938, 2.414, 1.859, 1.468, 1.221, 1.082, 1.036, 1.031, 1.079, 1.217, 1.463, 1.851, 2.403, 2.931, 3.229,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.002, 1.002, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.206, 2.904, 2.356, 1.802, 1.421, 1.181, 1.037, 1.005, 1.005, 1.032, 1.175, 1.414, 1.793, 2.343, 2.899, 3.223,
+ 3.211, 2.936, 2.417, 1.858, 1.468, 1.222, 1.083, 1.037, 1.032, 1.083, 1.218, 1.463, 1.848, 2.403, 2.932, 3.226,
+ 3.234, 2.997, 2.536, 1.979, 1.569, 1.311, 1.206, 1.084, 1.084, 1.204, 1.305, 1.565, 1.966, 2.524, 2.996, 3.251,
+ 3.282, 3.069, 2.697, 2.166, 1.731, 1.471, 1.311, 1.207, 1.207, 1.305, 1.466, 1.729, 2.158, 2.689, 3.077, 3.304,
+ 3.369, 3.146, 2.873, 2.415, 1.964, 1.722, 1.471, 1.382, 1.382, 1.466, 1.722, 1.964, 2.408, 2.871, 3.167, 3.401,
+ 3.524, 3.253, 3.025, 2.691, 2.275, 1.939, 1.657, 1.628, 1.628, 1.654, 1.936, 2.275, 2.687, 3.029, 3.284, 3.574
+ ],
+ "sigma": 0.00195,
+ "sigma_Cb": 0.00241
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2750,
+ "ccm":
+ [
+ 1.13004, 0.36392, -0.49396,
+ -0.45885, 1.68171, -0.22286,
+ -0.06473, -0.86962, 1.93435
+ ]
+ },
+ {
+ "ct": 2940,
+ "ccm":
+ [
+ 1.29876, 0.09627, -0.39503,
+ -0.43085, 1.60258, -0.17172,
+ -0.02638, -0.92581, 1.95218
+ ]
+ },
+ {
+ "ct": 3650,
+ "ccm":
+ [
+ 1.57729, -0.29734, -0.27995,
+ -0.42965, 1.66231, -0.23265,
+ -0.02183, -0.62331, 1.64514
+ ]
+ },
+ {
+ "ct": 4625,
+ "ccm":
+ [
+ 1.52145, -0.22382, -0.29763,
+ -0.40445, 1.82186, -0.41742,
+ -0.05732, -0.56222, 1.61954
+ ]
+ },
+ {
+ "ct": 5715,
+ "ccm":
+ [
+ 1.67851, -0.39193, -0.28658,
+ -0.37169, 1.72949, -0.35781,
+ -0.09556, -0.41951, 1.51508
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 4.0,
+ "max": 32.0,
+ "default": 6.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.03,
+ "pdaf_squelch": 0.2,
+ "max_slew": 4.0,
+ "pdaf_frames": 20,
+ "dropout_frames": 6,
+ "step_frames": 4
+ },
+ "fast":
+ {
+ "step_coarse": 2.0,
+ "step_fine": 0.5,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.05,
+ "pdaf_squelch": 0.2,
+ "max_slew": 5.0,
+ "pdaf_frames": 16,
+ "dropout_frames": 6,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 12,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 420, 35.0, 920 ]
+ }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
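Most of the flat number lists in these files ("gamma_curve", "y_target", the AWB "prior" lists, the AF "map") read as x0, y0, x1, y1, ... samples of a piecewise linear function. A minimal evaluator, assuming ascending x values:

#include <cassert>
#include <cstddef>
#include <vector>

/* Evaluate a flat x0, y0, x1, y1, ... point list at x, clamping outside
 * the covered range. */
double evalPwl(const std::vector<double> &pts, double x)
{
	assert(pts.size() >= 4 && pts.size() % 2 == 0);

	if (x <= pts[0])
		return pts[1];

	for (std::size_t i = 2; i < pts.size(); i += 2) {
		if (x <= pts[i]) {
			double x0 = pts[i - 2], y0 = pts[i - 1];
			double t = (x - x0) / (pts[i] - x0);
			return y0 + t * (pts[i + 1] - y0);
		}
	}
	return pts.back();
}

With the wide module's AF "map" [ 0.0, 420, 35.0, 920 ], for instance, a lens position of 17.5 dioptres comes out as driver code 670.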
diff --git a/src/ipa/rpi/vc4/data/meson.build b/src/ipa/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..7a8001ee
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/meson.build
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'imx219.json',
+ 'imx219_noir.json',
+ 'imx283.json',
+ 'imx290.json',
+ 'imx296.json',
+ 'imx296_mono.json',
+ 'imx327.json',
+ 'imx378.json',
+ 'imx415.json',
+ 'imx462.json',
+ 'imx477.json',
+ 'imx477_noir.json',
+ 'imx477_scientific.json',
+ 'imx519.json',
+ 'imx708.json',
+ 'imx708_noir.json',
+ 'imx708_wide.json',
+ 'imx708_wide_noir.json',
+ 'ov5647.json',
+ 'ov5647_noir.json',
+ 'ov64a40.json',
+ 'ov7251_mono.json',
+ 'ov9281_mono.json',
+ 'se327m12.json',
+ 'uncalibrated.json',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
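The meson snippet above installs the tuning files under ipa_data_dir/rpi/vc4, and the IPA then loads the JSON file matching the sensor at startup. A hedged C++ sketch of the lookup this layout implies; the override variable and the uncalibrated.json fallback reflect my reading of the Raspberry Pi IPA rather than verbatim code, and the install prefix will vary with the build:

#include <cstdlib>
#include <filesystem>
#include <string>

/* Locate the tuning file for a sensor: prefer an explicit override,
 * then "<sensor>.json" from the installed data directory, then the
 * uncalibrated fallback. */
std::filesystem::path findTuningFile(const std::string &sensor)
{
	if (const char *env = std::getenv("LIBCAMERA_RPI_TUNING_FILE"))
		return env;

	std::filesystem::path dir = "/usr/share/libcamera/ipa/rpi/vc4";
	std::filesystem::path file = dir / (sensor + ".json");
	if (std::filesystem::exists(file))
		return file;

	return dir / "uncalibrated.json";
}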
diff --git a/src/ipa/rpi/vc4/data/ov5647.json b/src/ipa/rpi/vc4/data/ov5647.json
new file mode 100644
index 00000000..40c6059c
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov5647.json
@@ -0,0 +1,696 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 1024
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 21663,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 987,
+ "reference_Y": 8961
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 4.25
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 401,
+ "slope": 0.05619
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2500.0, 1.0289, 0.4503,
+ 2803.0, 0.9428, 0.5108,
+ 2914.0, 0.9406, 0.5127,
+ 3605.0, 0.8261, 0.6249,
+ 4540.0, 0.7331, 0.7533,
+ 5699.0, 0.6715, 0.8627,
+ 8625.0, 0.6081, 1.0012
+ ],
+ "sensitivity_r": 1.05,
+ "sensitivity_b": 1.05,
+ "transverse_pos": 0.0321,
+ "transverse_neg": 0.04313
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "channels": [
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "base_ev": 1.25
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ },
+ {
+ "base_ev": 1.25,
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
+ 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
+ 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
+ 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
+ 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
+ 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
+ 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
+ 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
+ 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
+ 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
+ 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
+ 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
+ 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
+ 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
+ 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
+ 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
+ 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
+ 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
+ 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
+ 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
+ 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
+ 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
+ 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
+ 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
+ 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
+ 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
+ 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
+ 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
+ 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
+ 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
+ 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
+ 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
+ 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
+ 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
+ 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
+ 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
+ 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
+ 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
+ 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
+ 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
+ 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
+ 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
+ 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
+ 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
+ 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
+ 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
+ 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
+ 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
+ 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
+ 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
+ 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
+ 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
+ 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
+ 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
+ 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
+ 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
+ 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
+ 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
+ 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
+ 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
+ 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
+ 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
+ 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
+ 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
+ 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
+ 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
+ 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
+ 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
+ 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
+ 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
+ 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
+ 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
+ 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
+ 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
+ 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
+ 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
+ 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
+ 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
+ ],
+ "sigma": 0.006,
+ "sigma_Cb": 0.00208
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2873,
+ "ccm":
+ [
+ 1.88195, -0.26249, -0.61946,
+ -0.63842, 2.11535, -0.47693,
+ -0.13531, -0.99739, 2.13271
+ ]
+ },
+ {
+ "ct": 2965,
+ "ccm":
+ [
+ 2.15048, -0.51859, -0.63189,
+ -0.53572, 1.92585, -0.39013,
+ 0.01831, -1.48576, 2.46744
+ ]
+ },
+ {
+ "ct": 3606,
+ "ccm":
+ [
+ 1.97522, -0.43847, -0.53675,
+ -0.56151, 1.99765, -0.43614,
+ -0.12438, -0.77056, 1.89493
+ ]
+ },
+ {
+ "ct": 4700,
+ "ccm":
+ [
+ 2.00971, -0.51461, -0.49511,
+ -0.52109, 2.01003, -0.48894,
+ -0.09527, -0.67318, 1.76845
+ ]
+ },
+ {
+ "ct": 5890,
+ "ccm":
+ [
+ 2.13616, -0.65283, -0.48333,
+ -0.48364, 1.93115, -0.44751,
+ -0.13465, -0.54831, 1.68295
+ ]
+ },
+ {
+ "ct": 7600,
+ "ccm":
+ [
+ 2.06599, -0.39161, -0.67439,
+ -0.50883, 2.27467, -0.76583,
+ -0.13961, -0.66121, 1.80081
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.hdr":
+ {
+ "MultiExposureUnmerged":
+ {
+ "cadence": [ 1, 2 ],
+ "channel_map":
+ {
+ "short": 1,
+ "long": 2
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/ov5647_noir.json b/src/ipa/rpi/vc4/data/ov5647_noir.json
new file mode 100644
index 00000000..488b7119
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov5647_noir.json
@@ -0,0 +1,412 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 1024
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 21663,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 987,
+ "reference_Y": 8961
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 4.25
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 401,
+ "slope": 0.05619
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 66666 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 33333 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ],
+ "shadows": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.0,
+ "q_hi": 0.5,
+ "y_target":
+ [
+ 0, 0.17,
+ 1000, 0.17
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ],
+ "base_ev": 1.25
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.105, 1.103, 1.093, 1.083, 1.071, 1.065, 1.065, 1.065, 1.066, 1.069, 1.072, 1.077, 1.084, 1.089, 1.093, 1.093,
+ 1.103, 1.096, 1.084, 1.072, 1.059, 1.051, 1.047, 1.047, 1.051, 1.053, 1.059, 1.067, 1.075, 1.082, 1.085, 1.086,
+ 1.096, 1.084, 1.072, 1.059, 1.051, 1.045, 1.039, 1.038, 1.039, 1.045, 1.049, 1.057, 1.063, 1.072, 1.081, 1.082,
+ 1.092, 1.075, 1.061, 1.052, 1.045, 1.039, 1.036, 1.035, 1.035, 1.039, 1.044, 1.049, 1.056, 1.063, 1.072, 1.081,
+ 1.092, 1.073, 1.058, 1.048, 1.043, 1.038, 1.035, 1.033, 1.033, 1.035, 1.039, 1.044, 1.051, 1.057, 1.069, 1.078,
+ 1.091, 1.068, 1.054, 1.045, 1.041, 1.038, 1.035, 1.032, 1.032, 1.032, 1.036, 1.041, 1.045, 1.055, 1.069, 1.078,
+ 1.091, 1.068, 1.052, 1.043, 1.041, 1.038, 1.035, 1.032, 1.031, 1.032, 1.034, 1.036, 1.043, 1.055, 1.069, 1.078,
+ 1.092, 1.068, 1.052, 1.047, 1.042, 1.041, 1.038, 1.035, 1.032, 1.032, 1.035, 1.039, 1.043, 1.055, 1.071, 1.079,
+ 1.092, 1.073, 1.057, 1.051, 1.047, 1.047, 1.044, 1.041, 1.038, 1.038, 1.039, 1.043, 1.051, 1.059, 1.076, 1.083,
+ 1.092, 1.081, 1.068, 1.058, 1.056, 1.056, 1.053, 1.052, 1.049, 1.048, 1.048, 1.051, 1.059, 1.066, 1.083, 1.085,
+ 1.091, 1.087, 1.081, 1.068, 1.065, 1.064, 1.062, 1.062, 1.061, 1.056, 1.056, 1.056, 1.064, 1.069, 1.084, 1.089,
+ 1.091, 1.089, 1.085, 1.079, 1.069, 1.068, 1.067, 1.067, 1.067, 1.063, 1.061, 1.063, 1.068, 1.069, 1.081, 1.092
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.486, 1.484, 1.468, 1.449, 1.427, 1.403, 1.399, 1.399, 1.399, 1.404, 1.413, 1.433, 1.454, 1.473, 1.482, 1.488,
+ 1.484, 1.472, 1.454, 1.431, 1.405, 1.381, 1.365, 1.365, 1.367, 1.373, 1.392, 1.411, 1.438, 1.458, 1.476, 1.481,
+ 1.476, 1.458, 1.433, 1.405, 1.381, 1.361, 1.339, 1.334, 1.334, 1.346, 1.362, 1.391, 1.411, 1.438, 1.462, 1.474,
+ 1.471, 1.443, 1.417, 1.388, 1.361, 1.339, 1.321, 1.313, 1.313, 1.327, 1.346, 1.362, 1.391, 1.422, 1.453, 1.473,
+ 1.469, 1.439, 1.408, 1.377, 1.349, 1.321, 1.312, 1.299, 1.299, 1.311, 1.327, 1.348, 1.378, 1.415, 1.446, 1.468,
+ 1.468, 1.434, 1.402, 1.371, 1.341, 1.316, 1.299, 1.296, 1.295, 1.299, 1.314, 1.338, 1.371, 1.408, 1.441, 1.466,
+ 1.468, 1.434, 1.401, 1.371, 1.341, 1.316, 1.301, 1.296, 1.295, 1.297, 1.314, 1.338, 1.369, 1.408, 1.441, 1.465,
+ 1.469, 1.436, 1.401, 1.374, 1.348, 1.332, 1.315, 1.301, 1.301, 1.313, 1.324, 1.342, 1.372, 1.409, 1.442, 1.465,
+ 1.471, 1.444, 1.413, 1.388, 1.371, 1.348, 1.332, 1.323, 1.323, 1.324, 1.342, 1.362, 1.386, 1.418, 1.449, 1.467,
+ 1.473, 1.454, 1.431, 1.407, 1.388, 1.371, 1.359, 1.352, 1.351, 1.351, 1.362, 1.383, 1.404, 1.433, 1.462, 1.472,
+ 1.474, 1.461, 1.447, 1.424, 1.407, 1.394, 1.385, 1.381, 1.379, 1.381, 1.383, 1.401, 1.419, 1.444, 1.466, 1.481,
+ 1.474, 1.464, 1.455, 1.442, 1.421, 1.408, 1.403, 1.403, 1.403, 1.399, 1.402, 1.415, 1.432, 1.446, 1.467, 1.483
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.567, 1.565, 1.555, 1.541, 1.525, 1.518, 1.518, 1.518, 1.521, 1.527, 1.532, 1.541, 1.551, 1.559, 1.567, 1.569,
+ 1.565, 1.557, 1.542, 1.527, 1.519, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.533, 1.542, 1.553, 1.559, 1.562,
+ 1.561, 1.546, 1.532, 1.521, 1.518, 1.515, 1.511, 1.516, 1.519, 1.524, 1.528, 1.529, 1.533, 1.542, 1.554, 1.559,
+ 1.561, 1.539, 1.526, 1.524, 1.521, 1.521, 1.522, 1.524, 1.525, 1.531, 1.529, 1.529, 1.531, 1.538, 1.549, 1.558,
+ 1.559, 1.538, 1.526, 1.525, 1.524, 1.528, 1.534, 1.536, 1.536, 1.536, 1.532, 1.529, 1.531, 1.537, 1.548, 1.556,
+ 1.561, 1.537, 1.525, 1.524, 1.526, 1.532, 1.537, 1.539, 1.538, 1.537, 1.532, 1.529, 1.529, 1.537, 1.546, 1.556,
+ 1.561, 1.536, 1.524, 1.522, 1.525, 1.532, 1.538, 1.538, 1.537, 1.533, 1.528, 1.526, 1.527, 1.536, 1.546, 1.555,
+ 1.561, 1.537, 1.522, 1.521, 1.524, 1.531, 1.536, 1.537, 1.534, 1.529, 1.526, 1.522, 1.523, 1.534, 1.547, 1.555,
+ 1.561, 1.538, 1.524, 1.522, 1.526, 1.531, 1.535, 1.535, 1.534, 1.527, 1.524, 1.522, 1.522, 1.535, 1.549, 1.556,
+ 1.558, 1.543, 1.532, 1.526, 1.526, 1.529, 1.534, 1.535, 1.533, 1.526, 1.523, 1.522, 1.524, 1.537, 1.552, 1.557,
+ 1.555, 1.546, 1.541, 1.528, 1.527, 1.528, 1.531, 1.533, 1.531, 1.527, 1.522, 1.522, 1.526, 1.536, 1.552, 1.561,
+ 1.555, 1.547, 1.542, 1.538, 1.526, 1.526, 1.529, 1.531, 1.529, 1.528, 1.519, 1.519, 1.527, 1.531, 1.543, 1.561
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.684, 1.688, 1.691, 1.697, 1.709, 1.722, 1.735, 1.745, 1.747, 1.745, 1.731, 1.719, 1.709, 1.705, 1.699, 1.699,
+ 1.684, 1.689, 1.694, 1.708, 1.721, 1.735, 1.747, 1.762, 1.762, 1.758, 1.745, 1.727, 1.716, 1.707, 1.701, 1.699,
+ 1.684, 1.691, 1.704, 1.719, 1.734, 1.755, 1.772, 1.786, 1.789, 1.788, 1.762, 1.745, 1.724, 1.709, 1.702, 1.698,
+ 1.682, 1.694, 1.709, 1.729, 1.755, 1.773, 1.798, 1.815, 1.817, 1.808, 1.788, 1.762, 1.733, 1.714, 1.704, 1.699,
+ 1.682, 1.693, 1.713, 1.742, 1.772, 1.798, 1.815, 1.829, 1.831, 1.821, 1.807, 1.773, 1.742, 1.716, 1.703, 1.699,
+ 1.681, 1.693, 1.713, 1.742, 1.772, 1.799, 1.828, 1.839, 1.839, 1.828, 1.807, 1.774, 1.742, 1.715, 1.699, 1.695,
+ 1.679, 1.691, 1.712, 1.739, 1.771, 1.798, 1.825, 1.829, 1.831, 1.818, 1.801, 1.774, 1.738, 1.712, 1.695, 1.691,
+ 1.676, 1.685, 1.703, 1.727, 1.761, 1.784, 1.801, 1.817, 1.817, 1.801, 1.779, 1.761, 1.729, 1.706, 1.691, 1.684,
+ 1.669, 1.678, 1.692, 1.714, 1.741, 1.764, 1.784, 1.795, 1.795, 1.779, 1.761, 1.738, 1.713, 1.696, 1.683, 1.679,
+ 1.664, 1.671, 1.679, 1.693, 1.716, 1.741, 1.762, 1.769, 1.769, 1.753, 1.738, 1.713, 1.701, 1.687, 1.681, 1.676,
+ 1.661, 1.664, 1.671, 1.679, 1.693, 1.714, 1.732, 1.739, 1.739, 1.729, 1.708, 1.701, 1.685, 1.679, 1.676, 1.677,
+ 1.659, 1.661, 1.664, 1.671, 1.679, 1.693, 1.712, 1.714, 1.714, 1.708, 1.701, 1.687, 1.679, 1.672, 1.673, 1.677
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.177, 1.183, 1.187, 1.191, 1.197, 1.206, 1.213, 1.215, 1.215, 1.215, 1.211, 1.204, 1.196, 1.191, 1.183, 1.182,
+ 1.179, 1.185, 1.191, 1.196, 1.206, 1.217, 1.224, 1.229, 1.229, 1.226, 1.221, 1.212, 1.202, 1.195, 1.188, 1.182,
+ 1.183, 1.191, 1.196, 1.206, 1.217, 1.229, 1.239, 1.245, 1.245, 1.245, 1.233, 1.221, 1.212, 1.199, 1.193, 1.187,
+ 1.183, 1.192, 1.201, 1.212, 1.229, 1.241, 1.252, 1.259, 1.259, 1.257, 1.245, 1.233, 1.217, 1.201, 1.194, 1.192,
+ 1.183, 1.192, 1.202, 1.219, 1.238, 1.252, 1.261, 1.269, 1.268, 1.261, 1.257, 1.241, 1.223, 1.204, 1.194, 1.191,
+ 1.182, 1.192, 1.202, 1.219, 1.239, 1.255, 1.266, 1.271, 1.271, 1.265, 1.258, 1.242, 1.223, 1.205, 1.192, 1.191,
+ 1.181, 1.189, 1.199, 1.218, 1.239, 1.254, 1.262, 1.268, 1.268, 1.258, 1.253, 1.241, 1.221, 1.204, 1.191, 1.187,
+ 1.179, 1.184, 1.193, 1.211, 1.232, 1.243, 1.254, 1.257, 1.256, 1.253, 1.242, 1.232, 1.216, 1.199, 1.187, 1.183,
+ 1.174, 1.179, 1.187, 1.202, 1.218, 1.232, 1.243, 1.246, 1.246, 1.239, 1.232, 1.218, 1.207, 1.191, 1.183, 1.179,
+ 1.169, 1.175, 1.181, 1.189, 1.202, 1.218, 1.229, 1.232, 1.232, 1.224, 1.218, 1.207, 1.199, 1.185, 1.181, 1.174,
+ 1.164, 1.168, 1.175, 1.179, 1.189, 1.201, 1.209, 1.213, 1.213, 1.209, 1.201, 1.198, 1.186, 1.181, 1.174, 1.173,
+ 1.161, 1.166, 1.171, 1.175, 1.179, 1.189, 1.197, 1.198, 1.198, 1.197, 1.196, 1.186, 1.182, 1.175, 1.173, 1.173
+ ]
+ },
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.166, 1.171, 1.173, 1.178, 1.187, 1.193, 1.201, 1.205, 1.205, 1.205, 1.199, 1.191, 1.184, 1.179, 1.174, 1.171,
+ 1.166, 1.172, 1.176, 1.184, 1.195, 1.202, 1.209, 1.216, 1.216, 1.213, 1.208, 1.201, 1.189, 1.182, 1.176, 1.171,
+ 1.166, 1.173, 1.183, 1.195, 1.202, 1.214, 1.221, 1.228, 1.229, 1.228, 1.221, 1.209, 1.201, 1.186, 1.179, 1.174,
+ 1.165, 1.174, 1.187, 1.201, 1.214, 1.223, 1.235, 1.241, 1.242, 1.241, 1.229, 1.221, 1.205, 1.188, 1.181, 1.177,
+ 1.165, 1.174, 1.189, 1.207, 1.223, 1.235, 1.242, 1.253, 1.252, 1.245, 1.241, 1.228, 1.211, 1.189, 1.181, 1.178,
+ 1.164, 1.173, 1.189, 1.207, 1.224, 1.238, 1.249, 1.255, 1.255, 1.249, 1.242, 1.228, 1.211, 1.191, 1.179, 1.176,
+ 1.163, 1.172, 1.187, 1.207, 1.223, 1.237, 1.245, 1.253, 1.252, 1.243, 1.237, 1.228, 1.207, 1.188, 1.176, 1.173,
+ 1.159, 1.167, 1.179, 1.199, 1.217, 1.227, 1.237, 1.241, 1.241, 1.237, 1.228, 1.217, 1.201, 1.184, 1.174, 1.169,
+ 1.156, 1.164, 1.172, 1.189, 1.205, 1.217, 1.226, 1.229, 1.229, 1.222, 1.217, 1.204, 1.192, 1.177, 1.171, 1.166,
+ 1.154, 1.159, 1.166, 1.177, 1.189, 1.205, 1.213, 1.216, 1.216, 1.209, 1.204, 1.192, 1.183, 1.172, 1.168, 1.162,
+ 1.152, 1.155, 1.161, 1.166, 1.177, 1.188, 1.195, 1.198, 1.199, 1.196, 1.187, 1.183, 1.173, 1.168, 1.163, 1.162,
+ 1.151, 1.154, 1.158, 1.162, 1.168, 1.177, 1.183, 1.184, 1.184, 1.184, 1.182, 1.172, 1.168, 1.165, 1.162, 1.161
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.236, 2.111, 1.912, 1.741, 1.579, 1.451, 1.379, 1.349, 1.349, 1.361, 1.411, 1.505, 1.644, 1.816, 2.034, 2.159,
+ 2.139, 1.994, 1.796, 1.625, 1.467, 1.361, 1.285, 1.248, 1.239, 1.265, 1.321, 1.408, 1.536, 1.703, 1.903, 2.087,
+ 2.047, 1.898, 1.694, 1.511, 1.373, 1.254, 1.186, 1.152, 1.142, 1.166, 1.226, 1.309, 1.441, 1.598, 1.799, 1.978,
+ 1.999, 1.824, 1.615, 1.429, 1.281, 1.179, 1.113, 1.077, 1.071, 1.096, 1.153, 1.239, 1.357, 1.525, 1.726, 1.915,
+ 1.976, 1.773, 1.563, 1.374, 1.222, 1.119, 1.064, 1.032, 1.031, 1.049, 1.099, 1.188, 1.309, 1.478, 1.681, 1.893,
+ 1.973, 1.756, 1.542, 1.351, 1.196, 1.088, 1.028, 1.011, 1.004, 1.029, 1.077, 1.169, 1.295, 1.459, 1.663, 1.891,
+ 1.973, 1.761, 1.541, 1.349, 1.193, 1.087, 1.031, 1.006, 1.006, 1.023, 1.075, 1.169, 1.298, 1.463, 1.667, 1.891,
+ 1.982, 1.789, 1.568, 1.373, 1.213, 1.111, 1.051, 1.029, 1.024, 1.053, 1.106, 1.199, 1.329, 1.495, 1.692, 1.903,
+ 2.015, 1.838, 1.621, 1.426, 1.268, 1.159, 1.101, 1.066, 1.068, 1.099, 1.166, 1.259, 1.387, 1.553, 1.751, 1.937,
+ 2.076, 1.911, 1.692, 1.507, 1.346, 1.236, 1.169, 1.136, 1.139, 1.174, 1.242, 1.349, 1.475, 1.641, 1.833, 2.004,
+ 2.193, 2.011, 1.798, 1.604, 1.444, 1.339, 1.265, 1.235, 1.237, 1.273, 1.351, 1.461, 1.598, 1.758, 1.956, 2.125,
+ 2.263, 2.154, 1.916, 1.711, 1.549, 1.432, 1.372, 1.356, 1.356, 1.383, 1.455, 1.578, 1.726, 1.914, 2.119, 2.211
+ ],
+ "sigma": 0.006,
+ "sigma_Cb": 0.00208
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2500,
+ "ccm":
+ [
+ 1.70741, -0.05307, -0.65433,
+ -0.62822, 1.68836, -0.06014,
+ -0.04452, -1.87628, 2.92079
+ ]
+ },
+ {
+ "ct": 2803,
+ "ccm":
+ [
+ 1.74383, -0.18731, -0.55652,
+ -0.56491, 1.67772, -0.11281,
+ -0.01522, -1.60635, 2.62157
+ ]
+ },
+ {
+ "ct": 2912,
+ "ccm":
+ [
+ 1.75215, -0.22221, -0.52995,
+ -0.54568, 1.63522, -0.08954,
+ 0.02633, -1.56997, 2.54364
+ ]
+ },
+ {
+ "ct": 2914,
+ "ccm":
+ [
+ 1.72423, -0.28939, -0.43484,
+ -0.55188, 1.62925, -0.07737,
+ 0.01959, -1.28661, 2.26702
+ ]
+ },
+ {
+ "ct": 3605,
+ "ccm":
+ [
+ 1.80381, -0.43646, -0.36735,
+ -0.46505, 1.56814, -0.10309,
+ 0.00929, -1.00424, 1.99495
+ ]
+ },
+ {
+ "ct": 4540,
+ "ccm":
+ [
+ 1.85263, -0.46545, -0.38719,
+ -0.44136, 1.68443, -0.24307,
+ 0.04108, -0.85599, 1.81491
+ ]
+ },
+ {
+ "ct": 5699,
+ "ccm":
+ [
+ 1.98595, -0.63542, -0.35054,
+ -0.34623, 1.54146, -0.19522,
+ 0.00411, -0.70936, 1.70525
+ ]
+ },
+ {
+ "ct": 8625,
+ "ccm":
+ [
+ 2.21637, -0.56663, -0.64974,
+ -0.41133, 1.96625, -0.55492,
+ -0.02307, -0.83529, 1.85837
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/ov64a40.json b/src/ipa/rpi/vc4/data/ov64a40.json
new file mode 100644
index 00000000..096f0b1e
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov64a40.json
@@ -0,0 +1,422 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 17861,
+ "reference_gain": 2.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1073,
+ "reference_Y": 9022
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.984
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 215,
+ "slope": 0.01121
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2300.0, 1.0522, 0.4091,
+ 2700.0, 0.7884, 0.4327,
+ 3000.0, 0.7597, 0.4421,
+ 4000.0, 0.5972, 0.5404,
+ 4150.0, 0.5598, 0.5779,
+ 6500.0, 0.4388, 0.7582
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0558,
+ "transverse_neg": 0.04278
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8,
+ "calibrations_Cr": [
+ {
+ "ct": 6500,
+ "table":
+ [
+ 2.437, 2.415, 2.392, 2.378, 2.369, 2.353, 2.344, 2.336, 2.329, 2.325, 2.325, 2.325, 2.333, 2.344, 2.366, 2.381,
+ 2.434, 2.405, 2.386, 2.369, 2.361, 2.334, 2.314, 2.302, 2.295, 2.289, 2.289, 2.303, 2.327, 2.334, 2.356, 2.378,
+ 2.434, 2.405, 2.385, 2.363, 2.334, 2.314, 2.289, 2.277, 2.269, 2.262, 2.262, 2.283, 2.303, 2.328, 2.352, 2.375,
+ 2.434, 2.405, 2.385, 2.348, 2.315, 2.289, 2.277, 2.258, 2.251, 2.242, 2.249, 2.258, 2.283, 2.321, 2.352, 2.375,
+ 2.434, 2.413, 2.385, 2.343, 2.311, 2.282, 2.258, 2.251, 2.229, 2.233, 2.242, 2.251, 2.281, 2.321, 2.356, 2.375,
+ 2.437, 2.418, 2.388, 2.343, 2.311, 2.282, 2.251, 2.229, 2.221, 2.226, 2.233, 2.251, 2.281, 2.322, 2.361, 2.381,
+ 2.444, 2.422, 2.393, 2.351, 2.314, 2.284, 2.251, 2.227, 2.221, 2.227, 2.234, 2.256, 2.287, 2.326, 2.366, 2.389,
+ 2.445, 2.424, 2.395, 2.353, 2.316, 2.287, 2.266, 2.251, 2.228, 2.234, 2.251, 2.259, 2.289, 2.331, 2.371, 2.395,
+ 2.445, 2.424, 2.399, 2.364, 2.329, 2.308, 2.287, 2.266, 2.259, 2.254, 2.259, 2.283, 2.304, 2.343, 2.375, 2.395,
+ 2.445, 2.425, 2.407, 2.385, 2.364, 2.329, 2.308, 2.299, 2.291, 2.284, 2.284, 2.304, 2.335, 2.354, 2.381, 2.399,
+ 2.449, 2.427, 2.418, 2.407, 2.385, 2.364, 2.349, 2.338, 2.333, 2.326, 2.326, 2.335, 2.354, 2.374, 2.389, 2.408,
+ 2.458, 2.441, 2.427, 2.411, 2.403, 2.395, 2.391, 2.383, 2.375, 2.369, 2.369, 2.369, 2.369, 2.385, 2.408, 2.418
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 6500,
+ "table":
+ [
+ 1.297, 1.297, 1.289, 1.289, 1.289, 1.291, 1.293, 1.294, 1.294, 1.294, 1.294, 1.296, 1.298, 1.304, 1.312, 1.313,
+ 1.297, 1.289, 1.286, 1.286, 1.287, 1.289, 1.292, 1.294, 1.294, 1.294, 1.294, 1.294, 1.296, 1.298, 1.306, 1.312,
+ 1.289, 1.286, 1.283, 1.283, 1.285, 1.287, 1.291, 1.294, 1.294, 1.292, 1.291, 1.289, 1.293, 1.294, 1.298, 1.304,
+ 1.283, 1.282, 1.279, 1.281, 1.282, 1.285, 1.287, 1.294, 1.294, 1.291, 1.289, 1.289, 1.289, 1.293, 1.294, 1.298,
+ 1.281, 1.279, 1.279, 1.279, 1.281, 1.283, 1.287, 1.292, 1.292, 1.291, 1.291, 1.289, 1.289, 1.291, 1.294, 1.297,
+ 1.279, 1.277, 1.277, 1.279, 1.281, 1.282, 1.286, 1.289, 1.291, 1.291, 1.291, 1.291, 1.289, 1.291, 1.293, 1.297,
+ 1.277, 1.275, 1.275, 1.278, 1.279, 1.281, 1.284, 1.287, 1.289, 1.291, 1.291, 1.291, 1.289, 1.289, 1.292, 1.297,
+ 1.277, 1.275, 1.274, 1.275, 1.277, 1.278, 1.279, 1.284, 1.285, 1.285, 1.286, 1.288, 1.289, 1.289, 1.292, 1.297,
+ 1.277, 1.272, 1.272, 1.274, 1.274, 1.277, 1.279, 1.282, 1.284, 1.284, 1.285, 1.286, 1.288, 1.289, 1.292, 1.297,
+ 1.277, 1.272, 1.272, 1.273, 1.274, 1.276, 1.279, 1.282, 1.284, 1.284, 1.286, 1.286, 1.288, 1.289, 1.293, 1.297,
+ 1.279, 1.272, 1.271, 1.272, 1.274, 1.276, 1.279, 1.283, 1.284, 1.284, 1.285, 1.286, 1.288, 1.291, 1.294, 1.299,
+ 1.281, 1.274, 1.271, 1.271, 1.273, 1.276, 1.278, 1.282, 1.284, 1.284, 1.285, 1.286, 1.286, 1.291, 1.295, 1.302
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 3.811, 3.611, 3.038, 2.632, 2.291, 2.044, 1.967, 1.957, 1.957, 1.957, 2.009, 2.222, 2.541, 2.926, 3.455, 3.652,
+ 3.611, 3.135, 2.636, 2.343, 2.044, 1.846, 1.703, 1.626, 1.626, 1.671, 1.796, 1.983, 2.266, 2.549, 3.007, 3.455,
+ 3.135, 2.781, 2.343, 2.044, 1.831, 1.554, 1.411, 1.337, 1.337, 1.379, 1.502, 1.749, 1.983, 2.266, 2.671, 3.007,
+ 2.903, 2.538, 2.149, 1.831, 1.554, 1.401, 1.208, 1.145, 1.145, 1.183, 1.339, 1.502, 1.749, 2.072, 2.446, 2.801,
+ 2.812, 2.389, 2.018, 1.684, 1.401, 1.208, 1.139, 1.028, 1.028, 1.109, 1.183, 1.339, 1.604, 1.939, 2.309, 2.723,
+ 2.799, 2.317, 1.948, 1.606, 1.327, 1.139, 1.028, 1.019, 1.001, 1.021, 1.109, 1.272, 1.531, 1.869, 2.246, 2.717,
+ 2.799, 2.317, 1.948, 1.606, 1.327, 1.139, 1.027, 1.006, 1.001, 1.007, 1.109, 1.272, 1.531, 1.869, 2.246, 2.717,
+ 2.799, 2.372, 1.997, 1.661, 1.378, 1.184, 1.118, 1.019, 1.012, 1.103, 1.158, 1.326, 1.589, 1.926, 2.302, 2.717,
+ 2.884, 2.507, 2.116, 1.795, 1.511, 1.361, 1.184, 1.118, 1.118, 1.158, 1.326, 1.461, 1.726, 2.056, 2.434, 2.799,
+ 3.083, 2.738, 2.303, 1.989, 1.783, 1.511, 1.361, 1.291, 1.291, 1.337, 1.461, 1.726, 1.942, 2.251, 2.657, 2.999,
+ 3.578, 3.083, 2.589, 2.303, 1.989, 1.783, 1.637, 1.563, 1.563, 1.613, 1.743, 1.942, 2.251, 2.537, 2.999, 3.492,
+ 3.764, 3.578, 2.999, 2.583, 2.237, 1.986, 1.913, 1.905, 1.905, 1.905, 1.962, 2.196, 2.525, 2.932, 3.492, 3.659
+ ],
+ "sigma": 0.005,
+ "sigma_Cb": 0.005
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2300,
+ "ccm":
+ [
+ 1.77644, -0.14825, -0.62819,
+ -0.25816, 1.66348, -0.40532,
+ -0.21633, -1.95132, 3.16765
+ ]
+ },
+ {
+ "ct": 2700,
+ "ccm":
+ [
+ 1.53605, 0.03047, -0.56652,
+ -0.27159, 1.78525, -0.51366,
+ -0.13581, -1.22128, 2.35709
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.72928, -0.18819, -0.54108,
+ -0.44398, 2.04756, -0.60358,
+ -0.13203, -0.94711, 2.07913
+ ]
+ },
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 1.69895, -0.23055, -0.46841,
+ -0.33934, 1.80391, -0.46456,
+ -0.13902, -0.75385, 1.89287
+ ]
+ },
+ {
+ "ct": 4150,
+ "ccm":
+ [
+ 2.08494, -0.68698, -0.39796,
+ -0.37928, 1.78795, -0.40867,
+ -0.11537, -0.74686, 1.86223
+ ]
+ },
+ {
+ "ct": 6500,
+ "ccm":
+ [
+ 1.69813, -0.27304, -0.42509,
+ -0.23364, 1.87586, -0.64221,
+ -0.07587, -0.62348, 1.69935
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ },
+ {
+ "rpi.af":
+ {
+ "ranges":
+ {
+ "normal":
+ {
+ "min": 0.0,
+ "max": 12.0,
+ "default": 1.0
+ },
+ "macro":
+ {
+ "min": 3.0,
+ "max": 15.0,
+ "default": 4.0
+ }
+ },
+ "speeds":
+ {
+ "normal":
+ {
+ "step_coarse": 1.0,
+ "step_fine": 0.25,
+ "contrast_ratio": 0.75,
+ "pdaf_gain": -0.02,
+ "pdaf_squelch": 0.125,
+ "max_slew": 2.0,
+ "pdaf_frames": 0,
+ "dropout_frames": 0,
+ "step_frames": 4
+ }
+ },
+ "conf_epsilon": 8,
+ "conf_thresh": 16,
+ "conf_clip": 512,
+ "skip_frames": 5,
+ "map": [ 0.0, 0, 15.0, 1023 ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/ov7251_mono.json b/src/ipa/rpi/vc4/data/ov7251_mono.json
new file mode 100644
index 00000000..a9d05a01
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov7251_mono.json
@@ -0,0 +1,136 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2000,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 20000
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.5
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "n_iter": 0,
+ "luminance_strength": 1.0,
+ "corner_strength": 1.5
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/ov9281_mono.json b/src/ipa/rpi/vc4/data/ov9281_mono.json
new file mode 100644
index 00000000..a9d05a01
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/ov9281_mono.json
@@ -0,0 +1,136 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 2000,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 20000
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 2.5
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "n_iter": 0,
+ "luminance_strength": 1.0,
+ "corner_strength": 1.5
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/se327m12.json b/src/ipa/rpi/vc4/data/se327m12.json
new file mode 100644
index 00000000..948169db
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/se327m12.json
@@ -0,0 +1,432 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 6873,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 800,
+ "reference_Y": 12293
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 0,
+ "reference_slope": 1.986
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 207,
+ "slope": 0.00539
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2900.0, 0.9217, 0.3657,
+ 3600.0, 0.7876, 0.4651,
+ 4600.0, 0.6807, 0.5684,
+ 5800.0, 0.5937, 0.6724,
+ 8100.0, 0.5447, 0.7403
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0162,
+ "transverse_neg": 0.0204
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.5,
+ "calibrations_Cr": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 1.481, 1.471, 1.449, 1.429, 1.416, 1.404, 1.394, 1.389, 1.389, 1.389, 1.392, 1.397, 1.404, 1.416, 1.429, 1.437,
+ 1.472, 1.456, 1.436, 1.418, 1.405, 1.394, 1.389, 1.384, 1.382, 1.382, 1.386, 1.388, 1.398, 1.407, 1.422, 1.429,
+ 1.465, 1.443, 1.426, 1.411, 1.397, 1.389, 1.383, 1.377, 1.377, 1.377, 1.379, 1.384, 1.388, 1.398, 1.411, 1.422,
+ 1.462, 1.441, 1.423, 1.409, 1.395, 1.385, 1.379, 1.376, 1.374, 1.374, 1.375, 1.379, 1.384, 1.394, 1.407, 1.418,
+ 1.461, 1.439, 1.421, 1.407, 1.394, 1.385, 1.381, 1.376, 1.373, 1.373, 1.373, 1.376, 1.381, 1.389, 1.403, 1.415,
+ 1.461, 1.439, 1.419, 1.404, 1.392, 1.384, 1.379, 1.376, 1.373, 1.372, 1.374, 1.375, 1.379, 1.389, 1.401, 1.413,
+ 1.461, 1.438, 1.419, 1.402, 1.389, 1.383, 1.377, 1.375, 1.373, 1.372, 1.372, 1.375, 1.381, 1.388, 1.401, 1.414,
+ 1.462, 1.438, 1.419, 1.403, 1.391, 1.381, 1.377, 1.374, 1.373, 1.373, 1.374, 1.376, 1.381, 1.389, 1.401, 1.414,
+ 1.462, 1.441, 1.423, 1.405, 1.392, 1.383, 1.377, 1.374, 1.373, 1.372, 1.373, 1.376, 1.382, 1.391, 1.402, 1.414,
+ 1.465, 1.444, 1.424, 1.407, 1.393, 1.382, 1.378, 1.373, 1.369, 1.369, 1.372, 1.375, 1.381, 1.389, 1.402, 1.417,
+ 1.469, 1.449, 1.427, 1.413, 1.396, 1.384, 1.381, 1.375, 1.371, 1.371, 1.373, 1.377, 1.385, 1.393, 1.407, 1.422,
+ 1.474, 1.456, 1.436, 1.419, 1.407, 1.391, 1.383, 1.379, 1.377, 1.377, 1.378, 1.381, 1.391, 1.404, 1.422, 1.426
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.742, 1.721, 1.689, 1.661, 1.639, 1.623, 1.613, 1.609, 1.607, 1.606, 1.609, 1.617, 1.626, 1.641, 1.665, 1.681,
+ 1.728, 1.703, 1.672, 1.645, 1.631, 1.614, 1.602, 1.599, 1.596, 1.597, 1.601, 1.608, 1.618, 1.631, 1.653, 1.671,
+ 1.713, 1.691, 1.658, 1.635, 1.618, 1.606, 1.595, 1.591, 1.588, 1.588, 1.591, 1.601, 1.608, 1.624, 1.641, 1.658,
+ 1.707, 1.681, 1.651, 1.627, 1.613, 1.599, 1.591, 1.585, 1.583, 1.584, 1.587, 1.591, 1.601, 1.615, 1.633, 1.655,
+ 1.699, 1.672, 1.644, 1.622, 1.606, 1.593, 1.586, 1.581, 1.579, 1.581, 1.583, 1.587, 1.597, 1.611, 1.631, 1.652,
+ 1.697, 1.665, 1.637, 1.617, 1.601, 1.589, 1.584, 1.579, 1.577, 1.578, 1.581, 1.585, 1.597, 1.607, 1.627, 1.652,
+ 1.697, 1.662, 1.634, 1.613, 1.599, 1.591, 1.583, 1.578, 1.576, 1.576, 1.579, 1.586, 1.597, 1.607, 1.628, 1.653,
+ 1.697, 1.662, 1.633, 1.613, 1.598, 1.589, 1.582, 1.578, 1.576, 1.577, 1.582, 1.589, 1.598, 1.611, 1.635, 1.655,
+ 1.701, 1.666, 1.636, 1.616, 1.602, 1.589, 1.583, 1.578, 1.577, 1.581, 1.583, 1.591, 1.601, 1.617, 1.639, 1.659,
+ 1.708, 1.671, 1.641, 1.618, 1.603, 1.591, 1.584, 1.581, 1.578, 1.581, 1.585, 1.594, 1.604, 1.622, 1.646, 1.666,
+ 1.714, 1.681, 1.648, 1.622, 1.608, 1.599, 1.591, 1.584, 1.583, 1.584, 1.589, 1.599, 1.614, 1.629, 1.653, 1.673,
+ 1.719, 1.691, 1.659, 1.631, 1.618, 1.606, 1.596, 1.591, 1.591, 1.593, 1.599, 1.608, 1.623, 1.642, 1.665, 1.681
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 4000,
+ "table":
+ [
+ 2.253, 2.267, 2.289, 2.317, 2.342, 2.359, 2.373, 2.381, 2.381, 2.378, 2.368, 2.361, 2.344, 2.337, 2.314, 2.301,
+ 2.262, 2.284, 2.314, 2.335, 2.352, 2.371, 2.383, 2.391, 2.393, 2.391, 2.381, 2.368, 2.361, 2.342, 2.322, 2.308,
+ 2.277, 2.303, 2.321, 2.346, 2.364, 2.381, 2.391, 2.395, 2.397, 2.397, 2.395, 2.381, 2.367, 2.354, 2.332, 2.321,
+ 2.277, 2.304, 2.327, 2.349, 2.369, 2.388, 2.393, 2.396, 2.396, 2.398, 2.396, 2.391, 2.376, 2.359, 2.339, 2.328,
+ 2.279, 2.311, 2.327, 2.354, 2.377, 2.389, 2.393, 2.397, 2.397, 2.398, 2.395, 2.393, 2.382, 2.363, 2.344, 2.332,
+ 2.282, 2.311, 2.329, 2.354, 2.377, 2.386, 2.396, 2.396, 2.395, 2.396, 2.397, 2.394, 2.383, 2.367, 2.346, 2.333,
+ 2.283, 2.314, 2.333, 2.353, 2.375, 2.389, 2.394, 2.395, 2.395, 2.395, 2.396, 2.394, 2.386, 2.368, 2.354, 2.336,
+ 2.287, 2.309, 2.331, 2.352, 2.373, 2.386, 2.394, 2.395, 2.395, 2.396, 2.396, 2.394, 2.384, 2.371, 2.354, 2.339,
+ 2.289, 2.307, 2.326, 2.347, 2.369, 2.385, 2.392, 2.397, 2.398, 2.398, 2.397, 2.392, 2.383, 2.367, 2.352, 2.337,
+ 2.286, 2.303, 2.322, 2.342, 2.361, 2.379, 2.389, 2.394, 2.397, 2.398, 2.396, 2.389, 2.381, 2.366, 2.346, 2.332,
+ 2.284, 2.291, 2.312, 2.329, 2.351, 2.372, 2.381, 2.389, 2.393, 2.394, 2.389, 2.385, 2.374, 2.362, 2.338, 2.325,
+ 2.283, 2.288, 2.305, 2.319, 2.339, 2.365, 2.374, 2.381, 2.384, 2.386, 2.385, 2.379, 2.368, 2.342, 2.325, 2.318
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.897, 1.919, 1.941, 1.969, 1.989, 2.003, 2.014, 2.019, 2.019, 2.017, 2.014, 2.008, 1.999, 1.988, 1.968, 1.944,
+ 1.914, 1.932, 1.957, 1.982, 1.998, 2.014, 2.023, 2.029, 2.031, 2.029, 2.022, 2.014, 2.006, 1.995, 1.976, 1.955,
+ 1.925, 1.951, 1.974, 1.996, 2.013, 2.027, 2.035, 2.039, 2.039, 2.038, 2.035, 2.026, 2.015, 2.002, 1.984, 1.963,
+ 1.932, 1.958, 1.986, 2.007, 2.024, 2.034, 2.041, 2.041, 2.045, 2.045, 2.042, 2.033, 2.023, 2.009, 1.995, 1.971,
+ 1.942, 1.964, 1.994, 2.012, 2.029, 2.038, 2.043, 2.046, 2.047, 2.046, 2.045, 2.039, 2.029, 2.014, 1.997, 1.977,
+ 1.946, 1.974, 1.999, 2.015, 2.031, 2.041, 2.046, 2.047, 2.048, 2.047, 2.044, 2.041, 2.031, 2.019, 1.999, 1.978,
+ 1.948, 1.975, 2.002, 2.018, 2.031, 2.041, 2.046, 2.047, 2.048, 2.048, 2.045, 2.041, 2.029, 2.019, 1.998, 1.978,
+ 1.948, 1.973, 2.002, 2.018, 2.029, 2.042, 2.045, 2.048, 2.048, 2.048, 2.044, 2.037, 2.027, 2.014, 1.993, 1.978,
+ 1.945, 1.969, 1.998, 2.015, 2.028, 2.037, 2.045, 2.046, 2.047, 2.044, 2.039, 2.033, 2.022, 2.008, 1.989, 1.971,
+ 1.939, 1.964, 1.991, 2.011, 2.024, 2.032, 2.036, 2.042, 2.042, 2.039, 2.035, 2.024, 2.012, 1.998, 1.977, 1.964,
+ 1.932, 1.953, 1.981, 2.006, 2.016, 2.024, 2.028, 2.031, 2.034, 2.031, 2.024, 2.015, 2.005, 1.989, 1.966, 1.955,
+ 1.928, 1.944, 1.973, 1.999, 2.007, 2.016, 2.019, 2.025, 2.026, 2.025, 2.017, 2.008, 1.997, 1.975, 1.958, 1.947
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 1.877, 1.597, 1.397, 1.269, 1.191, 1.131, 1.093, 1.078, 1.071, 1.069, 1.086, 1.135, 1.221, 1.331, 1.474, 1.704,
+ 1.749, 1.506, 1.334, 1.229, 1.149, 1.088, 1.058, 1.053, 1.051, 1.046, 1.053, 1.091, 1.163, 1.259, 1.387, 1.587,
+ 1.661, 1.451, 1.295, 1.195, 1.113, 1.061, 1.049, 1.048, 1.047, 1.049, 1.049, 1.066, 1.124, 1.211, 1.333, 1.511,
+ 1.615, 1.411, 1.267, 1.165, 1.086, 1.052, 1.047, 1.047, 1.047, 1.049, 1.052, 1.056, 1.099, 1.181, 1.303, 1.471,
+ 1.576, 1.385, 1.252, 1.144, 1.068, 1.049, 1.044, 1.044, 1.045, 1.049, 1.053, 1.054, 1.083, 1.163, 1.283, 1.447,
+ 1.561, 1.373, 1.245, 1.135, 1.064, 1.049, 1.044, 1.044, 1.044, 1.046, 1.048, 1.054, 1.073, 1.153, 1.271, 1.432,
+ 1.571, 1.377, 1.242, 1.137, 1.066, 1.055, 1.052, 1.051, 1.051, 1.049, 1.047, 1.048, 1.068, 1.148, 1.271, 1.427,
+ 1.582, 1.396, 1.259, 1.156, 1.085, 1.068, 1.059, 1.054, 1.049, 1.045, 1.041, 1.043, 1.074, 1.157, 1.284, 1.444,
+ 1.623, 1.428, 1.283, 1.178, 1.105, 1.074, 1.069, 1.063, 1.056, 1.048, 1.046, 1.051, 1.094, 1.182, 1.311, 1.473,
+ 1.691, 1.471, 1.321, 1.213, 1.135, 1.088, 1.073, 1.069, 1.063, 1.059, 1.053, 1.071, 1.129, 1.222, 1.351, 1.521,
+ 1.808, 1.543, 1.371, 1.253, 1.174, 1.118, 1.085, 1.072, 1.067, 1.064, 1.071, 1.106, 1.176, 1.274, 1.398, 1.582,
+ 1.969, 1.666, 1.447, 1.316, 1.223, 1.166, 1.123, 1.094, 1.089, 1.097, 1.118, 1.163, 1.239, 1.336, 1.471, 1.681
+ ],
+ "sigma": 0.00218,
+ "sigma_Cb": 0.00194
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2900,
+ "ccm":
+ [
+ 1.44924, -0.12935, -0.31989,
+ -0.65839, 1.95441, -0.29602,
+ 0.18344, -1.22282, 2.03938
+ ]
+ },
+ {
+ "ct": 3000,
+ "ccm":
+ [
+ 1.38736, 0.07714, -0.46451,
+ -0.59691, 1.84335, -0.24644,
+ 0.10092, -1.30441, 2.20349
+ ]
+ },
+ {
+ "ct": 3600,
+ "ccm":
+ [
+ 1.51261, -0.27921, -0.23339,
+ -0.55129, 1.83241, -0.28111,
+ 0.11649, -0.93195, 1.81546
+ ]
+ },
+ {
+ "ct": 4600,
+ "ccm":
+ [
+ 1.47082, -0.18523, -0.28559,
+ -0.48923, 1.95126, -0.46203,
+ 0.07951, -0.83987, 1.76036
+ ]
+ },
+ {
+ "ct": 5800,
+ "ccm":
+ [
+ 1.57294, -0.36229, -0.21065,
+ -0.42272, 1.80305, -0.38032,
+ 0.03671, -0.66862, 1.63191
+ ]
+ },
+ {
+ "ct": 8100,
+ "ccm":
+ [
+ 1.58803, -0.09912, -0.48891,
+ -0.42594, 2.22303, -0.79709,
+ -0.00621, -0.90516, 1.91137
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen":
+ {
+ "threshold": 2.0,
+ "strength": 0.5,
+ "limit": 0.5
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/data/uncalibrated.json b/src/ipa/rpi/vc4/data/uncalibrated.json
new file mode 100644
index 00000000..cdc56b32
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/uncalibrated.json
@@ -0,0 +1,131 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 4096
+ }
+ },
+ {
+ "rpi.awb":
+ {
+ "use_derivatives": 0,
+ "bayes": 0
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 15000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 3.0, 4.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 30000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ]
+ },
+ "long":
+ {
+ "shutter": [ 1000, 30000, 60000, 90000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.4,
+ 1000, 0.4
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 4000,
+ "ccm":
+ [
+ 2.0, -1.0, 0.0,
+ -0.5, 2.0, -0.5,
+ 0, -1.0, 2.0
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 0,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/ipa/rpi/vc4/meson.build b/src/ipa/rpi/vc4/meson.build
new file mode 100644
index 00000000..c10fa17e
--- /dev/null
+++ b/src/ipa/rpi/vc4/meson.build
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: CC0-1.0
+
+ipa_name = 'ipa_rpi_vc4'
+
+vc4_ipa_deps = [
+ libcamera_private,
+ libatomic,
+]
+
+vc4_ipa_libs = [
+ rpi_ipa_cam_helper_lib,
+ rpi_ipa_common_lib,
+ rpi_ipa_controller_lib
+]
+
+vc4_ipa_includes = [
+ ipa_includes,
+]
+
+vc4_ipa_sources = files([
+ 'vc4.cpp',
+])
+
+vc4_ipa_includes += include_directories('..')
+
+mod = shared_module(ipa_name, vc4_ipa_sources,
+ name_prefix : '',
+ include_directories : vc4_ipa_includes,
+ dependencies : [vc4_ipa_deps, libipa_dep],
+ link_whole : vc4_ipa_libs,
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/rpi/vc4/vc4.cpp b/src/ipa/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..ba43e474
--- /dev/null
+++ b/src/ipa/rpi/vc4/vc4.cpp
@@ -0,0 +1,597 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2019-2021, Raspberry Pi Ltd
+ *
+ * Raspberry Pi VC4/BCM2835 ISP IPA.
+ */
+
+#include <string.h>
+#include <sys/mman.h>
+
+#include <linux/bcm2835-isp.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/ipa/ipa_module_info.h>
+
+#include "common/ipa_base.h"
+#include "controller/af_status.h"
+#include "controller/agc_algorithm.h"
+#include "controller/alsc_status.h"
+#include "controller/awb_status.h"
+#include "controller/black_level_status.h"
+#include "controller/ccm_status.h"
+#include "controller/contrast_status.h"
+#include "controller/denoise_algorithm.h"
+#include "controller/denoise_status.h"
+#include "controller/dpc_status.h"
+#include "controller/geq_status.h"
+#include "controller/lux_status.h"
+#include "controller/noise_status.h"
+#include "controller/sharpen_status.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPARPI)
+
+namespace ipa::RPi {
+
+class IpaVc4 final : public IpaBase
+{
+public:
+ IpaVc4()
+ : IpaBase(), lsTable_(nullptr)
+ {
+ }
+
+ ~IpaVc4()
+ {
+ if (lsTable_)
+ munmap(lsTable_, MaxLsGridSize);
+ }
+
+private:
+ int32_t platformInit(const InitParams &params, InitResult *result) override;
+ int32_t platformStart(const ControlList &controls, StartResult *result) override;
+ int32_t platformConfigure(const ConfigParams &params, ConfigResult *result) override;
+
+ void platformPrepareIsp(const PrepareParams &params, RPiController::Metadata &rpiMetadata) override;
+ RPiController::StatisticsPtr platformProcessStats(Span<uint8_t> mem) override;
+
+ void handleControls(const ControlList &controls) override;
+ bool validateIspControls();
+
+ void applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls);
+ void applyDG(const struct AgcPrepareStatus *dgStatus, ControlList &ctrls);
+ void applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls);
+ void applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls);
+ void applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls);
+ void applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls);
+ void applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls);
+ void applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls);
+ void applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls);
+ void applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls);
+ void applyAF(const struct AfStatus *afStatus, ControlList &lensCtrls);
+ void resampleTable(uint16_t dest[], const std::vector<double> &src, int destW, int destH);
+
+ /* VC4 ISP controls. */
+ ControlInfoMap ispCtrls_;
+
+ /* LS table allocation passed in from the pipeline handler. */
+ SharedFD lsTableHandle_;
+ void *lsTable_;
+};
+
+int32_t IpaVc4::platformInit([[maybe_unused]] const InitParams &params, [[maybe_unused]] InitResult *result)
+{
+ const std::string &target = controller_.getTarget();
+
+ if (target != "bcm2835") {
+ LOG(IPARPI, Error)
+ << "Tuning data file target returned \"" << target << "\""
+ << ", expected \"bcm2835\"";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int32_t IpaVc4::platformStart([[maybe_unused]] const ControlList &controls,
+ [[maybe_unused]] StartResult *result)
+{
+ return 0;
+}
+
+int32_t IpaVc4::platformConfigure(const ConfigParams &params, [[maybe_unused]] ConfigResult *result)
+{
+ ispCtrls_ = params.ispControls;
+ if (!validateIspControls()) {
+ LOG(IPARPI, Error) << "ISP control validation failed.";
+ return -1;
+ }
+
+ /* Store the lens shading table pointer and handle if available. */
+ if (params.lsTableHandle.isValid()) {
+ /* Remove any previous table, if there was one. */
+ if (lsTable_) {
+ munmap(lsTable_, MaxLsGridSize);
+ lsTable_ = nullptr;
+ }
+
+ /* Map the LS table buffer into user space. */
+ lsTableHandle_ = std::move(params.lsTableHandle);
+ if (lsTableHandle_.isValid()) {
+ lsTable_ = mmap(nullptr, MaxLsGridSize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, lsTableHandle_.get(), 0);
+
+ if (lsTable_ == MAP_FAILED) {
+ LOG(IPARPI, Error) << "dmaHeap mmap failure for LS table.";
+ lsTable_ = nullptr;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void IpaVc4::platformPrepareIsp([[maybe_unused]] const PrepareParams &params,
+ RPiController::Metadata &rpiMetadata)
+{
+ ControlList ctrls(ispCtrls_);
+
+ /* Lock the metadata buffer to avoid constant locks/unlocks. */
+ std::unique_lock<RPiController::Metadata> lock(rpiMetadata);
+
+ AwbStatus *awbStatus = rpiMetadata.getLocked<AwbStatus>("awb.status");
+ if (awbStatus)
+ applyAWB(awbStatus, ctrls);
+
+ CcmStatus *ccmStatus = rpiMetadata.getLocked<CcmStatus>("ccm.status");
+ if (ccmStatus)
+ applyCCM(ccmStatus, ctrls);
+
+ AgcPrepareStatus *dgStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status");
+ if (dgStatus)
+ applyDG(dgStatus, ctrls);
+
+ AlscStatus *lsStatus = rpiMetadata.getLocked<AlscStatus>("alsc.status");
+ if (lsStatus)
+ applyLS(lsStatus, ctrls);
+
+ ContrastStatus *contrastStatus = rpiMetadata.getLocked<ContrastStatus>("contrast.status");
+ if (contrastStatus)
+ applyGamma(contrastStatus, ctrls);
+
+ BlackLevelStatus *blackLevelStatus = rpiMetadata.getLocked<BlackLevelStatus>("black_level.status");
+ if (blackLevelStatus)
+ applyBlackLevel(blackLevelStatus, ctrls);
+
+ GeqStatus *geqStatus = rpiMetadata.getLocked<GeqStatus>("geq.status");
+ if (geqStatus)
+ applyGEQ(geqStatus, ctrls);
+
+ DenoiseStatus *denoiseStatus = rpiMetadata.getLocked<DenoiseStatus>("denoise.status");
+ if (denoiseStatus)
+ applyDenoise(denoiseStatus, ctrls);
+
+ SharpenStatus *sharpenStatus = rpiMetadata.getLocked<SharpenStatus>("sharpen.status");
+ if (sharpenStatus)
+ applySharpen(sharpenStatus, ctrls);
+
+ DpcStatus *dpcStatus = rpiMetadata.getLocked<DpcStatus>("dpc.status");
+ if (dpcStatus)
+ applyDPC(dpcStatus, ctrls);
+
+ const AfStatus *afStatus = rpiMetadata.getLocked<AfStatus>("af.status");
+ if (afStatus) {
+ ControlList lensctrls(lensCtrls_);
+ applyAF(afStatus, lensctrls);
+ if (!lensctrls.empty())
+ setLensControls.emit(lensctrls);
+ }
+
+ if (!ctrls.empty())
+ setIspControls.emit(ctrls);
+}
+
+RPiController::StatisticsPtr IpaVc4::platformProcessStats(Span<uint8_t> mem)
+{
+ using namespace RPiController;
+
+ const bcm2835_isp_stats *stats = reinterpret_cast<bcm2835_isp_stats *>(mem.data());
+ StatisticsPtr statistics = std::make_shared<Statistics>(Statistics::AgcStatsPos::PreWb,
+ Statistics::ColourStatsPos::PostLsc);
+ const Controller::HardwareConfig &hw = controller_.getHardwareConfig();
+ unsigned int i;
+
+ /* RGB histograms are not used, so do not populate them. */
+ statistics->yHist = RPiController::Histogram(stats->hist[0].g_hist,
+ hw.numHistogramBins);
+
+ /* All region sums are based on a 16-bit normalised pipeline bit-depth. */
+ unsigned int scale = Statistics::NormalisationFactorPow2 - hw.pipelineWidth;
+
+ statistics->awbRegions.init(hw.awbRegions);
+ for (i = 0; i < statistics->awbRegions.numRegions(); i++)
+ statistics->awbRegions.set(i, { { stats->awb_stats[i].r_sum << scale,
+ stats->awb_stats[i].g_sum << scale,
+ stats->awb_stats[i].b_sum << scale },
+ stats->awb_stats[i].counted,
+ stats->awb_stats[i].notcounted });
+
+ RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>(
+ controller_.getAlgorithm("agc"));
+ if (!agc) {
+ LOG(IPARPI, Debug) << "No AGC algorithm - not copying statistics";
+ statistics->agcRegions.init(0);
+ } else {
+ statistics->agcRegions.init(hw.agcRegions);
+ const std::vector<double> &weights = agc->getWeights();
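+ /* Pre-weight the region sums and counts with the AGC metering mode weights. */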
+ for (i = 0; i < statistics->agcRegions.numRegions(); i++) {
+ uint64_t rSum = (stats->agc_stats[i].r_sum << scale) * weights[i];
+ uint64_t gSum = (stats->agc_stats[i].g_sum << scale) * weights[i];
+ uint64_t bSum = (stats->agc_stats[i].b_sum << scale) * weights[i];
+ uint32_t counted = stats->agc_stats[i].counted * weights[i];
+ uint32_t notcounted = stats->agc_stats[i].notcounted * weights[i];
+ statistics->agcRegions.set(i, { { rSum, gSum, bSum },
+ counted,
+ notcounted });
+ }
+ }
+
+ statistics->focusRegions.init(hw.focusRegions);
+ for (i = 0; i < statistics->focusRegions.numRegions(); i++)
+ statistics->focusRegions.set(i, { stats->focus_stats[i].contrast_val[1][1] / 1000,
+ stats->focus_stats[i].contrast_val_num[1][1],
+ stats->focus_stats[i].contrast_val_num[1][0] });
+
+ if (statsMetadataOutput_) {
+ Span<const uint8_t> statsSpan(reinterpret_cast<const uint8_t *>(stats),
+ sizeof(bcm2835_isp_stats));
+ libcameraMetadata_.set(controls::rpi::Bcm2835StatsOutput, statsSpan);
+ }
+
+ return statistics;
+}
+
+void IpaVc4::handleControls(const ControlList &controls)
+{
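+ /* Map the libcamera draft NoiseReductionMode values onto the controller's denoise modes. */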
+ static const std::map<int32_t, RPiController::DenoiseMode> DenoiseModeTable = {
+ { controls::draft::NoiseReductionModeOff, RPiController::DenoiseMode::Off },
+ { controls::draft::NoiseReductionModeFast, RPiController::DenoiseMode::ColourFast },
+ { controls::draft::NoiseReductionModeHighQuality, RPiController::DenoiseMode::ColourHighQuality },
+ { controls::draft::NoiseReductionModeMinimal, RPiController::DenoiseMode::ColourOff },
+ { controls::draft::NoiseReductionModeZSL, RPiController::DenoiseMode::ColourHighQuality },
+ };
+
+ for (auto const &ctrl : controls) {
+ switch (ctrl.first) {
+ case controls::draft::NOISE_REDUCTION_MODE: {
+ RPiController::DenoiseAlgorithm *sdn = dynamic_cast<RPiController::DenoiseAlgorithm *>(
+ controller_.getAlgorithm("SDN"));
+ /* Some platforms may have a combined "denoise" algorithm instead. */
+ if (!sdn)
+ sdn = dynamic_cast<RPiController::DenoiseAlgorithm *>(
+ controller_.getAlgorithm("denoise"));
+ if (!sdn) {
+ LOG(IPARPI, Warning)
+ << "Could not set NOISE_REDUCTION_MODE - no SDN algorithm";
+ return;
+ }
+
+ int32_t idx = ctrl.second.get<int32_t>();
+ auto mode = DenoiseModeTable.find(idx);
+ if (mode != DenoiseModeTable.end())
+ sdn->setMode(mode->second);
+ break;
+ }
+ }
+ }
+}
+
+bool IpaVc4::validateIspControls()
+{
+ static const uint32_t ctrls[] = {
+ V4L2_CID_RED_BALANCE,
+ V4L2_CID_BLUE_BALANCE,
+ V4L2_CID_DIGITAL_GAIN,
+ V4L2_CID_USER_BCM2835_ISP_CC_MATRIX,
+ V4L2_CID_USER_BCM2835_ISP_GAMMA,
+ V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL,
+ V4L2_CID_USER_BCM2835_ISP_GEQ,
+ V4L2_CID_USER_BCM2835_ISP_DENOISE,
+ V4L2_CID_USER_BCM2835_ISP_SHARPEN,
+ V4L2_CID_USER_BCM2835_ISP_DPC,
+ V4L2_CID_USER_BCM2835_ISP_LENS_SHADING,
+ V4L2_CID_USER_BCM2835_ISP_CDN,
+ };
+
+ for (auto c : ctrls) {
+ if (ispCtrls_.find(c) == ispCtrls_.end()) {
+ LOG(IPARPI, Error) << "Unable to find ISP control "
+ << utils::hex(c);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void IpaVc4::applyAWB(const struct AwbStatus *awbStatus, ControlList &ctrls)
+{
+ LOG(IPARPI, Debug) << "Applying WB R: " << awbStatus->gainR << " B: "
+ << awbStatus->gainB;
+
+ ctrls.set(V4L2_CID_RED_BALANCE,
+ static_cast<int32_t>(awbStatus->gainR * 1000));
+ ctrls.set(V4L2_CID_BLUE_BALANCE,
+ static_cast<int32_t>(awbStatus->gainB * 1000));
+}
+
+void IpaVc4::applyDG(const struct AgcPrepareStatus *dgStatus, ControlList &ctrls)
+{
+ ctrls.set(V4L2_CID_DIGITAL_GAIN,
+ static_cast<int32_t>(dgStatus->digitalGain * 1000));
+}
+
+void IpaVc4::applyCCM(const struct CcmStatus *ccmStatus, ControlList &ctrls)
+{
+ bcm2835_isp_custom_ccm ccm;
+
+ for (int i = 0; i < 9; i++) {
+ ccm.ccm.ccm[i / 3][i % 3].den = 1000;
+ ccm.ccm.ccm[i / 3][i % 3].num = 1000 * ccmStatus->matrix[i];
+ }
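+	/*
+	 * The matrix is passed as num/den fixed-point pairs; e.g. a
+	 * coefficient of 1.234 is encoded as num = 1234, den = 1000.
+	 */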
+
+ ccm.enabled = 1;
+ ccm.ccm.offsets[0] = ccm.ccm.offsets[1] = ccm.ccm.offsets[2] = 0;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ccm),
+ sizeof(ccm) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_CC_MATRIX, c);
+}
+
+void IpaVc4::applyBlackLevel(const struct BlackLevelStatus *blackLevelStatus, ControlList &ctrls)
+{
+ bcm2835_isp_black_level blackLevel;
+
+ blackLevel.enabled = 1;
+ blackLevel.black_level_r = blackLevelStatus->blackLevelR;
+ blackLevel.black_level_g = blackLevelStatus->blackLevelG;
+ blackLevel.black_level_b = blackLevelStatus->blackLevelB;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&blackLevel),
+ sizeof(blackLevel) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL, c);
+}
+
+void IpaVc4::applyGamma(const struct ContrastStatus *contrastStatus, ControlList &ctrls)
+{
+ const unsigned int numGammaPoints = controller_.getHardwareConfig().numGammaPoints;
+ struct bcm2835_isp_gamma gamma;
+
+ for (unsigned int i = 0; i < numGammaPoints - 1; i++) {
+ int x = i < 16 ? i * 1024
+ : (i < 24 ? (i - 16) * 2048 + 16384
+ : (i - 24) * 4096 + 32768);
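+		/*
+		 * The curve is sampled non-uniformly: points 0-15 are spaced
+		 * 1024 apart (0-15360), points 16-23 are spaced 2048 apart
+		 * (16384-30720), and the remaining points 4096 apart, so the
+		 * dark end of the curve gets the densest sampling.
+		 */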
+ gamma.x[i] = x;
+ gamma.y[i] = std::min<uint16_t>(65535, contrastStatus->gammaCurve.eval(x));
+ }
+
+ gamma.x[numGammaPoints - 1] = 65535;
+ gamma.y[numGammaPoints - 1] = 65535;
+ gamma.enabled = 1;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&gamma),
+ sizeof(gamma) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_GAMMA, c);
+}
+
+void IpaVc4::applyGEQ(const struct GeqStatus *geqStatus, ControlList &ctrls)
+{
+ bcm2835_isp_geq geq;
+
+ geq.enabled = 1;
+ geq.offset = geqStatus->offset;
+ geq.slope.den = 1000;
+ geq.slope.num = 1000 * geqStatus->slope;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&geq),
+ sizeof(geq) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_GEQ, c);
+}
+
+void IpaVc4::applyDenoise(const struct DenoiseStatus *denoiseStatus, ControlList &ctrls)
+{
+ using RPiController::DenoiseMode;
+
+ bcm2835_isp_denoise denoise;
+ DenoiseMode mode = static_cast<DenoiseMode>(denoiseStatus->mode);
+
+ denoise.enabled = mode != DenoiseMode::Off;
+ denoise.constant = denoiseStatus->noiseConstant;
+ denoise.slope.num = 1000 * denoiseStatus->noiseSlope;
+ denoise.slope.den = 1000;
+ denoise.strength.num = 1000 * denoiseStatus->strength;
+ denoise.strength.den = 1000;
+
+ /* Set the CDN mode to match the SDN operating mode. */
+ bcm2835_isp_cdn cdn;
+ switch (mode) {
+ case DenoiseMode::ColourFast:
+ cdn.enabled = 1;
+ cdn.mode = CDN_MODE_FAST;
+ break;
+ case DenoiseMode::ColourHighQuality:
+ cdn.enabled = 1;
+ cdn.mode = CDN_MODE_HIGH_QUALITY;
+ break;
+ default:
+ cdn.enabled = 0;
+ }
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&denoise),
+ sizeof(denoise) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_DENOISE, c);
+
+ c = ControlValue(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&cdn),
+ sizeof(cdn) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_CDN, c);
+}
+
+void IpaVc4::applySharpen(const struct SharpenStatus *sharpenStatus, ControlList &ctrls)
+{
+ bcm2835_isp_sharpen sharpen;
+
+ sharpen.enabled = 1;
+ sharpen.threshold.num = 1000 * sharpenStatus->threshold;
+ sharpen.threshold.den = 1000;
+ sharpen.strength.num = 1000 * sharpenStatus->strength;
+ sharpen.strength.den = 1000;
+ sharpen.limit.num = 1000 * sharpenStatus->limit;
+ sharpen.limit.den = 1000;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&sharpen),
+ sizeof(sharpen) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_SHARPEN, c);
+}
+
+void IpaVc4::applyDPC(const struct DpcStatus *dpcStatus, ControlList &ctrls)
+{
+ bcm2835_isp_dpc dpc;
+
+ dpc.enabled = 1;
+ dpc.strength = dpcStatus->strength;
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&dpc),
+ sizeof(dpc) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_DPC, c);
+}
+
+void IpaVc4::applyLS(const struct AlscStatus *lsStatus, ControlList &ctrls)
+{
+ /*
+ * Program lens shading tables into pipeline.
+ * Choose smallest cell size that won't exceed 63x48 cells.
+ */
+ const int cellSizes[] = { 16, 32, 64, 128, 256 };
+ unsigned int numCells = std::size(cellSizes);
+ unsigned int i, w, h, cellSize;
+ for (i = 0; i < numCells; i++) {
+ cellSize = cellSizes[i];
+ w = (mode_.width + cellSize - 1) / cellSize;
+ h = (mode_.height + cellSize - 1) / cellSize;
+ if (w < 64 && h <= 48)
+ break;
+ }
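+	/*
+	 * Worked example: for a 1920x1080 mode, cellSize 16 gives w == 120,
+	 * which doesn't fit, while cellSize 32 gives w == 60 and h == 34,
+	 * within the 63x48 limit, so 32 is chosen.
+	 */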
+
+ if (i == numCells) {
+ LOG(IPARPI, Error) << "Cannot find cell size";
+ return;
+ }
+
+	/* We're going to supply corner-sampled tables with 16-bit samples. */
+ w++, h++;
+ bcm2835_isp_lens_shading ls = {
+ .enabled = 1,
+ .grid_cell_size = cellSize,
+ .grid_width = w,
+ .grid_stride = w,
+ .grid_height = h,
+ /* .dmabuf will be filled in by pipeline handler. */
+ .dmabuf = 0,
+ .ref_transform = 0,
+ .corner_sampled = 1,
+ .gain_format = GAIN_FORMAT_U4P10
+ };
+
+ if (!lsTable_ || w * h * 4 * sizeof(uint16_t) > MaxLsGridSize) {
+		LOG(IPARPI, Error) << "Do not have a correctly allocated lens shading table!";
+ return;
+ }
+
+ if (lsStatus) {
+ /* Format will be u4.10 */
+ uint16_t *grid = static_cast<uint16_t *>(lsTable_);
+
+ resampleTable(grid, lsStatus->r, w, h);
+ resampleTable(grid + w * h, lsStatus->g, w, h);
+ memcpy(grid + 2 * w * h, grid + w * h, w * h * sizeof(uint16_t));
+ resampleTable(grid + 3 * w * h, lsStatus->b, w, h);
+ }
+
+ ControlValue c(Span<const uint8_t>{ reinterpret_cast<uint8_t *>(&ls),
+ sizeof(ls) });
+ ctrls.set(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING, c);
+}
+
+void IpaVc4::applyAF(const struct AfStatus *afStatus, ControlList &lensCtrls)
+{
+ if (afStatus->lensSetting) {
+ ControlValue v(afStatus->lensSetting.value());
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, v);
+ }
+}
+
+/*
+ * Resamples a 16x12 table with central sampling to destW x destH with corner
+ * sampling.
+ */
+void IpaVc4::resampleTable(uint16_t dest[], const std::vector<double> &src,
+ int destW, int destH)
+{
+ /*
+ * Precalculate and cache the x sampling locations and phases to
+ * save recomputing them on every row.
+ */
+ assert(destW > 1 && destH > 1 && destW <= 64);
+ int xLo[64], xHi[64];
+ double xf[64];
+ double x = -0.5, xInc = 16.0 / (destW - 1);
+ for (int i = 0; i < destW; i++, x += xInc) {
+ xLo[i] = floor(x);
+ xf[i] = x - xLo[i];
+ xHi[i] = xLo[i] < 15 ? xLo[i] + 1 : 15;
+ xLo[i] = xLo[i] > 0 ? xLo[i] : 0;
+ }
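+	/*
+	 * Example of the sampling positions: with destW == 33, xInc is 0.5
+	 * and the source x positions run -0.5, 0.0, 0.5, ... 15.5; the
+	 * clamping above keeps the interpolation taps inside the 16-entry
+	 * source rows.
+	 */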
+
+ /* Now march over the output table generating the new values. */
+ double y = -0.5, yInc = 12.0 / (destH - 1);
+ for (int j = 0; j < destH; j++, y += yInc) {
+ int yLo = floor(y);
+ double yf = y - yLo;
+ int yHi = yLo < 11 ? yLo + 1 : 11;
+ yLo = yLo > 0 ? yLo : 0;
+ double const *rowAbove = src.data() + yLo * 16;
+ double const *rowBelow = src.data() + yHi * 16;
+ for (int i = 0; i < destW; i++) {
+ double above = rowAbove[xLo[i]] * (1 - xf[i]) + rowAbove[xHi[i]] * xf[i];
+ double below = rowBelow[xLo[i]] * (1 - xf[i]) + rowBelow[xHi[i]] * xf[i];
+ int result = floor(1024 * (above * (1 - yf) + below * yf) + .5);
+ *(dest++) = result > 16383 ? 16383 : result; /* want u4.10 */
+ }
+ }
+}
+
+} /* namespace ipa::RPi */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 1,
+ "rpi/vc4",
+ "rpi/vc4",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::RPi::IpaVc4();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/agc.cpp b/src/ipa/simple/algorithms/agc.cpp
new file mode 100644
index 00000000..72aade14
--- /dev/null
+++ b/src/ipa/simple/algorithms/agc.cpp
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Exposure and gain
+ */
+
+#include "agc.h"
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftExposure)
+
+namespace ipa::soft::algorithms {
+
+/*
+ * The number of bins to use for the optimal exposure calculations.
+ */
+static constexpr unsigned int kExposureBinsCount = 5;
+
+/*
+ * The exposure is optimal when the mean sample value of the histogram is
+ * in the middle of the range.
+ */
+static constexpr float kExposureOptimal = kExposureBinsCount / 2.0;
+
+/*
+ * This implements the hysteresis for the exposure adjustment.
+ * It is small enough to keep the exposure close to the optimal value, and
+ * big enough to prevent the exposure from wobbling around it.
+ */
+static constexpr float kExposureSatisfactory = 0.2;
+
+Agc::Agc()
+{
+}
+
+void Agc::updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV)
+{
+ /*
+	 * A kExpDenominator of 10 gives a ~10% increment/decrement per step;
+	 * a kExpDenominator of 5 gives ~20%.
+ */
+ static constexpr uint8_t kExpDenominator = 10;
+ static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1;
+ static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1;
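+	/*
+	 * E.g. with kExpDenominator == 10, an exposure of 1000 becomes 1100
+	 * when stepping up (x 11/10) and 900 when stepping down (x 9/10).
+	 */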
+
+ double next;
+ int32_t &exposure = frameContext.sensor.exposure;
+ double &again = frameContext.sensor.gain;
+
+ if (exposureMSV < kExposureOptimal - kExposureSatisfactory) {
+ next = exposure * kExpNumeratorUp / kExpDenominator;
+ if (next - exposure < 1)
+ exposure += 1;
+ else
+ exposure = next;
+ if (exposure >= context.configuration.agc.exposureMax) {
+ next = again * kExpNumeratorUp / kExpDenominator;
+ if (next - again < context.configuration.agc.againMinStep)
+ again += context.configuration.agc.againMinStep;
+ else
+ again = next;
+ }
+ }
+
+ if (exposureMSV > kExposureOptimal + kExposureSatisfactory) {
+ if (exposure == context.configuration.agc.exposureMax &&
+ again > context.configuration.agc.againMin) {
+ next = again * kExpNumeratorDown / kExpDenominator;
+ if (again - next < context.configuration.agc.againMinStep)
+ again -= context.configuration.agc.againMinStep;
+ else
+ again = next;
+ } else {
+ next = exposure * kExpNumeratorDown / kExpDenominator;
+ if (exposure - next < 1)
+ exposure -= 1;
+ else
+ exposure = next;
+ }
+ }
+
+ exposure = std::clamp(exposure, context.configuration.agc.exposureMin,
+ context.configuration.agc.exposureMax);
+ again = std::clamp(again, context.configuration.agc.againMin,
+ context.configuration.agc.againMax);
+
+ LOG(IPASoftExposure, Debug)
+ << "exposureMSV " << exposureMSV
+ << " exp " << exposure << " again " << again;
+}
+
+void Agc::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ /*
+ * Calculate Mean Sample Value (MSV) according to formula from:
+ * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf
+ */
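+	/*
+	 * With kExposureBinsCount bins, the MSV evaluates to
+	 * sum((i + 1) * bin[i]) / sum(bin[i]) for i = 0..4, i.e. a value in
+	 * the 1..5 range that updateExposure() compares against
+	 * kExposureOptimal (2.5).
+	 */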
+ const auto &histogram = stats->yHistogram;
+ const unsigned int blackLevelHistIdx =
+ context.activeState.blc.level / (256 / SwIspStats::kYHistogramSize);
+ const unsigned int histogramSize =
+ SwIspStats::kYHistogramSize - blackLevelHistIdx;
+ const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount;
+ const unsigned int yHistValsPerBinMod =
+ histogramSize / (histogramSize % kExposureBinsCount + 1);
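+	/*
+	 * yHistValsPerBinMod compensates for the remainder of the integer
+	 * division above: subtracting i / yHistValsPerBinMod in the index
+	 * computation below keeps the resulting bin index within
+	 * kExposureBinsCount.
+	 */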
+ int exposureBins[kExposureBinsCount] = {};
+ unsigned int denom = 0;
+ unsigned int num = 0;
+
+ for (unsigned int i = 0; i < histogramSize; i++) {
+ unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin;
+ exposureBins[idx] += histogram[blackLevelHistIdx + i];
+ }
+
+ for (unsigned int i = 0; i < kExposureBinsCount; i++) {
+ LOG(IPASoftExposure, Debug) << i << ": " << exposureBins[i];
+ denom += exposureBins[i];
+ num += exposureBins[i] * (i + 1);
+ }
+
+ float exposureMSV = (denom == 0 ? 0 : static_cast<float>(num) / denom);
+ updateExposure(context, frameContext, exposureMSV);
+}
+
+REGISTER_IPA_ALGORITHM(Agc, "Agc")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/agc.h b/src/ipa/simple/algorithms/agc.h
new file mode 100644
index 00000000..112d9f5a
--- /dev/null
+++ b/src/ipa/simple/algorithms/agc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Exposure and gain
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Agc : public Algorithm
+{
+public:
+ Agc();
+ ~Agc() = default;
+
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+
+private:
+ void updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV);
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/algorithm.h b/src/ipa/simple/algorithms/algorithm.h
new file mode 100644
index 00000000..41f63170
--- /dev/null
+++ b/src/ipa/simple/algorithms/algorithm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Software ISP control algorithm interface
+ */
+
+#pragma once
+
+#include <libipa/algorithm.h>
+
+#include "module.h"
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+using Algorithm = libcamera::ipa::Algorithm<Module>;
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/awb.cpp b/src/ipa/simple/algorithms/awb.cpp
new file mode 100644
index 00000000..195de41d
--- /dev/null
+++ b/src/ipa/simple/algorithms/awb.cpp
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Auto white balance
+ */
+
+#include "awb.h"
+
+#include <numeric>
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "simple/ipa_context.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftAwb)
+
+namespace ipa::soft::algorithms {
+
+int Awb::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ auto &gains = context.activeState.gains;
+ gains.red = gains.green = gains.blue = 1.0;
+
+ return 0;
+}
+
+void Awb::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ const SwIspStats::Histogram &histogram = stats->yHistogram;
+ const uint8_t blackLevel = context.activeState.blc.level;
+
+ /*
+	 * The black level must be subtracted to get correct AWB ratios; they
+	 * would be off if they were computed from the whole brightness range
+	 * rather than from the range the sensor actually outputs.
+ */
+ const uint64_t nPixels = std::accumulate(
+		histogram.begin(), histogram.end(), uint64_t(0));
+ const uint64_t offset = blackLevel * nPixels;
+ const uint64_t sumR = stats->sumR_ - offset / 4;
+ const uint64_t sumG = stats->sumG_ - offset / 2;
+ const uint64_t sumB = stats->sumB_ - offset / 4;
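+	/*
+	 * The offset is split according to the Bayer pattern: a quarter of
+	 * the pixels are red, a quarter blue and half green, hence the
+	 * offset / 4 and offset / 2 corrections of the per-channel sums.
+	 */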
+
+ /*
+ * Calculate red and blue gains for AWB.
+	 * Clamp the maximum gain at 4.0; this also avoids division by zero.
+ */
+ auto &gains = context.activeState.gains;
+ gains.red = sumR <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumR;
+ gains.blue = sumB <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumB;
+ /* Green gain is fixed to 1.0 */
+
+ LOG(IPASoftAwb, Debug) << "gain R/B " << gains.red << "/" << gains.blue;
+}
+
+REGISTER_IPA_ALGORITHM(Awb, "Awb")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/awb.h b/src/ipa/simple/algorithms/awb.h
new file mode 100644
index 00000000..db1496cd
--- /dev/null
+++ b/src/ipa/simple/algorithms/awb.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Auto white balance
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Awb : public Algorithm
+{
+public:
+ Awb() = default;
+ ~Awb() = default;
+
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void process(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/blc.cpp b/src/ipa/simple/algorithms/blc.cpp
new file mode 100644
index 00000000..1d7d370b
--- /dev/null
+++ b/src/ipa/simple/algorithms/blc.cpp
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Black level handling
+ */
+
+#include "blc.h"
+
+#include <numeric>
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+LOG_DEFINE_CATEGORY(IPASoftBL)
+
+BlackLevel::BlackLevel()
+{
+}
+
+int BlackLevel::init([[maybe_unused]] IPAContext &context,
+ const YamlObject &tuningData)
+{
+ auto blackLevel = tuningData["blackLevel"].get<int16_t>();
+ if (blackLevel.has_value()) {
+ /*
+		 * Convert the 16-bit value from the tuning file to the 8-bit
+		 * black level used by the software ISP.
+ */
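+		/* E.g. a 16-bit tuning value of 4096 becomes 4096 >> 8 == 16. */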
+ definedLevel_ = blackLevel.value() >> 8;
+ }
+ return 0;
+}
+
+int BlackLevel::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ if (definedLevel_.has_value())
+ context.configuration.black.level = definedLevel_;
+ context.activeState.blc.level =
+ context.configuration.black.level.value_or(255);
+ return 0;
+}
+
+void BlackLevel::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ [[maybe_unused]] ControlList &metadata)
+{
+ if (context.configuration.black.level.has_value())
+ return;
+
+ if (frameContext.sensor.exposure == exposure_ &&
+ frameContext.sensor.gain == gain_) {
+ return;
+ }
+
+ const SwIspStats::Histogram &histogram = stats->yHistogram;
+
+ /*
+ * The constant is selected to be "good enough", not overly
+ * conservative or aggressive. There is no magic about the given value.
+ */
+ constexpr float ignoredPercentage = 0.02;
+ const unsigned int total =
+ std::accumulate(begin(histogram), end(histogram), 0);
+ const unsigned int pixelThreshold = ignoredPercentage * total;
+ const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize;
+ const unsigned int currentBlackIdx =
+ context.activeState.blc.level / histogramRatio;
+
+ for (unsigned int i = 0, seen = 0;
+ i < currentBlackIdx && i < SwIspStats::kYHistogramSize;
+ i++) {
+ seen += histogram[i];
+ if (seen >= pixelThreshold) {
+ context.activeState.blc.level = i * histogramRatio;
+ exposure_ = frameContext.sensor.exposure;
+ gain_ = frameContext.sensor.gain;
+ LOG(IPASoftBL, Debug)
+ << "Auto-set black level: "
+ << i << "/" << SwIspStats::kYHistogramSize
+ << " (" << 100 * (seen - histogram[i]) / total << "% below, "
+ << 100 * seen / total << "% at or below)";
+ break;
+ }
+	}
+}
+
+REGISTER_IPA_ALGORITHM(BlackLevel, "BlackLevel")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/blc.h b/src/ipa/simple/algorithms/blc.h
new file mode 100644
index 00000000..52d59cab
--- /dev/null
+++ b/src/ipa/simple/algorithms/blc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Black level handling
+ */
+
+#pragma once
+
+#include <optional>
+#include <stdint.h>
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class BlackLevel : public Algorithm
+{
+public:
+ BlackLevel();
+ ~BlackLevel() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const SwIspStats *stats,
+ ControlList &metadata) override;
+
+private:
+ int32_t exposure_;
+ double gain_;
+ std::optional<uint8_t> definedLevel_;
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/lut.cpp b/src/ipa/simple/algorithms/lut.cpp
new file mode 100644
index 00000000..0ba2391f
--- /dev/null
+++ b/src/ipa/simple/algorithms/lut.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Color lookup tables construction
+ */
+
+#include "lut.h"
+
+#include <algorithm>
+#include <cmath>
+#include <optional>
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+
+#include "simple/ipa_context.h"
+
+#include "control_ids.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPASoftLut)
+
+namespace ipa::soft::algorithms {
+
+int Lut::init(IPAContext &context,
+ [[maybe_unused]] const YamlObject &tuningData)
+{
+ context.ctrlMap[&controls::Contrast] = ControlInfo(0.0f, 2.0f, 1.0f);
+ return 0;
+}
+
+int Lut::configure(IPAContext &context,
+ [[maybe_unused]] const IPAConfigInfo &configInfo)
+{
+ /* Gamma value is fixed */
+ context.configuration.gamma = 0.5;
+ context.activeState.knobs.contrast = std::optional<double>();
+ updateGammaTable(context);
+
+ return 0;
+}
+
+void Lut::queueRequest(typename Module::Context &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] typename Module::FrameContext &frameContext,
+ const ControlList &controls)
+{
+ const auto &contrast = controls.get(controls::Contrast);
+ if (contrast.has_value()) {
+ context.activeState.knobs.contrast = contrast;
+ LOG(IPASoftLut, Debug) << "Setting contrast to " << contrast.value();
+ }
+}
+
+void Lut::updateGammaTable(IPAContext &context)
+{
+ auto &gammaTable = context.activeState.gamma.gammaTable;
+ const auto blackLevel = context.activeState.blc.level;
+ const unsigned int blackIndex = blackLevel * gammaTable.size() / 256;
+ const auto contrast = context.activeState.knobs.contrast.value_or(1.0);
+
+ std::fill(gammaTable.begin(), gammaTable.begin() + blackIndex, 0);
+ const float divisor = gammaTable.size() - blackIndex - 1.0;
+ for (unsigned int i = blackIndex; i < gammaTable.size(); i++) {
+ double normalized = (i - blackIndex) / divisor;
+		/* Convert 0..2 to 0..infinity; avoid actual infinity at tan(pi/2) */
+ double contrastExp = tan(std::clamp(contrast * M_PI_4, 0.0, M_PI_2 - 0.00001));
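+		/*
+		 * A contrast of 1.0 gives tan(pi/4) == 1.0, i.e. an identity
+		 * S-curve; values below 1.0 flatten the curve, values above
+		 * 1.0 steepen it.
+		 */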
+ /* Apply simple S-curve */
+ if (normalized < 0.5)
+ normalized = 0.5 * std::pow(normalized / 0.5, contrastExp);
+ else
+ normalized = 1.0 - 0.5 * std::pow((1.0 - normalized) / 0.5, contrastExp);
+ gammaTable[i] = UINT8_MAX *
+ std::pow(normalized, context.configuration.gamma);
+ }
+
+ context.activeState.gamma.blackLevel = blackLevel;
+ context.activeState.gamma.contrast = contrast;
+}
+
+void Lut::prepare(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] DebayerParams *params)
+{
+ /*
+	 * Update the gamma table if needed, i.e. if the black level or the
+	 * contrast changed. Since the black level is only updated when a lower
+	 * value is observed, the table is not constantly recomputed due to
+	 * minor fluctuations or rounding errors.
+ */
+ if (context.activeState.gamma.blackLevel != context.activeState.blc.level ||
+ context.activeState.gamma.contrast != context.activeState.knobs.contrast)
+ updateGammaTable(context);
+
+ auto &gains = context.activeState.gains;
+ auto &gammaTable = context.activeState.gamma.gammaTable;
+ const unsigned int gammaTableSize = gammaTable.size();
+
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ const double div = static_cast<double>(DebayerParams::kRGBLookupSize) /
+ gammaTableSize;
+ /* Apply gamma after gain! */
+ unsigned int idx;
+ idx = std::min({ static_cast<unsigned int>(i * gains.red / div),
+ gammaTableSize - 1 });
+ params->red[i] = gammaTable[idx];
+ idx = std::min({ static_cast<unsigned int>(i * gains.green / div),
+ gammaTableSize - 1 });
+ params->green[i] = gammaTable[idx];
+ idx = std::min({ static_cast<unsigned int>(i * gains.blue / div),
+ gammaTableSize - 1 });
+ params->blue[i] = gammaTable[idx];
+ }
+}
+
+REGISTER_IPA_ALGORITHM(Lut, "Lut")
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/lut.h b/src/ipa/simple/algorithms/lut.h
new file mode 100644
index 00000000..889f864b
--- /dev/null
+++ b/src/ipa/simple/algorithms/lut.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ *
+ * Color lookup tables construction
+ */
+
+#pragma once
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::soft::algorithms {
+
+class Lut : public Algorithm
+{
+public:
+ Lut() = default;
+ ~Lut() = default;
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ int configure(IPAContext &context, const IPAConfigInfo &configInfo) override;
+ void queueRequest(typename Module::Context &context,
+ const uint32_t frame,
+ typename Module::FrameContext &frameContext,
+ const ControlList &controls)
+ override;
+ void prepare(IPAContext &context,
+ const uint32_t frame,
+ IPAFrameContext &frameContext,
+ DebayerParams *params) override;
+
+private:
+ void updateGammaTable(IPAContext &context);
+};
+
+} /* namespace ipa::soft::algorithms */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/algorithms/meson.build b/src/ipa/simple/algorithms/meson.build
new file mode 100644
index 00000000..37a2eb53
--- /dev/null
+++ b/src/ipa/simple/algorithms/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+soft_simple_ipa_algorithms = files([
+ 'awb.cpp',
+ 'agc.cpp',
+ 'blc.cpp',
+ 'lut.cpp',
+])
diff --git a/src/ipa/simple/data/meson.build b/src/ipa/simple/data/meson.build
new file mode 100644
index 00000000..92795ee4
--- /dev/null
+++ b/src/ipa/simple/data/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'uncalibrated.yaml',
+])
+
+# The install_dir must match the name from the IPAModuleInfo
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'simple',
+ install_tag : 'runtime')
diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml
new file mode 100644
index 00000000..3f147112
--- /dev/null
+++ b/src/ipa/simple/data/uncalibrated.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+version: 1
+algorithms:
+ - BlackLevel:
+ - Awb:
+ - Lut:
+ - Agc:
+...
diff --git a/src/ipa/simple/ipa_context.cpp b/src/ipa/simple/ipa_context.cpp
new file mode 100644
index 00000000..3f94bbeb
--- /dev/null
+++ b/src/ipa/simple/ipa_context.cpp
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ * Copyright (C) 2024 Red Hat Inc.
+ *
+ * Software ISP IPA Context
+ */
+
+#include "ipa_context.h"
+
+/**
+ * \file ipa_context.h
+ * \brief Context and state information shared between the algorithms
+ */
+
+namespace libcamera::ipa::soft {
+
+/**
+ * \struct IPASessionConfiguration
+ * \brief Session configuration for the IPA module
+ *
+ * The session configuration contains all IPA configuration parameters that
+ * remain constant during the capture session, from IPA module start to stop.
+ * It is typically set during the configure() operation of the IPA module, but
+ * may also be updated in the start() operation.
+ */
+
+/**
+ * \struct IPAActiveState
+ * \brief The active state of the IPA algorithms
+ *
+ * The IPA is fed with the statistics generated from the latest frame processed.
+ * The statistics are then processed by the IPA algorithms to compute parameters
+ * required for the next frame capture and processing. The current state of the
+ * algorithms is reflected through the IPAActiveState to store the values most
+ * recently computed by the IPA algorithms.
+ */
+
+/**
+ * \struct IPAContext
+ * \brief Global IPA context data shared between all algorithms
+ *
+ * \var IPAContext::configuration
+ * \brief The IPA session configuration, immutable during the session
+ *
+ * \var IPAContext::frameContexts
+ * \brief Ring buffer of the IPAFrameContext(s)
+ *
+ * \var IPAContext::activeState
+ * \brief The current state of IPA algorithms
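+ *
+ * \var IPAContext::ctrlMap
+ * \brief A ControlInfoMap::Map of controls populated by the algorithms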
+ */
+
+/**
+ * \var IPASessionConfiguration::gamma
+ * \brief Gamma value to be used in the raw image processing
+ */
+
+/**
+ * \var IPAActiveState::blc
+ * \brief Context for the Black Level algorithm
+ *
+ * \var IPAActiveState::blc.level
+ * \brief Currently determined black level
+ */
+
+/**
+ * \var IPAActiveState::gains
+ * \brief Context for gains in the Colors algorithm
+ *
+ * \var IPAActiveState::gains.red
+ * \brief Gain of red color
+ *
+ * \var IPAActiveState::gains.green
+ * \brief Gain of green color
+ *
+ * \var IPAActiveState::gains.blue
+ * \brief Gain of blue color
+ */
+
+/**
+ * \var IPAFrameContext::sensor
+ * \brief Effective sensor values for the frame
+ *
+ * \var IPAFrameContext::sensor.exposure
+ * \brief Sensor exposure value
+ *
+ * \var IPAFrameContext::sensor.gain
+ * \brief Sensor analogue gain
+ */
+
+/**
+ * \var IPAActiveState::gamma
+ * \brief Context for gamma in the Colors algorithm
+ *
+ * \var IPAActiveState::gamma.gammaTable
+ * \brief Computed gamma table
+ *
+ * \var IPAActiveState::gamma.blackLevel
+ * \brief Black level used for the gamma table computation
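+ *
+ * \var IPAActiveState::gamma.contrast
+ * \brief Contrast value used for the gamma table computation
+ *
+ * \var IPAActiveState::knobs
+ * \brief User-settable control knobs
+ *
+ * \var IPAActiveState::knobs.contrast
+ * \brief Contrast knob, in the 0..2 range, with 1.0 meaning normal contrast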
+ */
+
+} /* namespace libcamera::ipa::soft */
diff --git a/src/ipa/simple/ipa_context.h b/src/ipa/simple/ipa_context.h
new file mode 100644
index 00000000..4af51306
--- /dev/null
+++ b/src/ipa/simple/ipa_context.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Simple pipeline IPA Context
+ */
+
+#pragma once
+
+#include <array>
+#include <optional>
+#include <stdint.h>
+
+#include <libcamera/controls.h>
+
+#include <libipa/fc_queue.h>
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+struct IPASessionConfiguration {
+ float gamma;
+ struct {
+ int32_t exposureMin, exposureMax;
+ double againMin, againMax, againMinStep;
+ } agc;
+ struct {
+ std::optional<uint8_t> level;
+ } black;
+};
+
+struct IPAActiveState {
+ struct {
+ uint8_t level;
+ } blc;
+
+ struct {
+ double red;
+ double green;
+ double blue;
+ } gains;
+
+ static constexpr unsigned int kGammaLookupSize = 1024;
+ struct {
+ std::array<double, kGammaLookupSize> gammaTable;
+ uint8_t blackLevel;
+ double contrast;
+ } gamma;
+ struct {
+ /* 0..2 range, 1.0 = normal */
+ std::optional<double> contrast;
+ } knobs;
+};
+
+struct IPAFrameContext : public FrameContext {
+ struct {
+ int32_t exposure;
+ double gain;
+ } sensor;
+};
+
+struct IPAContext {
+ IPAContext(unsigned int frameContextSize)
+ : frameContexts(frameContextSize)
+ {
+ }
+
+ IPASessionConfiguration configuration;
+ IPAActiveState activeState;
+ FCQueue<IPAFrameContext> frameContexts;
+ ControlInfoMap::Map ctrlMap;
+};
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/meson.build b/src/ipa/simple/meson.build
new file mode 100644
index 00000000..2f9f15f4
--- /dev/null
+++ b/src/ipa/simple/meson.build
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('algorithms')
+subdir('data')
+
+ipa_name = 'ipa_soft_simple'
+
+soft_simple_sources = files([
+ 'ipa_context.cpp',
+ 'soft_simple.cpp',
+])
+
+soft_simple_sources += soft_simple_ipa_algorithms
+
+mod = shared_module(ipa_name, soft_simple_sources,
+ name_prefix : '',
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+ipa_names += ipa_name
diff --git a/src/ipa/simple/module.h b/src/ipa/simple/module.h
new file mode 100644
index 00000000..8d4d53fb
--- /dev/null
+++ b/src/ipa/simple/module.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024 Red Hat, Inc.
+ *
+ * Software ISP IPA Module
+ */
+
+#pragma once
+
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+#include <libipa/module.h>
+
+#include "ipa_context.h"
+
+namespace libcamera {
+
+namespace ipa::soft {
+
+using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo,
+ DebayerParams, SwIspStats>;
+
+} /* namespace ipa::soft */
+
+} /* namespace libcamera */
diff --git a/src/ipa/simple/soft_simple.cpp b/src/ipa/simple/soft_simple.cpp
new file mode 100644
index 00000000..b26e4e15
--- /dev/null
+++ b/src/ipa/simple/soft_simple.cpp
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple Software Image Processing Algorithm module
+ */
+
+#include <stdint.h>
+#include <sys/mman.h>
+
+#include <linux/v4l2-controls.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/soft_ipa_interface.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "libipa/camera_sensor_helper.h"
+
+#include "module.h"
+
+namespace libcamera {
+LOG_DEFINE_CATEGORY(IPASoft)
+
+namespace ipa::soft {
+
+/* Maximum number of frame contexts to be held */
+static constexpr uint32_t kMaxFrameContexts = 16;
+
+class IPASoftSimple : public ipa::soft::IPASoftInterface, public Module
+{
+public:
+ IPASoftSimple()
+ : context_(kMaxFrameContexts)
+ {
+ }
+
+ ~IPASoftSimple();
+
+ int init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap,
+ ControlInfoMap *ipaControls) override;
+ int configure(const IPAConfigInfo &configInfo) override;
+
+ int start() override;
+ void stop() override;
+
+ void queueRequest(const uint32_t frame, const ControlList &controls) override;
+ void computeParams(const uint32_t frame) override;
+ void processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls) override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ void updateExposure(double exposureMSV);
+
+ DebayerParams *params_;
+ SwIspStats *stats_;
+ std::unique_ptr<CameraSensorHelper> camHelper_;
+ ControlInfoMap sensorInfoMap_;
+
+ /* Local parameter storage */
+ struct IPAContext context_;
+};
+
+IPASoftSimple::~IPASoftSimple()
+{
+ if (stats_)
+ munmap(stats_, sizeof(SwIspStats));
+ if (params_)
+ munmap(params_, sizeof(DebayerParams));
+}
+
+int IPASoftSimple::init(const IPASettings &settings,
+ const SharedFD &fdStats,
+ const SharedFD &fdParams,
+ const ControlInfoMap &sensorInfoMap,
+ ControlInfoMap *ipaControls)
+{
+ camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!camHelper_) {
+ LOG(IPASoft, Warning)
+ << "Failed to create camera sensor helper for "
+ << settings.sensorModel;
+ }
+
+ /* Load the tuning data file */
+ File file(settings.configurationFile);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(IPASoft, Error)
+ << "Failed to open configuration file "
+ << settings.configurationFile << ": " << strerror(-ret);
+ return ret;
+ }
+
+ std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file);
+ if (!data)
+ return -EINVAL;
+
+ /* \todo Use the IPA configuration file for real. */
+ unsigned int version = (*data)["version"].get<uint32_t>(0);
+ LOG(IPASoft, Debug) << "Tuning file version " << version;
+
+ if (!data->contains("algorithms")) {
+ LOG(IPASoft, Error) << "Tuning file doesn't contain algorithms";
+ return -EINVAL;
+ }
+
+ int ret = createAlgorithms(context_, (*data)["algorithms"]);
+ if (ret)
+ return ret;
+
+ params_ = nullptr;
+ stats_ = nullptr;
+
+ if (!fdStats.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Statistics handle";
+ return -ENODEV;
+ }
+
+ if (!fdParams.isValid()) {
+ LOG(IPASoft, Error) << "Invalid Parameters handle";
+ return -ENODEV;
+ }
+
+ {
+ void *mem = mmap(nullptr, sizeof(DebayerParams), PROT_WRITE,
+ MAP_SHARED, fdParams.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Parameters";
+ return -errno;
+ }
+
+ params_ = static_cast<DebayerParams *>(mem);
+ }
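+	/*
+	 * Note that the parameters buffer is mapped write-only (the IPA only
+	 * fills it in), while the statistics buffer below is mapped
+	 * read-only.
+	 */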
+
+ {
+ void *mem = mmap(nullptr, sizeof(SwIspStats), PROT_READ,
+ MAP_SHARED, fdStats.get(), 0);
+ if (mem == MAP_FAILED) {
+ LOG(IPASoft, Error) << "Unable to map Statistics";
+ return -errno;
+ }
+
+ stats_ = static_cast<SwIspStats *>(mem);
+ }
+
+ ControlInfoMap::Map ctrlMap = context_.ctrlMap;
+ *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
+
+ /*
+ * Check if the sensor driver supports the controls required by the
+ * Soft IPA.
+ * Don't save the min and max control values yet, as e.g. the limits
+ * for V4L2_CID_EXPOSURE depend on the configured sensor resolution.
+ */
+ if (sensorInfoMap.find(V4L2_CID_EXPOSURE) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have exposure control";
+ return -EINVAL;
+ }
+
+ if (sensorInfoMap.find(V4L2_CID_ANALOGUE_GAIN) == sensorInfoMap.end()) {
+ LOG(IPASoft, Error) << "Don't have gain control";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int IPASoftSimple::configure(const IPAConfigInfo &configInfo)
+{
+ sensorInfoMap_ = configInfo.sensorControls;
+
+ const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second;
+ const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second;
+
+ /* Clear the IPA context before the streaming session. */
+ context_.configuration = {};
+ context_.activeState = {};
+ context_.frameContexts.clear();
+
+ context_.configuration.agc.exposureMin = exposureInfo.min().get<int32_t>();
+ context_.configuration.agc.exposureMax = exposureInfo.max().get<int32_t>();
+ if (!context_.configuration.agc.exposureMin) {
+ LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear";
+ context_.configuration.agc.exposureMin = 1;
+ }
+
+ int32_t againMin = gainInfo.min().get<int32_t>();
+ int32_t againMax = gainInfo.max().get<int32_t>();
+
+ if (camHelper_) {
+ context_.configuration.agc.againMin = camHelper_->gain(againMin);
+ context_.configuration.agc.againMax = camHelper_->gain(againMax);
+ context_.configuration.agc.againMinStep =
+ (context_.configuration.agc.againMax -
+ context_.configuration.agc.againMin) /
+ 100.0;
+ if (camHelper_->blackLevel().has_value()) {
+ /*
+ * The black level from camHelper_ is a 16 bit value, software ISP
+ * works with 8 bit pixel values, both regardless of the actual
+ * sensor pixel width. Hence we obtain the pixel-based black value
+ * by dividing the value from the helper by 256.
+ */
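+			/* E.g. a helper value of 2560 maps to 2560 / 256 == 10. */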
+ context_.configuration.black.level =
+ camHelper_->blackLevel().value() / 256;
+ }
+ } else {
+ /*
+ * The camera sensor gain (g) is usually not equal to the value written
+		 * into the gain register (x). But the way the AGC algorithm changes
+		 * the gain value to bring the total exposure closer to the optimum
+		 * assumes that g(x) is not too far from a linear function. If the
+		 * minimum gain is 0, g(x) is likely to be far from linear, e.g.
+		 * g(x) = a / (b * x + c). To avoid unexpected changes to the gain
+		 * by the AGC algorithm (abrupt near one edge of the range, very
+		 * small near the other), we limit the range of gain values used.
+ */
+		/* Keep the sensor-reported minimum unless it is zero. */
+		context_.configuration.agc.againMin = againMin;
+		context_.configuration.agc.againMax = againMax;
+		if (!againMin) {
+ LOG(IPASoft, Warning)
+ << "Minimum gain is zero, that can't be linear";
+ context_.configuration.agc.againMin =
+ std::min(100, againMin / 2 + againMax / 2);
+ }
+ context_.configuration.agc.againMinStep = 1.0;
+ }
+
+ for (auto const &algo : algorithms()) {
+ int ret = algo->configure(context_, configInfo);
+ if (ret)
+ return ret;
+ }
+
+ LOG(IPASoft, Info)
+ << "Exposure " << context_.configuration.agc.exposureMin << "-"
+ << context_.configuration.agc.exposureMax
+ << ", gain " << context_.configuration.agc.againMin << "-"
+ << context_.configuration.agc.againMax
+ << " (" << context_.configuration.agc.againMinStep << ")";
+
+ return 0;
+}
+
+int IPASoftSimple::start()
+{
+ return 0;
+}
+
+void IPASoftSimple::stop()
+{
+ context_.frameContexts.clear();
+}
+
+void IPASoftSimple::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.alloc(frame);
+
+ for (auto const &algo : algorithms())
+ algo->queueRequest(context_, frame, frameContext, controls);
+}
+
+void IPASoftSimple::computeParams(const uint32_t frame)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+ for (auto const &algo : algorithms())
+ algo->prepare(context_, frame, frameContext, params_);
+ setIspParams.emit();
+}
+
+void IPASoftSimple::processStats(const uint32_t frame,
+ [[maybe_unused]] const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ IPAFrameContext &frameContext = context_.frameContexts.get(frame);
+
+	/*
+	 * Sanity check: the sensor controls must be present before they are
+	 * read below.
+	 */
+	if (!sensorControls.contains(V4L2_CID_EXPOSURE) ||
+	    !sensorControls.contains(V4L2_CID_ANALOGUE_GAIN)) {
+		LOG(IPASoft, Error) << "Control(s) missing";
+		return;
+	}
+
+	frameContext.sensor.exposure =
+		sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+	int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>();
+	frameContext.sensor.gain = camHelper_ ? camHelper_->gain(again) : again;
+
+	/*
+	 * Software ISP currently does not produce any metadata. Use an empty
+	 * ControlList for now.
+	 *
+	 * \todo Implement proper metadata handling
+	 */
+	ControlList metadata(controls::controls);
+	for (auto const &algo : algorithms())
+		algo->process(context_, frame, frameContext, stats_, metadata);
+
+ ControlList ctrls(sensorInfoMap_);
+
+ auto &againNew = frameContext.sensor.gain;
+ ctrls.set(V4L2_CID_EXPOSURE, frameContext.sensor.exposure);
+ ctrls.set(V4L2_CID_ANALOGUE_GAIN,
+ static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(againNew) : againNew));
+
+ setSensorControls.emit(ctrls);
+}
+
+std::string IPASoftSimple::logPrefix() const
+{
+ return "IPASoft";
+}
+
+} /* namespace ipa::soft */
+
+/*
+ * External IPA module interface
+ */
+extern "C" {
+const struct IPAModuleInfo ipaModuleInfo = {
+ IPA_MODULE_API_VERSION,
+ 0,
+ "simple",
+ "simple",
+};
+
+IPAInterface *ipaCreate()
+{
+ return new ipa::soft::IPASoftSimple();
+}
+
+} /* extern "C" */
+
+} /* namespace libcamera */
diff --git a/src/ipa/vimc/data/meson.build b/src/ipa/vimc/data/meson.build
new file mode 100644
index 00000000..628d6a29
--- /dev/null
+++ b/src/ipa/vimc/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'vimc.conf',
+])
+
+install_data(conf_files,
+ install_dir : ipa_data_dir / 'vimc',
+ install_tag : 'runtime')
diff --git a/src/ipa/vimc/data/vimc.conf b/src/ipa/vimc/data/vimc.conf
new file mode 100644
index 00000000..8e73b161
--- /dev/null
+++ b/src/ipa/vimc/data/vimc.conf
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Dummy configuration file for the vimc IPA.
diff --git a/src/ipa/vimc/meson.build b/src/ipa/vimc/meson.build
index 435c7d31..2cc5f80b 100644
--- a/src/ipa/vimc/meson.build
+++ b/src/ipa/vimc/meson.build
@@ -1,15 +1,23 @@
-ipa_vimc_sources = [
- ['ipa_vimc', 'LGPL-2.1-or-later'],
- ['ipa_vimc_isolate', 'Proprietary'],
-]
-
-foreach t : ipa_vimc_sources
- ipa = shared_module(t[0], 'vimc.cpp',
- name_prefix : '',
- include_directories : [ipa_includes, libipa_includes],
- dependencies : libcamera_dep,
- link_with : libipa,
- install : true,
- install_dir : ipa_install_dir,
- cpp_args : '-DLICENSE="' + t[1] + '"')
-endforeach
+# SPDX-License-Identifier: CC0-1.0
+
+ipa_name = 'ipa_vimc'
+
+mod = shared_module(ipa_name, 'vimc.cpp',
+ name_prefix : '',
+ include_directories : [ipa_includes],
+ dependencies : [libcamera_private, libipa_dep],
+ install : true,
+ install_dir : ipa_install_dir)
+
+if ipa_sign_module
+ custom_target(ipa_name + '.so.sign',
+ input : mod,
+ output : ipa_name + '.so.sign',
+ command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'],
+ install : false,
+ build_by_default : true)
+endif
+
+subdir('data')
+
+ipa_names += ipa_name
diff --git a/src/ipa/vimc/vimc.cpp b/src/ipa/vimc/vimc.cpp
index 6e2095b5..a1351a0f 100644
--- a/src/ipa/vimc/vimc.cpp
+++ b/src/ipa/vimc/vimc.cpp
@@ -2,10 +2,9 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_vimc.cpp - Vimc Image Processing Algorithm module
+ * Vimc Image Processing Algorithm module
*/
-
-#include <ipa/ipa_vimc.h>
+#include <libcamera/ipa/vimc_ipa_interface.h>
#include <fcntl.h>
#include <string.h>
@@ -14,35 +13,49 @@
#include <iostream>
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
+#include <libcamera/base/file.h>
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
-#include <libipa/ipa_interface_wrapper.h>
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
-#include "log.h"
+#include "libcamera/internal/mapped_framebuffer.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPAVimc)
-class IPAVimc : public IPAInterface
+class IPAVimc : public ipa::vimc::IPAVimcInterface
{
public:
IPAVimc();
~IPAVimc();
- int init() override;
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override {}
- void mapBuffers(const std::vector<IPABuffer> &buffers) override {}
- void unmapBuffers(const std::vector<unsigned int> &ids) override {}
- void processEvent(const IPAOperationData &event) override {}
+ int init(const IPASettings &settings,
+ const ipa::vimc::IPAOperationCode code,
+ const Flags<ipa::vimc::TestFlag> inFlags,
+ Flags<ipa::vimc::TestFlag> *outFlags) override;
+
+ int start() override;
+ void stop() override;
+
+ int configure(const IPACameraSensorInfo &sensorInfo,
+ const std::map<unsigned int, IPAStream> &streamConfig,
+ const std::map<unsigned int, ControlInfoMap> &entityControls) override;
+
+ void mapBuffers(const std::vector<IPABuffer> &buffers) override;
+ void unmapBuffers(const std::vector<unsigned int> &ids) override;
+
+ void queueRequest(uint32_t frame, const ControlList &controls) override;
+ void computeParams(uint32_t frame, uint32_t bufferId) override;
private:
void initTrace();
- void trace(enum IPAOperationCode operation);
+ void trace(enum ipa::vimc::IPAOperationCode operation);
int fd_;
+ std::map<unsigned int, MappedFrameBuffer> buffers_;
};
IPAVimc::IPAVimc()
@@ -53,27 +66,110 @@ IPAVimc::IPAVimc()
IPAVimc::~IPAVimc()
{
- if (fd_)
+ if (fd_ != -1)
::close(fd_);
}
-int IPAVimc::init()
+int IPAVimc::init(const IPASettings &settings,
+ const ipa::vimc::IPAOperationCode code,
+ const Flags<ipa::vimc::TestFlag> inFlags,
+ Flags<ipa::vimc::TestFlag> *outFlags)
+{
+ trace(ipa::vimc::IPAOperationInit);
+
+ LOG(IPAVimc, Debug)
+ << "initializing vimc IPA with configuration file "
+ << settings.configurationFile;
+
+ LOG(IPAVimc, Debug) << "Got opcode " << code;
+
+ LOG(IPAVimc, Debug)
+ << "Flag 2 was "
+ << (inFlags & ipa::vimc::TestFlag::Flag2 ? "" : "not ")
+ << "set";
+
+ *outFlags |= ipa::vimc::TestFlag::Flag1;
+
+ File conf(settings.configurationFile);
+ if (!conf.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(IPAVimc, Error) << "Failed to open configuration file";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int IPAVimc::start()
{
- trace(IPAOperationInit);
+ trace(ipa::vimc::IPAOperationStart);
- LOG(IPAVimc, Debug) << "initializing vimc IPA!";
+ LOG(IPAVimc, Debug) << "start vimc IPA!";
return 0;
}
+void IPAVimc::stop()
+{
+ trace(ipa::vimc::IPAOperationStop);
+
+ LOG(IPAVimc, Debug) << "stop vimc IPA!";
+}
+
+int IPAVimc::configure([[maybe_unused]] const IPACameraSensorInfo &sensorInfo,
+ [[maybe_unused]] const std::map<unsigned int, IPAStream> &streamConfig,
+ [[maybe_unused]] const std::map<unsigned int, ControlInfoMap> &entityControls)
+{
+ LOG(IPAVimc, Debug) << "configure()";
+
+ return 0;
+}
+
+void IPAVimc::mapBuffers(const std::vector<IPABuffer> &buffers)
+{
+ for (const IPABuffer &buffer : buffers) {
+ const FrameBuffer fb(buffer.planes);
+ buffers_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(buffer.id),
+ std::forward_as_tuple(&fb, MappedFrameBuffer::MapFlag::Read));
+ }
+}
+
+void IPAVimc::unmapBuffers(const std::vector<unsigned int> &ids)
+{
+ for (unsigned int id : ids) {
+ auto it = buffers_.find(id);
+ if (it == buffers_.end())
+ continue;
+
+ buffers_.erase(it);
+ }
+}
+
+void IPAVimc::queueRequest([[maybe_unused]] uint32_t frame,
+ [[maybe_unused]] const ControlList &controls)
+{
+}
+
+void IPAVimc::computeParams([[maybe_unused]] uint32_t frame, uint32_t bufferId)
+{
+ auto it = buffers_.find(bufferId);
+ if (it == buffers_.end()) {
+ LOG(IPAVimc, Error) << "Could not find parameter buffer";
+ return;
+ }
+
+ Flags<ipa::vimc::TestFlag> flags;
+ paramsComputed.emit(bufferId, flags);
+}
+
void IPAVimc::initTrace()
{
struct stat fifoStat;
- int ret = stat(VIMC_IPA_FIFO_PATH, &fifoStat);
+ int ret = stat(ipa::vimc::VimcIPAFIFOPath.c_str(), &fifoStat);
if (ret)
return;
- ret = ::open(VIMC_IPA_FIFO_PATH, O_WRONLY);
+ ret = ::open(ipa::vimc::VimcIPAFIFOPath.c_str(), O_WRONLY | O_CLOEXEC);
if (ret < 0) {
ret = errno;
LOG(IPAVimc, Error) << "Failed to open vimc IPA test FIFO: "
@@ -84,7 +180,7 @@ void IPAVimc::initTrace()
fd_ = ret;
}
-void IPAVimc::trace(enum IPAOperationCode operation)
+void IPAVimc::trace(enum ipa::vimc::IPAOperationCode operation)
{
if (fd_ < 0)
return;
@@ -105,14 +201,13 @@ extern "C" {
const struct IPAModuleInfo ipaModuleInfo = {
IPA_MODULE_API_VERSION,
0,
- "PipelineHandlerVimc",
- "Dummy IPA for Vimc",
- LICENSE,
+ "vimc",
+ "vimc",
};
-struct ipa_context *ipaCreate()
+IPAInterface *ipaCreate()
{
- return new IPAInterfaceWrapper(std::make_unique<IPAVimc>());
+ return new IPAVimc();
}
}
diff --git a/src/libcamera/base/backtrace.cpp b/src/libcamera/base/backtrace.cpp
new file mode 100644
index 00000000..0b04629c
--- /dev/null
+++ b/src/libcamera/base/backtrace.cpp
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Ideas on Board Oy
+ *
+ * Call stack backtraces
+ */
+
+#include <libcamera/base/backtrace.h>
+
+#if HAVE_BACKTRACE
+#include <execinfo.h>
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_DW
+#include <elfutils/libdwfl.h>
+#include <unistd.h>
+#endif
+
+#if HAVE_UNWIND
+/*
+ * Disable support for remote unwinding to enable a more optimized
+ * implementation.
+ */
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#include <cxxabi.h>
+#include <sstream>
+
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file backtrace.h
+ * \brief Generate call stack backtraces
+ */
+
+namespace libcamera {
+
+namespace {
+
+#if HAVE_DW
+class DwflParser
+{
+public:
+ DwflParser();
+ ~DwflParser();
+
+ bool isValid() const { return valid_; }
+ std::string stackEntry(const void *ip);
+
+private:
+ Dwfl_Callbacks callbacks_;
+ Dwfl *dwfl_;
+ bool valid_;
+};
+
+DwflParser::DwflParser()
+ : callbacks_({}), dwfl_(nullptr), valid_(false)
+{
+ callbacks_.find_elf = dwfl_linux_proc_find_elf;
+ callbacks_.find_debuginfo = dwfl_standard_find_debuginfo;
+
+ dwfl_ = dwfl_begin(&callbacks_);
+ if (!dwfl_)
+ return;
+
+ int ret = dwfl_linux_proc_report(dwfl_, getpid());
+ if (ret)
+ return;
+
+ ret = dwfl_report_end(dwfl_, nullptr, nullptr);
+ if (ret)
+ return;
+
+ valid_ = true;
+}
+
+DwflParser::~DwflParser()
+{
+ if (dwfl_)
+ dwfl_end(dwfl_);
+}
+
+std::string DwflParser::stackEntry(const void *ip)
+{
+ Dwarf_Addr addr = reinterpret_cast<Dwarf_Addr>(ip);
+
+ Dwfl_Module *module = dwfl_addrmodule(dwfl_, addr);
+ if (!module)
+ return std::string();
+
+ std::ostringstream entry;
+
+ GElf_Off offset;
+ GElf_Sym sym;
+ const char *symbol = dwfl_module_addrinfo(module, addr, &offset, &sym,
+ nullptr, nullptr, nullptr);
+ if (symbol) {
+ char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
+ entry << (name ? name : symbol) << "+0x" << std::hex << offset
+ << std::dec;
+ free(name);
+ } else {
+ entry << "??? [" << utils::hex(addr) << "]";
+ }
+
+ entry << " (";
+
+ Dwfl_Line *line = dwfl_module_getsrc(module, addr);
+ if (line) {
+ const char *filename;
+ int lineNumber = 0;
+
+ filename = dwfl_lineinfo(line, &addr, &lineNumber, nullptr,
+ nullptr, nullptr);
+
+ entry << (filename ? filename : "???") << ":" << lineNumber;
+ } else {
+ const char *filename = nullptr;
+
+ dwfl_module_info(module, nullptr, nullptr, nullptr, nullptr,
+ nullptr, &filename, nullptr);
+
+ entry << (filename ? filename : "???") << " [" << utils::hex(addr) << "]";
+ }
+
+ entry << ")";
+ return entry.str();
+}
+#endif /* HAVE_DW */
+
+} /* namespace */
+
+/**
+ * \class Backtrace
+ * \brief Representation of a call stack backtrace
+ *
+ * The Backtrace class represents a function call stack. Constructing an
+ * instance captures the call stack at the point the instance is constructed.
+ * The instance can later be used to access the call stack and to generate a
+ * human-readable representation with the toString() function.
+ *
+ * Depending on the platform, different backends can be used to generate the
+ * backtrace. The Backtrace class provides a best effort to capture accurate
+ * backtraces, but doesn't offer any guarantee of a particular backtrace format.
+ */
+
+/**
+ * \brief Construct a backtrace
+ *
+ * The backtrace captures the call stack at the point where it is constructed.
+ * It can later be converted to a string with toString().
+ */
+Backtrace::Backtrace()
+{
+ /* Try libunwind first and fall back to backtrace() if it fails. */
+ if (unwindTrace())
+ return;
+
+ backtraceTrace();
+}
+
+/*
+ * Avoid inlining to make sure that the Backtrace constructor adds exactly two
+ * calls to the stack, which are later skipped in toString().
+ */
+__attribute__((__noinline__))
+bool Backtrace::backtraceTrace()
+{
+#if HAVE_BACKTRACE
+ backtrace_.resize(32);
+
+ int num_entries = backtrace(backtrace_.data(), backtrace_.size());
+ if (num_entries < 0) {
+ backtrace_.clear();
+ return false;
+ }
+
+ backtrace_.resize(num_entries);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+__attribute__((__noinline__))
+bool Backtrace::unwindTrace()
+{
+#if HAVE_UNWIND
+/*
+ * unw_getcontext() for ARM32 is an inline assembly function using the stmia
+ * instruction to store SP and PC. clang-11 considers this deprecated and
+ * generates a warning.
+ */
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winline-asm"
+#endif
+ unw_context_t uc;
+ int ret = unw_getcontext(&uc);
+ if (ret)
+ return false;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+ unw_cursor_t cursor;
+ ret = unw_init_local(&cursor, &uc);
+ if (ret)
+ return false;
+
+ do {
+#if HAVE_BACKTRACE || HAVE_DW
+ /*
+ * If backtrace() or libdw is available, they will be used in
+ * toString() to provide symbol information for the stack
+ * frames using the IP register value.
+ */
+ unw_word_t ip;
+ ret = unw_get_reg(&cursor, UNW_REG_IP, &ip);
+ if (ret) {
+ backtrace_.push_back(nullptr);
+ continue;
+ }
+
+ backtrace_.push_back(reinterpret_cast<void *>(ip));
+#else
+ /*
+ * Otherwise, use libunwind to get the symbol information. As
+ * the libunwind API uses cursors, we can't store the IP values
+ * and delay symbol lookup to toString().
+ */
+ char symbol[256];
+ unw_word_t offset = 0;
+ ret = unw_get_proc_name(&cursor, symbol, sizeof(symbol), &offset);
+ if (ret) {
+ backtraceText_.emplace_back("???\n");
+ continue;
+ }
+
+ std::ostringstream entry;
+
+ char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
+ entry << (name ? name : symbol);
+ free(name);
+
+ entry << "+0x" << std::hex << offset << "\n";
+ backtraceText_.emplace_back(entry.str());
+#endif
+ } while (unw_step(&cursor) > 0);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * \brief Convert a backtrace to a string representation
+ * \param[in] skipLevels Number of initial levels to skip in the backtrace
+ *
+ * The string representation of the backtrace is a multi-line string, with one
+ * line per call stack entry. The format of the entries isn't specified and is
+ * platform-dependent.
+ *
+ * The \a skipLevels parameter indicates how many initial entries to skip from
+ * the backtrace. This can be used to hide functions that wrap the construction
+ * of the Backtrace instance from the call stack. The Backtrace constructor
+ * itself is automatically skipped and never shown in the backtrace.
+ *
+ * If backtrace generation fails for any reason (usually because the platform
+ * doesn't support this feature), an empty string is returned.
+ *
+ * \return A string representation of the backtrace, or an empty string if
+ * backtrace generation isn't possible
+ */
+std::string Backtrace::toString(unsigned int skipLevels) const
+{
+ /*
+ * Skip the first two entries, corresponding to the Backtrace
+ * construction.
+ */
+ skipLevels += 2;
+
+ if (backtrace_.size() <= skipLevels &&
+ backtraceText_.size() <= skipLevels)
+ return std::string();
+
+ if (!backtraceText_.empty()) {
+ Span<const std::string> trace{ backtraceText_ };
+ return utils::join(trace.subspan(skipLevels), "");
+ }
+
+#if HAVE_DW
+ DwflParser dwfl;
+
+ if (dwfl.isValid()) {
+ std::ostringstream msg;
+
+ Span<void *const> trace{ backtrace_ };
+ for (const void *ip : trace.subspan(skipLevels)) {
+ if (ip)
+ msg << dwfl.stackEntry(ip) << std::endl;
+ else
+ msg << "???" << std::endl;
+ }
+
+ return msg.str();
+ }
+#endif
+
+#if HAVE_BACKTRACE
+ Span<void *const> trace{ backtrace_ };
+ trace = trace.subspan(skipLevels);
+
+ char **strings = backtrace_symbols(trace.data(), trace.size());
+ if (strings) {
+ std::ostringstream msg;
+
+ for (unsigned int i = 0; i < trace.size(); ++i)
+ msg << strings[i] << std::endl;
+
+ free(strings);
+ return msg.str();
+ }
+#endif
+
+ return std::string();
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/bound_method.cpp b/src/libcamera/base/bound_method.cpp
new file mode 100644
index 00000000..322029a8
--- /dev/null
+++ b/src/libcamera/base/bound_method.cpp
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Method bind and invocation
+ */
+
+#include <libcamera/base/bound_method.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/object.h>
+#include <libcamera/base/semaphore.h>
+#include <libcamera/base/thread.h>
+
+/**
+ * \file base/bound_method.h
+ * \brief Method bind and invocation
+ */
+
+namespace libcamera {
+
+/**
+ * \enum ConnectionType
+ * \brief Connection type for asynchronous communication
+ *
+ * This enumeration describes the possible types of asynchronous communication
+ * between a sender and a receiver. It applies to Signal::emit() and
+ * Object::invokeMethod().
+ *
+ * \var ConnectionTypeAuto
+ * \brief If the sender and the receiver live in the same thread,
+ * ConnectionTypeDirect is used. Otherwise ConnectionTypeQueued is used.
+ *
+ * \var ConnectionTypeDirect
+ * \brief The receiver is invoked immediately and synchronously in the sender's
+ * thread.
+ *
+ * \var ConnectionTypeQueued
+ * \brief The receiver is invoked asynchronously
+ *
+ * Invoke the receiver asynchronously in its thread when control returns to the
+ * thread's event loop. The sender proceeds without waiting for the invocation
+ * to complete.
+ *
+ * \var ConnectionTypeBlocking
+ * \brief The receiver is invoked synchronously
+ *
+ * If the sender and the receiver live in the same thread, this is equivalent to
+ * ConnectionTypeDirect. Otherwise, the receiver is invoked asynchronously in
+ * its thread when control returns to the thread's event loop. The sender
+ * blocks until the receiver signals the completion of the invocation.
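+ *
+ * As an illustrative sketch (the Worker class and its doWork() function are
+ * hypothetical), the connection type is passed to Object::invokeMethod():
+ * \code{.cpp}
+ * worker->invokeMethod(&Worker::doWork, ConnectionTypeQueued);
+ * \endcode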
+ */
+
+/**
+ * \brief Invoke the bound method with packed arguments
+ * \param[in] pack Packed arguments
+ * \param[in] deleteMethod True to delete \a this bound method instance when
+ * method invocation completes
+ *
+ * The bound method stores its return value, if any, in the arguments \a pack.
+ * For direct and blocking invocations, this is performed synchronously, and
+ * the return value contained in the pack may be used. For queued invocations,
+ * the return value is stored at an undefined point in time and shall thus not
+ * be used by the caller.
+ *
+ * \return True if the return value contained in the \a pack may be used by the
+ * caller, false otherwise
+ */
+bool BoundMethodBase::activatePack(std::shared_ptr<BoundMethodPackBase> pack,
+ bool deleteMethod)
+{
+ ConnectionType type = connectionType_;
+ if (type == ConnectionTypeAuto) {
+ if (Thread::current() == object_->thread())
+ type = ConnectionTypeDirect;
+ else
+ type = ConnectionTypeQueued;
+ } else if (type == ConnectionTypeBlocking) {
+ if (Thread::current() == object_->thread())
+ type = ConnectionTypeDirect;
+ }
+
+ switch (type) {
+ case ConnectionTypeDirect:
+ default:
+ invokePack(pack.get());
+ if (deleteMethod)
+ delete this;
+ return true;
+
+ case ConnectionTypeQueued: {
+ std::unique_ptr<Message> msg =
+ std::make_unique<InvokeMessage>(this, pack, nullptr, deleteMethod);
+ object_->postMessage(std::move(msg));
+ return false;
+ }
+
+ case ConnectionTypeBlocking: {
+ Semaphore semaphore;
+
+ std::unique_ptr<Message> msg =
+ std::make_unique<InvokeMessage>(this, pack, &semaphore, deleteMethod);
+ object_->postMessage(std::move(msg));
+
+ semaphore.acquire();
+ return true;
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/class.cpp b/src/libcamera/base/class.cpp
new file mode 100644
index 00000000..61998398
--- /dev/null
+++ b/src/libcamera/base/class.cpp
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Utilities and helpers for classes
+ */
+
+#include <libcamera/base/class.h>
+
+/**
+ * \file class.h
+ * \brief Utilities to help constructing class interfaces
+ *
+ * The extensible class can be inherited to create public classes with stable
+ * ABIs.
+ */
+
+namespace libcamera {
+
+/**
+ * \def LIBCAMERA_DISABLE_COPY
+ * \brief Disable copy construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonCopyable
+ * {
+ * public:
+ * NonCopyable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_COPY(NonCopyable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DISABLE_MOVE
+ * \brief Disable move construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonMoveable
+ * {
+ * public:
+ * NonMoveable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_MOVE(NonMoveable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DISABLE_COPY_AND_MOVE
+ * \brief Disable copy and move construction and assignment of the \a klass
+ * \param klass The name of the class
+ *
+ * Example usage:
+ * \code{.cpp}
+ * class NonCopyableNonMoveable
+ * {
+ * public:
+ * NonCopyableNonMoveable();
+ * ...
+ *
+ * private:
+ * LIBCAMERA_DISABLE_COPY_AND_MOVE(NonCopyableNonMoveable)
+ * };
+ * \endcode
+ */
+
+/**
+ * \def LIBCAMERA_DECLARE_PRIVATE
+ * \brief Declare private data for a public class
+ *
+ * The LIBCAMERA_DECLARE_PRIVATE() macro plumbs the infrastructure necessary to
+ * make a class manage its private data through a d-pointer. It shall be used at
+ * the very top of the class definition.
+ */
+
+/**
+ * \def LIBCAMERA_DECLARE_PUBLIC
+ * \brief Declare public data for a private class
+ * \param klass The public class name
+ *
+ * The LIBCAMERA_DECLARE_PUBLIC() macro is the counterpart of
+ * LIBCAMERA_DECLARE_PRIVATE() to be used in the private data class. It shall be
+ * used at the very top of the private class definition, with the public class
+ * name passed as the \a klass parameter.
+ */
+
+/**
+ * \def LIBCAMERA_O_PTR()
+ * \brief Retrieve the public instance corresponding to the private data
+ *
+ * This macro is part of the libcamera::Extensible class infrastructure. It may
+ * be used in any member function of a libcamera::Extensible::Private subclass
+ * to access the public class instance corresponding to the private data.
+ */
+
+/**
+ * \class Extensible
+ * \brief Base class to manage private data through a d-pointer
+ *
+ * The Extensible class provides a base class to implement the
+ * <a href="https://wiki.qt.io/D-Pointer">d-pointer</a> design pattern (also
+ * known as <a href="https://en.wikipedia.org/wiki/Opaque_pointer">opaque pointer</a>
+ * or <a href="https://en.cppreference.com/w/cpp/language/pimpl">pImpl idiom</a>).
+ * It helps create public classes that can be extended without breaking their
+ * ABI. Such classes store their private data in a separate private data object,
+ * referenced by a pointer in the public class (hence the name d-pointer).
+ *
+ * Classes that follow this design pattern are referred to herein as extensible
+ * classes. To be extensible, a class PublicClass shall:
+ *
+ * - inherit from the Extensible class or from another extensible class
+ * - invoke the LIBCAMERA_DECLARE_PRIVATE() macro at the very top of the class
+ * definition
+ * - define a private data class named PublicClass::Private that inherits from
+ * the Private data class of the base class
+ * - invoke the LIBCAMERA_DECLARE_PUBLIC() macro at the very top of the Private
+ * data class definition
+ * - pass a pointer to a newly allocated Private data object to the constructor
+ * of the base class
+ *
+ * Additionally, if the PublicClass is not final, it shall expose one or more
+ * constructors that take a pointer to a Private data instance, to be used by
+ * derived classes.
+ *
+ * The Private class is fully opaque to users of the libcamera public API.
+ * Internally, it can be kept private to the implementation of PublicClass, or
+ * be exposed to other classes. In the latter case, the members of the Private
+ * class need to be qualified with appropriate access specifiers. The
+ * PublicClass and Private classes always have full access to each other's
+ * protected and private members.
+ *
+ * The PublicClass exposes its Private data pointer through the _d() function.
+ * In the other direction, the pointer to the PublicClass can be retrieved in
+ * functions of the Private class using the LIBCAMERA_O_PTR() macro.
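+ *
+ * A minimal sketch of an extensible class (the class names are hypothetical):
+ * \code{.cpp}
+ * class MyClass : public Extensible
+ * {
+ * 	LIBCAMERA_DECLARE_PRIVATE()
+ *
+ * public:
+ * 	MyClass();
+ * };
+ *
+ * class MyClass::Private : public Extensible::Private
+ * {
+ * 	LIBCAMERA_DECLARE_PUBLIC(MyClass)
+ * };
+ *
+ * MyClass::MyClass()
+ * 	: Extensible(std::make_unique<Private>())
+ * {
+ * }
+ * \endcode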
+ */
+
+/**
+ * \brief Construct an instance of an Extensible class
+ * \param[in] d Pointer to the private data instance
+ *
+ * The private data lifetime is managed by the Extensible class, which destroys
+ * it when the Extensible instance is destroyed.
+ */
+Extensible::Extensible(std::unique_ptr<Extensible::Private> d)
+ : d_(std::move(d))
+{
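+	/*
+	 * The o_ back-pointer is const-qualified to prevent modification after
+	 * construction, hence the const_cast for this one-time initialization.
+	 */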
+ *const_cast<Extensible **>(&d_->o_) = this;
+}
+
+/**
+ * \fn Extensible::_d() const
+ * \brief Retrieve the private data instance
+ *
+ * This template function isn't meant to be called directly. Instead, classes
+ * derived from Extensible get, through the LIBCAMERA_DECLARE_PRIVATE() macro,
+ * overridden _d() functions that return the correct pointer type to the
+ * corresponding derived Private class.
+ *
+ * The lifetime of the private data is tied to the Extensible class. The caller
+ * shall not retain any reference to the returned pointer for longer than it
+ * holds a reference to the Extensible instance.
+ *
+ * \return A pointer to the private data instance
+ */
+
+/**
+ * \fn Extensible::_d()
+ * \copydoc Extensible::_d() const
+ */
+
+/**
+ * \var Extensible::d_
+ * \brief Pointer to the private data instance
+ */
+
+/**
+ * \class Extensible::Private
+ * \brief Base class for private data managed through a d-pointer
+ */
+
+/**
+ * \brief Construct an instance of an Extensible class private data
+ */
+Extensible::Private::Private()
+ : o_(nullptr)
+{
+}
+
+Extensible::Private::~Private()
+{
+}
+
+/**
+ * \var Extensible::Private::o_
+ * \brief Pointer to the public class object
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/event_dispatcher.cpp b/src/libcamera/base/event_dispatcher.cpp
new file mode 100644
index 00000000..5f4a5cb4
--- /dev/null
+++ b/src/libcamera/base/event_dispatcher.cpp
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Event dispatcher
+ */
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/event_dispatcher.h
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Event)
+
+/**
+ * \class EventDispatcher
+ * \brief Interface to manage the libcamera events and timers
+ *
+ * The EventDispatcher class allows the integration of the application event
+ * loop with libcamera by abstracting how events and timers are managed and
+ * processed.
+ *
+ * To listen to events, libcamera creates EventNotifier instances and registers
+ * them with the dispatcher with registerEventNotifier(). The event notifier
+ * \ref EventNotifier::activated signal is then emitted by the dispatcher
+ * whenever the event is detected.
+ *
+ * To set timers, libcamera creates Timer instances and registers them with the
+ * dispatcher with registerTimer(). The timer \ref Timer::timeout signal is then
+ * emitted by the dispatcher when the timer times out.
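+ *
+ * A minimal sketch of an application event loop built on the dispatcher,
+ * assuming it runs in the current thread and a hypothetical done flag
+ * terminates it:
+ * \code{.cpp}
+ * EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
+ * while (!done)
+ * 	dispatcher->processEvents();
+ * \endcode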
+ */
+
+EventDispatcher::~EventDispatcher()
+{
+}
+
+/**
+ * \fn EventDispatcher::registerEventNotifier()
+ * \brief Register an event notifier
+ * \param[in] notifier The event notifier to register
+ *
+ * Once the \a notifier is registered with the dispatcher, the dispatcher will
+ * emit the notifier \ref EventNotifier::activated signal whenever a
+ * corresponding event is detected on the notifier's file descriptor. The event
+ * is monitored until the notifier is unregistered with
+ * unregisterEventNotifier().
+ *
+ * Registering multiple notifiers for the same file descriptor and event type is
+ * not allowed and results in undefined behaviour.
+ */
+
+/**
+ * \fn EventDispatcher::unregisterEventNotifier()
+ * \brief Unregister an event notifier
+ * \param[in] notifier The event notifier to unregister
+ *
+ * After this function returns the \a notifier is guaranteed not to emit the
+ * \ref EventNotifier::activated signal.
+ *
+ * If the notifier isn't registered, this function performs no operation.
+ */
+
+/**
+ * \fn EventDispatcher::registerTimer()
+ * \brief Register a timer
+ * \param[in] timer The timer to register
+ *
+ * Once the \a timer is registered with the dispatcher, the dispatcher will emit
+ * the timer \ref Timer::timeout signal when the timer times out. The timer can
+ * be unregistered with unregisterTimer() before it times out, in which case the
+ * signal will not be emitted.
+ *
+ * When the \a timer times out, it is automatically unregistered by the
+ * dispatcher and can be registered again, as early as in the \ref Timer::timeout
+ * signal handlers.
+ *
+ * Registering the same timer multiple times is not allowed and results in
+ * undefined behaviour.
+ */
+
+/**
+ * \fn EventDispatcher::unregisterTimer()
+ * \brief Unregister a timer
+ * \param[in] timer The timer to unregister
+ *
+ * After this function returns the \a timer is guaranteed not to emit the
+ * \ref Timer::timeout signal.
+ *
+ * If the timer isn't registered, this function performs no operation.
+ */
+
+/**
+ * \fn EventDispatcher::processEvents()
+ * \brief Wait for and process pending events
+ *
+ * This function processes all pending events associated with registered event
+ * notifiers and timers and signals the corresponding EventNotifier and Timer
+ * objects. If no events are pending, it waits for the first event and processes
+ * it before returning.
+ */
+
+/**
+ * \fn EventDispatcher::interrupt()
+ * \brief Interrupt any running processEvents() call as soon as possible
+ *
+ * Calling this function interrupts any blocking processEvents() call in
+ * progress. The processEvents() function will return as soon as possible,
+ * after processing pending timers and events. If processEvents() isn't in
+ * progress, it will be interrupted immediately the next time it gets called.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp
new file mode 100644
index 00000000..52bfb34e
--- /dev/null
+++ b/src/libcamera/base/event_dispatcher_poll.cpp
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Poll-based event dispatcher
+ */
+
+#include <libcamera/base/event_dispatcher_poll.h>
+
+#include <iomanip>
+#include <poll.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/eventfd.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/timer.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file base/event_dispatcher_poll.h
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Event)
+
+static const char *notifierType(EventNotifier::Type type)
+{
+ if (type == EventNotifier::Read)
+ return "read";
+ if (type == EventNotifier::Write)
+ return "write";
+ if (type == EventNotifier::Exception)
+ return "exception";
+
+ return "";
+}
+
+/**
+ * \class EventDispatcherPoll
+ * \brief A poll-based event dispatcher
+ */
+
+EventDispatcherPoll::EventDispatcherPoll()
+ : processingEvents_(false)
+{
+ /*
+ * Create the event fd. Failures are fatal as we can't implement an
+ * interruptible dispatcher without the fd.
+ */
+ eventfd_ = UniqueFD(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
+ if (!eventfd_.isValid())
+ LOG(Event, Fatal) << "Unable to create eventfd";
+}
+
+EventDispatcherPoll::~EventDispatcherPoll()
+{
+}
+
+void EventDispatcherPoll::registerEventNotifier(EventNotifier *notifier)
+{
+ EventNotifierSetPoll &set = notifiers_[notifier->fd()];
+ EventNotifier::Type type = notifier->type();
+
+ if (set.notifiers[type] && set.notifiers[type] != notifier) {
+ LOG(Event, Warning)
+ << "Ignoring duplicate " << notifierType(type)
+ << " notifier for fd " << notifier->fd();
+ return;
+ }
+
+ set.notifiers[type] = notifier;
+}
+
+void EventDispatcherPoll::unregisterEventNotifier(EventNotifier *notifier)
+{
+ auto iter = notifiers_.find(notifier->fd());
+ if (iter == notifiers_.end())
+ return;
+
+ EventNotifierSetPoll &set = iter->second;
+ EventNotifier::Type type = notifier->type();
+
+ if (!set.notifiers[type])
+ return;
+
+ if (set.notifiers[type] != notifier) {
+ LOG(Event, Warning)
+ << notifierType(type) << " notifier for fd "
+ << notifier->fd() << " is not registered";
+ return;
+ }
+
+ set.notifiers[type] = nullptr;
+
+ /*
+ * Don't race with event processing if this function is called from an
+ * event notifier. The notifiers_ entry will be erased by
+ * processEvents().
+ */
+ if (processingEvents_)
+ return;
+
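+	/* Erase the notifiers_ entry if it is now empty. */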
+ if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
+ notifiers_.erase(iter);
+}
+
+void EventDispatcherPoll::registerTimer(Timer *timer)
+{
+ for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
+ if ((*iter)->deadline() > timer->deadline()) {
+ timers_.insert(iter, timer);
+ return;
+ }
+ }
+
+ timers_.push_back(timer);
+}
+
+void EventDispatcherPoll::unregisterTimer(Timer *timer)
+{
+ for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
+ if (*iter == timer) {
+ timers_.erase(iter);
+ return;
+ }
+
+ /*
+ * As the timers list is ordered, we can stop as soon as we go
+ * past the deadline.
+ */
+ if ((*iter)->deadline() > timer->deadline())
+ break;
+ }
+}
+
+void EventDispatcherPoll::processEvents()
+{
+ int ret;
+
+ Thread::current()->dispatchMessages();
+
+ /* Create the pollfd array. */
+ std::vector<struct pollfd> pollfds;
+ pollfds.reserve(notifiers_.size() + 1);
+
+ for (auto notifier : notifiers_)
+ pollfds.push_back({ notifier.first, notifier.second.events(), 0 });
+
+ pollfds.push_back({ eventfd_.get(), POLLIN, 0 });
+
+ /* Wait for events and process notifiers and timers. */
+ do {
+ ret = poll(&pollfds);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret < 0) {
+ ret = -errno;
+ LOG(Event, Warning) << "poll() failed with " << strerror(-ret);
+ } else if (ret > 0) {
+ processInterrupt(pollfds.back());
+ pollfds.pop_back();
+ processNotifiers(pollfds);
+ }
+
+ processTimers();
+}
+
+void EventDispatcherPoll::interrupt()
+{
+ uint64_t value = 1;
+ ssize_t ret = write(eventfd_.get(), &value, sizeof(value));
+ if (ret != sizeof(value)) {
+ if (ret < 0)
+ ret = -errno;
+ LOG(Event, Error)
+ << "Failed to interrupt event dispatcher ("
+ << ret << ")";
+ }
+}
+
+short EventDispatcherPoll::EventNotifierSetPoll::events() const
+{
+ short events = 0;
+
+ if (notifiers[EventNotifier::Read])
+ events |= POLLIN;
+ if (notifiers[EventNotifier::Write])
+ events |= POLLOUT;
+ if (notifiers[EventNotifier::Exception])
+ events |= POLLPRI;
+
+ return events;
+}
+
+int EventDispatcherPoll::poll(std::vector<struct pollfd> *pollfds)
+{
+ /* Compute the timeout. */
+ Timer *nextTimer = !timers_.empty() ? timers_.front() : nullptr;
+ struct timespec timeout;
+
+ if (nextTimer) {
+ utils::time_point now = utils::clock::now();
+
+ if (nextTimer->deadline() > now)
+ timeout = utils::duration_to_timespec(nextTimer->deadline() - now);
+ else
+ timeout = { 0, 0 };
+
+ LOG(Event, Debug)
+ << "next timer " << nextTimer << " expires in "
+ << timeout.tv_sec << "."
+ << std::setfill('0') << std::setw(9)
+ << timeout.tv_nsec;
+ }
+
+ return ppoll(pollfds->data(), pollfds->size(),
+ nextTimer ? &timeout : nullptr, nullptr);
+}
+
+void EventDispatcherPoll::processInterrupt(const struct pollfd &pfd)
+{
+ if (!(pfd.revents & POLLIN))
+ return;
+
+ uint64_t value;
+ ssize_t ret = read(eventfd_.get(), &value, sizeof(value));
+ if (ret != sizeof(value)) {
+ if (ret < 0)
+ ret = -errno;
+ LOG(Event, Error)
+ << "Failed to process interrupt (" << ret << ")";
+ }
+}
+
+void EventDispatcherPoll::processNotifiers(const std::vector<struct pollfd> &pollfds)
+{
+ static const struct {
+ EventNotifier::Type type;
+ short events;
+ } events[] = {
+ { EventNotifier::Read, POLLIN },
+ { EventNotifier::Write, POLLOUT },
+ { EventNotifier::Exception, POLLPRI },
+ };
+
+ processingEvents_ = true;
+
+ for (const pollfd &pfd : pollfds) {
+ auto iter = notifiers_.find(pfd.fd);
+ ASSERT(iter != notifiers_.end());
+
+ EventNotifierSetPoll &set = iter->second;
+
+ for (const auto &event : events) {
+ EventNotifier *notifier = set.notifiers[event.type];
+
+ if (!notifier)
+ continue;
+
+ /*
+ * If the file descriptor is invalid, disable the
+ * notifier immediately.
+ */
+ if (pfd.revents & POLLNVAL) {
+ LOG(Event, Warning)
+ << "Disabling " << notifierType(event.type)
+ << " due to invalid file descriptor "
+ << pfd.fd;
+ unregisterEventNotifier(notifier);
+ continue;
+ }
+
+ if (pfd.revents & event.events)
+ notifier->activated.emit();
+ }
+
+ /* Erase the notifiers_ entry if it is now empty. */
+ if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
+ notifiers_.erase(iter);
+ }
+
+ processingEvents_ = false;
+}
+
+void EventDispatcherPoll::processTimers()
+{
+ utils::time_point now = utils::clock::now();
+
+ while (!timers_.empty()) {
+ Timer *timer = timers_.front();
+ if (timer->deadline() > now)
+ break;
+
+ timers_.pop_front();
+ timer->stop();
+ timer->timeout.emit();
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/event_notifier.cpp b/src/libcamera/base/event_notifier.cpp
new file mode 100644
index 00000000..495c281d
--- /dev/null
+++ b/src/libcamera/base/event_notifier.cpp
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * File descriptor event notifier
+ */
+
+#include <libcamera/base/event_notifier.h>
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/camera_manager.h>
+
+/**
+ * \file event_notifier.h
+ * \brief File descriptor event notifier
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Event)
+
+/**
+ * \class EventNotifier
+ * \brief Notify of activity on a file descriptor
+ *
+ * The EventNotifier models a file descriptor event source that can be
+ * monitored. It is created with the file descriptor to be monitored and the
+ * type of event, and is enabled by default. It will emit the \ref activated
+ * signal whenever an event of the monitored type occurs on the file descriptor.
+ *
+ * Supported types of events are EventNotifier::Read, EventNotifier::Write and
+ * EventNotifier::Exception. The type is specified when constructing the
+ * notifier, and can be retrieved using the type() function. To listen to
+ * multiple event types on the same file descriptor multiple notifiers must be
+ * created.
+ *
+ * The notifier can be disabled with the setEnabled() function. When the notifier
+ * is disabled it ignores events and does not emit the \ref activated signal.
+ * The notifier can then be re-enabled with the setEnabled() function.
+ *
+ * Creating multiple notifiers of the same type for the same file descriptor is
+ * not allowed and results in undefined behaviour.
+ *
+ * Notifier events are detected and dispatched from the
+ * EventDispatcher::processEvents() function.
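+ *
+ * A minimal sketch (the file descriptor fd and the Receiver class are
+ * hypothetical):
+ * \code{.cpp}
+ * EventNotifier notifier(fd, EventNotifier::Read);
+ * notifier.activated.connect(&receiver, &Receiver::onReadable);
+ * \endcode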
+ */
+
+/**
+ * \enum EventNotifier::Type
+ * Type of file descriptor event to listen for.
+ * \var EventNotifier::Read
+ * Data is available to be read from the file descriptor
+ * \var EventNotifier::Write
+ * Data can be written to the file descriptor
+ * \var EventNotifier::Exception
+ * An exception has occurred on the file descriptor
+ */
+
+/**
+ * \brief Construct an event notifier with a file descriptor and event type
+ * \param[in] fd The file descriptor to monitor
+ * \param[in] type The event type to monitor
+ * \param[in] parent The parent Object
+ */
+EventNotifier::EventNotifier(int fd, Type type, Object *parent)
+ : Object(parent), fd_(fd), type_(type), enabled_(false)
+{
+ setEnabled(true);
+}
+
+EventNotifier::~EventNotifier()
+{
+ setEnabled(false);
+}
+
+/**
+ * \fn EventNotifier::type()
+ * \brief Retrieve the type of the event being monitored
+ * \return The type of the event
+ */
+
+/**
+ * \fn EventNotifier::fd()
+ * \brief Retrieve the file descriptor being monitored
+ * \return The file descriptor
+ */
+
+/**
+ * \fn EventNotifier::enabled()
+ * \brief Retrieve the notifier state
+ * \return True if the notifier is enabled, or false otherwise
+ * \sa setEnabled()
+ */
+
+/**
+ * \brief Enable or disable the notifier
+ * \param[in] enable True to enable the notifier, false to disable it
+ *
+ * This function enables or disables the notifier. A disabled notifier ignores
+ * events and does not emit the \ref activated signal.
+ *
+ * \context This function is \threadbound.
+ */
+void EventNotifier::setEnabled(bool enable)
+{
+ if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
+ return;
+
+ if (enabled_ == enable)
+ return;
+
+ enabled_ = enable;
+
+ EventDispatcher *dispatcher = thread()->eventDispatcher();
+ if (enable)
+ dispatcher->registerEventNotifier(this);
+ else
+ dispatcher->unregisterEventNotifier(this);
+}
+
+/**
+ * \var EventNotifier::activated
+ * \brief Signal emitted when the event occurs
+ *
+ * This signal is emitted when the event \ref type() occurs on the file
+ * descriptor monitored by the notifier.
+ */
+
+void EventNotifier::message(Message *msg)
+{
+ if (msg->type() == Message::ThreadMoveMessage) {
+ if (enabled_) {
+ setEnabled(false);
+ invokeMethod(&EventNotifier::setEnabled,
+ ConnectionTypeQueued, true);
+ }
+ }
+
+ Object::message(msg);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/file.cpp b/src/libcamera/base/file.cpp
new file mode 100644
index 00000000..2b83a517
--- /dev/null
+++ b/src/libcamera/base/file.cpp
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * File I/O operations
+ */
+
+#include <libcamera/base/file.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+/**
+ * \file base/file.h
+ * \brief File I/O operations
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(File)
+
+/**
+ * \class File
+ * \brief Interface for I/O operations on files
+ *
+ * The File class provides an interface to perform I/O operations on files. It
+ * wraps opening, closing and mapping files in memory, and handles the cleaning
+ * of allocated resources.
+ *
+ * File instances are usually constructed with a file name, but the name can be
+ * set later through the setFileName() function. Instances are not automatically
+ * opened when constructed, and shall be opened explicitly with open().
+ *
+ * Files can be mapped to the process memory with map(). Mapped regions can be
+ * unmapped manually with unmap(), and are automatically unmapped when the File
+ * is destroyed or when it is used to reference another file with setFileName().
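+ *
+ * A minimal read-only usage sketch (the file path is hypothetical, and map()
+ * is assumed to map the whole file by default):
+ * \code{.cpp}
+ * File file("/etc/example.conf");
+ * if (file.open(File::OpenModeFlag::ReadOnly)) {
+ * 	Span<uint8_t> data = file.map();
+ * 	...
+ * }
+ * \endcode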
+ */
+
+/**
+ * \enum File::MapFlag
+ * \brief Flags for the File::map() function
+ * \var File::MapFlag::NoOption
+ * \brief No option (used as default value)
+ * \var File::MapFlag::Private
+ * \brief The memory region is mapped as private; changes are not reflected
+ * in the file contents
+ */
+
+/**
+ * \typedef File::MapFlags
+ * \brief A bitwise combination of File::MapFlag values
+ */
+
+/**
+ * \enum File::OpenModeFlag
+ * \brief Mode in which a file is opened
+ * \var File::OpenModeFlag::NotOpen
+ * \brief The file is not open
+ * \var File::OpenModeFlag::ReadOnly
+ * \brief The file is open for reading
+ * \var File::OpenModeFlag::WriteOnly
+ * \brief The file is open for writing
+ * \var File::OpenModeFlag::ReadWrite
+ * \brief The file is open for reading and writing
+ */
+
+/**
+ * \typedef File::OpenMode
+ * \brief A bitwise combination of File::OpenModeFlag values
+ */
+
+/**
+ * \brief Construct a File to represent the file \a name
+ * \param[in] name The file name
+ *
+ * Upon construction the File object is closed and shall be opened with open()
+ * before performing I/O operations.
+ */
+File::File(const std::string &name)
+ : name_(name), mode_(OpenModeFlag::NotOpen), error_(0)
+{
+}
+
+/**
+ * \brief Construct a File without an associated name
+ *
+ * Before being used for any purpose, the file name shall be set with
+ * setFileName().
+ */
+File::File()
+ : mode_(OpenModeFlag::NotOpen), error_(0)
+{
+}
+
+/**
+ * \brief Destroy a File instance
+ *
+ * Any memory mapping associated with the File is unmapped, and the File is
+ * closed if it is open.
+ */
+File::~File()
+{
+ unmapAll();
+ close();
+}
+
+/**
+ * \fn const std::string &File::fileName() const
+ * \brief Retrieve the file name
+ * \return The file name
+ */
+
+/**
+ * \brief Set the name of the file
+ * \param[in] name The name of the file
+ *
+ * The \a name can contain an absolute path, a relative path or no path at all.
+ * Calling this function on an open file results in undefined behaviour.
+ *
+ * Any memory mapping associated with the File is unmapped.
+ */
+void File::setFileName(const std::string &name)
+{
+ if (isOpen()) {
+ LOG(File, Error)
+ << "Can't set file name on already open file " << name_;
+ return;
+ }
+
+ unmapAll();
+
+ name_ = name;
+}
+
+/**
+ * \brief Check if the file specified by fileName() exists
+ *
+ * This function checks if the file specified by fileName() exists. The File
+ * instance doesn't need to be open to check for file existence, and this
+ * function may return false even if the file is open, if it was deleted from
+ * the file system.
+ *
+ * \return True if the file exists, false otherwise
+ */
+bool File::exists() const
+{
+ return exists(name_);
+}
+
+/**
+ * \brief Open the file in the given mode
+ * \param[in] mode The open mode
+ *
+ * This function opens the file specified by fileName() in \a mode. If the file
+ * doesn't exist and the mode is WriteOnly or ReadWrite, this function will
+ * attempt to create the file with initial permissions set to 0666 (modified by
+ * the process' umask).
+ *
+ * The file is opened with the O_CLOEXEC flag, and will be closed automatically
+ * when a new binary is executed with one of the exec(3) functions.
+ *
+ * The error() status is updated.
+ *
+ * \return True on success, false otherwise
+ */
+bool File::open(File::OpenMode mode)
+{
+ if (isOpen()) {
+ LOG(File, Error) << "File " << name_ << " is already open";
+ return false;
+ }
+
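+	/*
+	 * The OpenModeFlag values are laid out so that masking the mode with
+	 * ReadWrite and subtracting one yields the corresponding O_RDONLY,
+	 * O_WRONLY or O_RDWR access mode.
+	 */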
+ int flags = static_cast<OpenMode::Type>(mode & OpenModeFlag::ReadWrite) - 1;
+ if (mode & OpenModeFlag::WriteOnly)
+ flags |= O_CREAT;
+
+ fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
+ if (!fd_.isValid()) {
+ error_ = -errno;
+ return false;
+ }
+
+ mode_ = mode;
+ error_ = 0;
+ return true;
+}
+
+/**
+ * \fn bool File::isOpen() const
+ * \brief Check if the file is open
+ * \return True if the file is open, false otherwise
+ */
+
+/**
+ * \fn OpenMode File::openMode() const
+ * \brief Retrieve the file open mode
+ * \return The file open mode
+ */
+
+/**
+ * \brief Close the file
+ *
+ * This function closes the File. If the File is not open, it performs no
+ * operation. Memory mappings created with map() are not destroyed when the
+ * file is closed.
+ */
+void File::close()
+{
+ if (!fd_.isValid())
+ return;
+
+ fd_.reset();
+ mode_ = OpenModeFlag::NotOpen;
+}
+
+/**
+ * \fn int File::error() const
+ * \brief Retrieve the file error status
+ *
+ * This function retrieves the error status from the last file open or I/O
+ * operation. The error status is a negative number as defined by errno.h. If
+ * no error occurred, this function returns 0.
+ *
+ * \return The file error status
+ */
+
+/**
+ * \brief Retrieve the file size
+ *
+ * This function retrieves the size of the file on the filesystem. The File
+ * instance shall be open to retrieve its size. The error() status is not
+ * modified, error codes are returned directly on failure.
+ *
+ * \return The file size in bytes on success, or a negative error code otherwise
+ */
+ssize_t File::size() const
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ struct stat st;
+ int ret = fstat(fd_.get(), &st);
+ if (ret < 0)
+ return -errno;
+
+ return st.st_size;
+}
+
+/**
+ * \brief Return current read or write position
+ *
+ * If the file is closed, this function returns 0.
+ *
+ * \return The current read or write position
+ */
+off_t File::pos() const
+{
+ if (!isOpen())
+ return 0;
+
+ return lseek(fd_.get(), 0, SEEK_CUR);
+}
+
+/**
+ * \brief Set the read or write position
+ * \param[in] pos The desired position
+ * \return The resulting offset from the beginning of the file on success, or a
+ * negative error code otherwise
+ */
+off_t File::seek(off_t pos)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ off_t ret = lseek(fd_.get(), pos, SEEK_SET);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+/**
+ * \brief Read data from the file
+ * \param[in] data Memory to read data into
+ *
+ * Read at most \a data.size() bytes from the file into \a data.data(), and
+ * return the number of bytes read. If less data than requested is available,
+ * the returned byte count may be smaller than the requested size. If no more
+ * data is available, 0 is returned.
+ *
+ * The position of the file as returned by pos() is advanced by the number of
+ * bytes read. If an error occurs, the position isn't modified.
+ *
+ * \return The number of bytes read on success, or a negative error code
+ * otherwise
+ */
+ssize_t File::read(const Span<uint8_t> &data)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ size_t readBytes = 0;
+ ssize_t ret = 0;
+
+ /* Retry in case of interrupted system calls. */
+ while (readBytes < data.size()) {
+ ret = ::read(fd_.get(), data.data() + readBytes,
+ data.size() - readBytes);
+ if (ret <= 0)
+ break;
+
+ readBytes += ret;
+ }
+
+ if (ret < 0 && !readBytes)
+ return -errno;
+
+ return readBytes;
+}
+
+/**
+ * \brief Write data to the file
+ * \param[in] data Memory containing data to be written
+ *
+ * Write at most \a data.size() bytes from \a data.data() to the file, and
+ * return the number of bytes written. If the file system doesn't have enough
+ * space for the data, the returned byte count may be less than requested.
+ *
+ * The position of the file as returned by pos() is advanced by the number of
+ * bytes written. If an error occurs, the position isn't modified.
+ *
+ * \return The number of bytes written on success, or a negative error code
+ * otherwise
+ */
+ssize_t File::write(const Span<const uint8_t> &data)
+{
+ if (!isOpen())
+ return -EINVAL;
+
+ size_t writtenBytes = 0;
+
+ /* Retry in case of interrupted system calls. */
+ while (writtenBytes < data.size()) {
+ ssize_t ret = ::write(fd_.get(), data.data() + writtenBytes,
+ data.size() - writtenBytes);
+ if (ret <= 0)
+ break;
+
+ writtenBytes += ret;
+ }
+
+ if (data.size() && !writtenBytes)
+ return -errno;
+
+ return writtenBytes;
+}
+
+/**
+ * \brief Map a region of the file in the process memory
+ * \param[in] offset The region offset within the file
+ * \param[in] size The region size
+ * \param[in] flags The mapping flags
+ *
+ * This function maps a region of \a size bytes of the file starting at \a
+ * offset into the process memory. The File instance shall be open, but may be
+ * closed after mapping the region. Mappings stay valid when the File is
+ * closed, and are destroyed automatically when the File is deleted.
+ *
+ * If \a size is a negative value, this function maps the region starting at \a
+ * offset until the end of the file.
+ *
+ * The mapping memory protection is controlled by the file open mode, unless \a
+ * flags contains MapFlag::Private in which case the region is mapped in
+ * read/write mode.
+ *
+ * The error() status is updated.
+ *
+ * \return The mapped memory on success, or an empty span otherwise
+ */
+Span<uint8_t> File::map(off_t offset, ssize_t size, File::MapFlags flags)
+{
+ if (!isOpen()) {
+ error_ = -EBADF;
+ return {};
+ }
+
+ if (size < 0) {
+ size = File::size();
+ if (size < 0) {
+ error_ = size;
+ return {};
+ }
+
+ size -= offset;
+ }
+
+ int mmapFlags = flags & MapFlag::Private ? MAP_PRIVATE : MAP_SHARED;
+
+ int prot = 0;
+ if (mode_ & OpenModeFlag::ReadOnly)
+ prot |= PROT_READ;
+ if (mode_ & OpenModeFlag::WriteOnly)
+ prot |= PROT_WRITE;
+ if (flags & MapFlag::Private)
+ prot |= PROT_WRITE;
+
+ void *map = mmap(NULL, size, prot, mmapFlags, fd_.get(), offset);
+ if (map == MAP_FAILED) {
+ error_ = -errno;
+ return {};
+ }
+
+ maps_.emplace(map, size);
+
+ error_ = 0;
+ return { static_cast<uint8_t *>(map), static_cast<size_t>(size) };
+}
+
+/**
+ * \brief Unmap a region mapped with map()
+ * \param[in] addr The region address
+ *
+ * The error() status is updated.
+ *
+ * \return True on success, or false if an error occurs
+ */
+bool File::unmap(uint8_t *addr)
+{
+ auto iter = maps_.find(static_cast<void *>(addr));
+ if (iter == maps_.end()) {
+ error_ = -ENOENT;
+ return false;
+ }
+
+ int ret = munmap(addr, iter->second);
+ if (ret < 0) {
+ error_ = -errno;
+ return false;
+ }
+
+ maps_.erase(iter);
+
+ error_ = 0;
+ return true;
+}
+
+void File::unmapAll()
+{
+ for (const auto &map : maps_)
+ munmap(map.first, map.second);
+
+ maps_.clear();
+}
+
+/**
+ * \brief Check if the file specified by \a name exists
+ * \param[in] name The file name
+ * \return True if the file exists, false otherwise
+ */
+bool File::exists(const std::string &name)
+{
+ struct stat st;
+ int ret = stat(name.c_str(), &st);
+ if (ret < 0)
+ return false;
+
+ /* Directories can not be handled here, even if they exist. */
+ return !S_ISDIR(st.st_mode);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/flags.cpp b/src/libcamera/base/flags.cpp
new file mode 100644
index 00000000..9981f2ed
--- /dev/null
+++ b/src/libcamera/base/flags.cpp
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Type-safe enum-based bitfields
+ */
+
+#include <libcamera/base/flags.h>
+
+/**
+ * \file base/flags.h
+ * \brief Enum-based bit fields
+ */
+
+namespace libcamera {
+
+/**
+ * \class Flags
+ * \brief Type-safe container for enum-based bitfields
+ *
+ * The Flags template class provides type-safe bitwise operators on enum values.
+ * It allows using enum types for bitfields, while preventing unsafe casts from
+ * integer types and mixing of flags from different enum types.
+ *
+ * To use the Flags class, declare an enum containing the desired bit flags, and
+ * use the Flags<enum> class to store bitfields based on the enum. If bitwise
+ * operators on the underlying enum are also desired, they can be enabled with
+ * the LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum) macro.
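+ *
+ * A minimal sketch (the MyFlag enum is hypothetical):
+ * \code{.cpp}
+ * enum class MyFlag {
+ * 	FlagA = (1 << 0),
+ * 	FlagB = (1 << 1),
+ * };
+ *
+ * LIBCAMERA_FLAGS_ENABLE_OPERATORS(MyFlag)
+ *
+ * Flags<MyFlag> flags = MyFlag::FlagA | MyFlag::FlagB;
+ * if (flags & MyFlag::FlagA)
+ * 	...
+ * \endcode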
+ */
+
+/**
+ * \typedef Flags::Type
+ * \brief The underlying data type of the enum
+ */
+
+/**
+ * \fn Flags::Flags()
+ * \brief Construct a Flags instance with a zero value
+ */
+
+/**
+ * \fn Flags::Flags(E flag)
+ * \brief Construct a Flags instance storing the \a flag
+ * \param[in] flag The initial value
+ */
+
+/**
+ * \fn Flags &Flags::operator&=(E flag)
+ * \brief Store the bitwise AND of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator&=(Flags other)
+ * \brief Store the bitwise AND of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator|=(E flag)
+ * \brief Store the bitwise OR of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator|=(Flags other)
+ * \brief Store the bitwise OR of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator^=(E flag)
+ * \brief Store the bitwise XOR of this Flags and the \a flag in this Flags
+ * \param[in] flag The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn Flags &Flags::operator^=(Flags other)
+ * \brief Store the bitwise XOR of this Flags and the \a other Flags in this Flags
+ * \param[in] other The second operand
+ * \return A reference to this Flags
+ */
+
+/**
+ * \fn bool Flags::operator==(E flag)
+ * \brief Compare flags for equality
+ * \param[in] flag The second operand
+ * \return True if the Flags and \a flag are equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator==(Flags other)
+ * \brief Compare flags for equality
+ * \param[in] other The second operand
+ * \return True if the Flags and \a other are equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator!=(E flag)
+ * \brief Compare flags for non-equality
+ * \param[in] flag The second operand
+ * \return True if the Flags and \a flag are not equal, false otherwise
+ */
+
+/**
+ * \fn bool Flags::operator!=(Flags other)
+ * \brief Compare flags for non-equality
+ * \param[in] other The second operand
+ * \return True if the Flags and \a other are not equal, false otherwise
+ */
+
+/**
+ * \fn Flags::operator Type() const
+ * \brief Convert the Flags to the underlying integer type
+ * \return The Flags value as an integer
+ */
+
+/**
+ * \fn Flags::operator bool() const
+ * \brief Convert the Flags to a boolean
+ * \return True if at least one flag is set, false otherwise
+ */
+
+/**
+ * \fn Flags Flags::operator&(E flag) const
+ * \brief Compute the bitwise AND of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the AND operation
+ */
+
+/**
+ * \fn Flags Flags::operator&(Flags other) const
+ * \brief Compute the bitwise AND of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the AND operation
+ */
+
+/**
+ * \fn Flags Flags::operator|(E flag) const
+ * \brief Compute the bitwise OR of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the OR operation
+ */
+
+/**
+ * \fn Flags Flags::operator|(Flags other) const
+ * \brief Compute the bitwise OR of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the OR operation
+ */
+
+/**
+ * \fn Flags Flags::operator^(E flag) const
+ * \brief Compute the bitwise XOR of this Flags and the \a flag
+ * \param[in] flag The second operand
+ * \return A Flags containing the result of the XOR operation
+ */
+
+/**
+ * \fn Flags Flags::operator^(Flags other) const
+ * \brief Compute the bitwise XOR of this Flags and the \a other Flags
+ * \param[in] other The second operand
+ * \return A Flags containing the result of the XOR operation
+ */
+
+/**
+ * \fn Flags Flags::operator~() const
+ * \brief Compute the bitwise NOT of this Flags
+ * \return A Flags containing the result of the NOT operation
+ */
+
+/**
+ * \fn bool Flags::operator!() const
+ * \brief Check if flags are set
+ * \return True if no flags are set, false otherwise
+ */
+
+/**
+ * \def LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum)
+ * \brief Enable bitwise operations on the \a enum enumeration
+ *
+ * This macro enables the bitwise AND, OR, XOR and NOT operators on the given
+ * \a enum. This allows the enum values to be safely used in bitwise operations
+ * with the Flags<> class.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp
new file mode 100644
index 00000000..3a656b8f
--- /dev/null
+++ b/src/libcamera/base/log.cpp
@@ -0,0 +1,1112 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2018, Google Inc.
+ *
+ * Logging infrastructure
+ */
+
+#include <libcamera/base/log.h>
+
+#include <array>
+#include <fstream>
+#include <iostream>
+#include <list>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <time.h>
+#include <unordered_set>
+
+#include <libcamera/logging.h>
+
+#include <libcamera/base/backtrace.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file base/log.h
+ * \brief Logging infrastructure
+ *
+ * libcamera includes a logging infrastructure used through the library that
+ * allows inspection of internal operation in a user-configurable way. The log
+ * messages are grouped in categories that represent areas of libcamera, and
+ * output of messages for each category can be controlled by independent log
+ * levels.
+ *
+ * The levels are configurable through the LIBCAMERA_LOG_LEVELS environment
+ * variable that contains a comma-separated list of 'category:level' pairs, for
+ * example 'Camera:DEBUG,*:WARN'.
+ *
+ * The category names are strings and can include a wildcard ('*') character at
+ * the end to match multiple categories.
+ *
+ * The levels are either numeric values, or strings containing the log level
+ * name. The available log levels are DEBUG, INFO, WARN, ERROR and FATAL. Log
+ * messages with a level higher than or equal to the configured log level for
+ * their category are output to the log, while other messages are silently
+ * discarded.
+ *
+ * By default log messages are output to std::cerr. They can be redirected to a
+ * log file by setting the LIBCAMERA_LOG_FILE environment variable to the name
+ * of the file. The file must be writable and is truncated if it exists. If any
+ * error occurs when opening the file, the file is ignored and the log is output
+ * to std::cerr.
+ */
+
+/**
+ * \file logging.h
+ * \brief Logging management
+ *
+ * API to change the logging output destination and log levels programmatically.
+ */
+
+namespace libcamera {
+
+static int log_severity_to_syslog(LogSeverity severity)
+{
+ switch (severity) {
+ case LogDebug:
+ return LOG_DEBUG;
+ case LogInfo:
+ return LOG_INFO;
+ case LogWarning:
+ return LOG_WARNING;
+ case LogError:
+ return LOG_ERR;
+ case LogFatal:
+ return LOG_ALERT;
+ default:
+ return LOG_NOTICE;
+ }
+}
+
+static const char *log_severity_name(LogSeverity severity)
+{
+ static const char *const names[] = {
+ "DEBUG",
+ " INFO",
+ " WARN",
+ "ERROR",
+ "FATAL",
+ };
+
+ if (static_cast<unsigned int>(severity) < std::size(names))
+ return names[severity];
+ else
+ return "UNKWN";
+}
+
+/**
+ * \brief Log output
+ *
+ * The LogOutput class models a log output destination.
+ */
+class LogOutput
+{
+public:
+ LogOutput(const char *path, bool color);
+ LogOutput(std::ostream *stream, bool color);
+ LogOutput();
+ ~LogOutput();
+
+ bool isValid() const;
+ void write(const LogMessage &msg);
+ void write(const std::string &msg);
+
+private:
+ void writeSyslog(LogSeverity severity, const std::string &msg);
+ void writeStream(const std::string &msg);
+
+ std::ostream *stream_;
+ LoggingTarget target_;
+ bool color_;
+};
+
+/**
+ * \brief Construct a log output based on a file
+ * \param[in] path Full path to log file
+ * \param[in] color True to output colored messages
+ */
+LogOutput::LogOutput(const char *path, bool color)
+ : target_(LoggingTargetFile), color_(color)
+{
+ stream_ = new std::ofstream(path);
+}
+
+/**
+ * \brief Construct a log output based on a stream
+ * \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
+ */
+LogOutput::LogOutput(std::ostream *stream, bool color)
+ : stream_(stream), target_(LoggingTargetStream), color_(color)
+{
+}
+
+/**
+ * \brief Construct a log output to syslog
+ */
+LogOutput::LogOutput()
+ : stream_(nullptr), target_(LoggingTargetSyslog), color_(false)
+{
+ openlog("libcamera", LOG_PID, 0);
+}
+
+LogOutput::~LogOutput()
+{
+ switch (target_) {
+ case LoggingTargetFile:
+ delete stream_;
+ break;
+ case LoggingTargetSyslog:
+ closelog();
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * \brief Check if the log output is valid
+ * \return True if the log output is valid
+ */
+bool LogOutput::isValid() const
+{
+ switch (target_) {
+ case LoggingTargetFile:
+ return stream_->good();
+ case LoggingTargetStream:
+ return stream_ != nullptr;
+ default:
+ return true;
+ }
+}
+
+namespace {
+
+/*
+ * For more information about ANSI escape codes, see
+ * https://en.wikipedia.org/wiki/ANSI_escape_code#Colors.
+ */
+constexpr const char *kColorReset = "\033[0m";
+constexpr const char *kColorGreen = "\033[0;32m";
+constexpr const char *kColorBrightRed = "\033[1;31m";
+constexpr const char *kColorBrightGreen = "\033[1;32m";
+constexpr const char *kColorBrightYellow = "\033[1;33m";
+constexpr const char *kColorBrightBlue = "\033[1;34m";
+constexpr const char *kColorBrightMagenta = "\033[1;35m";
+constexpr const char *kColorBrightCyan = "\033[1;36m";
+constexpr const char *kColorBrightWhite = "\033[1;37m";
+
+} /* namespace */
+
+/**
+ * \brief Write message to log output
+ * \param[in] msg Message to write
+ */
+void LogOutput::write(const LogMessage &msg)
+{
+ static const char *const severityColors[] = {
+ kColorBrightCyan,
+ kColorBrightGreen,
+ kColorBrightYellow,
+ kColorBrightRed,
+ kColorBrightMagenta,
+ };
+
+ const char *categoryColor = color_ ? kColorBrightWhite : "";
+ const char *fileColor = color_ ? kColorBrightBlue : "";
+ const char *prefixColor = color_ ? kColorGreen : "";
+ const char *resetColor = color_ ? kColorReset : "";
+ const char *severityColor = "";
+ LogSeverity severity = msg.severity();
+ std::string str;
+
+ if (color_) {
+ if (static_cast<unsigned int>(severity) < std::size(severityColors))
+ severityColor = severityColors[severity];
+ else
+ severityColor = kColorBrightWhite;
+ }
+
+ switch (target_) {
+ case LoggingTargetSyslog:
+ str = std::string(log_severity_name(severity)) + " "
+ + msg.category().name() + " " + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += msg.prefix() + ": ";
+ str += msg.msg();
+ writeSyslog(severity, str);
+ break;
+ case LoggingTargetStream:
+ case LoggingTargetFile:
+ str = "[" + utils::time_point_to_string(msg.timestamp()) + "] ["
+ + std::to_string(Thread::currentId()) + "] "
+ + severityColor + log_severity_name(severity) + " "
+ + categoryColor + msg.category().name() + " "
+ + fileColor + msg.fileInfo() + " ";
+ if (!msg.prefix().empty())
+ str += prefixColor + msg.prefix() + ": ";
+ str += resetColor + msg.msg();
+ writeStream(str);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * \brief Write string to log output
+ * \param[in] str String to write
+ */
+void LogOutput::write(const std::string &str)
+{
+ switch (target_) {
+ case LoggingTargetSyslog:
+ writeSyslog(LogDebug, str);
+ break;
+ case LoggingTargetStream:
+ case LoggingTargetFile:
+ writeStream(str);
+ break;
+ default:
+ break;
+ }
+}
+
+void LogOutput::writeSyslog(LogSeverity severity, const std::string &str)
+{
+ syslog(log_severity_to_syslog(severity), "%s", str.c_str());
+}
+
+void LogOutput::writeStream(const std::string &str)
+{
+ stream_->write(str.c_str(), str.size());
+ stream_->flush();
+}
+
+/**
+ * \brief Message logger
+ *
+ * The Logger class handles log configuration.
+ */
+class Logger
+{
+public:
+ ~Logger();
+
+ static Logger *instance();
+
+ void write(const LogMessage &msg);
+ void backtrace();
+
+ int logSetFile(const char *path, bool color);
+ int logSetStream(std::ostream *stream, bool color);
+ int logSetTarget(LoggingTarget target);
+ void logSetLevel(const char *category, const char *level);
+
+private:
+ Logger();
+
+ void parseLogFile();
+ void parseLogLevels();
+ static LogSeverity parseLogLevel(const std::string &level);
+
+ friend LogCategory;
+ void registerCategory(LogCategory *category);
+ LogCategory *findCategory(const char *name) const;
+
+ static bool destroyed_;
+
+ std::vector<LogCategory *> categories_;
+ std::list<std::pair<std::string, LogSeverity>> levels_;
+
+ std::shared_ptr<LogOutput> output_;
+};
+
+bool Logger::destroyed_ = false;
+
+/**
+ * \enum LoggingTarget
+ * \brief Log destination type
+ * \var LoggingTargetNone
+ * \brief No logging destination
+ * \sa Logger::logSetTarget
+ * \var LoggingTargetSyslog
+ * \brief Log to syslog
+ * \sa Logger::logSetTarget
+ * \var LoggingTargetFile
+ * \brief Log to file
+ * \sa Logger::logSetFile
+ * \var LoggingTargetStream
+ * \brief Log to stream
+ * \sa Logger::logSetStream
+ */
+
+/**
+ * \brief Direct logging to a file
+ * \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
+ *
+ * This function directs the log output to the file identified by \a path. The
+ * previous log target, if any, is closed, and all new log messages will be
+ * written to the new log file.
+ *
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a path refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring when
+ * not suitable for the log target.
+ *
+ * If the function returns an error, the log target is not changed.
+ *
+ * \return Zero on success, or a negative error code otherwise
+ */
+int logSetFile(const char *path, bool color)
+{
+ return Logger::instance()->logSetFile(path, color);
+}
+
+/**
+ * \brief Direct logging to a stream
+ * \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
+ *
+ * This function directs the log output to \a stream. The previous log target,
+ * if any, is closed, and all new log messages will be written to the new log
+ * stream.
+ *
+ * \a color controls whether or not the messages will be colored with standard
+ * ANSI escape codes. This is done regardless of whether \a stream refers to a
+ * standard file or a TTY; the caller is responsible for disabling coloring when
+ * not suitable for the log target.
+ *
+ * If the function returns an error, the log target is not changed.
+ *
+ * \return Zero on success, or a negative error code otherwise.
+ */
+int logSetStream(std::ostream *stream, bool color)
+{
+ return Logger::instance()->logSetStream(stream, color);
+}
+
+/**
+ * \brief Set the logging target
+ * \param[in] target Logging destination
+ *
+ * This function sets the logging output to the target specified by \a target.
+ * The allowed values of \a target are LoggingTargetNone and
+ * LoggingTargetSyslog. LoggingTargetNone will send the log output to nowhere,
+ * and LoggingTargetSyslog will send the log output to syslog. The previous
+ * log target, if any, is closed, and all new log messages will be written to
+ * the new log destination.
+ *
+ * LoggingTargetFile and LoggingTargetStream are not valid values for \a target.
+ * Use logSetFile() and logSetStream() instead, respectively.
+ *
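+ * For example, to send all log messages to syslog:
+ *
+ * \code{.cpp}
+ * libcamera::logSetTarget(libcamera::LoggingTargetSyslog);
+ * \endcode
+ *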
+ * If the function returns an error, the log target is not changed.
+ *
+ * \return Zero on success, or a negative error code otherwise.
+ */
+int logSetTarget(LoggingTarget target)
+{
+ return Logger::instance()->logSetTarget(target);
+}
+
+/**
+ * \brief Set the log level
+ * \param[in] category Logging category
+ * \param[in] level Log level
+ *
+ * This function sets the log level of \a category to \a level.
+ * \a level shall be one of the following strings:
+ * - "DEBUG"
+ * - "INFO"
+ * - "WARN"
+ * - "ERROR"
+ * - "FATAL"
+ *
+ * "*" is not a valid \a category for this function.
+ */
+void logSetLevel(const char *category, const char *level)
+{
+ Logger::instance()->logSetLevel(category, level);
+}
+
+Logger::~Logger()
+{
+ destroyed_ = true;
+
+ for (LogCategory *category : categories_)
+ delete category;
+}
+
+/**
+ * \brief Retrieve the logger instance
+ *
+ * The Logger is a singleton and can't be constructed manually. This function
+ * shall instead be used to retrieve the single global instance of the logger.
+ *
+ * \return The logger instance
+ */
+Logger *Logger::instance()
+{
+ static Logger instance;
+
+ if (destroyed_)
+ return nullptr;
+
+ return &instance;
+}
+
+/**
+ * \brief Write a message to the configured logger output
+ * \param[in] msg The message object
+ */
+void Logger::write(const LogMessage &msg)
+{
+ std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
+ if (!output)
+ return;
+
+ output->write(msg);
+}
+
+/**
+ * \brief Write a backtrace to the log
+ */
+void Logger::backtrace()
+{
+ std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
+ if (!output)
+ return;
+
+ /*
+ * Skip the first two entries that correspond to this function and
+ * ~LogMessage().
+ */
+ std::string backtrace = Backtrace().toString(2);
+ if (backtrace.empty()) {
+ output->write("Backtrace not available\n");
+ return;
+ }
+
+ output->write("Backtrace:\n");
+ output->write(backtrace);
+}
+
+/**
+ * \brief Set the log file
+ * \param[in] path Full path to the log file
+ * \param[in] color True to output colored messages
+ *
+ * \sa libcamera::logSetFile()
+ *
+ * \return Zero on success, or a negative error code otherwise.
+ */
+int Logger::logSetFile(const char *path, bool color)
+{
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(path, color);
+ if (!output->isValid())
+ return -EINVAL;
+
+ std::atomic_store(&output_, output);
+ return 0;
+}
+
+/**
+ * \brief Set the log stream
+ * \param[in] stream Stream to send log output to
+ * \param[in] color True to output colored messages
+ *
+ * \sa libcamera::logSetStream()
+ *
+ * \return Zero on success, or a negative error code otherwise.
+ */
+int Logger::logSetStream(std::ostream *stream, bool color)
+{
+ std::shared_ptr<LogOutput> output =
+ std::make_shared<LogOutput>(stream, color);
+ std::atomic_store(&output_, output);
+ return 0;
+}
+
+/**
+ * \brief Set the log target
+ * \param[in] target Log destination
+ *
+ * \sa libcamera::logSetTarget()
+ *
+ * \return Zero on success, or a negative error code otherwise.
+ */
+int Logger::logSetTarget(enum LoggingTarget target)
+{
+ switch (target) {
+ case LoggingTargetSyslog:
+ std::atomic_store(&output_, std::make_shared<LogOutput>());
+ break;
+ case LoggingTargetNone:
+ std::atomic_store(&output_, std::shared_ptr<LogOutput>());
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Set the log level
+ * \param[in] category Logging category
+ * \param[in] level Log level
+ *
+ * \sa libcamera::logSetLevel()
+ */
+void Logger::logSetLevel(const char *category, const char *level)
+{
+ LogSeverity severity = parseLogLevel(level);
+ if (severity == LogInvalid)
+ return;
+
+ for (LogCategory *c : categories_) {
+ if (c->name() == category) {
+ c->setSeverity(severity);
+ break;
+ }
+ }
+}
+
+/**
+ * \brief Construct a logger
+ *
+ * If the LIBCAMERA_LOG_FILE environment variable is not set, log to std::cerr.
+ * The log messages are then colored by default. This can be overridden by
+ * setting the LIBCAMERA_LOG_NO_COLOR environment variable to disable coloring.
+ */
+Logger::Logger()
+{
+ bool color = !utils::secure_getenv("LIBCAMERA_LOG_NO_COLOR");
+ logSetStream(&std::cerr, color);
+
+ parseLogFile();
+ parseLogLevels();
+}
+
+/**
+ * \brief Parse the log output file from the environment
+ *
+ * If the LIBCAMERA_LOG_FILE environment variable is set, open the file it
+ * points to and redirect the logger output to it. If the environment variable
+ * is set to "syslog", then the logger output will be directed to syslog. Errors
+ * are silently ignored and don't affect the logger output (set to std::cerr by
+ * default).
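+ *
+ * For example, a test program could redirect the output to syslog before the
+ * library is initialised:
+ *
+ * \code{.cpp}
+ * setenv("LIBCAMERA_LOG_FILE", "syslog", 1);
+ * \endcode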
+ */
+void Logger::parseLogFile()
+{
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (!file)
+ return;
+
+ if (!strcmp(file, "syslog")) {
+ logSetTarget(LoggingTargetSyslog);
+ return;
+ }
+
+ logSetFile(file, false);
+}
+
+/**
+ * \brief Parse the log levels from the environment
+ *
+ * The log levels are stored in the LIBCAMERA_LOG_LEVELS environment variable
+ * as a list of "category:level" pairs, separated by commas (','). Parse the
+ * variable and store the levels to configure all log categories.
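+ *
+ * For example (the category name is illustrative):
+ *
+ * \code{.cpp}
+ * setenv("LIBCAMERA_LOG_LEVELS", "Camera:DEBUG,*:WARN", 1);
+ * \endcode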
+ */
+void Logger::parseLogLevels()
+{
+ const char *debug = utils::secure_getenv("LIBCAMERA_LOG_LEVELS");
+ if (!debug)
+ return;
+
+ for (const char *pair = debug; *debug != '\0'; pair = debug) {
+ const char *comma = strchrnul(debug, ',');
+ size_t len = comma - pair;
+
+ /* Skip over the comma. */
+ debug = *comma == ',' ? comma + 1 : comma;
+
+ /* Skip to the next pair if the pair is empty. */
+ if (!len)
+ continue;
+
+ std::string category;
+ std::string level;
+
+ const char *colon = static_cast<const char *>(memchr(pair, ':', len));
+ if (!colon) {
+ /* 'x' is a shortcut for '*:x'. */
+ category = "*";
+ level = std::string(pair, len);
+ } else {
+ category = std::string(pair, colon - pair);
+ level = std::string(colon + 1, comma - colon - 1);
+ }
+
+ /* Both the category and the level must be specified. */
+ if (category.empty() || level.empty())
+ continue;
+
+ LogSeverity severity = parseLogLevel(level);
+ if (severity == LogInvalid)
+ continue;
+
+ levels_.push_back({ category, severity });
+ }
+}
+
+/**
+ * \brief Parse a log level string into a LogSeverity
+ * \param[in] level The log level string
+ *
+ * Log levels can be specified as an integer value in the range from LogDebug to
+ * LogFatal, or as a string corresponding to the severity name in uppercase. Any
+ * other value is invalid.
+ *
+ * \return The log severity, or LogInvalid if the string is invalid
+ */
+LogSeverity Logger::parseLogLevel(const std::string &level)
+{
+ static const char *const names[] = {
+ "DEBUG",
+ "INFO",
+ "WARN",
+ "ERROR",
+ "FATAL",
+ };
+
+ int severity;
+
+	if (std::isdigit(static_cast<unsigned char>(level[0]))) {
+ char *endptr;
+ severity = strtoul(level.c_str(), &endptr, 10);
+ if (*endptr != '\0' || severity > LogFatal)
+ severity = LogInvalid;
+ } else {
+ severity = LogInvalid;
+ for (unsigned int i = 0; i < std::size(names); ++i) {
+ if (names[i] == level) {
+ severity = i;
+ break;
+ }
+ }
+ }
+
+ return static_cast<LogSeverity>(severity);
+}
+
+/**
+ * \brief Register a log category with the logger
+ * \param[in] category The log category
+ *
+ * Log categories must have unique names. It is invalid to call this function
+ * if a log category with the same name already exists.
+ */
+void Logger::registerCategory(LogCategory *category)
+{
+ categories_.push_back(category);
+
+ const std::string &name = category->name();
+ for (const std::pair<std::string, LogSeverity> &level : levels_) {
+ bool match = true;
+
+ for (unsigned int i = 0; i < level.first.size(); ++i) {
+ if (level.first[i] == '*')
+ break;
+
+ if (i >= name.size() ||
+ name[i] != level.first[i]) {
+ match = false;
+ break;
+ }
+ }
+
+ if (match) {
+ category->setSeverity(level.second);
+ break;
+ }
+ }
+}
+
+/**
+ * \brief Find an existing log category with the given name
+ * \param[in] name Name of the log category
+ * \return The pointer to the found log category or nullptr if not found
+ */
+LogCategory *Logger::findCategory(const char *name) const
+{
+ if (auto it = std::find_if(categories_.begin(), categories_.end(),
+ [name](auto c) { return c->name() == name; });
+ it != categories_.end()) {
+ return *it;
+ }
+
+ return nullptr;
+}
+
+/**
+ * \enum LogSeverity
+ * \brief Log message severity
+ * \var LogDebug
+ * \brief Debug message
+ * \var LogInfo
+ * \brief Informational message
+ * \var LogWarning
+ * \brief Warning message, signals a potential issue
+ * \var LogError
+ * \brief Error message, signals an unrecoverable issue
+ * \var LogFatal
+ * \brief Fatal message, signals an unrecoverable issue and aborts execution
+ */
+
+/**
+ * \class LogCategory
+ * \brief A category of log message
+ *
+ * The LogCategory class represents a category of log messages, related to an
+ * area of the library. It groups all messages belonging to the same category,
+ * and is used to control the log level per group.
+ */
+
+/**
+ * \brief Create a new LogCategory or return an existing one
+ * \param[in] name Name of the log category
+ *
+ * Create and return a new LogCategory with the given name if such a category
+ * does not yet exist, or return the existing one.
+ *
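+ * For instance (the category name is illustrative):
+ *
+ * \code{.cpp}
+ * LogCategory *category = LogCategory::create("MyCategory");
+ * \endcode
+ *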
+ * \return The pointer to the LogCategory
+ */
+LogCategory *LogCategory::create(const char *name)
+{
+ static Mutex mutex_;
+ MutexLocker locker(mutex_);
+ LogCategory *category = Logger::instance()->findCategory(name);
+
+ if (!category) {
+ category = new LogCategory(name);
+ Logger::instance()->registerCategory(category);
+ }
+
+ return category;
+}
+
+/**
+ * \brief Construct a log category
+ * \param[in] name The category name
+ */
+LogCategory::LogCategory(const char *name)
+ : name_(name), severity_(LogSeverity::LogInfo)
+{
+}
+
+/**
+ * \fn LogCategory::name()
+ * \brief Retrieve the log category name
+ * \return The log category name
+ */
+
+/**
+ * \fn LogCategory::severity()
+ * \brief Retrieve the severity of the log category
+ * \sa setSeverity()
+ * \return The severity of the log category
+ */
+
+/**
+ * \brief Set the severity of the log category
+ * \param[in] severity The new severity
+ *
+ * Messages of severity higher than or equal to the severity of the log
+ * category are printed; other messages are discarded.
+ */
+void LogCategory::setSeverity(LogSeverity severity)
+{
+ severity_ = severity;
+}
+
+/**
+ * \brief Retrieve the default log category
+ *
+ * The default log category is named "default" and is used by the LOG() macro
+ * when no log category is specified.
+ *
+ * \return A reference to the default log category
+ */
+const LogCategory &LogCategory::defaultCategory()
+{
+ static const LogCategory *category = LogCategory::create("default");
+ return *category;
+}
+
+/**
+ * \class LogMessage
+ * \brief Internal log message representation
+ *
+ * The LogMessage class models a single message in the log. It serves as a
+ * helper to provide the std::ostream API for logging, and must never be used
+ * directly. Use the LOG() macro instead to access the log infrastructure.
+ */
+
+/**
+ * \brief Construct a log message for a given category
+ * \param[in] fileName The file name where the message is logged from
+ * \param[in] line The line number where the message is logged from
+ * \param[in] category The log message category, controlling how the message
+ * will be displayed
+ * \param[in] severity The log message severity, controlling how the message
+ * will be displayed
+ * \param[in] prefix The log message prefix
+ *
+ * Create a log message pertaining to line \a line of file \a fileName. The
+ * \a severity argument sets the message severity to control whether it will be
+ * output or dropped. The \a prefix optionally identifies the object instance
+ * logging the message.
+ */
+LogMessage::LogMessage(const char *fileName, unsigned int line,
+ const LogCategory &category, LogSeverity severity,
+ const std::string &prefix)
+ : category_(category), severity_(severity), prefix_(prefix)
+{
+ init(fileName, line);
+}
+
+/**
+ * \brief Move-construct a log message
+ * \param[in] other The other message
+ *
+ * The move constructor is meant to support the _log() functions. Thanks to copy
+ * elision it will likely never be called, but C++11 only permits copy elision
+ * without mandating it, unlike C++17. To avoid potential link errors depending
+ * on the compiler type, version and optimization level, the move
+ * constructor is defined even if it will likely never be called, and ensures
+ * that the destructor of the \a other message will not output anything to the
+ * log by setting the severity to LogInvalid.
+ */
+LogMessage::LogMessage(LogMessage &&other)
+ : msgStream_(std::move(other.msgStream_)), category_(other.category_),
+ severity_(other.severity_)
+{
+ other.severity_ = LogInvalid;
+}
+
+void LogMessage::init(const char *fileName, unsigned int line)
+{
+ /* Log the timestamp, severity and file information. */
+ timestamp_ = utils::clock::now();
+
+ std::ostringstream ossFileInfo;
+ ossFileInfo << utils::basename(fileName) << ":" << line;
+ fileInfo_ = ossFileInfo.str();
+}
+
+LogMessage::~LogMessage()
+{
+ /* Don't print anything if we have been moved to another LogMessage. */
+ if (severity_ == LogInvalid)
+ return;
+
+ Logger *logger = Logger::instance();
+ if (!logger)
+ return;
+
+ msgStream_ << std::endl;
+
+ if (severity_ >= category_.severity())
+ logger->write(*this);
+
+ if (severity_ == LogSeverity::LogFatal) {
+ logger->backtrace();
+ std::abort();
+ }
+}
+
+/**
+ * \fn std::ostream& LogMessage::stream()
+ *
+ * Data is added to a LogMessage through the stream returned by this function.
+ * The stream implements the std::ostream API and can be used for logging in
+ * the same way as std::cout.
+ *
+ * \return A reference to the log message stream
+ */
+
+/**
+ * \fn LogMessage::timestamp()
+ * \brief Retrieve the timestamp of the log message
+ * \return The timestamp of the message
+ */
+
+/**
+ * \fn LogMessage::severity()
+ * \brief Retrieve the severity of the log message
+ * \return The severity of the message
+ */
+
+/**
+ * \fn LogMessage::category()
+ * \brief Retrieve the category of the log message
+ * \return The category of the message
+ */
+
+/**
+ * \fn LogMessage::fileInfo()
+ * \brief Retrieve the file info of the log message
+ * \return The file info of the message
+ */
+
+/**
+ * \fn LogMessage::prefix()
+ * \brief Retrieve the prefix of the log message
+ * \return The prefix of the message
+ */
+
+/**
+ * \fn LogMessage::msg()
+ * \brief Retrieve the message text of the log message
+ * \return The message text of the message, as a string
+ */
+
+/**
+ * \class Loggable
+ * \brief Base class to support log message extensions
+ *
+ * The Loggable class allows classes to extend log messages without any change
+ * to the way the LOG() macro is invoked. By inheriting from Loggable and
+ * implementing the logPrefix() virtual function, a class can specify extra
+ * information to be automatically added to messages logged from class member
+ * functions.
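+ *
+ * A minimal sketch of a Loggable subclass (the class name and prefix are
+ * illustrative):
+ *
+ * \code{.cpp}
+ * class MyCamera : public Loggable
+ * {
+ * protected:
+ *         std::string logPrefix() const override { return "MyCamera"; }
+ * };
+ * \endcode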
+ */
+
+Loggable::~Loggable()
+{
+}
+
+/**
+ * \fn Loggable::logPrefix()
+ * \brief Retrieve a string to be prefixed to the log message
+ *
+ * This function allows classes inheriting from the Loggable class to extend the
+ * logger with an object-specific prefix output right before the log message
+ * contents.
+ *
+ * \return A string to be prefixed to the log message
+ */
+
+/**
+ * \brief Create a temporary LogMessage object to log a message
+ * \param[in] category The log message category
+ * \param[in] severity The log message severity
+ * \param[in] fileName The file name where the message is logged from
+ * \param[in] line The line number where the message is logged from
+ *
+ * This function is used as a backend by the LOG() macro to create a log message
+ * for locations inheriting from the Loggable class.
+ *
+ * \return A log message
+ */
+LogMessage Loggable::_log(const LogCategory *category, LogSeverity severity,
+ const char *fileName, unsigned int line) const
+{
+ return LogMessage(fileName, line,
+ category ? *category : LogCategory::defaultCategory(),
+ severity, logPrefix());
+}
+
+/**
+ * \brief Create a temporary LogMessage object to log a message
+ * \param[in] category The log message category
+ * \param[in] severity The log message severity
+ * \param[in] fileName The file name where the message is logged from
+ * \param[in] line The line number where the message is logged from
+ *
+ * This function is used as a backend by the LOG() macro to create a log
+ * message for locations not inheriting from the Loggable class.
+ *
+ * \return A log message
+ */
+LogMessage _log(const LogCategory *category, LogSeverity severity,
+ const char *fileName, unsigned int line)
+{
+ return LogMessage(fileName, line,
+ category ? *category : LogCategory::defaultCategory(),
+ severity);
+}
+
+/**
+ * \def LOG_DECLARE_CATEGORY(name)
+ * \hideinitializer
+ * \brief Declare a category of log messages
+ *
+ * This macro is used to declare a log category defined in another compilation
+ * unit by the LOG_DEFINE_CATEGORY() macro.
+ *
+ * The LOG_DECLARE_CATEGORY() macro must be used in the libcamera namespace.
+ *
+ * \sa LogCategory
+ */
+
+/**
+ * \def LOG_DEFINE_CATEGORY(name)
+ * \hideinitializer
+ * \brief Define a category of log messages
+ *
+ * This macro is used to define a log category that can then be used with the
+ * LOG() macro. Category names shall be unique. If a category is shared between
+ * compilation units, it shall be defined in one compilation unit only and
+ * declared with LOG_DECLARE_CATEGORY() in the other compilation units.
+ *
+ * The LOG_DEFINE_CATEGORY() macro must be used in the libcamera namespace.
+ *
+ * \sa LogCategory
+ */
+
+/**
+ * \def LOG(category, severity)
+ * \hideinitializer
+ * \brief Log a message
+ * \param[in] category Category (optional)
+ * \param[in] severity Severity
+ *
+ * Return an std::ostream reference to which a message can be logged using the
+ * iostream API. The \a category, if specified, sets the message category. When
+ * absent the default category is used. The \a severity controls whether the
+ * message is printed or discarded, depending on the log level for the category.
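+ *
+ * Typical usage, with a category assumed to be defined elsewhere through
+ * LOG_DEFINE_CATEGORY(MyCategory):
+ *
+ * \code{.cpp}
+ * LOG(MyCategory, Info) << "initialization complete";
+ * \endcode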
+ *
+ * If the severity is set to Fatal, execution is aborted and the program
+ * terminates immediately after printing the message.
+ *
+ * \warning Logging from the destructor of a global object, either directly or
+ * indirectly, results in undefined behaviour.
+ *
+ * \todo Allow logging from destructors of global objects to the largest
+ * possible extent
+ */
+
+/**
+ * \def ASSERT(condition)
+ * \hideinitializer
+ * \brief Abort program execution if assertion fails
+ *
+ * If \a condition is false, ASSERT() logs an error message with the Fatal log
+ * level and aborts program execution.
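+ *
+ * For example, with an illustrative invariant:
+ *
+ * \code{.cpp}
+ * ASSERT(index < buffers.size());
+ * \endcode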
+ *
+ * If the macro NDEBUG is defined before including log.h, ASSERT() generates no
+ * code.
+ *
+ * Using conditions that have side effects with ASSERT() is not recommended, as
+ * these effects would depend on whether NDEBUG is defined or not. Similarly,
+ * ASSERT() should not be used to check for errors that can occur under normal
+ * conditions as those checks would then be removed when compiling with NDEBUG.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/memfd.cpp b/src/libcamera/base/memfd.cpp
new file mode 100644
index 00000000..ed0b299b
--- /dev/null
+++ b/src/libcamera/base/memfd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Anonymous file creation
+ */
+
+#include <libcamera/base/memfd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/memfd.h
+ * \brief Anonymous file creation
+ */
+
+#ifndef __DOXYGEN__
+namespace {
+
+/* uClibc doesn't provide the file sealing API. */
+#if not HAVE_FILE_SEALS
+#define F_ADD_SEALS 1033
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#endif
+
+#if not HAVE_MEMFD_CREATE
+int memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(SYS_memfd_create, name, flags);
+}
+#endif
+
+} /* namespace */
+#endif /* __DOXYGEN__ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(File)
+
+/**
+ * \class MemFd
+ * \brief Helper class to create anonymous files
+ *
+ * Anonymous files behave like regular files, and can be modified, truncated,
+ * memory-mapped and so on. Unlike regular files, they however live in RAM and
+ * don't have permanent backing storage.
+ */
+
+/**
+ * \enum MemFd::Seal
+ * \brief Seals for the MemFd::create() function
+ * \var MemFd::Seal::None
+ * \brief No seals (used as default value)
+ * \var MemFd::Seal::Shrink
+ * \brief Prevent the memfd from shrinking
+ * \var MemFd::Seal::Grow
+ * \brief Prevent the memfd from growing
+ */
+
+/**
+ * \typedef MemFd::Seals
+ * \brief A bitwise combination of MemFd::Seal values
+ */
+
+/**
+ * \brief Create an anonymous file
+ * \param[in] name The file name (displayed in symbolic links in /proc/self/fd/)
+ * \param[in] size The file size
+ * \param[in] seals The file seals
+ *
+ * This function is a helper that wraps anonymous file (memfd) creation and
+ * sets the file size and optional seals.
+ *
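+ * A usage sketch, with an illustrative name and size:
+ *
+ * \code{.cpp}
+ * UniqueFD fd = MemFd::create("parameters", 4096,
+ *                             MemFd::Seal::Shrink | MemFd::Seal::Grow);
+ * \endcode
+ *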
+ * \return The descriptor of the anonymous file if creation succeeded, or an
+ * invalid UniqueFD otherwise
+ */
+UniqueFD MemFd::create(const char *name, std::size_t size, Seals seals)
+{
+ int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to allocate memfd storage for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ UniqueFD memfd(ret);
+
+ ret = ftruncate(memfd.get(), size);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to set memfd size for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+ if (seals) {
+ int fileSeals = (seals & Seal::Shrink ? F_SEAL_SHRINK : 0)
+ | (seals & Seal::Grow ? F_SEAL_GROW : 0);
+
+ ret = fcntl(memfd.get(), F_ADD_SEALS, fileSeals);
+ if (ret < 0) {
+ ret = errno;
+ LOG(File, Error)
+ << "Failed to seal the memfd for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+ }
+
+ return memfd;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build
new file mode 100644
index 00000000..a742dfdf
--- /dev/null
+++ b/src/libcamera/base/meson.build
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_base_public_sources = files([
+ 'bound_method.cpp',
+ 'class.cpp',
+ 'flags.cpp',
+ 'object.cpp',
+ 'shared_fd.cpp',
+ 'signal.cpp',
+ 'unique_fd.cpp',
+])
+
+libcamera_base_internal_sources = files([
+ 'backtrace.cpp',
+ 'event_dispatcher.cpp',
+ 'event_dispatcher_poll.cpp',
+ 'event_notifier.cpp',
+ 'file.cpp',
+ 'log.cpp',
+ 'memfd.cpp',
+ 'message.cpp',
+ 'mutex.cpp',
+ 'semaphore.cpp',
+ 'thread.cpp',
+ 'timer.cpp',
+ 'utils.cpp',
+])
+
+libdw = dependency('libdw', required : false)
+libunwind = dependency('libunwind', required : false)
+
+if cc.has_header_symbol('execinfo.h', 'backtrace')
+ config_h.set('HAVE_BACKTRACE', 1)
+endif
+
+if libdw.found()
+ config_h.set('HAVE_DW', 1)
+endif
+
+if libunwind.found()
+ config_h.set('HAVE_UNWIND', 1)
+endif
+
+libcamera_base_deps = [
+ libatomic,
+ libdw,
+ libthreads,
+ libunwind,
+]
+
+# Internal components must use the libcamera_base_private dependency to enable
+ # the use of headers which must not be exposed to the libcamera public API.
+libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ]
+
+libcamera_base_lib = shared_library('libcamera-base',
+ [
+ libcamera_base_public_sources,
+ libcamera_base_internal_sources,
+ libcamera_base_headers,
+ ],
+ version : libcamera_version,
+ soversion : libcamera_soversion,
+ name_prefix : '',
+ install : true,
+ cpp_args : libcamera_base_args,
+ include_directories : libcamera_includes,
+ dependencies : libcamera_base_deps)
+
+libcamera_base = declare_dependency(sources : [
+ libcamera_base_headers,
+ ],
+ include_directories : libcamera_includes,
+ link_with : libcamera_base_lib)
+
+pkg_mod = import('pkgconfig')
+pkg_mod.generate(libcamera_base_lib,
+ description : 'Camera support base utility library',
+ subdirs : 'libcamera')
+
+libcamera_base_private = declare_dependency(dependencies : libcamera_base,
+ compile_args : libcamera_base_args)
diff --git a/src/libcamera/base/message.cpp b/src/libcamera/base/message.cpp
new file mode 100644
index 00000000..098faac6
--- /dev/null
+++ b/src/libcamera/base/message.cpp
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Message queue support
+ */
+
+#include <libcamera/base/message.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+/**
+ * \file base/message.h
+ * \brief Message queue support
+ *
+ * The messaging API enables inter-thread communication through message
+ * posting. Messages can be sent from any thread to any recipient deriving from
+ * the Object class.
+ *
+ * To post a message, the sender allocates it dynamically as instance of a class
+ * derived from Message. It then posts the message to an Object recipient
+ * through Object::postMessage(). Message ownership is passed to the object,
+ * thus the message shall not store any temporary data.
+ *
+ * The message is delivered in the context of the object's thread, through the
+ * Object::message() virtual function. After delivery the message is
+ * automatically deleted.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Message)
+
+std::atomic_uint Message::nextUserType_{ Message::UserMessage };
+
+/**
+ * \class Message
+ * \brief A message that can be posted to a Thread
+ */
+
+/**
+ * \enum Message::Type
+ * \brief The message type
+ * \var Message::None
+ * \brief Invalid message type
+ * \var Message::InvokeMessage
+ * \brief Asynchronous method invocation across threads
+ * \var Message::ThreadMoveMessage
+ * \brief Object is being moved to a different thread
+ * \var Message::DeferredDelete
+ * \brief Object is scheduled for deletion
+ * \var Message::UserMessage
+ * \brief First value available for user-defined messages
+ */
+
+/**
+ * \brief Construct a message object of type \a type
+ * \param[in] type The message type
+ */
+Message::Message(Message::Type type)
+ : type_(type)
+{
+}
+
+Message::~Message()
+{
+}
+
+/**
+ * \fn Message::type()
+ * \brief Retrieve the message type
+ * \return The message type
+ */
+
+/**
+ * \fn Message::receiver()
+ * \brief Retrieve the message receiver
+ * \return The message receiver
+ */
+
+/**
+ * \brief Reserve and register a custom user-defined message type
+ *
+ * Custom message types use values starting at Message::UserMessage. Assigning
+ * custom types manually may lead to accidental duplicated types. To avoid this
+ * problem, this function reserves and returns the next available user-defined
+ * message type.
+ *
+ * The recommended way to use this function is to subclass Message and provide a
+ * static accessor for the custom message type.
+ *
+ * \code{.cpp}
+ * class MyCustomMessage : public Message
+ * {
+ * public:
+ * MyCustomMessage() : Message(type()) {}
+ *
+ * static Message::Type type()
+ * {
+ * static Message::Type type = registerMessageType();
+ * return type;
+ * }
+ * };
+ * \endcode
+ *
+ * \return A new unique message type
+ */
+Message::Type Message::registerMessageType()
+{
+ return static_cast<Message::Type>(nextUserType_++);
+}
+
+/**
+ * \class InvokeMessage
+ * \brief A message carrying a method invocation across threads
+ */
+
+/**
+ * \brief Construct an InvokeMessage for method invocation on an Object
+ * \param[in] method The bound method
+ * \param[in] pack The packed method arguments
+ * \param[in] semaphore The semaphore used to signal message delivery
+ * \param[in] deleteMethod True to delete the \a method when the message is
+ * destroyed
+ */
+InvokeMessage::InvokeMessage(BoundMethodBase *method,
+ std::shared_ptr<BoundMethodPackBase> pack,
+ Semaphore *semaphore, bool deleteMethod)
+ : Message(Message::InvokeMessage), method_(method), pack_(pack),
+ semaphore_(semaphore), deleteMethod_(deleteMethod)
+{
+}
+
+InvokeMessage::~InvokeMessage()
+{
+ if (deleteMethod_)
+ delete method_;
+}
+
+/**
+ * \fn InvokeMessage::semaphore()
+ * \brief Retrieve the message semaphore passed to the constructor
+ * \return The message semaphore
+ */
+
+/**
+ * \brief Invoke the method bound to InvokeMessage::method_ with arguments
+ * InvokeMessage::pack_
+ */
+void InvokeMessage::invoke()
+{
+ method_->invokePack(pack_.get());
+}
+
+/**
+ * \var InvokeMessage::method_
+ * \brief The method to be invoked
+ */
+
+/**
+ * \var InvokeMessage::pack_
+ * \brief The packed method invocation arguments
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/mutex.cpp b/src/libcamera/base/mutex.cpp
new file mode 100644
index 00000000..2a4542c4
--- /dev/null
+++ b/src/libcamera/base/mutex.cpp
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Mutex classes with clang thread safety annotation
+ */
+
+#include <libcamera/base/mutex.h>
+
+/**
+ * \file base/mutex.h
+ * \brief Mutex classes with clang thread safety annotation
+ */
+
+namespace libcamera {
+
+/**
+ * \class Mutex
+ * \brief std::mutex wrapper with clang thread safety annotation
+ *
+ * The Mutex class wraps a std::mutex instance to add clang thread safety
+ * annotation support. The class exposes the same interface as std::mutex and
+ * can be used as a transparent replacement. It integrates with the
+ * MutexLocker and ConditionVariable classes.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/mutex for the complete API
+ * documentation.
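+ *
+ * A minimal sketch, assuming the LIBCAMERA_TSA_GUARDED_BY annotation from
+ * thread_annotations.h is available:
+ *
+ * \code{.cpp}
+ * Mutex mutex;
+ * int counter LIBCAMERA_TSA_GUARDED_BY(mutex) = 0;
+ *
+ * void increment()
+ * {
+ *         MutexLocker locker(mutex);
+ *         counter++;
+ * }
+ * \endcode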
+ */
+
+/**
+ * \class MutexLocker
+ * \brief std::unique_lock wrapper with clang thread safety annotation
+ *
+ * The MutexLocker class wraps a std::unique_lock instance to add clang thread
+ * safety annotation support. The class exposes the same interface as
+ * std::unique_lock and can be used as a transparent replacement. It integrates
+ * with the Mutex and ConditionVariable classes.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/unique_lock for the complete API
+ * documentation.
+ */
+
+/**
+ * \class ConditionVariable
+ * \brief std::condition_variable wrapper integrating with MutexLocker
+ *
+ * The ConditionVariable class wraps a std::condition_variable instance to
+ * integrate with the MutexLocker class. The class exposes the same interface as
+ * std::condition_variable and can be used as a transparent replacement.
+ *
+ * See https://en.cppreference.com/w/cpp/thread/condition_variable for the
+ * complete API documentation.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/object.cpp b/src/libcamera/base/object.cpp
new file mode 100644
index 00000000..745d2565
--- /dev/null
+++ b/src/libcamera/base/object.cpp
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Base object
+ */
+
+#include <libcamera/base/object.h>
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/semaphore.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
+
+/**
+ * \file base/object.h
+ * \brief Base object to support automatic signal disconnection
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Object)
+
+/**
+ * \class Object
+ * \brief Base object to support automatic signal disconnection
+ *
+ * The Object class simplifies signal/slot handling for classes implementing
+ * slots. By inheriting from Object, an object is automatically disconnected
+ * from all connected signals when it gets destroyed.
+ *
+ * Object instances are bound to the thread of their parent, or the thread in
+ * which they're created when they have no parent. When a message is posted to
+ * an object, its handler will run in the object's thread. This allows
+ * implementing easy message passing between threads by inheriting from the
+ * Object class.
+ *
+ * Deleting an object from a thread other than the one the object is bound to is
+ * unsafe, unless the caller ensures that the object's thread is stopped and no
+ * parent or child of the object gets deleted concurrently. See
+ * Object::~Object() for more information.
+ *
+ * Object slots connected to signals will also run in the context of the
+ * object's thread, regardless of whether the signal is emitted in the same or
+ * in another thread.
+ *
+ * Objects can be connected to multiple signals, but they can only be connected
+ * to each signal once. Attempting to create multiple concurrent connections
+ * between the same signal and the same Object (to either the same or different
+ * slots of the object) will cause an assertion failure. While it would be
+ * possible to allow the implementation to let objects connect to the same
+ * signal multiple times, there are no expected use cases for this in libcamera
+ * and this behaviour is restricted to favour defensive programming.
+ *
+ * \sa Message, Signal, Thread
+ */
+
+/**
+ * \brief Construct an Object instance
+ * \param[in] parent The object parent
+ *
+ * The new Object instance is bound to the thread of its \a parent, or to the
+ * current thread if the \a parent is nullptr.
+ */
+Object::Object(Object *parent)
+ : parent_(parent), pendingMessages_(0)
+{
+ thread_ = parent ? parent->thread() : Thread::current();
+
+ if (parent)
+ parent->children_.push_back(this);
+}
+
+/**
+ * \brief Destroy an Object instance
+ *
+ * Deleting an Object automatically disconnects all signals from the Object's
+ * slots. All the Object's children are made orphan, but stay bound to their
+ * current thread.
+ *
+ * Object instances shall be destroyed from the thread they are bound to,
+ * otherwise undefined behaviour may occur. If deletion of an Object needs to
+ * be scheduled from a different thread, deleteLater() shall be used.
+ *
+ * As an exception to this rule, Object instances may be deleted from a
+ * different thread if the thread the instance is bound to is stopped through
+ * the whole duration of the object's destruction, *and* the parent and children
+ * of the object do not get deleted concurrently. The caller is responsible for
+ * fulfilling those requirements.
+ *
+ * In all cases Object instances shall be deleted before the Thread they are
+ * bound to.
+ */
+Object::~Object()
+{
+ ASSERT(Thread::current() == thread_ || !thread_->isRunning());
+
+ /*
+ * Move signals to a private list to avoid concurrent iteration and
+ * deletion of items from Signal::disconnect().
+ */
+ std::list<SignalBase *> signals(std::move(signals_));
+ for (SignalBase *signal : signals)
+ signal->disconnect(this);
+
+ if (pendingMessages_)
+ thread()->removeMessages(this);
+
+ if (parent_) {
+ auto it = std::find(parent_->children_.begin(),
+ parent_->children_.end(), this);
+ ASSERT(it != parent_->children_.end());
+ parent_->children_.erase(it);
+ }
+
+ for (auto child : children_)
+ child->parent_ = nullptr;
+}
+
+/**
+ * \brief Schedule deletion of the instance in the thread it belongs to
+ *
+ * This function schedules deletion of the Object when control returns to the
+ * event loop that the object belongs to. This ensures the object is destroyed
+ * from the right context, as required by the libcamera threading model.
+ *
+ * If this function is called before the thread's event loop is started or after
+ * it has stopped, the object will be deleted when the event loop (re)starts. If
+ * this never occurs, the object will be leaked.
+ *
+ * Deferred deletion can be used to control the destruction context with shared
+ * pointers. An object managed with shared pointers is deleted when the last
+ * reference is destroyed, which makes it difficult to ensure through software
+ * design which context the deletion will take place in. With a custom deleter
+ * for the shared pointer using deleteLater(), the deletion can be guaranteed to
+ * happen in the thread the object is bound to.
+ *
+ * \code{.cpp}
+ * std::shared_ptr<MyObject> createObject()
+ * {
+ * struct Deleter : std::default_delete<MyObject> {
+ * void operator()(MyObject *obj)
+ * {
+ * obj->deleteLater();
+ * }
+ * };
+ *
+ * MyObject *obj = new MyObject();
+ *
+ * return std::shared_ptr<MyObject>(obj, Deleter());
+ * }
+ * \endcode
+ *
+ * \context This function is \threadsafe.
+ */
+void Object::deleteLater()
+{
+ postMessage(std::make_unique<Message>(Message::DeferredDelete));
+}
+
+/**
+ * \brief Post a message to the object's thread
+ * \param[in] msg The message
+ *
+ * This function posts the message \a msg to the message queue of the object's
+ * thread, to be delivered to the object through the message() function in the
+ * context of its thread. Message ownership is passed to the thread, and the
+ * message will be deleted after being delivered.
+ *
+ * Messages are delivered through the thread's event loop. If the thread is not
+ * running its event loop the message will not be delivered until the event
+ * loop gets started.
+ *
+ * Due to their asynchronous nature, threads do not provide any guarantee that
+ * all posted messages are delivered before the thread is stopped. See
+ * \ref thread-stop for additional information.
+ *
+ * \context This function is \threadsafe.
+ */
+void Object::postMessage(std::unique_ptr<Message> msg)
+{
+ thread()->postMessage(std::move(msg), this);
+}
+
+/**
+ * \brief Message handler for the object
+ * \param[in] msg The message
+ *
+ * This virtual function receives messages for the object. It is called in the
+ * context of the object's thread, and can be overridden to process custom
+ * messages. The parent Object::message() function shall be called for any
+ * message not handled by the override function.
+ *
+ * The message \a msg is valid only for the duration of the call, no reference
+ * to it shall be kept after this function returns.
+ */
+void Object::message(Message *msg)
+{
+ switch (msg->type()) {
+ case Message::InvokeMessage: {
+ /*
+ * A static_cast should be enough, but gcc 10 and 11 choke on
+ * it in release mode (with -O2 or -O3).
+ */
+ InvokeMessage *iMsg = dynamic_cast<InvokeMessage *>(msg);
+ Semaphore *semaphore = iMsg->semaphore();
+ iMsg->invoke();
+
+ if (semaphore)
+ semaphore->release();
+
+ break;
+ }
+
+ case Message::DeferredDelete:
+ delete this;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * \fn Object::assertThreadBound()
+ * \brief Check if the caller complies with thread-bound constraints
+ * \param[in] message The message to be printed on error
+ *
+ * This function verifies the calling constraints required by the \threadbound
+ * definition. It shall be called at the beginning of member functions of an
+ * Object subclass that are explicitly marked as thread-bound in their
+ * documentation.
+ *
+ * If the thread-bound constraints are not met, the function prints \a message
+ * as an error message. For debug builds, it additionally causes an assertion
+ * error.
+ *
+ * \todo Verify the thread-bound requirements for functions marked as
+ * thread-bound at the class level.
+ *
+ * \return True if the call is thread-bound compliant, false otherwise
+ */
+bool Object::assertThreadBound(const char *message)
+{
+ if (Thread::current() == thread_)
+ return true;
+
+ LOG(Object, Error) << message;
+ ASSERT(false);
+ return false;
+}
+
+/**
+ * \fn R Object::invokeMethod()
+ * \brief Invoke a method asynchronously on an Object instance
+ * \param[in] func The object method to invoke
+ * \param[in] type Connection type for method invocation
+ * \param[in] args The method arguments
+ *
+ * This function invokes the member method \a func with arguments \a args, based
+ * on the connection \a type. Depending on the type, the method will be called
+ * synchronously in the same thread or asynchronously in the object's thread.
+ *
+ * Arguments \a args passed by value or reference are copied, while pointers
+ * are passed untouched. The caller shall ensure that any pointer argument
+ * remains valid until the method is invoked.
+ *
+ * Due to the asynchronous nature of threads, functions invoked asynchronously
+ * with the ConnectionTypeQueued type are not guaranteed to be called before
+ * the thread is stopped. See \ref thread-stop for additional information.
+ *
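+ * For example, to queue an invocation in the object's thread (MyObject and
+ * its process() member function are hypothetical):
+ *
+ * \code{.cpp}
+ * object->invokeMethod(&MyObject::process, ConnectionTypeQueued, 42);
+ * \endcode
+ *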
+ * \context This function is \threadsafe.
+ *
+ * \return For connection types ConnectionTypeDirect and
+ * ConnectionTypeBlocking, return the return value of the invoked method. For
+ * connection type ConnectionTypeQueued, return a default-constructed R value.
+ */
+
+/**
+ * \fn Object::thread()
+ * \brief Retrieve the thread the object is bound to
+ * \context This function is \threadsafe.
+ * \return The thread the object is bound to
+ */
+
+/**
+ * \brief Move the object and all its children to a different thread
+ * \param[in] thread The target thread
+ *
+ * This function moves the object and all its children from the current thread
+ * to the new \a thread.
+ *
+ * Before the object is moved, a Message::ThreadMoveMessage message is sent to
+ * it. The message() function can be reimplemented in derived classes to be
+ * notified of the upcoming thread move and perform any required processing.
+ *
+ * Moving an object that has a parent is not allowed, and causes undefined
+ * behaviour.
+ *
+ * \context This function is \threadbound.
+ */
+void Object::moveToThread(Thread *thread)
+{
+ if (!assertThreadBound("Object can't be moved from another thread"))
+ return;
+
+ if (thread_ == thread)
+ return;
+
+ if (parent_) {
+ LOG(Object, Error)
+ << "Moving object to thread with a parent is not permitted";
+ return;
+ }
+
+ notifyThreadMove();
+
+ thread->moveObject(this);
+}
+
+void Object::notifyThreadMove()
+{
+ Message msg(Message::ThreadMoveMessage);
+ message(&msg);
+
+ for (auto child : children_)
+ child->notifyThreadMove();
+}
+
+/**
+ * \fn Object::parent()
+ * \brief Retrieve the object's parent
+ * \return The object's parent
+ */
+
+void Object::connect(SignalBase *signal)
+{
+ /*
+ * Connecting the same signal to an object multiple times is not
+ * supported.
+ */
+ ASSERT(std::find(signals_.begin(), signals_.end(), signal) == signals_.end());
+
+ signals_.push_back(signal);
+}
+
+void Object::disconnect(SignalBase *signal)
+{
+ for (auto iter = signals_.begin(); iter != signals_.end(); ) {
+ if (*iter == signal)
+ iter = signals_.erase(iter);
+ else
+ iter++;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/semaphore.cpp b/src/libcamera/base/semaphore.cpp
new file mode 100644
index 00000000..862f3b31
--- /dev/null
+++ b/src/libcamera/base/semaphore.cpp
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * General-purpose counting semaphore
+ */
+
+#include <libcamera/base/semaphore.h>
+
+/**
+ * \file base/semaphore.h
+ * \brief General-purpose counting semaphore
+ */
+
+namespace libcamera {
+
+/**
+ * \class Semaphore
+ * \brief General-purpose counting semaphore
+ *
+ * A semaphore is a locking primitive that protects resources. It is created
+ * with an initial number of resources (which may be 0), and offers two
+ * primitives to acquire and release resources. The acquire() function tries to
+ * acquire a number of resources, and blocks if not enough resources are
+ * available until they get released. The release() function releases a number
+ * of resources, waking up any consumer blocked on an acquire() call.
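+ *
+ * A minimal sketch that limits the number of in-flight work items to four
+ * (the work helpers are hypothetical):
+ *
+ * \code{.cpp}
+ * Semaphore slots(4);
+ *
+ * void submit()
+ * {
+ *         slots.acquire(1);
+ *         queueWork();
+ * }
+ *
+ * void complete()
+ * {
+ *         slots.release(1);
+ * }
+ * \endcode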
+ */
+
+/**
+ * \brief Construct a semaphore with \a n resources
+ * \param[in] n The resource count
+ */
+Semaphore::Semaphore(unsigned int n)
+ : available_(n)
+{
+}
+
+/**
+ * \brief Retrieve the number of available resources
+ * \return The number of available resources
+ */
+unsigned int Semaphore::available()
+{
+ MutexLocker locker(mutex_);
+ return available_;
+}
+
+/**
+ * \brief Acquire \a n resources
+ * \param[in] n The resource count
+ *
+ * This function attempts to acquire \a n resources. If \a n is higher than the
+ * number of available resources, the call will block until enough resources
+ * become available.
+ */
+void Semaphore::acquire(unsigned int n)
+{
+ MutexLocker locker(mutex_);
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return available_ >= n;
+ });
+ available_ -= n;
+}
+
+/**
+ * \brief Try to acquire \a n resources without blocking
+ * \param[in] n The resource count
+ *
+ * This function attempts to acquire \a n resources. If \a n is higher than the
+ * number of available resources, it returns false immediately without
+ * acquiring any resource. Otherwise it acquires the resources and returns
+ * true.
+ *
+ * \return True if the resources have been acquired, false otherwise
+ */
+bool Semaphore::tryAcquire(unsigned int n)
+{
+ MutexLocker locker(mutex_);
+ if (available_ < n)
+ return false;
+
+ available_ -= n;
+ return true;
+}
+
+/**
+ * \brief Release \a n resources
+ * \param[in] n The resource count
+ *
+ * This function releases \a n resources, increasing the available resource
+ * count by \a n. If the number of available resources becomes large enough for
+ * any consumer blocked on an acquire() call, those consumers get woken up.
+ */
+void Semaphore::release(unsigned int n)
+{
+ {
+ MutexLocker locker(mutex_);
+ available_ += n;
+ }
+
+ cv_.notify_all();
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/shared_fd.cpp b/src/libcamera/base/shared_fd.cpp
new file mode 100644
index 00000000..7afc8ca5
--- /dev/null
+++ b/src/libcamera/base/shared_fd.cpp
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * File descriptor wrapper with shared ownership
+ */
+
+#include <libcamera/base/shared_fd.h>
+
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/unique_fd.h>
+
+/**
+ * \file base/shared_fd.h
+ * \brief File descriptor wrapper
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SharedFD)
+
+/**
+ * \class SharedFD
+ * \brief RAII-style wrapper for file descriptors
+ *
+ * The SharedFD class provides RAII-style lifetime management of file
+ * descriptors with an efficient mechanism for ownership sharing. At its core,
+ * an internal Descriptor object wraps a file descriptor (expressed as a signed
+ * integer) with an RAII-style interface. The Descriptor is then implicitly
+ * shared with all SharedFD instances constructed as copies.
+ *
+ * When constructed from a numerical file descriptor, the SharedFD instance
+ * either duplicates or takes over the file descriptor:
+ *
+ * - The SharedFD(const int &) constructor duplicates the numerical file
+ * descriptor and wraps the duplicate in a Descriptor. The caller is
+ * responsible for closing the original file descriptor, and the value
+ * returned by fd() will be different from the value passed to the
+ * constructor.
+ *
+ * - The SharedFD(int &&) constructor takes over the numerical file descriptor
+ * and wraps it in a Descriptor. The caller shall not touch the original file
+ * descriptor once the function returns, and the value returned by get() will
+ * be identical to the value passed to the constructor.
+ *
+ * The copy constructor and assignment operator create copies that share the
+ * Descriptor, while the move versions of those functions additionally make the
+ * other SharedFD invalid. When the last SharedFD that references a Descriptor
+ * is destroyed, the file descriptor is closed.
+ *
+ * The numerical file descriptor is available through the get() function. All
+ * SharedFD instances created as copies of a SharedFD will report the same
+ * get() value. Callers can perform operations on the file descriptor, but
+ * shall never close it manually.
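+ *
+ * The two construction modes can be illustrated as follows (the open() call
+ * is only an example source of a file descriptor):
+ *
+ * \code{.cpp}
+ * int fd = open("/dev/video0", O_RDWR);
+ *
+ * SharedFD duplicated(fd);         // fd is duplicated, the caller keeps ownership
+ * SharedFD adopted(std::move(fd)); // fd is taken over and set to -1
+ * \endcode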
+ */
+
+/**
+ * \brief Create a SharedFD copying a given \a fd
+ * \param[in] fd File descriptor
+ *
+ * Construct a SharedFD from a numerical file descriptor by duplicating the
+ * \a fd, and take ownership of the copy. The original \a fd is left untouched,
+ * and the caller is responsible for closing it when appropriate. The duplicated
+ * file descriptor will be closed automatically when all SharedFD instances that
+ * reference it are destroyed.
+ *
+ * If the \a fd is negative, the SharedFD is constructed as invalid and the
+ * get() function will return -1.
+ */
+SharedFD::SharedFD(const int &fd)
+{
+ if (fd < 0)
+ return;
+
+ fd_ = std::make_shared<Descriptor>(fd, true);
+ if (fd_->fd() < 0)
+ fd_.reset();
+}
+
+/**
+ * \brief Create a SharedFD taking ownership of a given \a fd
+ * \param[in] fd File descriptor
+ *
+ * Construct a SharedFD from a numerical file descriptor by taking ownership of
+ * the \a fd. The original \a fd is set to -1 and shall not be touched by the
+ * caller anymore. In particular, the caller shall not close the original \a fd
+ * manually. The duplicated file descriptor will be closed automatically when
+ * all SharedFD instances that reference it are destroyed.
+ *
+ * If the \a fd is negative, the SharedFD is constructed as invalid and the
+ * get() function will return -1.
+ */
+SharedFD::SharedFD(int &&fd)
+{
+ if (fd < 0)
+ return;
+
+ fd_ = std::make_shared<Descriptor>(fd, false);
+ /*
+ * The Descriptor constructor can't have failed here, as it took over
+ * the fd without duplicating it. Just set the original fd to -1 to
+ * implement move semantics.
+ */
+ fd = -1;
+}
+
+/**
+ * \brief Create a SharedFD taking ownership of a given UniqueFD \a fd
+ * \param[in] fd UniqueFD
+ *
+ * Construct a SharedFD from UniqueFD by taking ownership of the \a fd. The
+ * original \a fd becomes invalid.
+ */
+SharedFD::SharedFD(UniqueFD fd)
+ : SharedFD(fd.release())
+{
+}
+
+/**
+ * \brief Copy constructor, create a SharedFD from a copy of \a other
+ * \param[in] other The other SharedFD
+ *
+ * Copying a SharedFD implicitly shares ownership of the wrapped file
+ * descriptor. The original SharedFD is left untouched, and the caller is
+ * responsible for destroying it when appropriate. The wrapped file descriptor
+ * will be closed automatically when all SharedFD instances that reference it
+ * are destroyed.
+ */
+SharedFD::SharedFD(const SharedFD &other)
+ : fd_(other.fd_)
+{
+}
+
+/**
+ * \brief Move constructor, create a SharedFD by taking over \a other
+ * \param[in] other The other SharedFD
+ *
+ * Moving a SharedFD moves the reference to the wrapped descriptor owned by
+ * \a other to the new SharedFD. The \a other SharedFD is invalidated and its
+ * get() function will return -1. The wrapped file descriptor will be closed
+ * automatically when all SharedFD instances that reference it are destroyed.
+ */
+SharedFD::SharedFD(SharedFD &&other)
+ : fd_(std::move(other.fd_))
+{
+}
+
+/**
+ * \brief Destroy the SharedFD instance
+ *
+ * Destroying a SharedFD instance releases its reference to the wrapped
+ * descriptor, if any. When the last instance that references a wrapped
+ * descriptor is destroyed, the file descriptor is automatically closed.
+ */
+SharedFD::~SharedFD()
+{
+}
+
+/**
+ * \brief Copy assignment operator, replace the wrapped file descriptor with a
+ * copy of \a other
+ * \param[in] other The other SharedFD
+ *
+ * Copying a SharedFD creates a new reference to the wrapped file descriptor
+ * owned by \a other. If \a other is invalid, *this will also be invalid. The
+ * original SharedFD is left untouched, and the caller is responsible for
+ * destroying it when appropriate. The wrapped file descriptor will be closed
+ * automatically when all SharedFD instances that reference it are destroyed.
+ *
+ * \return A reference to this SharedFD
+ */
+SharedFD &SharedFD::operator=(const SharedFD &other)
+{
+ fd_ = other.fd_;
+
+ return *this;
+}
+
+/**
+ * \brief Move assignment operator, replace the wrapped file descriptor by
+ * taking over \a other
+ * \param[in] other The other SharedFD
+ *
+ * Moving a SharedFD moves the reference to the wrapped descriptor owned by
+ * \a other to the new SharedFD. If \a other is invalid, *this will also be
+ * invalid. The \a other SharedFD is invalidated and its get() function will
+ * return -1. The wrapped file descriptor will be closed automatically when
+ * all SharedFD instances that reference it are destroyed.
+ *
+ * \return A reference to this SharedFD
+ */
+SharedFD &SharedFD::operator=(SharedFD &&other)
+{
+ fd_ = std::move(other.fd_);
+
+ return *this;
+}
+
+/**
+ * \fn SharedFD::isValid()
+ * \brief Check if the SharedFD instance is valid
+ * \return True if the SharedFD is valid, false otherwise
+ */
+
+/**
+ * \fn SharedFD::get()
+ * \brief Retrieve the numerical file descriptor
+ * \return The numerical file descriptor, which may be -1 if the SharedFD
+ * instance is invalid
+ */
+
+/**
+ * \fn bool operator==(const SharedFD &lhs, const SharedFD &rhs)
+ * \brief Compare the owned file descriptors of two SharedFD for equality
+ * \param[in] lhs The first SharedFD
+ * \param[in] rhs The second SharedFD
+ *
+ * Two file descriptors are considered equal if they have the same numerical
+ * value. File descriptors with different values that both reference the same
+ * file (for instance obtained using dup()) are considered not equal.
+ *
+ * \return True if the two file descriptors are equal, false otherwise
+ */
+
+/**
+ * \fn bool operator!=(const SharedFD &lhs, const SharedFD &rhs)
+ * \brief Compare the owned file descriptors of two SharedFD for equality
+ * \param[in] lhs The first SharedFD
+ * \param[in] rhs The second SharedFD
+ *
+ * Two file descriptors are considered equal if they have the same numerical
+ * value. File descriptors with different values that both reference the same
+ * file (for instance obtained using dup()) are considered not equal.
+ *
+ * \return True if the two file descriptors are not equal, false otherwise
+ */
+
+/**
+ * \brief Duplicate a SharedFD
+ *
+ * Duplicating a SharedFD creates a duplicate of the wrapped file descriptor and
+ * returns a UniqueFD that owns the duplicate. The get() function of the
+ * original and of the duplicate will return different values. The
+ * duplicate instance will not be affected by destruction of the original
+ * instance or its copies.
+ *
+ * \return A UniqueFD owning a duplicate of the original file descriptor
+ */
+UniqueFD SharedFD::dup() const
+{
+ if (!isValid())
+ return {};
+
+ UniqueFD dupFd(::dup(get()));
+ if (!dupFd.isValid()) {
+ int ret = -errno;
+ LOG(SharedFD, Error)
+ << "Failed to dup() fd: " << strerror(-ret);
+ }
+
+ return dupFd;
+}
+
+SharedFD::Descriptor::Descriptor(int fd, bool duplicate)
+{
+ if (!duplicate) {
+ fd_ = fd;
+ return;
+ }
+
+ /* Failing to dup() a fd should not happen and is fatal. */
+ fd_ = ::dup(fd);
+ if (fd_ == -1) {
+ int ret = -errno;
+ LOG(SharedFD, Fatal)
+ << "Failed to dup() fd: " << strerror(-ret);
+ }
+}
+
+SharedFD::Descriptor::~Descriptor()
+{
+ if (fd_ != -1)
+ close(fd_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/signal.cpp b/src/libcamera/base/signal.cpp
new file mode 100644
index 00000000..b782e050
--- /dev/null
+++ b/src/libcamera/base/signal.cpp
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Signal & slot implementation
+ */
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
+
+/**
+ * \file base/signal.h
+ * \brief Signal & slot implementation
+ */
+
+namespace libcamera {
+
+namespace {
+
+/*
+ * Mutex to protect the SignalBase::slots_ and Object::signals_ lists. If lock
+ * contention needs to be decreased, this could be replaced with locks in
+ * Object and SignalBase, or with a mutex pool.
+ */
+Mutex signalsLock;
+
+} /* namespace */
+
+void SignalBase::connect(BoundMethodBase *slot)
+{
+ MutexLocker locker(signalsLock);
+
+ Object *object = slot->object();
+ if (object)
+ object->connect(this);
+ slots_.push_back(slot);
+}
+
+void SignalBase::disconnect(Object *object)
+{
+ disconnect([object](SlotList::iterator &iter) {
+ return (*iter)->match(object);
+ });
+}
+
+void SignalBase::disconnect(std::function<bool(SlotList::iterator &)> match)
+{
+ MutexLocker locker(signalsLock);
+
+ for (auto iter = slots_.begin(); iter != slots_.end(); ) {
+ if (match(iter)) {
+ Object *object = (*iter)->object();
+ if (object)
+ object->disconnect(this);
+
+ delete *iter;
+ iter = slots_.erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
+
+SignalBase::SlotList SignalBase::slots()
+{
+ MutexLocker locker(signalsLock);
+ return slots_;
+}
+
+/**
+ * \class Signal
+ * \brief Generic signal and slot communication mechanism
+ *
+ * Signals and slots are a language construct aimed at communication between
+ * objects through the observer pattern without the need for boilerplate code.
+ * See http://doc.qt.io/qt-6/signalsandslots.html for more information.
+ *
+ * Signals model events that can be observed from objects unrelated to the event
+ * source. Slots are functions that are called in response to a signal. Signals
+ * can be connected to and disconnected from slots dynamically at runtime. When
+ * a signal is emitted, all connected slots are called sequentially in the order
+ * they have been connected.
+ *
+ * Signals are defined with zero, one or more typed parameters. They are emitted
+ * with a value for each of the parameters, and those values are passed to the
+ * connected slots.
+ *
+ * Slots are normal static or class member functions. In order to be connected
+ * to a signal, their signature must match the signal type (taking the same
+ * arguments as the signal and returning void).
+ *
+ * Connecting a signal to a slot results in the slot being called with the
+ * arguments passed to the emit() function when the signal is emitted. Multiple
+ * slots can be connected to the same signal, and multiple signals can be
+ * connected to the same slot.
+ *
+ * When a slot belongs to an instance of the Object class, the slot is called
+ * in the context of the thread that the object is bound to. If the signal is
+ * emitted from the same thread, the slot will be called synchronously, before
+ * Signal::emit() returns. If the signal is emitted from a different thread,
+ * the slot will be called asynchronously from the object's thread's event
+ * loop, after the Signal::emit() function returns, with a copy of the signal's
+ * arguments. The emitter shall thus ensure that any pointer or reference
+ * passed through the signal will remain valid after the signal is emitted.
+ *
+ * Duplicate connections between a signal and a slot are not expected and use of
+ * the Object class to manage signals will enforce this restriction.
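+ *
+ * As a brief sketch, with a hypothetical Receiver class that derives from
+ * Object and exposes an onEvent() slot:
+ *
+ * \code{.cpp}
+ * Signal<int> event;
+ * Receiver receiver;
+ *
+ * event.connect(&receiver, &Receiver::onEvent);
+ * event.emit(42);	// Calls receiver.onEvent(42)
+ * \endcode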
+ */
+
+/**
+ * \fn Signal::connect(T *object, R (T::*func)(Args...))
+ * \brief Connect the signal to a member function slot
+ * \param[in] object The slot object pointer
+ * \param[in] func The slot member function
+ *
+ * If the typename T inherits from Object, the signal will be automatically
+ * disconnected from the \a func slot of \a object when \a object is destroyed.
+ * Otherwise the caller shall disconnect signals manually before destroying \a
+ * object.
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::connect(T *object, Func func)
+ * \brief Connect the signal to a function object slot
+ * \param[in] object The slot object pointer
+ * \param[in] func The function object
+ *
+ * If the typename T inherits from Object, the signal will be automatically
+ * disconnected from the \a func slot of \a object when \a object is destroyed.
+ * Otherwise the caller shall disconnect signals manually before destroying \a
+ * object.
+ *
+ * The function object is typically a lambda function, but may be any object
+ * that satisfies the FunctionObject named requirements. The types of the
+ * function object arguments shall match the types of the signal arguments.
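+ *
+ * For instance, for a signal carrying an int (a sketch, \a receiver being a
+ * hypothetical Object-derived instance):
+ *
+ * \code{.cpp}
+ * Signal<int> event;
+ * event.connect(&receiver, [&](int value) { ... });
+ * \endcode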
+ *
+ * No matching disconnect() function exists, as it wouldn't be possible to pass
+ * to a disconnect() function the same lambda that was passed to connect(). The
+ * connection created by this function can not be removed selectively if the
+ * signal is connected to multiple slots of the same receiver, but may
+ * otherwise be removed using the disconnect(T *object) function.
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::connect(R (*func)(Args...))
+ * \brief Connect the signal to a static function slot
+ * \param[in] func The slot static function
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::disconnect()
+ * \brief Disconnect the signal from all slots
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::disconnect(T *object)
+ * \brief Disconnect the signal from all slots of the \a object
+ * \param[in] object The object pointer whose slots to disconnect
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::disconnect(T *object, R (T::*func)(Args...))
+ * \brief Disconnect the signal from the \a object slot member function \a func
+ * \param[in] object The object pointer whose slots to disconnect
+ * \param[in] func The slot member function to disconnect
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::disconnect(R (*func)(Args...))
+ * \brief Disconnect the signal from the slot static function \a func
+ * \param[in] func The slot static function to disconnect
+ *
+ * \context This function is \threadsafe.
+ */
+
+/**
+ * \fn Signal::emit(Args... args)
+ * \brief Emit the signal and call all connected slots
+ * \param[in] args The arguments passed to the connected slots
+ *
+ * Emitting a signal calls all connected slots synchronously and sequentially in
+ * the order the slots have been connected. The arguments passed to the emit()
+ * function are passed to the slot functions unchanged. If a slot modifies one
+ * of the arguments (when passed by pointer or reference), the modification is
+ * thus visible to all subsequently called slots.
+ *
+ * This function is not \threadsafe, but thread-safety is guaranteed against
+ * concurrent connect() and disconnect() calls.
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
new file mode 100644
index 00000000..319bfda9
--- /dev/null
+++ b/src/libcamera/base/thread.cpp
@@ -0,0 +1,717 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Thread support
+ */
+
+#include <libcamera/base/thread.h>
+
+#include <atomic>
+#include <list>
+#include <optional>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/event_dispatcher_poll.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/object.h>
+
+/**
+ * \page thread Thread Support
+ *
+ * libcamera supports multi-threaded applications through a threading model that
+ * sets precise rules to guarantee thread-safe usage of the API. Additionally,
+ * libcamera makes internal use of threads, and offers APIs that simplify
+ * interactions with application threads. Careful compliance with the threading
+ * model avoids race conditions.
+ *
+ * Every thread created by libcamera is associated with an instance of the
+ * Thread class. Those threads run an internal event loop by default to
+ * dispatch events to objects. Additionally, the main thread of the application
+ * (defined as the thread that calls CameraManager::start()) is also associated
+ * with a Thread instance, but has no event loop accessible to libcamera. Other
+ * application threads are not visible to libcamera.
+ *
+ * \section thread-objects Threads and Objects
+ *
+ * Instances of the Object class and all its derived classes are thread-aware
+ * and are bound to the thread they are created in. They are said to *live* in
+ * a thread, and they interact with the event loop of their thread for the
+ * purpose of message passing and signal delivery. Messages posted to the
+ * object with Object::postMessage() will be delivered from the event loop of
+ * the thread that the object lives in. Signals delivered to the object, unless
+ * explicitly connected with ConnectionTypeDirect, will also be delivered from
+ * the object thread's event loop.
+ *
+ * All Object instances created internally by libcamera are bound to internal
+ * threads. As objects interact with thread event loops for proper operation,
+ * creating an Object instance in a thread that has no internal event loop (such
+ * as the main application thread, or libcamera threads that have a custom main
+ * loop) prevents some features of the Object class from being used. See
+ * Thread::exec() for more details.
+ *
+ * \section thread-signals Threads and Signals
+ *
+ * When sent to a receiver that does not inherit from the Object class, signals
+ * are delivered synchronously in the thread of the sender. When the receiver
+ * inherits from the Object class, delivery is by default asynchronous if the
+ * sender and receiver live in different threads. In that case, the signal is
+ * posted to the receiver's message queue and will be delivered from the
+ * receiver's event loop, running in the receiver's thread. This mechanism can
+ * be overridden by selecting a different connection type when calling
+ * Signal::connect().
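+ *
+ * For example, to force synchronous delivery regardless of the receiver's
+ * thread (a sketch, \a receiver and its slot being hypothetical):
+ *
+ * \code{.cpp}
+ * event.connect(&receiver, &Receiver::onEvent, ConnectionTypeDirect);
+ * \endcode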
+ */
+
+/**
+ * \file base/thread.h
+ * \brief Thread support
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Thread)
+
+class ThreadMain;
+
+/**
+ * \brief A queue of posted messages
+ */
+class MessageQueue
+{
+public:
+ /**
+ * \brief List of queued Message instances
+ */
+ std::list<std::unique_ptr<Message>> list_;
+ /**
+ * \brief Protects the \ref list_
+ */
+ Mutex mutex_;
+ /**
+ * \brief The recursion level for recursive Thread::dispatchMessages()
+ * calls
+ */
+ unsigned int recursion_ = 0;
+};
+
+/**
+ * \brief Thread-local internal data
+ */
+class ThreadData
+{
+public:
+ ThreadData()
+ : thread_(nullptr), running_(false), dispatcher_(nullptr)
+ {
+ }
+
+ static ThreadData *current();
+
+private:
+ friend class Thread;
+ friend class ThreadMain;
+
+ Thread *thread_;
+ bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ pid_t tid_;
+
+ Mutex mutex_;
+
+ std::atomic<EventDispatcher *> dispatcher_;
+
+ ConditionVariable cv_;
+ std::atomic<bool> exit_;
+ int exitCode_;
+
+ MessageQueue messages_;
+
+ std::optional<cpu_set_t> cpuset_;
+};
+
+/**
+ * \brief Thread wrapper for the main thread
+ */
+class ThreadMain : public Thread
+{
+public:
+ ThreadMain()
+ {
+ data_->running_ = true;
+ }
+
+protected:
+ void run() override
+ {
+ LOG(Thread, Fatal) << "The main thread can't be restarted";
+ }
+};
+
+static thread_local ThreadData *currentThreadData = nullptr;
+static ThreadMain mainThread;
+
+/**
+ * \brief Retrieve thread-local internal data for the current thread
+ * \return The thread-local internal data for the current thread
+ */
+ThreadData *ThreadData::current()
+{
+ if (currentThreadData)
+ return currentThreadData;
+
+ /*
+	 * The main thread doesn't receive thread-local data when it is
+	 * started, so set it here.
+ */
+ ThreadData *data = mainThread.data_;
+ data->tid_ = syscall(SYS_gettid);
+ currentThreadData = data;
+ return data;
+}
+
+/**
+ * \class Thread
+ * \brief A thread of execution
+ *
+ * The Thread class is a wrapper around std::thread that handles integration
+ * with the Object, Signal and EventDispatcher classes.
+ *
+ * Thread instances by default run an event loop until the exit() function is
+ * called. The event loop dispatches events (messages, notifiers and timers)
+ * sent to the objects living in the thread. This behaviour can be modified by
+ * overriding the run() function.
+ *
+ * \section thread-stop Stopping Threads
+ *
+ * Threads can't be forcibly stopped. Instead, a thread user first requests the
+ * thread to exit and then waits for the thread's main function to react to the
+ * request and return, at which point the thread will stop.
+ *
+ * For threads running exec(), the exit() function is used to request the thread
+ * to exit. For threads subclassing the Thread class and implementing a custom
+ * run() function, a subclass-specific mechanism shall be provided. In either
+ * case, the wait() function shall be called to wait for the thread to stop.
+ *
+ * Due to their asynchronous nature, threads are subject to race conditions when
+ * they stop. This is of particular importance for messages posted to the thread
+ * with postMessage() (and the other mechanisms that rely on it, such as
+ * Object::invokeMethod() or asynchronous signal delivery). To understand the
+ * issues, three contexts need to be considered:
+ *
+ * - The worker is the Thread performing work and being instructed to stop.
+ * - The controller is the context which instructs the worker thread to stop.
+ * - The other contexts are any threads other than the worker and controller
+ * that interact with the worker thread.
+ *
+ * Messages posted to the worker thread from the controller context before
+ * calling exit() are queued to the thread's message queue, and the Thread class
+ * offers no guarantee that those messages will be processed before the thread
+ * stops. This allows threads to stop quickly.
+ *
+ * A thread that requires delivery of messages posted from the controller
+ * context before exit() should reimplement the run() function and call
+ * dispatchMessages() after exec().
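+ *
+ * For instance (a sketch, WorkerThread being a hypothetical Thread subclass):
+ *
+ * \code{.cpp}
+ * void WorkerThread::run()
+ * {
+ *	exec();
+ *	dispatchMessages();
+ * }
+ * \endcode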
+ *
+ * Messages posted to the worker thread from the other contexts are asynchronous
+ * with respect to the exit() call from the controller context. There is no
+ * guarantee as to whether those messages will be processed or not before the
+ * thread stops.
+ *
+ * Messages that are not processed will stay in the queue, in the exact same way
+ * as messages posted after the thread has stopped. They will be processed when
+ * the thread is restarted. If the thread is never restarted, they will be
+ * deleted without being processed when the Thread instance is destroyed.
+ */
+
+/**
+ * \brief Create a thread
+ */
+Thread::Thread()
+{
+ data_ = new ThreadData;
+ data_->thread_ = this;
+}
+
+Thread::~Thread()
+{
+ delete data_->dispatcher_.load(std::memory_order_relaxed);
+ delete data_;
+}
+
+/**
+ * \brief Start the thread
+ */
+void Thread::start()
+{
+ MutexLocker locker(data_->mutex_);
+
+ if (data_->running_)
+ return;
+
+ data_->running_ = true;
+ data_->exitCode_ = -1;
+ data_->exit_.store(false, std::memory_order_relaxed);
+
+ thread_ = std::thread(&Thread::startThread, this);
+
+ setThreadAffinityInternal();
+}
+
+void Thread::startThread()
+{
+ struct ThreadCleaner {
+ ThreadCleaner(Thread *thread, void (Thread::*cleaner)())
+ : thread_(thread), cleaner_(cleaner)
+ {
+ }
+ ~ThreadCleaner()
+ {
+ (thread_->*cleaner_)();
+ }
+
+ Thread *thread_;
+ void (Thread::*cleaner_)();
+ };
+
+ /*
+ * Make sure the thread is cleaned up even if the run() function exits
+ * abnormally (for instance via a direct call to pthread_cancel()).
+ */
+ thread_local ThreadCleaner cleaner(this, &Thread::finishThread);
+
+ data_->tid_ = syscall(SYS_gettid);
+ currentThreadData = data_;
+
+ run();
+}
+
+/**
+ * \brief Enter the event loop
+ *
+ * This function enters an event loop based on the event dispatcher instance for
+ * the thread, and blocks until the exit() function is called. It is meant to be
+ * called within the thread from the run() function and shall not be called
+ * outside of the thread.
+ *
+ * \return The exit code passed to the exit() function
+ */
+int Thread::exec()
+{
+ MutexLocker locker(data_->mutex_);
+
+ EventDispatcher *dispatcher = eventDispatcher();
+
+ locker.unlock();
+
+ while (!data_->exit_.load(std::memory_order_acquire))
+ dispatcher->processEvents();
+
+ locker.lock();
+
+ return data_->exitCode_;
+}
+
+/**
+ * \brief Main function of the thread
+ *
+ * When the thread is started with start(), it calls this function in the
+ * context of the new thread. The run() function can be overridden to perform
+ * custom work, either custom initialization and cleanup before and after
+ * calling the Thread::exec() function, or a custom thread loop altogether. When
+ * this function returns the thread execution is stopped, and the \ref finished
+ * signal is emitted.
+ *
+ * Note that if this function is overridden and doesn't call Thread::exec(), no
+ * events will be dispatched to the objects living in the thread. These objects
+ * will not be able to use the EventNotifier, Timer or Message facilities. This
+ * includes functions that rely on message dispatching, such as
+ * Object::deleteLater().
+ *
+ * The base implementation just calls exec().
+ */
+void Thread::run()
+{
+ exec();
+}
+
+void Thread::finishThread()
+{
+ /*
+ * Objects may have been scheduled for deletion right before the thread
+ * exited. Ensure they get deleted now, before the thread stops.
+ */
+ dispatchMessages(Message::Type::DeferredDelete);
+
+ data_->mutex_.lock();
+ data_->running_ = false;
+ data_->mutex_.unlock();
+
+ finished.emit();
+ data_->cv_.notify_all();
+}
+
+/**
+ * \brief Stop the thread's event loop
+ * \param[in] code The exit code
+ *
+ * This function interrupts the event loop started by the exec() function,
+ * causing exec() to return \a code.
+ *
+ * Calling exit() on a thread that reimplements the run() function and doesn't
+ * call exec() will likely have no effect.
+ *
+ * \context This function is \threadsafe.
+ */
+void Thread::exit(int code)
+{
+ data_->exitCode_ = code;
+ data_->exit_.store(true, std::memory_order_release);
+
+ EventDispatcher *dispatcher = data_->dispatcher_.load(std::memory_order_relaxed);
+ if (!dispatcher)
+ return;
+
+ dispatcher->interrupt();
+}
+
+/**
+ * \brief Wait for the thread to finish
+ * \param[in] duration Maximum wait duration
+ *
+ * This function waits until the thread finishes or the \a duration has
+ * elapsed, whichever happens first. If \a duration is equal to
+ * utils::duration::max(), the wait never times out. If the thread is not
+ * running the function returns immediately.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \return True if the thread has finished, or false if the wait timed out
+ */
+bool Thread::wait(utils::duration duration)
+{
+ bool hasFinished = true;
+
+ {
+ MutexLocker locker(data_->mutex_);
+
+ auto isRunning = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
+ return !data_->running_;
+ });
+
+ if (duration == utils::duration::max())
+ data_->cv_.wait(locker, isRunning);
+ else
+ hasFinished = data_->cv_.wait_for(locker, duration,
+ isRunning);
+ }
+
+ if (thread_.joinable())
+ thread_.join();
+
+ return hasFinished;
+}
+
+/**
+ * \brief Set the CPU affinity mask of the thread
+ * \param[in] cpus The list of CPU indices to set the thread's affinity to
+ *
+ * The CPU indices should be within [0, std::thread::hardware_concurrency()).
+ * If any index is invalid, this function won't modify the thread affinity and
+ * will return an error.
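+ *
+ * For example, to restrict a thread to CPUs 0 and 2 (a sketch, assuming both
+ * indices are valid on the target system):
+ *
+ * \code{.cpp}
+ * const unsigned int cpus[] = { 0, 2 };
+ * thread.setThreadAffinity(cpus);
+ * \endcode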
+ *
+ * \return 0 if all indices are valid, -EINVAL otherwise
+ */
+int Thread::setThreadAffinity(const Span<const unsigned int> &cpus)
+{
+ const unsigned int numCpus = std::thread::hardware_concurrency();
+
+ MutexLocker locker(data_->mutex_);
+ data_->cpuset_ = cpu_set_t();
+ CPU_ZERO(&data_->cpuset_.value());
+
+ for (const unsigned int &cpu : cpus) {
+ if (cpu >= numCpus) {
+			LOG(Thread, Error) << "Invalid CPU " << cpu << " for thread affinity";
+ return -EINVAL;
+ }
+
+ CPU_SET(cpu, &data_->cpuset_.value());
+ }
+
+ if (data_->running_)
+ setThreadAffinityInternal();
+
+ return 0;
+}
+
+void Thread::setThreadAffinityInternal()
+{
+ if (!data_->cpuset_)
+ return;
+
+ const cpu_set_t &cpuset = data_->cpuset_.value();
+ pthread_setaffinity_np(thread_.native_handle(), sizeof(cpuset), &cpuset);
+}
+
+/**
+ * \brief Check if the thread is running
+ *
+ * A Thread instance is considered to be running once the underlying thread has
+ * started. This function guarantees that it returns true after the start()
+ * function returns, and false after the wait() function returns.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \return True if the thread is running, false otherwise
+ */
+bool Thread::isRunning()
+{
+ MutexLocker locker(data_->mutex_);
+ return data_->running_;
+}
+
+/**
+ * \var Thread::finished
+ * \brief Signal the end of thread execution
+ */
+
+/**
+ * \brief Retrieve the Thread instance for the current thread
+ * \context This function is \threadsafe.
+ * \return The Thread instance for the current thread
+ */
+Thread *Thread::current()
+{
+ ThreadData *data = ThreadData::current();
+ return data->thread_;
+}
+
+/**
+ * \brief Retrieve the ID of the current thread
+ *
+ * The thread ID corresponds to the Linux thread ID (TID) as returned by the
+ * gettid system call.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \return The ID of the current thread
+ */
+pid_t Thread::currentId()
+{
+ ThreadData *data = ThreadData::current();
+ return data->tid_;
+}
+
+/**
+ * \brief Retrieve the event dispatcher
+ *
+ * This function retrieves the internal event dispatcher for the thread. The
+ * returned event dispatcher is valid until the thread is destroyed.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \return Pointer to the event dispatcher
+ */
+EventDispatcher *Thread::eventDispatcher()
+{
+ if (!data_->dispatcher_.load(std::memory_order_relaxed))
+ data_->dispatcher_.store(new EventDispatcherPoll(),
+ std::memory_order_release);
+
+ return data_->dispatcher_.load(std::memory_order_relaxed);
+}
+
+/**
+ * \brief Post a message to the thread for the \a receiver
+ * \param[in] msg The message
+ * \param[in] receiver The receiver
+ *
+ * This function stores the message \a msg in the message queue of the thread
+ * for the \a receiver and wakes up the thread's event loop. Message ownership is
+ * passed to the thread, and the message will be deleted after being delivered.
+ *
+ * Messages are delivered through the thread's event loop. If the thread is not
+ * running its event loop the message will not be delivered until the event
+ * loop gets started.
+ *
+ * When the thread is stopped, posted messages may not have all been processed.
+ * See \ref thread-stop for additional information.
+ *
+ * If the \a receiver is not bound to this thread the behaviour is undefined.
+ *
+ * \context This function is \threadsafe.
+ *
+ * \sa exec()
+ */
+void Thread::postMessage(std::unique_ptr<Message> msg, Object *receiver)
+{
+ msg->receiver_ = receiver;
+
+ ASSERT(data_ == receiver->thread()->data_);
+
+ MutexLocker locker(data_->messages_.mutex_);
+ data_->messages_.list_.push_back(std::move(msg));
+ receiver->pendingMessages_++;
+ locker.unlock();
+
+ EventDispatcher *dispatcher =
+ data_->dispatcher_.load(std::memory_order_acquire);
+ if (dispatcher)
+ dispatcher->interrupt();
+}
+
+/**
+ * \brief Remove all posted messages for the \a receiver
+ * \param[in] receiver The receiver
+ *
+ * If the \a receiver is not bound to this thread the behaviour is undefined.
+ */
+void Thread::removeMessages(Object *receiver)
+{
+ ASSERT(data_ == receiver->thread()->data_);
+
+ MutexLocker locker(data_->messages_.mutex_);
+ if (!receiver->pendingMessages_)
+ return;
+
+ std::vector<std::unique_ptr<Message>> toDelete;
+ for (std::unique_ptr<Message> &msg : data_->messages_.list_) {
+ if (!msg)
+ continue;
+ if (msg->receiver_ != receiver)
+ continue;
+
+ /*
+ * Move the message to the pending deletion list to delete it
+ * after releasing the lock. The messages list element will
+ * contain a null pointer, and will be removed when dispatching
+ * messages.
+ */
+ toDelete.push_back(std::move(msg));
+ receiver->pendingMessages_--;
+ }
+
+ ASSERT(!receiver->pendingMessages_);
+ locker.unlock();
+
+ toDelete.clear();
+}
+
+/**
+ * \brief Dispatch posted messages for this thread
+ * \param[in] type The message type
+ *
+ * This function immediately dispatches all the messages previously posted for
+ * this thread with postMessage() that match the message \a type. If the \a type
+ * is Message::Type::None, all messages are dispatched.
+ *
+ * Messages shall only be dispatched from the current thread, typically within
+ * the thread from the run() function. Calling this function outside of the
+ * thread results in undefined behaviour.
+ *
+ * This function is not thread-safe, but it may be called recursively in the
+ * same thread from an object's message handler. It guarantees delivery of
+ * messages in the order they have been posted in all cases.
+ */
+void Thread::dispatchMessages(Message::Type type)
+{
+ ASSERT(data_ == ThreadData::current());
+
+ ++data_->messages_.recursion_;
+
+ MutexLocker locker(data_->messages_.mutex_);
+
+ std::list<std::unique_ptr<Message>> &messages = data_->messages_.list_;
+
+ for (std::unique_ptr<Message> &msg : messages) {
+ if (!msg)
+ continue;
+
+ if (type != Message::Type::None && msg->type() != type)
+ continue;
+
+ /*
+ * Move the message, setting the entry in the list to null. It
+ * will cause recursive calls to ignore the entry, and the erase
+ * loop at the end of the function to delete it from the list.
+ */
+ std::unique_ptr<Message> message = std::move(msg);
+
+ Object *receiver = message->receiver_;
+ ASSERT(data_ == receiver->thread()->data_);
+ receiver->pendingMessages_--;
+
+ locker.unlock();
+ receiver->message(message.get());
+ message.reset();
+ locker.lock();
+ }
+
+ /*
+ * If the recursion level is 0, erase all null messages in the list. We
+ * can't do so during recursion, as it would invalidate the iterator of
+ * the outer calls.
+ */
+ if (!--data_->messages_.recursion_) {
+ for (auto iter = messages.begin(); iter != messages.end(); ) {
+ if (!*iter)
+ iter = messages.erase(iter);
+ else
+ ++iter;
+ }
+ }
+}
+
+/**
+ * \brief Move an \a object and all its children to the thread
+ * \param[in] object The object
+ */
+void Thread::moveObject(Object *object)
+{
+ ThreadData *currentData = object->thread_->data_;
+ ThreadData *targetData = data_;
+
+ MutexLocker lockerFrom(currentData->messages_.mutex_, std::defer_lock);
+ MutexLocker lockerTo(targetData->messages_.mutex_, std::defer_lock);
+ std::lock(lockerFrom, lockerTo);
+
+ moveObject(object, currentData, targetData);
+}
+
+void Thread::moveObject(Object *object, ThreadData *currentData,
+ ThreadData *targetData)
+{
+ /* Move pending messages to the message queue of the new thread. */
+ if (object->pendingMessages_) {
+ unsigned int movedMessages = 0;
+
+ for (std::unique_ptr<Message> &msg : currentData->messages_.list_) {
+ if (!msg)
+ continue;
+ if (msg->receiver_ != object)
+ continue;
+
+ targetData->messages_.list_.push_back(std::move(msg));
+ movedMessages++;
+ }
+
+ if (movedMessages) {
+ EventDispatcher *dispatcher =
+ targetData->dispatcher_.load(std::memory_order_acquire);
+ if (dispatcher)
+ dispatcher->interrupt();
+ }
+ }
+
+ object->thread_ = this;
+
+ /* Move all children. */
+ for (auto child : object->children_)
+ moveObject(child, currentData, targetData);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/timer.cpp b/src/libcamera/base/timer.cpp
new file mode 100644
index 00000000..7b0f3725
--- /dev/null
+++ b/src/libcamera/base/timer.cpp
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Generic timer
+ */
+
+#include <libcamera/base/timer.h>
+
+#include <chrono>
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+
+/**
+ * \file base/timer.h
+ * \brief Generic timer
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Timer)
+
+/**
+ * \class Timer
+ * \brief Single-shot timer interface
+ *
+ * The Timer class models a single-shot timer that is started with start() and
+ * emits the \ref timeout signal when it times out.
+ *
+ * Once started the timer will run until it times out. It can be stopped with
+ * stop(), and once it times out or is stopped, can be started again with
+ * start().
+ *
+ * The timer deadline is specified as either a duration in milliseconds or an
+ * absolute time point. If the deadline is set to the current time or to the
+ * past, the timer will time out immediately when execution returns to the
+ * event loop of the timer's thread.
+ *
+ * Timers run in the thread they belong to, and thus emit the \ref timeout
+ * signal from that thread. To avoid race conditions they must not be started
+ * or stopped from a different thread, attempts to do so will be rejected and
+ * logged, and may cause undefined behaviour.
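+ *
+ * A typical usage sketch, with a hypothetical \a handler object whose
+ * handleTimeout() slot matches the \ref timeout signal signature:
+ *
+ * \code{.cpp}
+ * Timer timer;
+ * timer.timeout.connect(&handler, &Handler::handleTimeout);
+ * timer.start(std::chrono::milliseconds(500));
+ * \endcode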
+ */
+
+/**
+ * \brief Construct a timer
+ * \param[in] parent The parent Object
+ */
+Timer::Timer(Object *parent)
+ : Object(parent), running_(false)
+{
+}
+
+Timer::~Timer()
+{
+ stop();
+}
+
+/**
+ * \brief Start or restart the timer with a timeout of \a duration
+ * \param[in] duration The timer duration in milliseconds
+ *
+ * If the timer is already running it will be stopped and restarted.
+ *
+ * \context This function is \threadbound.
+ */
+void Timer::start(std::chrono::milliseconds duration)
+{
+ start(utils::clock::now() + duration);
+}
+
+/**
+ * \brief Start or restart the timer with a \a deadline
+ * \param[in] deadline The timer deadline
+ *
+ * If the timer is already running it will be stopped and restarted.
+ *
+ * \context This function is \threadbound.
+ */
+void Timer::start(std::chrono::steady_clock::time_point deadline)
+{
+ if (!assertThreadBound("Timer can't be started from another thread"))
+ return;
+
+ deadline_ = deadline;
+
+ LOG(Timer, Debug)
+ << "Starting timer " << this << ": deadline "
+ << utils::time_point_to_string(deadline_);
+
+ if (isRunning())
+ unregisterTimer();
+
+ registerTimer();
+}
+
+/**
+ * \brief Stop the timer
+ *
+ * After this function returns the timer is guaranteed not to emit the
+ * \ref timeout signal.
+ *
+ * If the timer is not running this function performs no operation.
+ *
+ * \context This function is \threadbound.
+ */
+void Timer::stop()
+{
+ if (!assertThreadBound("Timer can't be stopped from another thread"))
+ return;
+
+ if (!isRunning())
+ return;
+
+ unregisterTimer();
+}
+
+void Timer::registerTimer()
+{
+ thread()->eventDispatcher()->registerTimer(this);
+ running_ = true;
+}
+
+void Timer::unregisterTimer()
+{
+ running_ = false;
+ thread()->eventDispatcher()->unregisterTimer(this);
+}
+
+/**
+ * \brief Check if the timer is running
+ * \return True if the timer is running, false otherwise
+ */
+bool Timer::isRunning() const
+{
+ return running_;
+}
+
+/**
+ * \fn Timer::deadline()
+ * \brief Retrieve the timer deadline
+ * \return The timer deadline
+ */
+
+/**
+ * \var Timer::timeout
+ * \brief Signal emitted when the timer times out
+ *
+ * The timer pointer is passed as a parameter.
+ */
+
+void Timer::message(Message *msg)
+{
+ if (msg->type() == Message::ThreadMoveMessage) {
+ if (isRunning()) {
+ unregisterTimer();
+ invokeMethod(&Timer::registerTimer,
+ ConnectionTypeQueued);
+ }
+ }
+
+ Object::message(msg);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/unique_fd.cpp b/src/libcamera/base/unique_fd.cpp
new file mode 100644
index 00000000..d0649e4d
--- /dev/null
+++ b/src/libcamera/base/unique_fd.cpp
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * File descriptor wrapper that owns a file descriptor
+ */
+
+#include <libcamera/base/unique_fd.h>
+
+#include <unistd.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file base/unique_fd.h
+ * \brief File descriptor wrapper that owns a file descriptor
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(UniqueFD)
+
+/**
+ * \class UniqueFD
+ * \brief unique_ptr-like wrapper for a file descriptor
+ *
+ * The UniqueFD is a wrapper that owns and manages the lifetime of a file
+ * descriptor. It is constructed from a numerical file descriptor, and takes
+ * over its ownership. The file descriptor is closed when the UniqueFD is
+ * destroyed, or when it is assigned another file descriptor with operator=()
+ * or reset().
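+ *
+ * A minimal sketch (the open() call and path are purely illustrative):
+ *
+ * \code{.cpp}
+ * UniqueFD fd(open("/dev/video0", O_RDWR));
+ * if (!fd.isValid())
+ *	return -errno;
+ *
+ * // fd is closed automatically when it goes out of scope
+ * \endcode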
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD()
+ * \brief Construct a UniqueFD that owns no file descriptor
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD(int fd)
+ * \brief Construct a UniqueFD that owns \a fd
+ * \param[in] fd A file descriptor to manage
+ */
+
+/**
+ * \fn UniqueFD::UniqueFD(UniqueFD &&other)
+ * \brief Move constructor, create a UniqueFD by taking over \a other
+ * \param[in] other The other UniqueFD
+ *
+ * Create a UniqueFD by transferring ownership of the file descriptor owned by
+ * \a other. Upon return, the \a other UniqueFD is invalid.
+ */
+
+/**
+ * \fn UniqueFD::~UniqueFD()
+ * \brief Destroy the UniqueFD instance
+ *
+ * If a file descriptor is owned, it is closed.
+ */
+
+/**
+ * \fn UniqueFD::operator=(UniqueFD &&other)
+ * \brief Move assignment operator, replace a UniqueFD by taking over \a other
+ * \param[in] other The other UniqueFD
+ *
+ * If this UniqueFD owns a file descriptor, the file descriptor is closed
+ * first. The file descriptor is then replaced by the one of \a other. Upon
+ * return, \a other is invalid.
+ *
+ * \return A reference to this UniqueFD
+ */
+
+/**
+ * \fn UniqueFD::release()
+ * \brief Release ownership of the file descriptor without closing it
+ *
+ * This function releases and returns the owned file descriptor without closing
+ * it. The caller owns the returned value and must take care of handling its
+ * lifetime to avoid file descriptor leaks. Upon return this UniqueFD is
+ * invalid.
+ *
+ * \return The managed file descriptor, or -1 if no file descriptor was owned
+ */
+
+/**
+ * \brief Replace the managed file descriptor
+ * \param[in] fd The new file descriptor to manage
+ *
+ * Close the managed file descriptor, if any, and replace it with the new \a fd.
+ *
+ * Self-resetting (passing an \a fd already managed by this instance) is invalid
+ * and results in undefined behaviour.
+ */
+void UniqueFD::reset(int fd)
+{
+ ASSERT(!isValid() || fd != fd_);
+
+ std::swap(fd, fd_);
+
+ if (fd >= 0)
+ close(fd);
+}
+
+/**
+ * \fn UniqueFD::swap(UniqueFD &other)
+ * \brief Swap the managed file descriptors with another UniqueFD
+ * \param[in] other Another UniqueFD to swap the file descriptor with
+ */
+
+/**
+ * \fn UniqueFD::get()
+ * \brief Retrieve the managed file descriptor
+ * \return The managed file descriptor, or -1 if no file descriptor is owned
+ */
+
+/**
+ * \fn UniqueFD::isValid()
+ * \brief Check if the UniqueFD owns a valid file descriptor
+ * \return True if the UniqueFD owns a valid file descriptor, false otherwise
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp
new file mode 100644
index 00000000..bcfc1941
--- /dev/null
+++ b/src/libcamera/base/utils.cpp
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Miscellaneous utility functions
+ */
+
+#include <libcamera/base/utils.h>
+
+#include <iomanip>
+#include <locale.h>
+#include <sstream>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/**
+ * \file base/utils.h
+ * \brief Miscellaneous utility functions
+ */
+
+namespace libcamera {
+
+namespace utils {
+
+/**
+ * \brief Strip the directory prefix from the path
+ * \param[in] path The path to process
+ *
+ * basename is implemented differently across different C libraries. This
+ * implementation matches the one provided by the GNU libc, and does not
+ * modify its input parameter.
+ *
+ * \return A pointer within the given path without any leading directory
+ * components.
+ */
+const char *basename(const char *path)
+{
+ const char *base = strrchr(path, '/');
+ return base ? base + 1 : path;
+}
+
+/**
+ * \brief Get an environment variable
+ * \param[in] name The name of the variable to return
+ *
+ * The environment list is searched to find the variable 'name', and the
+ * corresponding string is returned.
+ *
+ * If 'secure execution' is required then this function always returns NULL to
+ * avoid vulnerabilities that could occur if set-user-ID or set-group-ID
+ * programs accidentally trust the environment.
+ *
+ * \note Not all platforms may support the features required to implement the
+ * secure execution check, in which case this function behaves as getenv(). A
+ * notable example of this is Android.
+ *
+ * \return A pointer to the value in the environment or NULL if the requested
+ * environment variable doesn't exist or if secure execution is required.
+ */
+char *secure_getenv(const char *name)
+{
+#if HAVE_SECURE_GETENV
+ return ::secure_getenv(name);
+#else
+#if HAVE_ISSETUGID
+ if (issetugid())
+ return NULL;
+#endif
+ return getenv(name);
+#endif
+}
+
+/**
+ * \brief Identify the dirname portion of a path
+ * \param[in] path The full path to parse
+ *
+ * This function conforms with the behaviour of the %dirname() function as
+ * defined by POSIX.
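+ *
+ * For example:
+ *
+ * \code{.cpp}
+ * utils::dirname("/usr/lib/libcamera.so");	// Returns "/usr/lib"
+ * utils::dirname("libcamera.so");		// Returns "."
+ * utils::dirname("///");			// Returns "/"
+ * \endcode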
+ *
+ * \return A string of the directory component of the path
+ */
+std::string dirname(const std::string &path)
+{
+ if (path.empty())
+ return ".";
+
+ /*
+ * Skip all trailing slashes. If the path is only made of slashes,
+ * return "/".
+ */
+ size_t pos = path.size() - 1;
+ while (path[pos] == '/') {
+ if (!pos)
+ return "/";
+ pos--;
+ }
+
+ /*
+ * Find the previous slash. If the path contains no non-trailing slash,
+ * return ".".
+ */
+ while (path[pos] != '/') {
+ if (!pos)
+ return ".";
+ pos--;
+ }
+
+ /*
+ * Return the directory name up to (but not including) any trailing
+ * slash. If this would result in an empty string, return "/".
+ */
+ while (path[pos] == '/') {
+ if (!pos)
+ return "/";
+ pos--;
+ }
+
+ return path.substr(0, pos + 1);
+}
+
+/**
+ * \fn std::vector<typename T::key_type> map_keys(const T &map)
+ * \brief Retrieve the keys of a std::map<>
+ * \param[in] map The map whose keys to retrieve
+ * \return A std::vector<> containing the keys of \a map
+ */
+
+/**
+ * \fn libcamera::utils::set_overlap(InputIt1 first1, InputIt1 last1,
+ * InputIt2 first2, InputIt2 last2)
+ * \brief Count the number of elements in the intersection of two ranges
+ *
+ * Count the number of elements in the intersection of the sorted ranges [\a
+ * first1, \a last1) and [\a first2, \a last2). Elements are compared using
+ * operator< and the ranges must be sorted with respect to the same.
+ *
+ * \return The number of elements in the intersection of the two ranges
+ */
+
+/**
+ * \typedef clock
+ * \brief The libcamera clock (monotonic)
+ */
+
+/**
+ * \typedef duration
+ * \brief The libcamera duration related to libcamera::utils::clock
+ */
+
+/**
+ * \typedef time_point
+ * \brief The libcamera time point related to libcamera::utils::clock
+ */
+
+/**
+ * \brief Convert a duration to a timespec
+ * \param[in] value The duration
+ * \return A timespec expressing the duration
+ */
+struct timespec duration_to_timespec(const duration &value)
+{
+ uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(value).count();
+ struct timespec ts;
+ ts.tv_sec = nsecs / 1000000000ULL;
+ ts.tv_nsec = nsecs % 1000000000ULL;
+ return ts;
+}
+
+/**
+ * \brief Convert a time point to a string representation
+ * \param[in] time The time point
+ * \return A string representing the time point in hh:mm:ss.nanoseconds format
+ */
+std::string time_point_to_string(const time_point &time)
+{
+ uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch()).count();
+ unsigned int secs = nsecs / 1000000000ULL;
+
+ std::ostringstream ossTimestamp;
+ ossTimestamp.fill('0');
+ ossTimestamp << secs / (60 * 60) << ":"
+ << std::setw(2) << (secs / 60) % 60 << ":"
+ << std::setw(2) << secs % 60 << "."
+ << std::setw(9) << nsecs % 1000000000ULL;
+ return ossTimestamp.str();
+}
+
+std::basic_ostream<char, std::char_traits<char>> &
+operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h)
+{
+ stream << "0x";
+
+ std::ostream::fmtflags flags = stream.setf(std::ios_base::hex,
+ std::ios_base::basefield);
+ std::streamsize width = stream.width(h.w);
+ char fill = stream.fill('0');
+
+ stream << h.v;
+
+ stream.flags(flags);
+ stream.width(width);
+ stream.fill(fill);
+
+ return stream;
+}
+
+/**
+ * \fn hex(T value, unsigned int width)
+ * \brief Write a hexadecimal value to an output stream
+ * \param value The value
+ * \param width The width
+ *
+ * Return an object of unspecified type such that, if \a os is the name of an
+ * output stream of type std::ostream, and T is an integer type, then the
+ * expression
+ *
+ * \code{.cpp}
+ * os << utils::hex(value)
+ * \endcode
+ *
+ * will output the \a value to the stream in hexadecimal form with the base
+ * prefix and the filling character set to '0'. The field width is set to \a
+ * width if specified to a non-zero value, or to the native width of type T
+ * otherwise. The \a os stream configuration is not modified.
+ */
+
+/**
+ * \brief Copy a string with a size limit
+ * \param[in] dst The destination string
+ * \param[in] src The source string
+ * \param[in] size The size of the destination string
+ *
+ * This function copies the null-terminated string \a src to \a dst with a limit
+ * of \a size - 1 characters, and null-terminates the result if \a size is
+ * larger than 0. If \a src is longer than \a size - 1 characters, \a dst is truncated.
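+ *
+ * As with the BSD strlcpy(), truncation can be detected by comparing the
+ * return value against \a size (a sketch, \a name being a hypothetical source
+ * string):
+ *
+ * \code{.cpp}
+ * char buf[8];
+ * size_t len = utils::strlcpy(buf, name, sizeof(buf));
+ * bool truncated = len >= sizeof(buf);
+ * \endcode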
+ *
+ * \return The size of \a src
+ */
+size_t strlcpy(char *dst, const char *src, size_t size)
+{
+ if (size) {
+ strncpy(dst, src, size);
+ dst[size - 1] = '\0';
+ }
+
+ return strlen(src);
+}
+
+details::StringSplitter::StringSplitter(const std::string &str, const std::string &delim)
+ : str_(str), delim_(delim)
+{
+}
+
+details::StringSplitter::iterator::iterator(const details::StringSplitter *ss, std::string::size_type pos)
+ : ss_(ss), pos_(pos)
+{
+ next_ = ss_->str_.find(ss_->delim_, pos_);
+}
+
+details::StringSplitter::iterator &details::StringSplitter::iterator::operator++()
+{
+ pos_ = next_;
+ if (pos_ != std::string::npos) {
+ pos_ += ss_->delim_.length();
+ next_ = ss_->str_.find(ss_->delim_, pos_);
+ }
+
+ return *this;
+}
+
+std::string details::StringSplitter::iterator::operator*() const
+{
+ std::string::size_type count;
+ count = next_ != std::string::npos ? next_ - pos_ : next_;
+ return ss_->str_.substr(pos_, count);
+}
+
+/**
+ * \fn template<typename Container, typename UnaryOp> \
+ * std::string utils::join(const Container &items, const std::string &sep, UnaryOp op)
+ * \brief Join elements of a container in a string with a separator
+ * \param[in] items The container
+ * \param[in] sep The separator to add between elements
+ * \param[in] op A function that converts individual elements to strings
+ *
+ * This function joins all elements in the \a items container into a string and
+ * returns it. The \a sep separator is added between elements. If the container
+ * elements are not implicitly convertible to std::string, the \a op function
+ * shall be provided to perform conversion of elements to std::string.
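+ *
+ * For example, joining integers (a sketch):
+ *
+ * \code{.cpp}
+ * std::vector<int> values{ 1, 2, 3 };
+ * std::string s = utils::join(values, ", ",
+ *			       [](int i) { return std::to_string(i); });
+ * // s == "1, 2, 3"
+ * \endcode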
+ *
+ * \return A string that concatenates all elements in the container
+ */
+
+/**
+ * \fn split(const std::string &str, const std::string &delim)
+ * \brief Split a string based on a delimiter
+ * \param[in] str The string to split
+ * \param[in] delim The delimiter string
+ *
+ * This function splits the string \a str into substrings based on the
+ * delimiter \a delim. It returns an object of unspecified type that can be
+ * used in a range-based for loop and yields the substrings in sequence.
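+ *
+ * For instance (process() being a hypothetical consumer):
+ *
+ * \code{.cpp}
+ * for (const std::string &part : utils::split("a,b,c", ","))
+ *	process(part);	// Called with "a", "b" and "c" in turn
+ * \endcode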
+ *
+ * \return An object that can be used in a range-based for loop to iterate over
+ * the substrings
+ */
+details::StringSplitter split(const std::string &str, const std::string &delim)
+{
+ /** \todo Try to avoid copies of str and delim */
+ return details::StringSplitter(str, delim);
+}
+
+/**
+ * \brief Remove any non-ASCII characters from a string
+ * \param[in] str The string to strip
+ *
+ * Remove all non-ASCII characters from a string.
+ *
+ * \return A string equal to \a str stripped out of all non-ASCII characters
+ */
+std::string toAscii(const std::string &str)
+{
+ std::string ret;
+ for (const char &c : str)
+ if (!(c & 0x80))
+ ret += c;
+ return ret;
+}
+
+/**
+ * \fn alignDown(unsigned int value, unsigned int alignment)
+ * \brief Align \a value down to \a alignment
+ * \param[in] value The value to align
+ * \param[in] alignment The alignment
+ * \return The value rounded down to the nearest multiple of \a alignment
+ */
+
+/**
+ * \fn alignUp(unsigned int value, unsigned int alignment)
+ * \brief Align \a value up to \a alignment
+ * \param[in] value The value to align
+ * \param[in] alignment The alignment
+ * \return The value rounded up to the nearest multiple of \a alignment
+ */
+
+/**
+ * \fn reverse(T &&iterable)
+ * \brief Wrap an iterable to reverse iteration in a range-based loop
+ * \param[in] iterable The iterable
+ * \return A value of unspecified type that, when used in a range-based for
+ * loop, will cause the loop to iterate over the \a iterable in reverse order
+ */
+
+/**
+ * \fn enumerate(T &iterable)
+ * \brief Wrap an iterable to enumerate index and value in a range-based loop
+ * \param[in] iterable The iterable
+ *
+ * Range-based for loops are handy and widely preferred in C++, but are limited
+ * in their ability to replace for loops that require access to a loop counter.
+ * The enumerate() function solves this problem by wrapping the \a iterable in
+ * an adapter that, when used as a range-expression, will provide iterators
+ * whose value_type is a pair of index and value reference.
+ *
+ * The iterable must support std::begin() and std::end(). This includes all
+ * containers provided by the standard C++ library, as well as C-style arrays.
+ *
+ * A typical usage pattern would use structured binding to store the index and
+ * value in two separate variables:
+ *
+ * \code{.cpp}
+ * std::vector<int> values = ...;
+ *
+ * for (auto [index, value] : utils::enumerate(values)) {
+ * ...
+ * }
+ * \endcode
+ *
+ * Note that the argument to enumerate() has to be an lvalue, as the lifetime
+ * of any rvalue would not be extended to the whole for loop. The compiler will
+ * complain if an rvalue is passed to the function, in which case it should be
+ * stored in a local variable before the loop.
+ *
+ * \return A value of unspecified type that, when used in a range-based for
+ * loop, iterates over an indexed view of the \a iterable
+ */
+
+/**
+ * \class Duration
+ * \brief Helper class derived from std::chrono::duration that represents a time
+ * duration in nanoseconds with double precision
+ */
+
+/**
+ * \fn Duration::Duration(const Rep &r)
+ * \brief Construct a Duration with \a r ticks
+ * \param[in] r The number of ticks
+ *
+ * The constructed \a Duration object is internally represented in double
+ * precision with \a r nanoseconds ticks.
+ */
+
+/**
+ * \fn Duration::Duration(const std::chrono::duration<Rep, Period> &d)
+ * \brief Construct a Duration by converting an arbitrary std::chrono::duration
+ * \param[in] d The std::chrono::duration object to convert from
+ *
+ * The constructed \a Duration object is internally represented in double
+ * precision with nanoseconds ticks.
+ */
+
+/**
+ * \fn Duration::get<Period>()
+ * \brief Retrieve the tick count, converted to the timebase provided by the
+ * template argument Period of type \a std::ratio
+ *
+ * A typical usage example is given below:
+ *
+ * \code{.cpp}
+ * utils::Duration d = 5s;
+ * double d_in_ms = d.get<std::milli>();
+ * \endcode
+ *
+ * \return The tick count of the Duration expressed in \a Period
+ */
+
+/**
+ * \fn Duration::operator bool()
+ * \brief Boolean operator to test if a \a Duration holds a non-zero time value
+ *
+ * \return True if \a Duration is a non-zero time value, false otherwise
+ */
+
+/**
+ * \fn abs_diff(const T& a, const T& b)
+ * \brief Calculate the absolute value of the difference between two elements
+ * \param[in] a The first element
+ * \param[in] b The second element
+ *
+ * This function calculates the absolute value of the difference between two
+ * elements of the same type, in such a way that a negative value will never
+ * occur during the calculation.
+ *
+ * This is inspired by the std::abs_diff() candidate proposed in N4318
+ * (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4318.pdf).
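+ *
+ * For example, with unsigned operands, where a plain subtraction could
+ * underflow:
+ *
+ * \code{.cpp}
+ * unsigned int a = 3, b = 5;
+ * unsigned int diff = utils::abs_diff(a, b);	// 2
+ * \endcode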
+ *
+ * \return The absolute value of the difference of the two parameters \a a and
+ * \a b
+ */
+
+#if HAVE_LOCALE_T
+
+namespace {
+
+/*
+ * RAII wrapper around locale_t instances, to support global locale instances
+ * without leaking memory.
+ */
+class Locale
+{
+public:
+ Locale(const char *locale)
+ {
+ locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
+ }
+
+ ~Locale()
+ {
+ freelocale(locale_);
+ }
+
+ locale_t locale() { return locale_; }
+
+private:
+ locale_t locale_;
+};
+
+Locale cLocale("C");
+
+} /* namespace */
+
+#endif /* HAVE_LOCALE_T */
+
+/**
+ * \brief Convert a string to a double independently of the current locale
+ * \param[in] nptr The string to convert
+ * \param[out] endptr Pointer to trailing portion of the string after conversion
+ *
+ * This function is a locale-independent version of the std::strtod() function.
+ * It behaves as the standard function, but uses the "C" locale instead of the
+ * current locale.
+ *
+ * \return The converted value, if any, or 0.0 if the conversion failed.
+ */
+double strtod(const char *__restrict nptr, char **__restrict endptr)
+{
+#if HAVE_LOCALE_T
+ return strtod_l(nptr, endptr, cLocale.locale());
+#else
+ /*
+ * If the libc implementation doesn't provide locale object support,
+ * assume that strtod() is locale-independent.
+ */
+ return ::strtod(nptr, endptr);
+#endif
+}
+
+/**
+ * \fn to_underlying(Enum e)
+ * \brief Convert an enumeration to its underlying type
+ * \param[in] e Enumeration value to convert
+ *
+ * This function is equivalent to the C++23 std::to_underlying().
+ *
+ * \return The value of \a e converted to its underlying type
+ */
+
+/**
+ * \class ScopeExitActions
+ * \brief An object that performs actions upon destruction
+ *
+ * The ScopeExitActions class is a simple object that performs user-provided
+ * actions upon destruction. It is meant to simplify cleanup tasks in error
+ * handling paths.
+ *
+ * When the code flow performs multiple sequential actions that each need a
+ * corresponding cleanup action, error handling quickly becomes tedious:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret) {
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * ret = startConsumer();
+ * if (ret) {
+ * stopProducer();
+ * freeMemory();
+ * return ret;
+ * }
+ *
+ * return 0;
+ * }
+ * \endcode
+ *
+ * This is prone to programming mistakes, as cleanup actions can easily be
+ * forgotten or ordered incorrectly. One strategy to simplify error handling is
+ * to use goto statements:
+ *
+ * \code{.cpp}
+ * {
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * ret = startProducer();
+ * if (ret)
+ * goto error_free;
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * goto error_stop;
+ *
+ * return 0;
+ *
+ * error_stop:
+ * stopProducer();
+ * error_free:
+ * freeMemory();
+ * return ret;
+ * }
+ * \endcode
+ *
+ * While this may be considered better, this solution is still quite
+ * error-prone. Besides the risk of picking the wrong error label, the error
+ * handling logic is separated from the normal code flow, which increases the
+ * risk of error when refactoring the code. Additionally, C++ doesn't allow
+ * goto statements to jump over local variable declarations, which can make
+ * usage of this pattern more difficult.
+ *
+ * The ScopeExitActions class solves these issues by allowing code that
+ * requires cleanup actions to be grouped with its corresponding error handling
+ * code:
+ *
+ * \code{.cpp}
+ * {
+ * ScopeExitActions actions;
+ *
+ * int ret = allocateMemory();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { freeMemory(); };
+ *
+ * ret = startProducer();
+ * if (ret)
+ * return ret;
+ *
+ * actions += [&]() { stopProducer(); };
+ *
+ * ret = startConsumer();
+ * if (ret)
+ * return ret;
+ *
+ * actions.release();
+ * return 0;
+ * }
+ * \endcode
+ *
+ * Error handlers are executed when the ScopeExitActions instance is destroyed,
+ * in the reverse order of their addition.
+ */
+
+ScopeExitActions::~ScopeExitActions()
+{
+ for (const auto &action : utils::reverse(actions_))
+ action();
+}
+
+/**
+ * \brief Add an exit action
+ * \param[in] action The action
+ *
+ * Add an exit action to the ScopeExitActions. Actions will be called upon
+ * destruction in the reverse order of their addition.
+ */
+void ScopeExitActions::operator+=(std::function<void()> &&action)
+{
+ actions_.push_back(std::move(action));
+}
+
+/**
+ * \brief Remove all exit actions
+ *
+ * This function should be called in scope exit paths that don't need the
+ * actions to be executed, such as success return paths from a function when
+ * the ScopeExitActions is used for error cleanup.
+ */
+void ScopeExitActions::release()
+{
+ actions_.clear();
+}
+
+} /* namespace utils */
+
+#ifndef __DOXYGEN__
+template<class CharT, class Traits>
+std::basic_ostream<CharT, Traits> &operator<<(std::basic_ostream<CharT, Traits> &os,
+ const utils::Duration &d)
+{
+ std::basic_ostringstream<CharT, Traits> s;
+
+ s.flags(os.flags());
+ s.imbue(os.getloc());
+ s.setf(std::ios_base::fixed, std::ios_base::floatfield);
+ s.precision(2);
+ s << d.get<std::micro>() << "us";
+ return os << s.str();
+}
+
+template
+std::basic_ostream<char, std::char_traits<char>> &
+operator<< <char, std::char_traits<char>>(std::basic_ostream<char, std::char_traits<char>> &os,
+ const utils::Duration &d);
+#endif
+
+} /* namespace libcamera */
diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp
new file mode 100644
index 00000000..3dab91fc
--- /dev/null
+++ b/src/libcamera/bayer_format.cpp
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Class to represent Bayer formats
+ */
+
+#include "libcamera/internal/bayer_format.h"
+
+#include <algorithm>
+#include <map>
+#include <sstream>
+#include <unordered_map>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/transform.h>
+
+/**
+ * \file bayer_format.h
+ * \brief Class to represent Bayer formats and manipulate them
+ */
+
+namespace libcamera {
+
+/**
+ * \class BayerFormat
+ * \brief Class to represent a raw image Bayer format
+ *
+ * This class encodes the different Bayer formats in such a way that they can
+ * be easily manipulated. For example, the bit depth or Bayer order can be
+ * easily altered - the Bayer order can even be "transformed" in the same
+ * manner as happens in many sensors when their horizontal or vertical "flip"
+ * controls are set.
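+ *
+ * For example (a sketch, assuming the transform matches the sensor's flip
+ * controls):
+ *
+ * \code{.cpp}
+ * BayerFormat bayer(BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2);
+ * BayerFormat flipped = bayer.transform(Transform::HFlip);
+ * \endcode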
+ */
+
+/**
+ * \enum BayerFormat::Order
+ * \brief The order of the colour channels in the Bayer pattern
+ *
+ * \var BayerFormat::BGGR
+ * \brief B then G on the first row, G then R on the second row.
+ * \var BayerFormat::GBRG
+ * \brief G then B on the first row, R then G on the second row.
+ * \var BayerFormat::GRBG
+ * \brief G then R on the first row, B then G on the second row.
+ * \var BayerFormat::RGGB
+ * \brief R then G on the first row, G then B on the second row.
+ * \var BayerFormat::MONO
+ * \brief Monochrome image data, there is no colour filter array.
+ */
+
+/**
+ * \enum BayerFormat::Packing
+ * \brief Different types of packing that can be applied to a BayerFormat
+ *
+ * \var BayerFormat::Packing::None
+ * \brief No packing
+ * \var BayerFormat::Packing::CSI2
+ * \brief Format uses MIPI CSI-2 style packing
+ * \var BayerFormat::Packing::IPU3
+ * \brief Format uses IPU3 style packing
+ * \var BayerFormat::Packing::PISP1
+ * \brief Format uses PISP mode 1 compression
+ * \var BayerFormat::Packing::PISP2
+ * \brief Format uses PISP mode 2 compression
+ */
+
+namespace {
+
+/* Define a slightly arbitrary ordering so that we can use a std::map. */
+struct BayerFormatComparator {
+ constexpr bool operator()(const BayerFormat &lhs, const BayerFormat &rhs) const
+ {
+ if (lhs.bitDepth < rhs.bitDepth)
+ return true;
+ else if (lhs.bitDepth > rhs.bitDepth)
+ return false;
+
+ if (lhs.order < rhs.order)
+ return true;
+ else if (lhs.order > rhs.order)
+ return false;
+
+ if (lhs.packing < rhs.packing)
+ return true;
+ else
+ return false;
+ }
+};
+
+struct Formats {
+ PixelFormat pixelFormat;
+ V4L2PixelFormat v4l2Format;
+};
+
+const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{
+ { { BayerFormat::BGGR, 8, BayerFormat::Packing::None },
+ { formats::SBGGR8, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8) } },
+ { { BayerFormat::GBRG, 8, BayerFormat::Packing::None },
+ { formats::SGBRG8, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8) } },
+ { { BayerFormat::GRBG, 8, BayerFormat::Packing::None },
+ { formats::SGRBG8, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8) } },
+ { { BayerFormat::RGGB, 8, BayerFormat::Packing::None },
+ { formats::SRGGB8, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::None },
+ { formats::SBGGR10, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::None },
+ { formats::SGBRG10, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::None },
+ { formats::SGRBG10, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::None },
+ { formats::SRGGB10, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P) } },
+ { { BayerFormat::BGGR, 10, BayerFormat::Packing::IPU3 },
+ { formats::SBGGR10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10) } },
+ { { BayerFormat::GBRG, 10, BayerFormat::Packing::IPU3 },
+ { formats::SGBRG10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10) } },
+ { { BayerFormat::GRBG, 10, BayerFormat::Packing::IPU3 },
+ { formats::SGRBG10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10) } },
+ { { BayerFormat::RGGB, 10, BayerFormat::Packing::IPU3 },
+ { formats::SRGGB10_IPU3, V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10) } },
+ { { BayerFormat::BGGR, 12, BayerFormat::Packing::None },
+ { formats::SBGGR12, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12) } },
+ { { BayerFormat::GBRG, 12, BayerFormat::Packing::None },
+ { formats::SGBRG12, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12) } },
+ { { BayerFormat::GRBG, 12, BayerFormat::Packing::None },
+ { formats::SGRBG12, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12) } },
+ { { BayerFormat::RGGB, 12, BayerFormat::Packing::None },
+ { formats::SRGGB12, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12) } },
+ { { BayerFormat::BGGR, 12, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P) } },
+ { { BayerFormat::GBRG, 12, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P) } },
+ { { BayerFormat::GRBG, 12, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P) } },
+ { { BayerFormat::RGGB, 12, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::None },
+ { formats::SBGGR14, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::None },
+ { formats::SGBRG14, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::None },
+ { formats::SGRBG14, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::None },
+ { formats::SRGGB14, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14) } },
+ { { BayerFormat::BGGR, 14, BayerFormat::Packing::CSI2 },
+ { formats::SBGGR14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P) } },
+ { { BayerFormat::GBRG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGBRG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P) } },
+ { { BayerFormat::GRBG, 14, BayerFormat::Packing::CSI2 },
+ { formats::SGRBG14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P) } },
+ { { BayerFormat::RGGB, 14, BayerFormat::Packing::CSI2 },
+ { formats::SRGGB14_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::None },
+ { formats::SBGGR16, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::None },
+ { formats::SGBRG16, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::None },
+ { formats::SGRBG16, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::None },
+ { formats::SRGGB16, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16) } },
+ { { BayerFormat::BGGR, 16, BayerFormat::Packing::PISP1 },
+ { formats::BGGR_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR) } },
+ { { BayerFormat::GBRG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GBRG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG) } },
+ { { BayerFormat::GRBG, 16, BayerFormat::Packing::PISP1 },
+ { formats::GRBG_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG) } },
+ { { BayerFormat::RGGB, 16, BayerFormat::Packing::PISP1 },
+ { formats::RGGB_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB) } },
+ { { BayerFormat::MONO, 8, BayerFormat::Packing::None },
+ { formats::R8, V4L2PixelFormat(V4L2_PIX_FMT_GREY) } },
+ { { BayerFormat::MONO, 10, BayerFormat::Packing::None },
+ { formats::R10, V4L2PixelFormat(V4L2_PIX_FMT_Y10) } },
+ { { BayerFormat::MONO, 10, BayerFormat::Packing::CSI2 },
+ { formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::None },
+ { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } },
+ { { BayerFormat::MONO, 12, BayerFormat::Packing::CSI2 },
+ { formats::R12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y12P) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::None },
+ { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } },
+ { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 },
+ { formats::MONO_PISP_COMP1, V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO) } },
+};
+
+const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{
+ { MEDIA_BUS_FMT_SBGGR8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, { BayerFormat::BGGR, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, { BayerFormat::GBRG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, { BayerFormat::GRBG, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, { BayerFormat::RGGB, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, { BayerFormat::BGGR, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, { BayerFormat::GBRG, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, { BayerFormat::GRBG, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, { BayerFormat::RGGB, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, { BayerFormat::BGGR, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, { BayerFormat::GBRG, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, { BayerFormat::GRBG, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, { BayerFormat::RGGB, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, { BayerFormat::BGGR, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, { BayerFormat::GBRG, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, { BayerFormat::GRBG, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, { BayerFormat::RGGB, 14, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, { BayerFormat::BGGR, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, { BayerFormat::GBRG, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, { BayerFormat::GRBG, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, { BayerFormat::BGGR, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, { BayerFormat::GBRG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, { BayerFormat::GRBG, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, { BayerFormat::RGGB, 20, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } },
+ { MEDIA_BUS_FMT_Y16_1X16, { BayerFormat::MONO, 16, BayerFormat::Packing::None } },
+};
+
+} /* namespace */
+
+/**
+ * \fn BayerFormat::BayerFormat()
+ * \brief Construct an empty (and invalid) BayerFormat
+ */
+
+/**
+ * \fn BayerFormat::BayerFormat(Order o, uint8_t b, Packing p)
+ * \brief Construct a BayerFormat from explicit values
+ * \param[in] o The order of the Bayer pattern
+ * \param[in] b The bit depth of the Bayer samples
+ * \param[in] p The type of packing applied to the pixel values
+ */
+
+/**
+ * \brief Retrieve the BayerFormat associated with a media bus code
+ * \param[in] mbusCode The media bus code to convert into a BayerFormat
+ *
+ * The media bus code numeric identifiers are defined by the V4L2 specification.
+ */
+const BayerFormat &BayerFormat::fromMbusCode(unsigned int mbusCode)
+{
+ static BayerFormat empty;
+
+ const auto it = mbusCodeToBayer.find(mbusCode);
+ if (it == mbusCodeToBayer.end())
+ return empty;
+ else
+ return it->second;
+}
+
+/**
+ * \fn BayerFormat::isValid()
+ * \brief Return whether a BayerFormat is valid
+ */
+
+/**
+ * \brief Assemble and return a readable string representation of the
+ * BayerFormat
+ * \return A string describing the BayerFormat
+ */
+std::string BayerFormat::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Compare two BayerFormats for equality
+ * \return True if order, bitDepth and packing are equal, or false otherwise
+ */
+bool operator==(const BayerFormat &lhs, const BayerFormat &rhs)
+{
+ return lhs.order == rhs.order && lhs.bitDepth == rhs.bitDepth &&
+ lhs.packing == rhs.packing;
+}
+
+/**
+ * \brief Insert a text representation of a BayerFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The BayerFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const BayerFormat &f)
+{
+ static const char *orderStrings[] = {
+ "BGGR-",
+ "GBRG-",
+ "GRBG-",
+ "RGGB-",
+ "MONO-"
+ };
+
+ if (!f.isValid() || f.order > BayerFormat::MONO) {
+ out << "INVALID";
+ return out;
+ }
+
+ /* The cast is required to avoid bitDepth being interpreted as a char. */
+ out << orderStrings[f.order] << static_cast<unsigned int>(f.bitDepth);
+
+ if (f.packing == BayerFormat::Packing::CSI2)
+ out << "-CSI2P";
+ else if (f.packing == BayerFormat::Packing::IPU3)
+ out << "-IPU3P";
+ else if (f.packing == BayerFormat::Packing::PISP1)
+ out << "-PISP1";
+ else if (f.packing == BayerFormat::Packing::PISP2)
+ out << "-PISP2";
+
+ return out;
+}
+
+/**
+ * \fn bool operator!=(const BayerFormat &lhs, const BayerFormat &rhs)
+ * \brief Compare two BayerFormats for inequality
+ * \return True if any of order, bitDepth or packing are not equal, or false
+ * otherwise
+ */
+
+/**
+ * \brief Convert a BayerFormat into the corresponding V4L2PixelFormat
+ * \return The V4L2PixelFormat corresponding to this BayerFormat
+ */
+V4L2PixelFormat BayerFormat::toV4L2PixelFormat() const
+{
+ const auto it = bayerToFormat.find(*this);
+ if (it != bayerToFormat.end())
+ return it->second.v4l2Format;
+
+ return V4L2PixelFormat();
+}
+
+/**
+ * \brief Convert \a v4l2Format to the corresponding BayerFormat
+ * \param[in] v4l2Format The raw format to convert into a BayerFormat
+ * \return The BayerFormat corresponding to \a v4l2Format
+ */
+BayerFormat BayerFormat::fromV4L2PixelFormat(V4L2PixelFormat v4l2Format)
+{
+ auto it = std::find_if(bayerToFormat.begin(), bayerToFormat.end(),
+ [v4l2Format](const auto &i) {
+ return i.second.v4l2Format == v4l2Format;
+ });
+ if (it != bayerToFormat.end())
+ return it->first;
+
+ return BayerFormat();
+}
+
+/**
+ * \brief Convert a BayerFormat into the corresponding PixelFormat
+ * \return The PixelFormat corresponding to this BayerFormat
+ */
+PixelFormat BayerFormat::toPixelFormat() const
+{
+ const auto it = bayerToFormat.find(*this);
+ if (it != bayerToFormat.end())
+ return it->second.pixelFormat;
+
+ return PixelFormat();
+}
+
+/**
+ * \brief Convert a PixelFormat into the corresponding BayerFormat
+ * \return The BayerFormat corresponding to this PixelFormat
+ */
+BayerFormat BayerFormat::fromPixelFormat(PixelFormat format)
+{
+ const auto it = std::find_if(bayerToFormat.begin(), bayerToFormat.end(),
+ [format](const auto &i) {
+ return i.second.pixelFormat == format;
+ });
+ if (it != bayerToFormat.end())
+ return it->first;
+
+ return BayerFormat();
+}
+
+/**
+ * \brief Apply a transform to this BayerFormat
+ * \param[in] t The transform to apply
+ *
+ * Applying a transform to an image stored in a Bayer format affects the Bayer
+ * order. For example, performing a horizontal flip on the Bayer pattern RGGB
+ * causes the RG rows of pixels to become GR, and the GB rows to become BG. The
+ * transformed image would have a GRBG order. Performing a vertical flip on the
+ * Bayer pattern RGGB causes the GB rows to come before the RG ones and the
+ * transformed image would have GBRG order. Applying both vertical and
+ * horizontal flips on the Bayer pattern RGGB results in transformed images with
+ * BGGR order. The bit depth and modifiers are not affected.
+ *
+ * Horizontal and vertical flips are applied before transpose.
+ *
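+ * For example (an illustrative sketch):
+ * \code
+ * BayerFormat bggr{ BayerFormat::BGGR, 12, BayerFormat::Packing::None };
+ * BayerFormat rggb = bggr.transform(Transform::Rot180);  // RGGB order
+ * \endcode
+ *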
+ * \return The transformed Bayer format
+ */
+BayerFormat BayerFormat::transform(Transform t) const
+{
+ BayerFormat result = *this;
+
+ if (order == MONO)
+ return result;
+
+ /*
+ * Observe that flipping bit 0 of the Order enum performs a horizontal
+ * mirror on the Bayer pattern (e.g. RG/GB goes to GR/BG). Similarly,
+ * flipping bit 1 performs a vertical mirror operation on it (e.g. RG/GB
+ * goes to GB/RG). Applying both vertical and horizontal flips
+ * combines vertical and horizontal mirroring on the Bayer pattern
+ * (e.g. RG/GB goes to BG/GR). Hence:
+ */
+ if (!!(t & Transform::HFlip))
+ result.order = static_cast<Order>(result.order ^ 1);
+ if (!!(t & Transform::VFlip))
+ result.order = static_cast<Order>(result.order ^ 2);
+
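+ /* Transposing the pattern swaps the GBRG and GRBG orderings. */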
+ if (!!(t & Transform::Transpose) && result.order == 1)
+ result.order = static_cast<Order>(2);
+ else if (!!(t & Transform::Transpose) && result.order == 2)
+ result.order = static_cast<Order>(1);
+
+ return result;
+}
+
+/**
+ * \var BayerFormat::order
+ * \brief The order of the colour channels in the Bayer pattern
+ */
+
+/**
+ * \var BayerFormat::bitDepth
+ * \brief The bit depth of the samples in the Bayer pattern
+ */
+
+/**
+ * \var BayerFormat::packing
+ * \brief Any packing scheme applied to this BayerFormat
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/bound_method.cpp b/src/libcamera/bound_method.cpp
deleted file mode 100644
index 9aa59dc3..00000000
--- a/src/libcamera/bound_method.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * bound_method.cpp - Method bind and invocation
- */
-
-#include <libcamera/bound_method.h>
-
-#include "message.h"
-#include "semaphore.h"
-#include "thread.h"
-
-/**
- * \file bound_method.h
- * \brief Method bind and invocation
- */
-
-namespace libcamera {
-
-/**
- * \enum ConnectionType
- * \brief Connection type for asynchronous communication
- *
- * This enumeration describes the possible types of asynchronous communication
- * between a sender and a receiver. It applies to Signal::emit() and
- * Object::invokeMethod().
- *
- * \var ConnectionType::ConnectionTypeAuto
- * \brief If the sender and the receiver live in the same thread,
- * ConnectionTypeDirect is used. Otherwise ConnectionTypeQueued is used.
- *
- * \var ConnectionType::ConnectionTypeDirect
- * \brief The receiver is invoked immediately and synchronously in the sender's
- * thread.
- *
- * \var ConnectionType::ConnectionTypeQueued
- * \brief The receiver is invoked asynchronously
- *
- * Invoke the receiver asynchronously in its thread when control returns to the
- * thread's event loop. The sender proceeds without waiting for the invocation
- * to complete.
- *
- * \var ConnectionType::ConnectionTypeBlocking
- * \brief The receiver is invoked synchronously
- *
- * If the sender and the receiver live in the same thread, this is equivalent to
- * ConnectionTypeDirect. Otherwise, the receiver is invoked asynchronously in
- * its thread when control returns to the thread's event loop. The sender
- * blocks until the receiver signals the completion of the invocation.
- */
-
-/**
- * \brief Invoke the bound method with packed arguments
- * \param[in] pack Packed arguments
- * \param[in] deleteMethod True to delete \a this bound method instance when
- * method invocation completes
- *
- * The bound method stores its return value, if any, in the arguments \a pack.
- * For direct and blocking invocations, this is performed synchronously, and
- * the return value contained in the pack may be used. For queued invocations,
- * the return value is stored at an undefined point of time and shall thus not
- * be used by the caller.
- *
- * \return True if the return value contained in the \a pack may be used by the
- * caller, false otherwise
- */
-bool BoundMethodBase::activatePack(std::shared_ptr<BoundMethodPackBase> pack,
- bool deleteMethod)
-{
- ConnectionType type = connectionType_;
- if (type == ConnectionTypeAuto) {
- if (Thread::current() == object_->thread())
- type = ConnectionTypeDirect;
- else
- type = ConnectionTypeQueued;
- } else if (type == ConnectionTypeBlocking) {
- if (Thread::current() == object_->thread())
- type = ConnectionTypeDirect;
- }
-
- switch (type) {
- case ConnectionTypeDirect:
- default:
- invokePack(pack.get());
- if (deleteMethod)
- delete this;
- return true;
-
- case ConnectionTypeQueued: {
- std::unique_ptr<Message> msg =
- std::make_unique<InvokeMessage>(this, pack, nullptr, deleteMethod);
- object_->postMessage(std::move(msg));
- return false;
- }
-
- case ConnectionTypeBlocking: {
- Semaphore semaphore;
-
- std::unique_ptr<Message> msg =
- std::make_unique<InvokeMessage>(this, pack, &semaphore, deleteMethod);
- object_->postMessage(std::move(msg));
-
- semaphore.acquire();
- return true;
- }
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/buffer.cpp b/src/libcamera/buffer.cpp
deleted file mode 100644
index 673a63d3..00000000
--- a/src/libcamera/buffer.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * buffer.cpp - Buffer handling
- */
-
-#include <libcamera/buffer.h>
-
-#include <errno.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "log.h"
-
-/**
- * \file buffer.h
- * \brief Buffer handling
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Buffer)
-
-/**
- * \struct FrameMetadata
- * \brief Metadata related to a captured frame
- *
- * The FrameMetadata structure stores all metadata related to a captured frame,
- * as stored in a FrameBuffer, such as capture status, timestamp and bytesused.
- */
-
-/**
- * \enum FrameMetadata::Status
- * \brief Define the frame completion status
- * \var FrameMetadata::FrameSuccess
- * The frame has been captured with success and contains valid data. All fields
- * of the FrameMetadata structure are valid.
- * \var FrameMetadata::FrameError
- * An error occurred during capture of the frame. The frame data may be partly
- * or fully invalid. The sequence and timestamp fields of the FrameMetadata
- * structure is valid, the other fields may be invalid.
- * \var FrameMetadata::FrameCancelled
- * Capture stopped before the frame completed. The frame data is not valid. All
- * fields of the FrameMetadata structure but the status field are invalid.
- */
-
-/**
- * \struct FrameMetadata::Plane
- * \brief Per-plane frame metadata
- *
- * Frames are stored in memory in one or multiple planes. The
- * FrameMetadata::Plane structure stores per-plane metadata.
- */
-
-/**
- * \var FrameMetadata::Plane::bytesused
- * \brief Number of bytes occupied by the data in the plane, including line
- * padding
- *
- * This value may vary per frame for compressed formats. For uncompressed
- * formats it will be constant for all frames, but may be smaller than the
- * FrameBuffer size.
- */
-
-/**
- * \var FrameMetadata::status
- * \brief Status of the frame
- *
- * The validity of other fields of the FrameMetadata structure depends on the
- * status value.
- */
-
-/**
- * \var FrameMetadata::sequence
- * \brief Frame sequence number
- *
- * The sequence number is a monotonically increasing number assigned to the
- * frames captured by the stream. The value is increased by one for each frame.
- * Gaps in the sequence numbers indicate dropped frames.
- */
-
-/**
- * \var FrameMetadata::timestamp
- * \brief Time when the frame was captured
- *
- * The timestamp is expressed as a number of nanoseconds relative to the system
- * clock since an unspecified time point.
- *
- * \todo Be more precise on what timestamps refer to.
- */
-
-/**
- * \var FrameMetadata::planes
- * \brief Array of per-plane metadata
- */
-
-/**
- * \class FrameBuffer
- * \brief Frame buffer data and its associated dynamic metadata
- *
- * The FrameBuffer class is the primary interface for applications, IPAs and
- * pipeline handlers to interact with frame memory. It contains all the static
- * and dynamic information to manage the whole life cycle of a frame capture,
- * from buffer creation to consumption.
- *
- * The static information describes the memory planes that make a frame. The
- * planes are specified when creating the FrameBuffer and are expressed as a set
- * of dmabuf file descriptors and length.
- *
- * The dynamic information is grouped in a FrameMetadata instance. It is updated
- * during the processing of a queued capture request, and is valid from the
- * completion of the buffer as signaled by Camera::bufferComplete() until the
- * FrameBuffer is either reused in a new request or deleted.
- *
- * The creator of a FrameBuffer (application, IPA or pipeline handler) may
- * associate to it an integer cookie for any private purpose. The cookie may be
- * set when creating the FrameBuffer, and updated at any time with setCookie().
- * The cookie is transparent to the libcamera core and shall only be set by the
- * creator of the FrameBuffer. This mechanism supplements the Request cookie.
- */
-
-/**
- * \struct FrameBuffer::Plane
- * \brief A memory region to store a single plane of a frame
- *
- * Planar pixel formats use multiple memory regions to store the different
- * colour components of a frame. The Plane structure describes such a memory
- * region by a dmabuf file descriptor and a length. A FrameBuffer then
- * contains one or multiple planes, depending on the pixel format of the
- * frames it is meant to store.
- *
- * To support DMA access, planes are associated with dmabuf objects represented
- * by FileDescriptor handles. The Plane class doesn't handle mapping of the
- * memory to the CPU, but applications and IPAs may use the dmabuf file
- * descriptors to map the plane memory with mmap() and access its contents.
- *
- * \todo Once we have a Kernel API which can express offsets within a plane
- * this structure shall be extended to contain this information. See commit
- * 83148ce8be55e for initial documentation of this feature.
- */
-
-/**
- * \var FrameBuffer::Plane::fd
- * \brief The dmabuf file descriptor
- */
-
-/**
- * \var FrameBuffer::Plane::length
- * \brief The plane length in bytes
- */
-
-/**
- * \brief Construct a FrameBuffer with an array of planes
- * \param[in] planes The frame memory planes
- * \param[in] cookie Cookie
- */
-FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
- : planes_(planes), request_(nullptr), cookie_(cookie)
-{
-}
-
-/**
- * \fn FrameBuffer::planes()
- * \brief Retrieve the static plane descriptors
- * \return Array of plane descriptors
- */
-
-/**
- * \fn FrameBuffer::request()
- * \brief Retrieve the request this buffer belongs to
- *
- * The intended callers of this method are buffer completion handlers that
- * need to associate a buffer to the request it belongs to.
- *
- * A Buffer is associated to a request by Request::addBuffer() and the
- * association is valid until the buffer completes. The returned request
- * pointer is valid only during that interval.
- *
- * \return The Request the Buffer belongs to, or nullptr if the buffer is
- * not associated with a request
- */
-
-/**
- * \fn FrameBuffer::metadata()
- * \brief Retrieve the dynamic metadata
- * \return Dynamic metadata for the frame contained in the buffer
- */
-
-/**
- * \fn FrameBuffer::cookie()
- * \brief Retrieve the cookie
- *
- * The cookie belongs to the creator of the FrameBuffer, which controls its
- * lifetime and value.
- *
- * \sa setCookie()
- *
- * \return The cookie
- */
-
-/**
- * \fn FrameBuffer::setCookie()
- * \brief Set the cookie
- * \param[in] cookie Cookie to set
- *
- * The cookie belongs to the creator of the FrameBuffer. Its value may be
- * modified at any time with this method. Applications and IPAs shall not modify
- * the cookie value of buffers they haven't created themselves. The libcamera
- * core never modifies the buffer cookie.
- */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/byte_stream_buffer.cpp b/src/libcamera/byte_stream_buffer.cpp
index 20d6a655..fba9a6f3 100644
--- a/src/libcamera/byte_stream_buffer.cpp
+++ b/src/libcamera/byte_stream_buffer.cpp
@@ -2,25 +2,25 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.cpp - Byte stream buffer
+ * Byte stream buffer
*/
-#include "byte_stream_buffer.h"
+#include "libcamera/internal/byte_stream_buffer.h"
#include <stdint.h>
#include <string.h>
-#include "log.h"
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Serialization);
+#include <libcamera/base/log.h>
/**
* \file byte_stream_buffer.h
* \brief Managed memory container for serialized data
*/
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Serialization)
+
/**
* \class ByteStreamBuffer
* \brief Wrap a memory buffer and provide sequential data read and write
@@ -40,7 +40,8 @@ LOG_DEFINE_CATEGORY(Serialization);
* respectively. Access is strictly sequential, the buffer keeps track of the
* current access location and advances it automatically. Reading or writing
* the same location multiple times is thus not possible. Bytes may also be
- * skipped with the skip() method.
+ * skipped with the skip() function.
*
* The ByteStreamBuffer also supports carving out pieces of memory into other
* ByteStreamBuffer instances. Like a read or write operation, a carveOut()
@@ -52,7 +53,7 @@ LOG_DEFINE_CATEGORY(Serialization);
* the buffer being marked as having overflown. If the buffer has been carved
* out from a parent buffer, the parent buffer is also marked as having
* overflown. Any later access on an overflown buffer is blocked. The buffer
- * overflow status can be checked with the overflow() method.
+ * overflow status can be checked with the overflow() function.
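+ *
+ * A minimal usage sketch (illustrative only):
+ * \code
+ * uint8_t storage[64];
+ * ByteStreamBuffer writer(storage, sizeof(storage));
+ * uint32_t value = 42;
+ * writer.write(&value);
+ *
+ * ByteStreamBuffer reader(static_cast<const uint8_t *>(storage),
+ *                         sizeof(storage));
+ * reader.read(&value);
+ * \endcode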
*/
/**
@@ -155,7 +156,7 @@ void ByteStreamBuffer::setOverflow()
* \brief Carve out an area of \a size bytes into a new ByteStreamBuffer
* \param[in] size The size of the newly created memory buffer
*
- * This method carves out an area of \a size bytes from the buffer into a new
+ * This function carves out an area of \a size bytes from the buffer into a new
* ByteStreamBuffer, and returns the new buffer. It operates identically to a
* read or write access from the point of view of the current buffer, but allows
* the new buffer to be read or written at a later time after other read or
@@ -194,7 +195,7 @@ ByteStreamBuffer ByteStreamBuffer::carveOut(size_t size)
* \brief Skip \a size bytes from the buffer
* \param[in] size The number of bytes to skip
*
- * This method skips the next \a size bytes from the buffer.
+ * This function skips the next \a size bytes from the buffer.
*
* \return 0 on success, a negative error code otherwise
* \retval -ENOSPC no more space is available in the managed memory buffer
diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp
index 8c3bb2c2..69a7ee53 100644
--- a/src/libcamera/camera.cpp
+++ b/src/libcamera/camera.cpp
@@ -2,42 +2,128 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.cpp - Camera device
+ * Camera device
*/
#include <libcamera/camera.h>
+#include <array>
#include <atomic>
-#include <iomanip>
+#include <ios>
+#include <memory>
+#include <optional>
+#include <set>
+#include <sstream>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/color_space.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_controls.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
/**
- * \file camera.h
+ * \file libcamera/camera.h
* \brief Camera device handling
*
- * At the core of libcamera is the camera device, combining one image source
- * with processing hardware able to provide one or multiple image streams. The
- * Camera class represents a camera device.
- *
- * A camera device contains a single image source, and separate camera device
- * instances relate to different image sources. For instance, a phone containing
- * front and back image sensors will be modelled with two camera devices, one
- * for each sensor. When multiple streams can be produced from the same image
- * source, all those streams are guaranteed to be part of the same camera
- * device.
- *
- * While not sharing image sources, separate camera devices can share other
- * system resources, such as an ISP. For this reason camera device instances may
- * not be fully independent, in which case usage restrictions may apply. For
- * instance, a phone with a front and a back camera device may not allow usage
- * of the two devices simultaneously.
+ * \page camera-model Camera Model
+ *
+ * libcamera acts as a middleware between applications and camera hardware. It
+ * provides a solution to an unsolvable problem: reconciling applications,
+ * which need to run on different systems without dealing with device-specific
+ * details, and camera hardware, which exhibits a wide variety of features,
+ * limitations and architecture variations. In order to do so, it creates an
+ * abstract camera model that hides the camera hardware from applications. The
+ * model is designed to strike the right balance between genericity, to please
+ * generic applications, and flexibility, to expose even the most specific
+ * hardware features to the most demanding applications.
+ *
+ * In libcamera, a Camera is defined as a device that can capture frames
+ * continuously from a camera sensor and store them in memory. If supported by
+ * the device and desired by the application, the camera may store each
+ * captured frame in multiple copies, possibly in different formats and sizes.
+ * Each of these memory outputs of the camera is called a Stream.
+ *
+ * A camera contains a single image source, and separate camera instances
+ * relate to different image sources. For instance, a phone containing front
+ * and back image sensors will be modelled with two cameras, one for each
+ * sensor. When multiple streams can be produced from the same image source,
+ * all those streams are guaranteed to be part of the same camera.
+ *
+ * While not sharing image sources, separate cameras can share other system
+ * resources, such as ISPs. For this reason camera instances may not be fully
+ * independent, in which case usage restrictions may apply. For instance, a
+ * phone with a front and a back camera may not allow usage of the two cameras
+ * simultaneously.
+ *
+ * The camera model defines an implicit pipeline, whose input is the camera
+ * sensor, and whose outputs are the streams. Along the pipeline, the frames
+ * produced by the camera sensor are transformed by the camera into a format
+ * suitable for applications, with image processing that improves the quality
+ * of the captured frames. The camera exposes a set of controls that
+ * applications may use to manually control the processing steps. This
+ * high-level camera model is the minimum baseline that all cameras must
+ * conform to.
+ *
+ * \section camera-pipeline-model Pipeline Model
+ *
+ * Camera hardware differs in the supported image processing operations and the
+ * order in which they are applied. The libcamera pipelines abstract the
+ * hardware differences and expose a logical view of the processing operations
+ * with a fixed order. This offers low-level control of those operations to
+ * applications, while keeping application code generic.
+ *
+ * Starting from the camera sensor, a pipeline applies the following
+ * operations, in that order.
+ *
+ * - Pixel exposure
+ * - Analog to digital conversion and readout
+ * - Black level subtraction
+ * - Defective pixel correction
+ * - Lens shading correction
+ * - Spatial noise filtering
+ * - Per-channel gains (white balance)
+ * - Demosaicing (color filter array interpolation)
+ * - Color correction matrix (typically RGB to RGB)
+ * - Gamma correction
+ * - Color space transformation (typically RGB to YUV)
+ * - Cropping
+ * - Scaling
+ *
+ * Not all cameras implement all operations, and they are not necessarily
+ * implemented in the above order at the hardware level. The libcamera pipeline
+ * handlers translate the pipeline model to the real hardware configuration.
+ *
+ * \subsection camera-sensor-model Camera Sensor Model
+ *
+ * By default, libcamera configures the camera sensor automatically based on the
+ * configuration of the streams. Applications may instead specify a manual
+ * configuration for the camera sensor. This allows precise control of the frame
+ * geometry and frame rate delivered by the sensor.
+ *
+ * More details about the camera sensor model implemented by libcamera are
+ * available in the libcamera camera-sensor-model documentation page.
+ *
+ * \subsection digital-zoom Digital Zoom
+ *
+ * Digital zoom is implemented as a combination of the cropping and scaling
+ * stages of the pipeline. Cropping is controlled explicitly through the
+ * controls::ScalerCrop control, while scaling is controlled implicitly based
+ * on the crop rectangle and the output stream size. The crop rectangle is
+ * expressed relative to the full pixel array size and indicates how the field
+ * of view is affected by the pipeline.
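+ *
+ * For instance, a 2x zoom centred on a hypothetical 4056x3040 pixel array
+ * could be requested as follows (illustrative sketch):
+ * \code
+ * request->controls().set(controls::ScalerCrop,
+ *                         Rectangle(1014, 760, 2028, 1520));
+ * \endcode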
+ */
+
+/**
+ * \internal
+ * \file libcamera/internal/camera.h
+ * \brief Internal camera device handling
*/
namespace libcamera {
@@ -45,15 +131,136 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
+ * \class SensorConfiguration
+ * \brief Camera sensor configuration
+ *
+ * The SensorConfiguration class collects parameters to control the operations
+ * of the camera sensor, according to the abstract camera sensor model
+ * implemented by libcamera.
+ *
+ * \todo Applications shall fully populate all fields of the
+ * CameraConfiguration::sensorConfig class members before validating the
+ * CameraConfiguration. If the SensorConfiguration is not fully populated, or if
+ * any of its parameters cannot be applied to the sensor in use, the
+ * CameraConfiguration validation process will fail and return
+ * CameraConfiguration::Status::Invalid.
+ *
+ * Applications that populate the SensorConfiguration class members are
+ * expected to be highly-specialized applications that know what sensor
+ * they are operating with and what parameters are valid for the sensor in use.
+ *
+ * A detailed description of the abstract camera sensor model implemented by
+ * libcamera and the description of its configuration parameters is available
+ * in the libcamera documentation camera-sensor-model file.
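+ *
+ * A hypothetical, minimal example (sketch only, the values are assumptions):
+ * \code
+ * SensorConfiguration sensorConfig;
+ * sensorConfig.bitDepth = 10;
+ * sensorConfig.outputSize = Size(2028, 1520);
+ * cameraConfig->sensorConfig = sensorConfig;
+ * \endcode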
+ */
+
+/**
+ * \var SensorConfiguration::bitDepth
+ * \brief The sensor image format bit depth
+ *
+ * The number of bits (resolution) used to represent a pixel sample.
+ */
+
+/**
+ * \var SensorConfiguration::analogCrop
+ * \brief The analog crop rectangle
+ *
+ * The selected portion of the active pixel array used to produce the image
+ * frame.
+ */
+
+/**
+ * \var SensorConfiguration::binning
+ * \brief Sensor binning configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the binning operations. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::binX
+ * \brief Horizontal binning factor
+ *
+ * The horizontal binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::binY
+ * \brief Vertical binning factor
+ *
+ * The vertical binning factor. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::skipping
+ * \brief The sensor skipping configuration
+ *
+ * Refer to the camera-sensor-model documentation for an accurate description
+ * of the skipping operations.
+ *
+ * If no skipping is performed, all the structure fields should be
+ * set to 1. Disabled by default.
+ */
+
+/**
+ * \var SensorConfiguration::xOddInc
+ * \brief Horizontal increment for odd rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::xEvenInc
+ * \brief Horizontal increment for even rows. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yOddInc
+ * \brief Vertical increment for odd columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::yEvenInc
+ * \brief Vertical increment for even columns. Defaults to 1.
+ */
+
+/**
+ * \var SensorConfiguration::outputSize
+ * \brief The frame output (visible) size
+ *
+ * The size of the data frame as received by the host processor.
+ */
+
+/**
+ * \brief Check if the sensor configuration is valid
+ *
+ * A sensor configuration is valid if it's fully populated.
+ *
+ * \todo For now allow applications to populate the bitDepth and the outputSize
+ * only, as the skipping and binning factors are initialized to 1 and the analog
+ * crop is ignored.
+ *
+ * \return True if the sensor configuration is valid, false otherwise
+ */
+bool SensorConfiguration::isValid() const
+{
+ if (bitDepth && binning.binX && binning.binY &&
+ skipping.xOddInc && skipping.yOddInc &&
+ skipping.xEvenInc && skipping.yEvenInc &&
+ !outputSize.isNull())
+ return true;
+
+ return false;
+}
+
+/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
* The CameraConfiguration holds an ordered list of stream configurations. It
* supports iterators and operates as a vector of StreamConfiguration instances.
* The stream configurations are inserted by addConfiguration(), and the
- * operator[](int) returns a reference to the StreamConfiguration based on its
- * insertion index. Accessing a stream configuration with an invalid index
- * results in undefined behaviour.
+ * at() function or operator[] return a reference to the StreamConfiguration
+ * based on its insertion index. Accessing a stream configuration with an
+ * invalid index results in undefined behaviour.
*
* CameraConfiguration instances are retrieved from the camera with
* Camera::generateConfiguration(). Applications may then inspect the
@@ -93,7 +300,7 @@ LOG_DECLARE_CATEGORY(Camera)
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
- : config_({})
+ : orientation(Orientation::Rotate0), config_({})
{
}
@@ -114,22 +321,22 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
* \fn CameraConfiguration::validate()
* \brief Validate and possibly adjust the camera configuration
*
- * This method adjusts the camera configuration to the closest valid
+ * This function adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
- * \todo: Define exactly when to return each status code. Should stream
+ * \todo Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
- * \todo: Handle validation of buffers count when refactoring the buffers API.
+ * \todo Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfiguration::Invalid The configuration is invalid and can't
* be adjusted. This may only occur in extreme cases such as when the
* configuration is empty.
- * \retval CameraConfigutation::Adjusted The configuration has been adjusted
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
* and is now valid. Parameters may have changed for any stream, and stream
* configurations may have been removed. The caller shall check the
* configuration carefully.
@@ -143,7 +350,7 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -158,7 +365,7 @@ StreamConfiguration &CameraConfiguration::at(unsigned int index)
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -174,7 +381,7 @@ const StreamConfiguration &CameraConfiguration::at(unsigned int index) const
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -186,7 +393,7 @@ const StreamConfiguration &CameraConfiguration::at(unsigned int index) const
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
- * this method with an invalid index results in undefined behaviour.
+ * this function with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
@@ -251,44 +458,135 @@ std::size_t CameraConfiguration::size() const
}
/**
- * \var CameraConfiguration::config_
- * \brief The vector of stream configurations
+ * \enum CameraConfiguration::ColorSpaceFlag
+ * \brief Specify the behaviour of validateColorSpaces
+ * \var CameraConfiguration::ColorSpaceFlag::None
+ * \brief No extra validation of color spaces is required
+ * \var CameraConfiguration::ColorSpaceFlag::StreamsShareColorSpace
+ * \brief Non-raw output streams must share the same color space
+ */
+
+/**
+ * \typedef CameraConfiguration::ColorSpaceFlags
+ * \brief A bitwise combination of ColorSpaceFlag values
*/
-class Camera::Private
+/**
+ * \brief Check the color spaces requested for each stream
+ * \param[in] flags Flags to control the behaviour of this function
+ *
+ * This function performs certain consistency checks on the color spaces of
+ * the streams and may adjust them so that:
+ *
+ * - Any raw streams have the Raw color space
+ * - If the StreamsShareColorSpace flag is set, all output streams are forced
+ * to share the same color space (this may be a constraint on some platforms).
+ *
+ * It is optional for a pipeline handler to use this function.
+ *
+ * \return A CameraConfiguration::Status value that describes the validation
+ * status.
+ * \retval CameraConfiguration::Adjusted The configuration has been adjusted
+ * and is now valid. The color space of some or all of the streams may have
+ * been changed. The caller shall check the color spaces carefully.
+ * \retval CameraConfiguration::Valid The configuration was already valid and
+ * hasn't been adjusted.
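+ *
+ * A pipeline handler's validate() implementation might call it as follows
+ * (illustrative sketch):
+ * \code
+ * Status status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+ * \endcode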
+ */
+CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceFlags flags)
{
-public:
- enum State {
- CameraAvailable,
- CameraAcquired,
- CameraConfigured,
- CameraRunning,
- };
+ Status status = Valid;
+
+ /*
+ * Set all raw streams to the Raw color space, and make a note of the
+ * largest non-raw stream with a defined color space (if there is one).
+ */
+ std::optional<ColorSpace> colorSpace;
+ Size size;
- Private(PipelineHandler *pipe, const std::string &name,
- const std::set<Stream *> &streams);
- ~Private();
+ for (StreamConfiguration &cfg : config_) {
+ if (!cfg.colorSpace)
+ continue;
- int isAccessAllowed(State state, bool allowDisconnected = false) const;
- int isAccessAllowed(State low, State high,
- bool allowDisconnected = false) const;
+ if (cfg.colorSpace->adjust(cfg.pixelFormat))
+ status = Adjusted;
- void disconnect();
- void setState(State state);
+ if (cfg.colorSpace != ColorSpace::Raw && cfg.size > size) {
+ colorSpace = cfg.colorSpace;
+ size = cfg.size;
+ }
+ }
- std::shared_ptr<PipelineHandler> pipe_;
- std::string name_;
- std::set<Stream *> streams_;
- std::set<Stream *> activeStreams_;
+ if (!colorSpace || !(flags & ColorSpaceFlag::StreamsShareColorSpace))
+ return status;
-private:
- bool disconnected_;
- std::atomic<State> state_;
-};
+ /* Make all output color spaces the same, if requested. */
+ for (auto &cfg : config_) {
+ if (cfg.colorSpace != ColorSpace::Raw &&
+ cfg.colorSpace != colorSpace) {
+ cfg.colorSpace = colorSpace;
+ status = Adjusted;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * \var CameraConfiguration::sensorConfig
+ * \brief The camera sensor configuration
+ *
+ * The sensorConfig member allows manual control of the configuration of the
+ * camera sensor. By default, if sensorConfig is not set, the camera will
+ * configure the sensor automatically based on the configuration of the streams.
+ * Applications can override this by manually specifying the full sensor
+ * configuration.
+ *
+ * Refer to the camera-sensor-model documentation and to the SensorConfiguration
+ * class documentation for details about the sensor configuration process.
+ *
+ * The camera sensor configuration applies to all streams produced by a camera
+ * from the same image source.
+ */
+
+/**
+ * \var CameraConfiguration::orientation
+ * \brief The desired orientation of the images produced by the camera
+ *
+ * The orientation field is a user-specified 2D plane transformation that
+ * specifies how the application wants the camera images to be rotated in
+ * the memory buffers.
+ *
+ * If the orientation requested by the application cannot be obtained, the
+ * camera will not rotate or flip the images, and the validate() function will
+ * adjust this value to the native image orientation produced by the camera.
+ *
+ * By default the orientation field is set to Orientation::Rotate0.
+ */
+
+/**
+ * \var CameraConfiguration::config_
+ * \brief The vector of stream configurations
+ */
+
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class Camera::Private
+ * \brief Base class for camera private data
+ *
+ * The Camera::Private class stores all private data associated with a camera.
+ * In addition to hiding core Camera data from the public API, it is expected to
+ * be subclassed by pipeline handlers to store pipeline-specific data.
+ *
+ * Pipeline handlers can obtain the Camera::Private instance associated with a
+ * camera by calling Camera::_d().
+ */
-Camera::Private::Private(PipelineHandler *pipe, const std::string &name,
- const std::set<Stream *> &streams)
- : pipe_(pipe->shared_from_this()), name_(name), streams_(streams),
+/**
+ * \brief Construct a Camera::Private instance
+ * \param[in] pipe The pipeline handler responsible for the camera device
+ */
+Camera::Private::Private(PipelineHandler *pipe)
+ : requestSequence_(0), pipe_(pipe->shared_from_this()),
disconnected_(false), state_(CameraAvailable)
{
}
@@ -299,14 +597,86 @@ Camera::Private::~Private()
LOG(Camera, Error) << "Removing camera while still in use";
}
+/**
+ * \fn Camera::Private::pipe()
+ * \brief Retrieve the pipeline handler related to this camera
+ * \return The pipeline handler that created this camera
+ */
+
+/**
+ * \fn Camera::Private::pipe() const
+ * \copydoc Camera::Private::pipe()
+ */
+
+/**
+ * \fn Camera::Private::validator()
+ * \brief Retrieve the control validator related to this camera
+ * \return The control validator associated with this camera
+ */
+
+/**
+ * \var Camera::Private::queuedRequests_
+ * \brief The list of queued and not yet completed requests
+ *
+ * This list tracks requests queued in order to ensure completion of all
+ * requests when the pipeline handler is stopped.
+ *
+ * \sa PipelineHandler::queueRequest(), PipelineHandler::stop(),
+ * PipelineHandler::completeRequest()
+ */
+
+/**
+ * \var Camera::Private::controlInfo_
+ * \brief The set of controls supported by the camera
+ *
+ * The control information shall be initialised by the pipeline handler when
+ * creating the camera.
+ *
+ * \todo This member was initially meant to stay constant after the camera is
+ * created. Several pipeline handlers are already updating it when the camera
+ * is configured. Update the documentation accordingly, and possibly the API as
+ * well, when implementing official support for control info updates.
+ */
+
+/**
+ * \var Camera::Private::properties_
+ * \brief The list of properties supported by the camera
+ *
+ * The list of camera properties shall be initialised by the pipeline handler
+ * when creating the camera, and shall not be modified afterwards.
+ */
+
+/**
+ * \var Camera::Private::requestSequence_
+ * \brief The queuing sequence number of the request
+ *
+ * When requests are queued, they are given a per-camera sequence number to
+ * facilitate debugging of internal request usage.
+ *
+ * The requestSequence_ tracks the number of requests queued to a camera
+ * over a single capture session.
+ */
+
static const char *const camera_state_names[] = {
"Available",
"Acquired",
"Configured",
+ "Stopping",
"Running",
};
-int Camera::Private::isAccessAllowed(State state, bool allowDisconnected) const
+bool Camera::Private::isAcquired() const
+{
+ return state_.load(std::memory_order_acquire) != CameraAvailable;
+}
+
+bool Camera::Private::isRunning() const
+{
+ return state_.load(std::memory_order_acquire) == CameraRunning;
+}
+
+int Camera::Private::isAccessAllowed(State state, bool allowDisconnected,
+ const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
@@ -315,17 +685,18 @@ int Camera::Private::isAccessAllowed(State state, bool allowDisconnected) const
if (currentState == state)
return 0;
- ASSERT(static_cast<unsigned int>(state) < ARRAY_SIZE(camera_state_names));
+ ASSERT(static_cast<unsigned int>(state) < std::size(camera_state_names));
- LOG(Camera, Debug) << "Camera in " << camera_state_names[currentState]
- << " state trying operation requiring state "
+ LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
+ << " state trying " << from << "() requiring state "
<< camera_state_names[state];
return -EACCES;
}
int Camera::Private::isAccessAllowed(State low, State high,
- bool allowDisconnected) const
+ bool allowDisconnected,
+ const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
@@ -334,11 +705,12 @@ int Camera::Private::isAccessAllowed(State low, State high,
if (currentState >= low && currentState <= high)
return 0;
- ASSERT(static_cast<unsigned int>(low) < ARRAY_SIZE(camera_state_names) &&
- static_cast<unsigned int>(high) < ARRAY_SIZE(camera_state_names));
+ ASSERT(static_cast<unsigned int>(low) < std::size(camera_state_names) &&
+ static_cast<unsigned int>(high) < std::size(camera_state_names));
- LOG(Camera, Debug) << "Camera in " << camera_state_names[currentState]
- << " state trying operation requiring state between "
+ LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
+ << " state trying " << from
+ << "() requiring state between "
<< camera_state_names[low] << " and "
<< camera_state_names[high];
@@ -362,6 +734,7 @@ void Camera::Private::setState(State state)
{
state_.store(state, std::memory_order_release);
}
+#endif /* __DOXYGEN_PUBLIC__ */
/**
* \class Camera
@@ -409,6 +782,7 @@ void Camera::Private::setState(State state)
* node [shape = doublecircle ]; Available;
* node [shape = circle ]; Acquired;
* node [shape = circle ]; Configured;
+ * node [shape = circle ]; Stopping;
* node [shape = circle ]; Running;
*
* Available -> Available [label = "release()"];
@@ -421,7 +795,8 @@ void Camera::Private::setState(State state)
* Configured -> Configured [label = "configure(), createRequest()"];
* Configured -> Running [label = "start()"];
*
- * Running -> Configured [label = "stop()"];
+ * Running -> Stopping [label = "stop()"];
+ * Stopping -> Configured;
* Running -> Running [label = "createRequest(), queueRequest()"];
* }
* \enddot
@@ -441,6 +816,12 @@ void Camera::Private::setState(State state)
* release() the camera and to get back to the Available state or start()
* it to progress to the Running state.
*
+ * \subsubsection Stopping
+ * The camera has been asked to stop. Pending requests are being completed or
+ * cancelled, and no new requests are permitted to be queued. The camera will
+ * transition to the Configured state when all queued requests have been
+ * returned to the application.
+ *
* \subsubsection Running
* The camera is running and ready to process requests queued by the
* application. The camera remains in this state until it is stopped and moved
@@ -448,39 +829,66 @@ void Camera::Private::setState(State state)
*/
/**
+ * \internal
* \brief Create a camera instance
- * \param[in] name The name of the camera device
- * \param[in] pipe The pipeline handler responsible for the camera device
+ * \param[in] d Camera private data
+ * \param[in] id The ID of the camera device
* \param[in] streams Array of streams the camera provides
*
- * The caller is responsible for guaranteeing unicity of the camera name.
+ * The caller is responsible for guaranteeing a stable and unique camera ID
+ * matching the constraints described by Camera::id(). Parameters that are
+ * allocated dynamically at system startup, such as bus numbers that may be
+ * enumerated differently, are therefore not suitable to use in the ID.
+ *
+ * Pipeline handlers that use a CameraSensor may use the CameraSensor::id() to
+ * generate an ID that satisfies the criteria of a stable and unique camera ID.
*
* \return A shared pointer to the newly created camera object
*/
-std::shared_ptr<Camera> Camera::create(PipelineHandler *pipe,
- const std::string &name,
+std::shared_ptr<Camera> Camera::create(std::unique_ptr<Private> d,
+ const std::string &id,
const std::set<Stream *> &streams)
{
+ ASSERT(d);
+
struct Deleter : std::default_delete<Camera> {
void operator()(Camera *camera)
{
- delete camera;
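+ /* Deletion must take place in the thread the camera object lives in. */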
+ if (Thread::current() == camera->thread())
+ delete camera;
+ else
+ camera->deleteLater();
}
};
- Camera *camera = new Camera(pipe, name, streams);
+ Camera *camera = new Camera(std::move(d), id, streams);
return std::shared_ptr<Camera>(camera, Deleter());
}
/**
- * \brief Retrieve the name of the camera
+ * \brief Retrieve the ID of the camera
+ *
+ * The camera ID is a free-form string that identifies a camera in the system.
+ * IDs are guaranteed to be unique and stable: the same camera, when connected
+ * to the system in the same way (e.g. in the same USB port), will have the same
+ * ID across both unplug/replug and system reboots.
+ *
+ * Applications may store the camera ID and use it later to acquire the same
+ * camera. They shall treat the ID as an opaque identifier, without interpreting
+ * its value.
+ *
+ * Camera IDs may change when the system hardware or firmware is modified, for
+ * instance when replacing a PCI USB controller or moving it to another PCI
+ * slot, or updating the ACPI tables or Device Tree.
+ *
* \context This function is \threadsafe.
- * \return Name of the camera device
+ *
+ * \return ID of the camera device
*/
-const std::string &Camera::name() const
+const std::string &Camera::id() const
{
- return p_->name_;
+ return _d()->id_;
}
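
Because IDs are stable and opaque, an application can persist one and use it later to find the same camera again. A minimal sketch, assuming a started CameraManager; rememberCamera() and reacquire() are hypothetical helpers:

    #include <memory>
    #include <string>

    #include <libcamera/libcamera.h>

    /* Store the ID as an opaque token, without interpreting its value. */
    static std::string savedId;

    void rememberCamera(const std::shared_ptr<libcamera::Camera> &camera)
    {
            savedId = camera->id();
    }

    std::shared_ptr<libcamera::Camera> reacquire(libcamera::CameraManager &cm)
    {
            /* Returns nullptr if the camera is no longer present. */
            return cm.get(savedId);
    }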
/**
@@ -506,10 +914,13 @@ const std::string &Camera::name() const
* application API calls by returning errors immediately.
*/
-Camera::Camera(PipelineHandler *pipe, const std::string &name,
+Camera::Camera(std::unique_ptr<Private> d, const std::string &id,
const std::set<Stream *> &streams)
- : p_(new Private(pipe, name, streams))
+ : Extensible(std::move(d))
{
+ _d()->id_ = id;
+ _d()->streams_ = streams;
+ _d()->validator_ = std::make_unique<CameraControlValidator>(this);
}
Camera::~Camera()
@@ -519,7 +930,7 @@ Camera::~Camera()
/**
* \brief Notify camera disconnection
*
- * This method is used to notify the camera instance that the underlying
+ * This function is used to notify the camera instance that the underlying
* hardware has been unplugged. In response to the disconnection the camera
* instance notifies the application by emitting the #disconnected signal, and
* ensures that all new calls to the application-facing Camera API return an
@@ -530,28 +941,30 @@ Camera::~Camera()
*/
void Camera::disconnect()
{
- LOG(Camera, Debug) << "Disconnecting camera " << name();
+ LOG(Camera, Debug) << "Disconnecting camera " << id();
- p_->disconnect();
- disconnected.emit(this);
+ _d()->disconnect();
+ disconnected.emit();
}
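
An application typically reacts to the disconnected signal by dropping its reference to the camera. A minimal sketch; Viewer is a hypothetical application class, and note that the signal no longer carries the Camera pointer:

    #include <iostream>

    #include <libcamera/libcamera.h>

    class Viewer
    {
    public:
            void watch(libcamera::Camera *camera)
            {
                    camera->disconnected.connect(this, &Viewer::onDisconnected);
            }

    private:
            void onDisconnected()
            {
                    /* Further Camera API calls will fail with -ENODEV. */
                    std::cout << "camera disconnected" << std::endl;
            }
    };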
int Camera::exportFrameBuffers(Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured);
if (ret < 0)
return ret;
if (streams().find(stream) == streams().end())
return -EINVAL;
- if (p_->activeStreams_.find(stream) == p_->activeStreams_.end())
+ if (d->activeStreams_.find(stream) == d->activeStreams_.end())
return -EINVAL;
- return p_->pipe_->invokeMethod(&PipelineHandler::exportFrameBuffers,
- ConnectionTypeBlocking, this, stream,
- buffers);
+ return d->pipe_->invokeMethod(&PipelineHandler::exportFrameBuffers,
+ ConnectionTypeBlocking, this, stream,
+ buffers);
}
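
A sketch of exporting buffers for every stream of a validated configuration; exportAllBuffers() is a hypothetical helper, and the camera is assumed to be acquired and configured:

    #include <map>
    #include <memory>
    #include <vector>

    #include <libcamera/libcamera.h>

    using BufferMap = std::map<libcamera::Stream *,
                               std::vector<std::unique_ptr<libcamera::FrameBuffer>>>;

    int exportAllBuffers(libcamera::Camera *camera,
                         libcamera::CameraConfiguration *config, BufferMap *out)
    {
            for (libcamera::StreamConfiguration &cfg : *config) {
                    libcamera::Stream *stream = cfg.stream();

                    /* Fails with -EINVAL if the stream isn't active. */
                    int ret = camera->exportFrameBuffers(stream, &(*out)[stream]);
                    if (ret < 0)
                            return ret;
            }

            return 0;
    }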
/**
@@ -562,7 +975,7 @@ int Camera::exportFrameBuffers(Stream *stream,
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
- * Acquiring a camera will limit usage of any other camera(s) provided by the
+ * Acquiring a camera may limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
@@ -580,21 +993,24 @@ int Camera::exportFrameBuffers(Stream *stream,
*/
int Camera::acquire()
{
+ Private *const d = _d();
+
/*
-	 * No manual locking is required as PipelineHandler::lock() is
+	 * No manual locking is required as PipelineHandler::acquire() is
* thread-safe.
*/
- int ret = p_->isAccessAllowed(Private::CameraAvailable);
+ int ret = d->isAccessAllowed(Private::CameraAvailable);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- if (!p_->pipe_->lock()) {
+ if (!d->pipe_->invokeMethod(&PipelineHandler::acquire,
+ ConnectionTypeBlocking, this)) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
}
- p_->setState(Private::CameraAcquired);
+ d->setState(Private::CameraAcquired);
return 0;
}
@@ -615,14 +1031,18 @@ int Camera::acquire()
*/
int Camera::release()
{
- int ret = p_->isAccessAllowed(Private::CameraAvailable,
- Private::CameraConfigured, true);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAvailable,
+ Private::CameraConfigured, true);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- p_->pipe_->unlock();
+ if (d->isAcquired())
+ d->pipe_->invokeMethod(&PipelineHandler::release,
+ ConnectionTypeBlocking, this);
- p_->setState(Private::CameraAvailable);
+ d->setState(Private::CameraAvailable);
return 0;
}
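
Since acquire() and release() must be balanced, a small RAII guard makes the pairing hard to get wrong. A sketch; CameraLock is a hypothetical helper, not part of the libcamera API:

    #include <memory>
    #include <utility>

    #include <libcamera/libcamera.h>

    class CameraLock
    {
    public:
            CameraLock(std::shared_ptr<libcamera::Camera> camera)
                    : camera_(std::move(camera))
            {
                    /* acquire() returns -EBUSY if the camera is taken. */
                    acquired_ = camera_->acquire() == 0;
            }

            ~CameraLock()
            {
                    if (acquired_)
                            camera_->release();
            }

            bool acquired() const { return acquired_; }

    private:
            std::shared_ptr<libcamera::Camera> camera_;
            bool acquired_;
    };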
@@ -637,9 +1057,9 @@ int Camera::release()
*
* \return A ControlInfoMap listing the controls supported by the camera
*/
-const ControlInfoMap &Camera::controls()
+const ControlInfoMap &Camera::controls() const
{
- return p_->pipe_->controls(this);
+ return _d()->controlInfo_;
}
/**
@@ -650,9 +1070,9 @@ const ControlInfoMap &Camera::controls()
*
* \return A ControlList of properties supported by the camera
*/
-const ControlList &Camera::properties()
+const ControlList &Camera::properties() const
{
- return p_->pipe_->properties(this);
+ return _d()->properties_;
}
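
Both maps are easy to introspect. A minimal sketch that prints every control the camera advertises together with its value range:

    #include <iostream>

    #include <libcamera/libcamera.h>

    void listControls(const libcamera::Camera *camera)
    {
            for (const auto &[id, info] : camera->controls())
                    std::cout << id->name() << ": " << info.toString() << std::endl;
    }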
/**
@@ -664,11 +1084,11 @@ const ControlList &Camera::properties()
*
* \context This function is \threadsafe.
*
- * \return An array of all the camera's streams.
+ * \return An array of all the camera's streams
*/
const std::set<Stream *> &Camera::streams() const
{
- return p_->streams_;
+ return _d()->streams_;
}
/**
@@ -684,20 +1104,22 @@ const std::set<Stream *> &Camera::streams() const
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
- * null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * null pointer otherwise.
*/
-std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(Span<const StreamRole> roles)
{
- int ret = p_->isAccessAllowed(Private::CameraAvailable,
- Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAvailable,
+ Private::CameraRunning);
if (ret < 0)
return nullptr;
if (roles.size() > streams().size())
return nullptr;
- CameraConfiguration *config = p_->pipe_->generateConfiguration(this, roles);
+ std::unique_ptr<CameraConfiguration> config =
+ d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
@@ -714,10 +1136,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
LOG(Camera, Debug) << msg.str();
- return std::unique_ptr<CameraConfiguration>(config);
+ return config;
}
/**
+ * \fn std::unique_ptr<CameraConfiguration> \
+ * Camera::generateConfiguration(std::initializer_list<StreamRole> roles)
+ * \overload
+ */
+
+/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
@@ -727,7 +1155,7 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
* by populating \a config.
*
* The configuration is created by generateConfiguration(), and adjusted by the
- * caller with CameraConfiguration::validate(). This method only accepts fully
+ * caller with CameraConfiguration::validate(). This function only accepts fully
* valid configurations and returns an error if \a config is not valid.
*
* Exclusive access to the camera shall be ensured by a call to acquire() prior
@@ -748,11 +1176,16 @@ std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamR
*/
int Camera::configure(CameraConfiguration *config)
{
- int ret = p_->isAccessAllowed(Private::CameraAcquired,
- Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraAcquired,
+ Private::CameraConfigured);
if (ret < 0)
return ret;
+ for (auto &cfg : *config)
+ cfg.setStream(nullptr);
+
if (config->validate() != CameraConfiguration::Valid) {
LOG(Camera, Error)
<< "Can't configure camera with invalid configuration";
@@ -763,29 +1196,31 @@ int Camera::configure(CameraConfiguration *config)
for (unsigned int index = 0; index < config->size(); ++index) {
StreamConfiguration &cfg = config->at(index);
- cfg.setStream(nullptr);
msg << " (" << index << ") " << cfg.toString();
}
LOG(Camera, Info) << msg.str();
- ret = p_->pipe_->invokeMethod(&PipelineHandler::configure,
- ConnectionTypeBlocking, this, config);
+ ret = d->pipe_->invokeMethod(&PipelineHandler::configure,
+ ConnectionTypeBlocking, this, config);
if (ret)
return ret;
- p_->activeStreams_.clear();
+ d->activeStreams_.clear();
for (const StreamConfiguration &cfg : *config) {
Stream *stream = cfg.stream();
- if (!stream)
+ if (!stream) {
LOG(Camera, Fatal)
<< "Pipeline handler failed to update stream configuration";
+ d->activeStreams_.clear();
+ return -EINVAL;
+ }
stream->configuration_ = cfg;
- p_->activeStreams_.insert(stream);
+ d->activeStreams_.insert(stream);
}
- p_->setState(Private::CameraConfigured);
+ d->setState(Private::CameraConfigured);
return 0;
}
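
Putting the pieces together, a minimal configuration sequence for a single viewfinder stream, assuming the camera has already been acquired; configureViewfinder() is a hypothetical helper:

    #include <cerrno>
    #include <memory>

    #include <libcamera/libcamera.h>

    int configureViewfinder(libcamera::Camera *camera)
    {
            std::unique_ptr<libcamera::CameraConfiguration> config =
                    camera->generateConfiguration({ libcamera::StreamRole::Viewfinder });
            if (!config)
                    return -EINVAL;

            /* validate() adjusts invalid parameters rather than failing. */
            if (config->validate() == libcamera::CameraConfiguration::Invalid)
                    return -EINVAL;

            return camera->configure(config.get());
    }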
@@ -794,37 +1229,45 @@ int Camera::configure(CameraConfiguration *config)
* \brief Create a request object for the camera
* \param[in] cookie Opaque cookie for application use
*
- * This method creates an empty request for the application to fill with
+ * This function creates an empty request for the application to fill with
* buffers and parameters, and queue for capture.
*
* The \a cookie is stored in the request and is accessible through the
- * Request::cookie() method at any time. It is typically used by applications
+ * Request::cookie() function at any time. It is typically used by applications
* to map the request to an external resource in the request completion
* handler, and is completely opaque to libcamera.
*
* The ownership of the returned request is passed to the caller, which is
- * responsible for either queueing the request or deleting it.
+ * responsible for deleting it. The request may be deleted in the completion
+ * handler, or reused after resetting its state with Request::reuse().
*
* \context This function is \threadsafe. It may only be called when the camera
* is in the Configured or Running state as defined in \ref camera_operation.
*
* \return A pointer to the newly created request, or nullptr on error
*/
-Request *Camera::createRequest(uint64_t cookie)
+std::unique_ptr<Request> Camera::createRequest(uint64_t cookie)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured,
- Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured,
+ Private::CameraRunning);
if (ret < 0)
return nullptr;
- return new Request(this, cookie);
+ std::unique_ptr<Request> request = std::make_unique<Request>(this, cookie);
+
+ /* Associate the request with the pipeline handler. */
+ d->pipe_->registerRequest(request.get());
+
+ return request;
}
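
A sketch of creating a request and attaching a buffer; makeRequest() is a hypothetical helper, and the buffer is assumed to come from exportFrameBuffers() or a frame buffer allocator:

    #include <memory>

    #include <libcamera/libcamera.h>

    std::unique_ptr<libcamera::Request>
    makeRequest(libcamera::Camera *camera, libcamera::Stream *stream,
                libcamera::FrameBuffer *buffer)
    {
            /* The default cookie (0) is fine when requests aren't mapped
             * back to application resources. */
            std::unique_ptr<libcamera::Request> request = camera->createRequest();
            if (!request)
                    return nullptr;

            if (request->addBuffer(stream, buffer) < 0)
                    return nullptr;

            return request;
    }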
/**
* \brief Queue a request to the camera
* \param[in] request The request to queue to the camera
*
- * This method queues a \a request to the camera for capture.
+ * This function queues a \a request to the camera for capture.
*
* After allocating the request with createRequest(), the application shall
* fill it with at least one capture buffer before queuing it. Requests that
@@ -833,26 +1276,37 @@ Request *Camera::createRequest(uint64_t cookie)
* Once the request has been queued, the camera will notify its completion
* through the \ref requestCompleted signal.
*
- * Ownership of the request is transferred to the camera. It will be deleted
- * automatically after it completes.
- *
* \context This function is \threadsafe. It may only be called when the camera
* is in the Running state as defined in \ref camera_operation.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not running so requests can't be queued
+ * \retval -EXDEV The request does not belong to this camera
* \retval -EINVAL The request is invalid
* \retval -ENOMEM No buffer memory was available to handle the request
*/
int Camera::queueRequest(Request *request)
{
- int ret = p_->isAccessAllowed(Private::CameraRunning);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraRunning);
if (ret < 0)
return ret;
+ /* Requests can only be queued to the camera that created them. */
+ if (request->_d()->camera() != this) {
+ LOG(Camera, Error) << "Request was not created by this camera";
+ return -EXDEV;
+ }
+
+ if (request->status() != Request::RequestPending) {
+ LOG(Camera, Error) << request->toString() << " is not valid";
+ return -EINVAL;
+ }
+
/*
- * The camera state may chance until the end of the function. No locking
+ * The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
* this.
*/
@@ -863,24 +1317,28 @@ int Camera::queueRequest(Request *request)
}
for (auto const &it : request->buffers()) {
- Stream *stream = it.first;
+ const Stream *stream = it.first;
- if (p_->activeStreams_.find(stream) == p_->activeStreams_.end()) {
+ if (d->activeStreams_.find(stream) == d->activeStreams_.end()) {
LOG(Camera, Error) << "Invalid request";
return -EINVAL;
}
}
- return p_->pipe_->invokeMethod(&PipelineHandler::queueRequest,
- ConnectionTypeQueued, this, request);
+ d->pipe_->invokeMethod(&PipelineHandler::queueRequest,
+ ConnectionTypeQueued, request);
+
+ return 0;
}
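
With requests now owned by the application, a common pattern is to recycle each completed request from the requestCompleted handler. A sketch; CaptureLoop is a hypothetical class that reuses the attached buffers:

    #include <memory>
    #include <utility>

    #include <libcamera/libcamera.h>

    class CaptureLoop
    {
    public:
            CaptureLoop(std::shared_ptr<libcamera::Camera> camera)
                    : camera_(std::move(camera))
            {
                    camera_->requestCompleted.connect(this, &CaptureLoop::onCompleted);
            }

    private:
            void onCompleted(libcamera::Request *request)
            {
                    if (request->status() == libcamera::Request::RequestCancelled)
                            return;

                    /* ... consume the completed buffers here ... */

                    request->reuse(libcamera::Request::ReuseBuffers);
                    camera_->queueRequest(request);
            }

            std::shared_ptr<libcamera::Camera> camera_;
    };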
/**
* \brief Start capture from camera
+ * \param[in] controls Controls to be applied before starting the Camera
*
- * Start the camera capture session. Once the camera is started the application
- * can queue requests to the camera to process and return to the application
- * until the capture session is terminated with \a stop().
+ * Start the camera capture session, optionally providing a list of controls to
+ * apply before starting. Once the camera is started the application can queue
+ * requests to the camera to process and return to the application until the
+ * capture session is terminated with \a stop().
*
* \context This function may only be called when the camera is in the
* Configured state as defined in \ref camera_operation, and shall be
@@ -891,20 +1349,24 @@ int Camera::queueRequest(Request *request)
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not in a state where it can be started
*/
-int Camera::start()
+int Camera::start(const ControlList *controls)
{
- int ret = p_->isAccessAllowed(Private::CameraConfigured);
+ Private *const d = _d();
+
+ int ret = d->isAccessAllowed(Private::CameraConfigured);
if (ret < 0)
return ret;
LOG(Camera, Debug) << "Starting capture";
- ret = p_->pipe_->invokeMethod(&PipelineHandler::start,
- ConnectionTypeBlocking, this);
+ ASSERT(d->requestSequence_ == 0);
+
+ ret = d->pipe_->invokeMethod(&PipelineHandler::start,
+ ConnectionTypeBlocking, this, controls);
if (ret)
return ret;
- p_->setState(Private::CameraRunning);
+ d->setState(Private::CameraRunning);
return 0;
}
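
A sketch of starting the camera with initial controls applied at startup; controls::Brightness is just one example and is not guaranteed to be supported by every camera:

    #include <libcamera/libcamera.h>

    int startWithControls(libcamera::Camera *camera)
    {
            libcamera::ControlList controls(camera->controls());
            controls.set(libcamera::controls::Brightness, 0.5f);

            /* Passing nullptr instead starts with default control values. */
            return camera->start(&controls);
    }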
@@ -912,12 +1374,13 @@ int Camera::start()
/**
* \brief Stop capture from camera
*
- * This method stops capturing and processing requests immediately. All pending
- * requests are cancelled and complete synchronously in an error state.
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete synchronously in an error state.
*
- * \context This function may only be called when the camera is in the Running
- * state as defined in \ref camera_operation, and shall be synchronized by the
- * caller with other functions that affect the camera state.
+ * \context This function may be called in any camera state as defined in \ref
+ * camera_operation, and shall be synchronized by the caller with other
+ * functions that affect the camera state. If called when the camera isn't
+ * running, it is a no-op.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
@@ -925,16 +1388,29 @@ int Camera::start()
*/
int Camera::stop()
{
- int ret = p_->isAccessAllowed(Private::CameraRunning);
+ Private *const d = _d();
+
+ /*
+ * \todo Make calling stop() when not in 'Running' part of the state
+ * machine rather than take this shortcut
+ */
+ if (!d->isRunning())
+ return 0;
+
+ int ret = d->isAccessAllowed(Private::CameraRunning);
if (ret < 0)
return ret;
LOG(Camera, Debug) << "Stopping capture";
- p_->setState(Private::CameraConfigured);
+ d->setState(Private::CameraStopping);
+
+ d->pipe_->invokeMethod(&PipelineHandler::stop, ConnectionTypeBlocking,
+ this);
- p_->pipe_->invokeMethod(&PipelineHandler::stop, ConnectionTypeBlocking,
- this);
+ ASSERT(!d->pipe_->hasPendingRequests(this));
+
+ d->setState(Private::CameraConfigured);
return 0;
}
@@ -944,13 +1420,16 @@ int Camera::stop()
* \param[in] request The request that has completed
*
* This function is called by the pipeline handler to notify the camera that
- * the request has completed. It emits the requestCompleted signal and deletes
- * the request.
+ * the request has completed. It emits the requestCompleted signal.
*/
void Camera::requestComplete(Request *request)
{
+ /* Disconnected cameras are still able to complete requests. */
+ if (_d()->isAccessAllowed(Private::CameraStopping, Private::CameraRunning,
+ true))
+ LOG(Camera, Fatal) << "Trying to complete a request when stopped";
+
requestCompleted.emit(request);
- delete request;
}
} /* namespace libcamera */
diff --git a/src/libcamera/camera_controls.cpp b/src/libcamera/camera_controls.cpp
index 59dcede2..b672c7cf 100644
--- a/src/libcamera/camera_controls.cpp
+++ b/src/libcamera/camera_controls.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.cpp - Camera controls
+ * Camera controls
*/
-#include "camera_controls.h"
+#include "libcamera/internal/camera_controls.h"
#include <libcamera/camera.h>
#include <libcamera/controls.h>
@@ -36,7 +36,7 @@ CameraControlValidator::CameraControlValidator(Camera *camera)
const std::string &CameraControlValidator::name() const
{
- return camera_->name();
+ return camera_->id();
}
/**
diff --git a/src/libcamera/camera_lens.cpp b/src/libcamera/camera_lens.cpp
new file mode 100644
index 00000000..ccc2a6a6
--- /dev/null
+++ b/src/libcamera/camera_lens.cpp
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * A camera lens
+ */
+
+#include "libcamera/internal/camera_lens.h"
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+
+/**
+ * \file camera_lens.h
+ * \brief A camera lens controller
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraLens)
+
+/**
+ * \class CameraLens
+ * \brief A camera lens based on V4L2 subdevices
+ *
+ * The CameraLens class eases handling of lenses for pipeline handlers by
+ * hiding the details of the V4L2 subdevice kernel API and caching lens
+ * information.
+ */
+
+/**
+ * \brief Construct a CameraLens
+ * \param[in] entity The media entity backing the camera lens controller
+ *
+ * Once constructed the instance must be initialized with init().
+ */
+CameraLens::CameraLens(const MediaEntity *entity)
+ : entity_(entity)
+{
+}
+
+/**
+ * \brief Destroy a CameraLens
+ */
+CameraLens::~CameraLens() = default;
+
+/**
+ * \brief Initialize the camera lens instance
+ *
+ * This function performs the initialisation steps of the CameraLens that may
+ * fail. It shall be called once and only once after constructing the instance.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int CameraLens::init()
+{
+ if (entity_->function() != MEDIA_ENT_F_LENS) {
+ LOG(CameraLens, Error)
+ << "Invalid lens function "
+ << utils::hex(entity_->function());
+ return -EINVAL;
+ }
+
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret < 0)
+ return ret;
+
+ ret = validateLensDriver();
+ if (ret)
+ return ret;
+
+ model_ = subdev_->model();
+ return 0;
+}
+
+/**
+ * \brief Set the focal point of the lens to a specific position
+ * \param[in] position The focal point of the lens
+ *
+ * This function sets the focal point of the lens to \a position.
+ *
+ * \return 0 on success or -EINVAL otherwise
+ */
+int CameraLens::setFocusPosition(int32_t position)
+{
+ ControlList lensCtrls(subdev_->controls());
+ lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, static_cast<int32_t>(position));
+
+ if (subdev_->setControls(&lensCtrls))
+ return -EINVAL;
+
+ return 0;
+}
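
CameraLens is an internal helper for pipeline handlers, not application-facing API. A sketch of how a pipeline handler might apply an IPA-computed focus value; applyFocus() and focusSteps are hypothetical names:

    #include <cstdint>

    #include "libcamera/internal/camera_lens.h"

    void applyFocus(libcamera::CameraLens *lens, int32_t focusSteps)
    {
            /* focusSteps uses the units the lens driver exposes through
             * V4L2_CID_FOCUS_ABSOLUTE. */
            if (lens)
                    lens->setFocusPosition(focusSteps);
    }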
+
+int CameraLens::validateLensDriver()
+{
+ int ret = 0;
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_FOCUS_ABSOLUTE,
+ };
+
+ const ControlInfoMap &controls = subdev_->controls();
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraLens, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraLens, Error)
+ << "The lens kernel driver needs to be fixed";
+ LOG(CameraLens, Error)
+ << "See Documentation/lens_driver_requirements.rst in"
+ << " the libcamera sources for more information";
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * \fn CameraLens::model()
+ * \brief Retrieve the lens model name
+ *
+ * The lens model name is a free-form string that uniquely identifies the
+ * lens model.
+ *
+ * \return The lens model name
+ */
+
+std::string CameraLens::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+/**
+ * \brief Retrieve the V4L2 controls of the lens' subdev
+ *
+ * \return A map of the V4L2 controls supported by the lens' driver
+ */
+const ControlInfoMap &CameraLens::controls() const
+{
+ return subdev_->controls();
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp
index fddf7349..87e6717e 100644
--- a/src/libcamera/camera_manager.cpp
+++ b/src/libcamera/camera_manager.cpp
@@ -2,72 +2,45 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
-#include <libcamera/camera_manager.h>
+#include "libcamera/internal/camera_manager.h"
-#include <condition_variable>
-#include <map>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/event_dispatcher.h>
+#include <libcamera/property_ids.h>
-#include "device_enumerator.h"
-#include "event_dispatcher_poll.h"
-#include "log.h"
-#include "pipeline_handler.h"
-#include "thread.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
- * \file camera_manager.h
+ * \file libcamera/camera_manager.h
* \brief The camera manager
*/
+/**
+ * \internal
+ * \file libcamera/internal/camera_manager.h
+ * \brief Internal camera manager support
+ */
+
+/**
+ * \brief Top-level libcamera namespace
+ */
namespace libcamera {
LOG_DEFINE_CATEGORY(Camera)
-class CameraManager::Private : public Thread
-{
-public:
- Private(CameraManager *cm);
-
- int start();
- void addCamera(std::shared_ptr<Camera> &camera, dev_t devnum);
- void removeCamera(Camera *camera);
-
- /*
- * This mutex protects
- *
- * - initialized_ and status_ during initialization
- * - cameras_ and camerasByDevnum_ after initialization
- */
- Mutex mutex_;
- std::vector<std::shared_ptr<Camera>> cameras_;
- std::map<dev_t, std::weak_ptr<Camera>> camerasByDevnum_;
-
-protected:
- void run() override;
-
-private:
- int init();
- void cleanup();
-
- CameraManager *cm_;
-
- std::condition_variable cv_;
- bool initialized_;
- int status_;
-
- std::vector<std::shared_ptr<PipelineHandler>> pipes_;
- std::unique_ptr<DeviceEnumerator> enumerator_;
-};
-
-CameraManager::Private::Private(CameraManager *cm)
- : cm_(cm), initialized_(false)
+#ifndef __DOXYGEN_PUBLIC__
+CameraManager::Private::Private()
+ : initialized_(false)
{
+ ipaManager_ = std::make_unique<IPAManager>();
}
int CameraManager::Private::start()
@@ -79,7 +52,9 @@ int CameraManager::Private::start()
{
MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return initialized_; });
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return initialized_;
+ });
status = status_;
}
@@ -105,8 +80,10 @@ void CameraManager::Private::run()
mutex_.unlock();
cv_.notify_one();
- if (ret < 0)
+ if (ret < 0) {
+ cleanup();
return;
+ }
/* Now start processing events and messages. */
exec();
@@ -120,97 +97,173 @@ int CameraManager::Private::init()
if (!enumerator_ || enumerator_->enumerate())
return -ENODEV;
+ createPipelineHandlers();
+ enumerator_->devicesAdded.connect(this, &Private::createPipelineHandlers);
+
+ return 0;
+}
+
+void CameraManager::Private::createPipelineHandlers()
+{
/*
- * TODO: Try to read handlers and order from configuration
- * file and only fallback on all handlers if there is no
- * configuration file.
+ * \todo Try to read handlers and order from configuration
+ * file and only fallback on environment variable or all handlers, if
+ * there is no configuration file.
*/
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
-
- for (PipelineHandlerFactory *factory : factories) {
+ const char *pipesList =
+ utils::secure_getenv("LIBCAMERA_PIPELINES_MATCH_LIST");
+ if (pipesList) {
/*
- * Try each pipeline handler until it exhaust
- * all pipelines it can provide.
+ * When a list of preferred pipelines is defined, iterate
+ * through the ordered list to match the enumerated devices.
*/
- while (1) {
- std::shared_ptr<PipelineHandler> pipe = factory->create(cm_);
- if (!pipe->match(enumerator_.get()))
- break;
+ for (const auto &pipeName : utils::split(pipesList, ",")) {
+ const PipelineHandlerFactoryBase *factory;
+ factory = PipelineHandlerFactoryBase::getFactoryByName(pipeName);
+ if (!factory)
+ continue;
LOG(Camera, Debug)
- << "Pipeline handler \"" << factory->name()
- << "\" matched";
- pipes_.push_back(std::move(pipe));
+ << "Found listed pipeline handler '"
+ << pipeName << "'";
+ pipelineFactoryMatch(factory);
}
+
+ return;
}
- /* TODO: register hot-plug callback here */
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
- return 0;
+ /* Match all the registered pipeline handlers. */
+ for (const PipelineHandlerFactoryBase *factory : factories) {
+ LOG(Camera, Debug)
+ << "Found registered pipeline handler '"
+ << factory->name() << "'";
+ /*
+ * Try each pipeline handler until it exhausts
+ * all pipelines it can provide.
+ */
+ pipelineFactoryMatch(factory);
+ }
+}
+
+void CameraManager::Private::pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory)
+{
+ CameraManager *const o = LIBCAMERA_O_PTR();
+
+ /* Provide as many matching pipelines as possible. */
+ while (1) {
+ std::shared_ptr<PipelineHandler> pipe = factory->create(o);
+ if (!pipe->match(enumerator_.get()))
+ break;
+
+ LOG(Camera, Debug)
+ << "Pipeline handler \"" << factory->name()
+ << "\" matched";
+ }
}
void CameraManager::Private::cleanup()
{
- /* TODO: unregister hot-plug callback here */
+ enumerator_->devicesAdded.disconnect(this);
/*
- * Release all references to cameras and pipeline handlers to ensure
- * they all get destroyed before the device enumerator deletes the
- * media devices.
+ * Release all references to cameras to ensure they all get destroyed
+ * before the device enumerator deletes the media devices. Cameras are
+ * destroyed via the Object::deleteLater() API, hence we need to
+ * explicitly process deletion requests from the thread's message queue
+ * as the event loop is not running here.
*/
- pipes_.clear();
- cameras_.clear();
+ {
+ MutexLocker locker(mutex_);
+ cameras_.clear();
+ }
+
+ dispatchMessages(Message::Type::DeferredDelete);
enumerator_.reset(nullptr);
}
-void CameraManager::Private::addCamera(std::shared_ptr<Camera> &camera,
- dev_t devnum)
+/**
+ * \brief Add a camera to the camera manager
+ * \param[in] camera The camera to be added
+ *
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager. Registered cameras are immediately made
+ * available to the system.
+ *
+ * Device numbers from the SystemDevices property are used by the V4L2
+ * compatibility layer to map V4L2 device nodes to Camera instances.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::addCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
- for (std::shared_ptr<Camera> c : cameras_) {
- if (c->name() == camera->name()) {
- LOG(Camera, Warning)
- << "Registering camera with duplicate name '"
- << camera->name() << "'";
- break;
+ for (const std::shared_ptr<Camera> &c : cameras_) {
+ if (c->id() == camera->id()) {
+ LOG(Camera, Fatal)
+ << "Trying to register a camera with a duplicated ID '"
+ << camera->id() << "'";
+ return;
}
}
cameras_.push_back(std::move(camera));
- if (devnum) {
- unsigned int index = cameras_.size() - 1;
- camerasByDevnum_[devnum] = cameras_[index];
- }
+ unsigned int index = cameras_.size() - 1;
+
+ /* Report the addition to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraAdded.emit(cameras_[index]);
}
-void CameraManager::Private::removeCamera(Camera *camera)
+/**
+ * \brief Remove a camera from the camera manager
+ * \param[in] camera The camera to be removed
+ *
+ * This function is called by pipeline handlers to unregister cameras from the
+ * camera manager. Unregistered cameras won't be reported anymore by the
+ * cameras() and get() calls, but references may still exist in applications.
+ *
+ * \context This function shall be called from the CameraManager thread.
+ */
+void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera)
{
+ ASSERT(Thread::current() == this);
+
MutexLocker locker(mutex_);
auto iter = std::find_if(cameras_.begin(), cameras_.end(),
[camera](std::shared_ptr<Camera> &c) {
- return c.get() == camera;
+ return c.get() == camera.get();
});
if (iter == cameras_.end())
return;
LOG(Camera, Debug)
- << "Unregistering camera '" << camera->name() << "'";
-
- auto iter_d = std::find_if(camerasByDevnum_.begin(), camerasByDevnum_.end(),
- [camera](const std::pair<dev_t, std::weak_ptr<Camera>> &p) {
- return p.second.lock().get() == camera;
- });
- if (iter_d != camerasByDevnum_.end())
- camerasByDevnum_.erase(iter_d);
+ << "Unregistering camera '" << camera->id() << "'";
cameras_.erase(iter);
+
+ /* Report the removal to the public signal */
+ CameraManager *const o = LIBCAMERA_O_PTR();
+ o->cameraRemoved.emit(camera);
}
/**
+ * \fn CameraManager::Private::ipaManager() const
+ * \brief Retrieve the IPAManager
+ * \context This function is \threadsafe.
+ * \return The IPAManager for this CameraManager
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
* \class CameraManager
* \brief Provide access to and manage all cameras in the system
*
@@ -224,12 +277,8 @@ void CameraManager::Private::removeCamera(Camera *camera)
* a time. Attempting to create a second instance without first deleting the
* existing instance results in undefined behaviour.
*
- * The manager is initially stopped, and shall be configured before being
- * started. In particular a custom event dispatcher shall be installed if
- * needed with CameraManager::setEventDispatcher().
- *
- * Once the camera manager is configured, it shall be started with start().
- * This will enumerate all the cameras present in the system, which can then be
+ * The manager is initially stopped, and shall be started with start(). This
+ * will enumerate all the cameras present in the system, which can then be
* listed with list() and retrieved with get().
*
* Cameras are shared through std::shared_ptr<>, ensuring that a camera will
@@ -237,16 +286,12 @@ void CameraManager::Private::removeCamera(Camera *camera)
* action from the application. Once the application has released all the
* references it held to cameras, the camera manager can be stopped with
* stop().
- *
- * \todo Add interface to register a notification callback to the user to be
- * able to inform it new cameras have been hot-plugged or cameras have been
- * removed due to hot-unplug.
*/
CameraManager *CameraManager::self_ = nullptr;
CameraManager::CameraManager()
- : p_(new CameraManager::Private(this))
+ : Extensible(std::make_unique<CameraManager::Private>())
{
if (self_)
LOG(Camera, Fatal)
@@ -255,6 +300,11 @@ CameraManager::CameraManager()
self_ = this;
}
+/**
+ * \brief Destroy the camera manager
+ *
+ * Destroying the camera manager stops it if it is currently running.
+ */
CameraManager::~CameraManager()
{
stop();
@@ -276,7 +326,7 @@ int CameraManager::start()
{
LOG(Camera, Info) << "libcamera " << version_;
- int ret = p_->start();
+ int ret = _d()->start();
if (ret)
LOG(Camera, Error) << "Failed to start camera manager: "
<< strerror(-ret);
@@ -296,8 +346,9 @@ int CameraManager::start()
*/
void CameraManager::stop()
{
- p_->exit();
- p_->wait();
+ Private *const d = _d();
+ d->exit();
+ d->wait();
}
/**
@@ -313,14 +364,16 @@ void CameraManager::stop()
*/
std::vector<std::shared_ptr<Camera>> CameraManager::cameras() const
{
- MutexLocker locker(p_->mutex_);
+ const Private *const d = _d();
- return p_->cameras_;
+ MutexLocker locker(d->mutex_);
+
+ return d->cameras_;
}
/**
- * \brief Get a camera based on name
- * \param[in] name Name of camera to get
+ * \brief Get a camera based on ID
+ * \param[in] id ID of camera to get
*
* Before calling this function the caller is responsible for ensuring that
* the camera manager is running.
@@ -329,12 +382,14 @@ std::vector<std::shared_ptr<Camera>> CameraManager::cameras() const
*
* \return Shared pointer to Camera object or nullptr if camera not found
*/
-std::shared_ptr<Camera> CameraManager::get(const std::string &name)
+std::shared_ptr<Camera> CameraManager::get(const std::string &id)
{
- MutexLocker locker(p_->mutex_);
+ Private *const d = _d();
+
+ MutexLocker locker(d->mutex_);
- for (std::shared_ptr<Camera> camera : p_->cameras_) {
- if (camera->name() == name)
+ for (const std::shared_ptr<Camera> &camera : d->cameras_) {
+ if (camera->id() == id)
return camera;
}
@@ -342,69 +397,32 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &name)
}
/**
- * \brief Retrieve a camera based on device number
- * \param[in] devnum Device number of camera to get
+ * \var CameraManager::cameraAdded
+ * \brief Notify of a new camera added to the system
*
- * This method is meant solely for the use of the V4L2 compatibility
- * layer, to map device nodes to Camera instances. Applications shall
- * not use it and shall instead retrieve cameras by name.
+ * This signal is emitted when a new camera is detected and successfully handled
+ * by the camera manager. The notification occurs both for cameras detected
+ * when the manager is started with start() and for cameras later connected
+ * to the system. When the signal is emitted the new camera is already
+ * available from the list of cameras().
*
- * Before calling this function the caller is responsible for ensuring that
- * the camera manager is running.
- *
- * \context This function is \threadsafe.
- *
- * \return Shared pointer to Camera object, which is empty if the camera is
- * not found
+ * The signal is emitted from the CameraManager thread. Applications shall
+ * minimize the time spent in the signal handler and shall in particular not
+ * perform any blocking operation.
*/
-std::shared_ptr<Camera> CameraManager::get(dev_t devnum)
-{
- MutexLocker locker(p_->mutex_);
-
- auto iter = p_->camerasByDevnum_.find(devnum);
- if (iter == p_->camerasByDevnum_.end())
- return nullptr;
-
- return iter->second.lock();
-}
/**
- * \brief Add a camera to the camera manager
- * \param[in] camera The camera to be added
- * \param[in] devnum The device number to associate with \a camera
- *
- * This function is called by pipeline handlers to register the cameras they
- * handle with the camera manager. Registered cameras are immediately made
- * available to the system.
+ * \var CameraManager::cameraRemoved
+ * \brief Notify of a camera removed from the system
*
- * \a devnum is used by the V4L2 compatibility layer to map V4L2 device nodes
- * to Camera instances.
+ * This signal is emitted when a camera is removed from the system. When the
+ * signal is emitted the camera is not available from the list of cameras()
+ * anymore.
*
- * \context This function shall be called from the CameraManager thread.
+ * The signal is emitted from the CameraManager thread. Applications shall
+ * minimize the time spent in the signal handler and shall in particular not
+ * perform any blocking operation.
*/
-void CameraManager::addCamera(std::shared_ptr<Camera> camera, dev_t devnum)
-{
- ASSERT(Thread::current() == p_.get());
-
- p_->addCamera(camera, devnum);
-}
-
-/**
- * \brief Remove a camera from the camera manager
- * \param[in] camera The camera to be removed
- *
- * This function is called by pipeline handlers to unregister cameras from the
- * camera manager. Unregistered cameras won't be reported anymore by the
- * cameras() and get() calls, but references may still exist in applications.
- *
- * \context This function shall be called from the CameraManager thread.
- */
-void CameraManager::removeCamera(Camera *camera)
-{
- ASSERT(Thread::current() == p_.get());
-
- p_->removeCamera(camera);
-}
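
Together the two signals enable hotplug support. A sketch; HotplugMonitor is a hypothetical class, and both handlers return quickly because they run in the CameraManager thread:

    #include <iostream>
    #include <memory>

    #include <libcamera/libcamera.h>

    class HotplugMonitor
    {
    public:
            HotplugMonitor(libcamera::CameraManager *cm)
            {
                    cm->cameraAdded.connect(this, &HotplugMonitor::added);
                    cm->cameraRemoved.connect(this, &HotplugMonitor::removed);
            }

    private:
            void added(std::shared_ptr<libcamera::Camera> camera)
            {
                    std::cout << "added: " << camera->id() << std::endl;
            }

            void removed(std::shared_ptr<libcamera::Camera> camera)
            {
                    std::cout << "removed: " << camera->id() << std::endl;
            }
    };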
/**
* \fn const std::string &CameraManager::version()
@@ -413,38 +431,4 @@ void CameraManager::removeCamera(Camera *camera)
* \return The libcamera version string
*/
-/**
- * \brief Set the event dispatcher
- * \param[in] dispatcher Pointer to the event dispatcher
- *
- * libcamera requires an event dispatcher to integrate event notification and
- * timers with the application event loop. Applications that want to provide
- * their own event dispatcher shall call this function once and only once before
- * the camera manager is started with start(). If no event dispatcher is
- * provided, a default poll-based implementation will be used.
- *
- * The CameraManager takes ownership of the event dispatcher and will delete it
- * when the application terminates.
- */
-void CameraManager::setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher)
-{
- thread()->setEventDispatcher(std::move(dispatcher));
-}
-
-/**
- * \brief Retrieve the event dispatcher
- *
- * This function retrieves the event dispatcher set with setEventDispatcher().
- * If no dispatcher has been set, a default poll-based implementation is created
- * and returned, and no custom event dispatcher may be installed anymore.
- *
- * The returned event dispatcher is valid until the camera manager is destroyed.
- *
- * \return Pointer to the event dispatcher
- */
-EventDispatcher *CameraManager::eventDispatcher()
-{
- return thread()->eventDispatcher();
-}
-
} /* namespace libcamera */
diff --git a/src/libcamera/camera_sensor.cpp b/src/libcamera/camera_sensor.cpp
deleted file mode 100644
index 2219a430..00000000
--- a/src/libcamera/camera_sensor.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_sensor.cpp - A camera sensor
- */
-
-#include "camera_sensor.h"
-
-#include <algorithm>
-#include <float.h>
-#include <iomanip>
-#include <limits.h>
-#include <math.h>
-
-#include <libcamera/property_ids.h>
-
-#include "formats.h"
-#include "utils.h"
-#include "v4l2_subdevice.h"
-
-/**
- * \file camera_sensor.h
- * \brief A camera sensor
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(CameraSensor);
-
-/**
- * \class CameraSensor
- * \brief A camera sensor based on V4L2 subdevices
- *
- * The CameraSensor class eases handling of sensors for pipeline handlers by
- * hiding the details of the V4L2 subdevice kernel API and caching sensor
- * information.
- *
- * The implementation is currently limited to sensors that expose a single V4L2
- * subdevice with a single pad, and support the same frame sizes for all
- * supported media bus codes. It will be extended to support more complex
- * devices as the needs arise.
- */
-
-/**
- * \brief Construct a CameraSensor
- * \param[in] entity The media entity backing the camera sensor
- *
- * Once constructed the instance must be initialized with init().
- */
-CameraSensor::CameraSensor(const MediaEntity *entity)
- : entity_(entity), properties_(properties::properties)
-{
- subdev_ = new V4L2Subdevice(entity);
-}
-
-/**
- * \brief Destroy a CameraSensor
- */
-CameraSensor::~CameraSensor()
-{
- delete subdev_;
-}
-
-/**
- * \brief Initialize the camera sensor instance
- *
- * This method performs the initialisation steps of the CameraSensor that may
- * fail. It shall be called once and only once after constructing the instance.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::init()
-{
- int ret;
-
- if (entity_->pads().size() != 1) {
- LOG(CameraSensor, Error)
- << "Sensors with more than one pad are not supported";
- return -EINVAL;
- }
-
- if (entity_->function() != MEDIA_ENT_F_CAM_SENSOR) {
- LOG(CameraSensor, Error)
- << "Invalid sensor function "
- << utils::hex(entity_->function());
- return -EINVAL;
- }
-
- ret = subdev_->open();
- if (ret < 0)
- return ret;
-
- /* Retrieve and store the camera sensor properties. */
- const ControlInfoMap &controls = subdev_->controls();
- int32_t propertyValue;
-
- /* Camera Location: default is front location. */
- const auto &locationControl = controls.find(V4L2_CID_CAMERA_SENSOR_LOCATION);
- if (locationControl != controls.end()) {
- int32_t v4l2Location =
- locationControl->second.def().get<int32_t>();
-
- switch (v4l2Location) {
- default:
- LOG(CameraSensor, Warning)
- << "Unsupported camera location "
- << v4l2Location << ", setting to Front";
- /* Fall-through */
- case V4L2_LOCATION_FRONT:
- propertyValue = properties::CameraLocationFront;
- break;
- case V4L2_LOCATION_BACK:
- propertyValue = properties::CameraLocationBack;
- break;
- case V4L2_LOCATION_EXTERNAL:
- propertyValue = properties::CameraLocationExternal;
- break;
- }
- } else {
- propertyValue = properties::CameraLocationFront;
- }
- properties_.set(properties::Location, propertyValue);
-
- /* Camera Rotation: default is 0 degrees. */
- const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
- if (rotationControl != controls.end())
- propertyValue = rotationControl->second.def().get<int32_t>();
- else
- propertyValue = 0;
- properties_.set(properties::Rotation, propertyValue);
-
- /* Enumerate and cache media bus codes and sizes. */
- const ImageFormats formats = subdev_->formats(0);
- if (formats.isEmpty()) {
- LOG(CameraSensor, Error) << "No image format found";
- return -EINVAL;
- }
-
- mbusCodes_ = formats.formats();
-
- /*
- * Extract the supported sizes from the first format as we only support
- * sensors that offer the same frame sizes for all media bus codes.
- * Verify this assumption and reject the sensor if it isn't true.
- */
- const std::vector<SizeRange> &sizes = formats.sizes(mbusCodes_[0]);
- std::transform(sizes.begin(), sizes.end(), std::back_inserter(sizes_),
- [](const SizeRange &range) { return range.max; });
-
- for (unsigned int code : mbusCodes_) {
- if (formats.sizes(code) != sizes) {
- LOG(CameraSensor, Error)
- << "Frame sizes differ between media bus codes";
- return -EINVAL;
- }
- }
-
- /* Sort the media bus codes and sizes. */
- std::sort(mbusCodes_.begin(), mbusCodes_.end());
- std::sort(sizes_.begin(), sizes_.end());
-
- return 0;
-}
-
-/**
- * \fn CameraSensor::entity()
- * \brief Retrieve the sensor media entity
- * \return The sensor media entity
- */
-
-/**
- * \fn CameraSensor::mbusCodes()
- * \brief Retrieve the media bus codes supported by the camera sensor
- * \return The supported media bus codes sorted in increasing order
- */
-
-/**
- * \fn CameraSensor::sizes()
- * \brief Retrieve the frame sizes supported by the camera sensor
- * \return The supported frame sizes sorted in increasing order
- */
-
-/**
- * \brief Retrieve the camera sensor resolution
- * \return The camera sensor resolution in pixels
- */
-const Size &CameraSensor::resolution() const
-{
- /*
- * The sizes_ vector is sorted in ascending order, the resolution is
- * thus the last element of the vector.
- */
- return sizes_.back();
-}
-
-/**
- * \brief Retrieve the best sensor format for a desired output
- * \param[in] mbusCodes The list of acceptable media bus codes
- * \param[in] size The desired size
- *
- * Media bus codes are selected from \a mbusCodes, which lists all acceptable
- * codes in decreasing order of preference. This method selects the first code
- * from the list that is supported by the sensor. If none of the desired codes
- * is supported, it returns an error.
- *
- * \a size indicates the desired size at the output of the sensor. This method
- * selects the best size supported by the sensor according to the following
- * criteria.
- *
- * - The desired \a size shall fit in the sensor output size to avoid the need
- * to up-scale.
- * - The sensor output size shall match the desired aspect ratio to avoid the
- * need to crop the field of view.
- * - The sensor output size shall be as small as possible to lower the required
- * bandwidth.
- *
- * The use of this method is optional, as the above criteria may not match the
- * needs of all pipeline handlers. Pipeline handlers may implement custom
- * sensor format selection when needed.
- *
- * The returned sensor output format is guaranteed to be acceptable by the
- * setFormat() method without any modification.
- *
- * \return The best sensor output format matching the desired media bus codes
- * and size on success, or an empty format otherwise.
- */
-V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const
-{
- V4L2SubdeviceFormat format{};
-
- for (unsigned int code : mbusCodes) {
- if (std::any_of(mbusCodes_.begin(), mbusCodes_.end(),
- [code](unsigned int c) { return c == code; })) {
- format.mbus_code = code;
- break;
- }
- }
-
- if (!format.mbus_code) {
- LOG(CameraSensor, Debug) << "No supported format found";
- return format;
- }
-
- unsigned int desiredArea = size.width * size.height;
- unsigned int bestArea = UINT_MAX;
- float desiredRatio = static_cast<float>(size.width) / size.height;
- float bestRatio = FLT_MAX;
- const Size *bestSize = nullptr;
-
- for (const Size &sz : sizes_) {
- if (sz.width < size.width || sz.height < size.height)
- continue;
-
- float ratio = static_cast<float>(sz.width) / sz.height;
- float ratioDiff = fabsf(ratio - desiredRatio);
- unsigned int area = sz.width * sz.height;
- unsigned int areaDiff = area - desiredArea;
-
- if (ratioDiff > bestRatio)
- continue;
-
- if (ratioDiff < bestRatio || areaDiff < bestArea) {
- bestRatio = ratioDiff;
- bestArea = areaDiff;
- bestSize = &sz;
- }
- }
-
- if (!bestSize) {
- LOG(CameraSensor, Debug) << "No supported size found";
- return format;
- }
-
- format.size = *bestSize;
-
- return format;
-}
-
-/**
- * \brief Set the sensor output format
- * \param[in] format The desired sensor output format
- * \return 0 on success or a negative error code otherwise
- */
-int CameraSensor::setFormat(V4L2SubdeviceFormat *format)
-{
- return subdev_->setFormat(0, format);
-}
-
-/**
- * \brief Retrieve the supported V4L2 controls and their information
- * \return A map of the V4L2 controls supported by the sensor
- */
-const ControlInfoMap &CameraSensor::controls() const
-{
- return subdev_->controls();
-}
-
-/**
- * \brief Read controls from the sensor
- * \param[inout] ctrls The list of controls to read
- *
- * This method reads the value of all controls contained in \a ctrls, and stores
- * their values in the corresponding \a ctrls entry.
- *
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is a compound control, or if any
- * other error occurs during validation of the requested controls, no control is
- * read and this method returns -EINVAL.
- *
- * If an error occurs while reading the controls, the index of the first control
- * that couldn't be read is returned. The value of all controls below that index
- * are updated in \a ctrls, while the value of all the other controls are not
- * changed.
- *
- * \sa V4L2Device::getControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
- */
-int CameraSensor::getControls(ControlList *ctrls)
-{
- return subdev_->getControls(ctrls);
-}
-
-/**
- * \fn CameraSensor::properties()
- * \brief Retrieve the camera sensor properties
- * \return The list of camera sensor properties
- */
-
-/**
- * \brief Write controls to the sensor
- * \param[in] ctrls The list of controls to write
- *
- * This method writes the value of all controls contained in \a ctrls, and
- * stores the values actually applied to the device in the corresponding
- * \a ctrls entry.
- *
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, is a
- * compound control, or if any other error occurs during validation of
- * the requested controls, no control is written and this method returns
- * -EINVAL.
- *
- * If an error occurs while writing the controls, the index of the first
- * control that couldn't be written is returned. All controls below that index
- * are written and their values are updated in \a ctrls, while all other
- * controls are not written and their values are not changed.
- *
- * \sa V4L2Device::setControls()
- *
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
- */
-int CameraSensor::setControls(ControlList *ctrls)
-{
- return subdev_->setControls(ctrls);
-}
-
-std::string CameraSensor::logPrefix() const
-{
- return "'" + subdev_->entity()->name() + "'";
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/color_space.cpp b/src/libcamera/color_space.cpp
new file mode 100644
index 00000000..3d1c456c
--- /dev/null
+++ b/src/libcamera/color_space.cpp
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Raspberry Pi Ltd
+ *
+ * Color spaces
+ */
+
+#include <libcamera/color_space.h>
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
+
+/**
+ * \file color_space.h
+ * \brief Class and enums to represent color spaces
+ */
+
+namespace libcamera {
+
+/**
+ * \class ColorSpace
+ * \brief Class to describe a color space
+ *
+ * The ColorSpace class defines the color primaries, the transfer function,
+ * the Y'CbCr encoding associated with the color space, and the range
+ * (sometimes also referred to as the quantisation) of the color space.
+ *
+ * Certain combinations of these fields form well-known standard color
+ * spaces such as "sRGB" or "Rec709".
+ *
+ * In the strictest sense a "color space" formally only refers to the
+ * color primaries and white point. Here, however, the ColorSpace class
+ * adopts the common broader usage that includes the transfer function,
+ * Y'CbCr encoding method and quantisation.
+ *
+ * More information on color spaces is available in the V4L2 documentation, see
+ * in particular
+ *
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-srgb">sRGB</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-jpeg">JPEG</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-smpte-170m">SMPTE 170M</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-rec709">Rec.709</a>
+ * - <a href="https://www.kernel.org/doc/html/latest/userspace-api/media/v4l/colorspaces-details.html#col-bt2020">Rec.2020</a>
+ *
+ * Note that there is no guarantee of a 1:1 mapping between color space names
+ * and definitions in libcamera and V4L2. Two notable differences are
+ *
+ * - The sRGB libcamera color space is defined for RGB formats only with no
+ * Y'CbCr encoding and a full quantization range, while the V4L2 SRGB color
+ * space has a Y'CbCr encoding and a limited quantization range.
+ * - The sYCC libcamera color space is called JPEG in V4L2 due to historical
+ * reasons.
+ *
+ * \todo Define the color space fully in the libcamera API to avoid referencing
+ * V4L2
+ */
+
+/**
+ * \enum ColorSpace::Primaries
+ * \brief The color primaries for this color space
+ *
+ * \var ColorSpace::Primaries::Raw
+ * \brief These are raw colors directly from a sensor, the primaries are
+ * unspecified
+ *
+ * \var ColorSpace::Primaries::Smpte170m
+ * \brief SMPTE 170M color primaries
+ *
+ * \var ColorSpace::Primaries::Rec709
+ * \brief Rec.709 color primaries
+ *
+ * \var ColorSpace::Primaries::Rec2020
+ * \brief Rec.2020 color primaries
+ */
+
+/**
+ * \enum ColorSpace::TransferFunction
+ * \brief The transfer function used for this color space
+ *
+ * \var ColorSpace::TransferFunction::Linear
+ * \brief This color space uses a linear (identity) transfer function
+ *
+ * \var ColorSpace::TransferFunction::Srgb
+ * \brief sRGB transfer function
+ *
+ * \var ColorSpace::TransferFunction::Rec709
+ * \brief Rec.709 transfer function
+ */
+
+/**
+ * \enum ColorSpace::YcbcrEncoding
+ * \brief The Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::None
+ * \brief There is no defined Y'CbCr encoding (used for non-YUV formats)
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec601
+ * \brief Rec.601 Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec709
+ * \brief Rec.709 Y'CbCr encoding
+ *
+ * \var ColorSpace::YcbcrEncoding::Rec2020
+ * \brief Rec.2020 Y'CbCr encoding
+ */
+
+/**
+ * \enum ColorSpace::Range
+ * \brief The range (sometimes "quantisation") for this color space
+ *
+ * \var ColorSpace::Range::Full
+ * \brief This color space uses full range pixel values
+ *
+ * \var ColorSpace::Range::Limited
+ * \brief This color space uses limited range pixel values, being
+ * 16 to 235 for Y' and 16 to 240 for Cb and Cr (8 bits per sample)
+ * or 64 to 940 for Y' and 64 to 960 for Cb and Cr (10 bits)
+ */
+
+/**
+ * \fn ColorSpace::ColorSpace(Primaries p, TransferFunction t, YcbcrEncoding e, Range r)
+ * \brief Construct a ColorSpace from explicit values
+ * \param[in] p The color primaries
+ * \param[in] t The transfer function for the color space
+ * \param[in] e The Y'CbCr encoding
+ * \param[in] r The range of the pixel values in this color space
+ */
+
+/**
+ * \brief A constant representing a raw color space (from a sensor)
+ */
+const ColorSpace ColorSpace::Raw = {
+ Primaries::Raw,
+ TransferFunction::Linear,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sRGB color space (RGB formats only)
+ */
+const ColorSpace ColorSpace::Srgb = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::None,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the sYCC color space, typically used for
+ * encoding JPEG images
+ */
+const ColorSpace ColorSpace::Sycc = {
+ Primaries::Rec709,
+ TransferFunction::Srgb,
+ YcbcrEncoding::Rec601,
+ Range::Full
+};
+
+/**
+ * \brief A constant representing the SMPTE170M color space
+ */
+const ColorSpace ColorSpace::Smpte170m = {
+ Primaries::Smpte170m,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec601,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.709 color space
+ */
+const ColorSpace ColorSpace::Rec709 = {
+ Primaries::Rec709,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec709,
+ Range::Limited
+};
+
+/**
+ * \brief A constant representing the Rec.2020 color space
+ */
+const ColorSpace ColorSpace::Rec2020 = {
+ Primaries::Rec2020,
+ TransferFunction::Rec709,
+ YcbcrEncoding::Rec2020,
+ Range::Limited
+};
+
+/**
+ * \var ColorSpace::primaries
+ * \brief The color primaries of this color space
+ */
+
+/**
+ * \var ColorSpace::transferFunction
+ * \brief The transfer function used by this color space
+ */
+
+/**
+ * \var ColorSpace::ycbcrEncoding
+ * \brief The Y'CbCr encoding used by this color space
+ */
+
+/**
+ * \var ColorSpace::range
+ * \brief The pixel range used by this color space
+ */
+
+namespace {
+
+const std::array<std::pair<ColorSpace, const char *>, 6> colorSpaceNames = { {
+ { ColorSpace::Raw, "RAW" },
+ { ColorSpace::Srgb, "sRGB" },
+ { ColorSpace::Sycc, "sYCC" },
+ { ColorSpace::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Rec709, "Rec709" },
+ { ColorSpace::Rec2020, "Rec2020" },
+} };
+
+const std::map<ColorSpace::Primaries, std::string> primariesNames = {
+ { ColorSpace::Primaries::Raw, "RAW" },
+ { ColorSpace::Primaries::Smpte170m, "SMPTE170M" },
+ { ColorSpace::Primaries::Rec709, "Rec709" },
+ { ColorSpace::Primaries::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::TransferFunction, std::string> transferNames = {
+ { ColorSpace::TransferFunction::Linear, "Linear" },
+ { ColorSpace::TransferFunction::Srgb, "sRGB" },
+ { ColorSpace::TransferFunction::Rec709, "Rec709" },
+};
+
+const std::map<ColorSpace::YcbcrEncoding, std::string> encodingNames = {
+ { ColorSpace::YcbcrEncoding::None, "None" },
+ { ColorSpace::YcbcrEncoding::Rec601, "Rec601" },
+ { ColorSpace::YcbcrEncoding::Rec709, "Rec709" },
+ { ColorSpace::YcbcrEncoding::Rec2020, "Rec2020" },
+};
+
+const std::map<ColorSpace::Range, std::string> rangeNames = {
+ { ColorSpace::Range::Full, "Full" },
+ { ColorSpace::Range::Limited, "Limited" },
+};
+
+} /* namespace */
+
+/**
+ * \brief Assemble and return a readable string representation of the
+ * ColorSpace
+ *
+ * If the color space matches a standard ColorSpace (such as ColorSpace::Sycc)
+ * then the short name of the color space ("sYCC") is returned. Otherwise
+ * the four constituent parts of the ColorSpace are assembled into a longer
+ * string.
+ *
+ * \return A string describing the ColorSpace
+ */
+std::string ColorSpace::toString() const
+{
+ /* Print out a brief name only for standard color spaces. */
+
+ auto it = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [this](const auto &item) {
+ return *this == item.first;
+ });
+ if (it != colorSpaceNames.end())
+ return std::string(it->second);
+
+ /* Assemble a name made of the constituent fields. */
+
+ auto itPrimaries = primariesNames.find(primaries);
+ std::string primariesName =
+ itPrimaries == primariesNames.end() ? "Invalid" : itPrimaries->second;
+
+ auto itTransfer = transferNames.find(transferFunction);
+ std::string transferName =
+ itTransfer == transferNames.end() ? "Invalid" : itTransfer->second;
+
+ auto itEncoding = encodingNames.find(ycbcrEncoding);
+ std::string encodingName =
+ itEncoding == encodingNames.end() ? "Invalid" : itEncoding->second;
+
+ auto itRange = rangeNames.find(range);
+ std::string rangeName =
+ itRange == rangeNames.end() ? "Invalid" : itRange->second;
+
+ std::stringstream ss;
+ ss << primariesName << "/" << transferName << "/" << encodingName << "/" << rangeName;
+
+ return ss.str();
+}
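+
+// A usage sketch (illustrative, not part of this patch): standard color
+// spaces print as their short name, while non-standard combinations are
+// assembled from the four constituent fields.
+//
+//   ColorSpace cs = ColorSpace::Sycc;
+//   cs.toString();                     // "sYCC"
+//
+//   cs.range = ColorSpace::Range::Limited;
+//   cs.toString();                     // "Rec709/sRGB/Rec601/Limited"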
+
+/**
+ * \brief Assemble and return a readable string representation of an
+ * optional ColorSpace
+ *
+ * This is a convenience helper to easily obtain a string representation
+ * for a ColorSpace in parts of the libcamera API where it is stored in a
+ * std::optional<>. If the ColorSpace is set, this function returns
+ * \a colorSpace.toString(), otherwise it returns "Unset".
+ *
+ * \return A string describing the optional ColorSpace
+ */
+std::string ColorSpace::toString(const std::optional<ColorSpace> &colorSpace)
+{
+ if (!colorSpace)
+ return "Unset";
+
+ return colorSpace->toString();
+}
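+
+// A usage sketch (illustrative): convenient for logging fields such as
+// StreamConfiguration::colorSpace, which is a std::optional<ColorSpace>.
+//
+//   std::optional<ColorSpace> colorSpace;      // unset
+//   ColorSpace::toString(colorSpace);          // "Unset"
+//
+//   colorSpace = ColorSpace::Rec709;
+//   ColorSpace::toString(colorSpace);          // "Rec709"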
+
+/**
+ * \brief Construct a color space from a string
+ * \param[in] str The string
+ *
+ * The string \a str can contain the name of a well-known color space, or be
+ * made of the four color space components separated by a '/' character, ordered
+ * as
+ *
+ * \verbatim primaries '/' transferFunction '/' ycbcrEncoding '/' range \endverbatim
+ *
+ * Any failure to parse the string, either because it doesn't match the expected
+ * format, or because one of the names isn't recognized, will cause this
+ * function to return std::nullopt.
+ *
+ * \return The ColorSpace corresponding to the string, or std::nullopt if the
+ * string doesn't describe a known color space
+ */
+std::optional<ColorSpace> ColorSpace::fromString(const std::string &str)
+{
+ /* First search for a standard color space name match. */
+ auto itColorSpace = std::find_if(colorSpaceNames.begin(), colorSpaceNames.end(),
+ [&str](const auto &item) {
+ return str == item.second;
+ });
+ if (itColorSpace != colorSpaceNames.end())
+ return itColorSpace->first;
+
+ /*
+ * If not found, the string must contain the four color space
+ * components separated by a '/' character.
+ */
+ const auto &split = utils::split(str, "/");
+ std::vector<std::string> components{ split.begin(), split.end() };
+
+ if (components.size() != 4)
+ return std::nullopt;
+
+ ColorSpace colorSpace = ColorSpace::Raw;
+
+ /* Color primaries */
+ auto itPrimaries = std::find_if(primariesNames.begin(), primariesNames.end(),
+ [&components](const auto &item) {
+ return components[0] == item.second;
+ });
+ if (itPrimaries == primariesNames.end())
+ return std::nullopt;
+
+ colorSpace.primaries = itPrimaries->first;
+
+ /* Transfer function */
+ auto itTransfer = std::find_if(transferNames.begin(), transferNames.end(),
+ [&components](const auto &item) {
+ return components[1] == item.second;
+ });
+ if (itTransfer == transferNames.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->first;
+
+ /* YCbCr encoding */
+ auto itEncoding = std::find_if(encodingNames.begin(), encodingNames.end(),
+ [&components](const auto &item) {
+ return components[2] == item.second;
+ });
+ if (itEncoding == encodingNames.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itEncoding->first;
+
+ /* Quantization range */
+ auto itRange = std::find_if(rangeNames.begin(), rangeNames.end(),
+ [&components](const auto &item) {
+ return components[3] == item.second;
+ });
+ if (itRange == rangeNames.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->first;
+
+ return colorSpace;
+}
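+
+// A usage sketch (illustrative): well-known names and the four-part form
+// parse to the same values, and unknown strings yield std::nullopt.
+//
+//   ColorSpace::fromString("sYCC");                         // ColorSpace::Sycc
+//   ColorSpace::fromString("Rec709/Rec709/Rec709/Limited"); // ColorSpace::Rec709
+//   ColorSpace::fromString("Bt601");                        // std::nullopt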
+
+/**
+ * \brief Adjust the color space to match a pixel format
+ * \param[in] format The pixel format
+ *
+ * Not all combinations of pixel formats and color spaces make sense. For
+ * instance, nobody uses a limited quantization range with raw Bayer formats,
+ * and the YcbcrEncoding::None encoding isn't valid for YUV formats. This
+ * function adjusts the ColorSpace to make it compatible with the given \a
+ * format, by applying the following rules:
+ *
+ * - The color space for RAW formats must be Raw.
+ * - The Y'CbCr encoding and quantization range for RGB formats must be
+ * YcbcrEncoding::None and Range::Full respectively.
+ * - The Y'CbCr encoding for YUV formats must not be YcbcrEncoding::None. The
+ * best encoding is in that case guessed based on the primaries and transfer
+ * function.
+ *
+ * \return True if the color space has been adjusted, or false if it was
+ * already compatible with the format and hasn't been changed
+ */
+bool ColorSpace::adjust(PixelFormat format)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+ bool adjusted = false;
+
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ /* Raw formats must use the raw color space. */
+ if (*this != ColorSpace::Raw) {
+ *this = ColorSpace::Raw;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * RGB formats can't have a Y'CbCr encoding, and must use full
+ * range quantization.
+ */
+ if (ycbcrEncoding != YcbcrEncoding::None) {
+ ycbcrEncoding = YcbcrEncoding::None;
+ adjusted = true;
+ }
+
+ if (range != Range::Full) {
+ range = Range::Full;
+ adjusted = true;
+ }
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ if (ycbcrEncoding != YcbcrEncoding::None)
+ break;
+
+ /*
+ * YUV formats must have a Y'CbCr encoding. Infer the most
+ * probable option from the transfer function and primaries.
+ */
+ switch (transferFunction) {
+ case TransferFunction::Linear:
+ /*
+ * Linear YUV is not used in any standard color space;
+ * pick the widely supported Rec601 as the default.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+
+ case TransferFunction::Rec709:
+ switch (primaries) {
+ /* Raw should never happen. */
+ case Primaries::Raw:
+ case Primaries::Smpte170m:
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ case Primaries::Rec709:
+ ycbcrEncoding = YcbcrEncoding::Rec709;
+ break;
+ case Primaries::Rec2020:
+ ycbcrEncoding = YcbcrEncoding::Rec2020;
+ break;
+ }
+ break;
+
+ case TransferFunction::Srgb:
+ /*
+ * Only the sYCC color space uses the sRGB transfer
+ * function; the corresponding encoding is Rec601.
+ */
+ ycbcrEncoding = YcbcrEncoding::Rec601;
+ break;
+ }
+
+ adjusted = true;
+ break;
+ }
+
+ return adjusted;
+}
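+
+// A usage sketch (illustrative, assuming the pixel formats defined in
+// <libcamera/formats.h>): requesting the RGB-only sRGB color space with a
+// YUV format gets a Rec601 Y'CbCr encoding inferred from the sRGB transfer
+// function, yielding sYCC.
+//
+//   ColorSpace cs = ColorSpace::Srgb;
+//   bool adjusted = cs.adjust(formats::NV12);
+//   // adjusted == true, cs == ColorSpace::Sycc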
+
+/**
+ * \brief Compare color spaces for equality
+ * \return True if the two color spaces are identical, false otherwise
+ */
+bool operator==(const ColorSpace &lhs, const ColorSpace &rhs)
+{
+ return lhs.primaries == rhs.primaries &&
+ lhs.transferFunction == rhs.transferFunction &&
+ lhs.ycbcrEncoding == rhs.ycbcrEncoding &&
+ lhs.range == rhs.range;
+}
+
+/**
+ * \fn bool operator!=(const ColorSpace &lhs, const ColorSpace &rhs)
+ * \brief Compare color spaces for inequality
+ * \return True if the two color spaces are not identical, false otherwise
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in
index 99c511d0..65668d48 100644
--- a/src/libcamera/control_ids.cpp.in
+++ b/src/libcamera/control_ids.cpp.in
@@ -2,42 +2,122 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.cpp : Control ID list
+ * {{mode}} ID list
*
* This file is auto-generated. Do not edit.
*/
-#include <libcamera/control_ids.h>
+#include <libcamera/{{filename}}.h>
+#include <libcamera/controls.h>
/**
- * \file control_ids.h
- * \brief Camera control identifiers
+ * \file {{filename}}.h
+ * \brief Camera {{mode}} identifiers
*/
namespace libcamera {
/**
- * \brief Namespace for libcamera controls
+ * \brief Namespace for libcamera {{mode}}
*/
-namespace controls {
+namespace {{mode}} {
-${controls_doc}
+{%- for vendor, ctrls in controls -%}
+
+{%- if vendor != 'libcamera' %}
+/**
+ * \brief Namespace for {{vendor}} {{mode}}
+ */
+namespace {{vendor}} {
+{%- endif -%}
+
+{% for ctrl in ctrls %}
+
+{% if ctrl.is_enum -%}
+/**
+ * \enum {{ctrl.name}}Enum
+ * \brief Supported {{ctrl.name}} values
+{%- for enum in ctrl.enum_values %}
+ *
+ * \var {{enum.name}}
+ * \brief {{enum.description|format_description}}
+{%- endfor %}
+ */
+
+/**
+ * \var {{ctrl.name}}Values
+ * \brief List of all {{ctrl.name}} supported values
+ */
+
+/**
+ * \var {{ctrl.name}}NameValueMap
+ * \brief Map of all {{ctrl.name}} supported value names (as std::string) to their values
+ */
+
+{% endif -%}
+/**
+ * \var {{ctrl.name}}
+ * \brief {{ctrl.description|format_description}}
+ */
+{%- endfor %}
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
#ifndef __DOXYGEN__
/*
- * Keep the controls definitions hidden from doxygen as it incorrectly parses
+ * Keep the {{mode}} definitions hidden from doxygen as it incorrectly parses
* them as functions.
*/
-${controls_def}
-#endif
+{% for vendor, ctrls in controls -%}
+
+{% if vendor != 'libcamera' %}
+namespace {{vendor}} {
+{% endif %}
+
+{%- for ctrl in ctrls %}
+{% if ctrl.is_enum -%}
+extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values = {
+{%- for enum in ctrl.enum_values %}
+ static_cast<{{ctrl.type}}>({{enum.name}}),
+{%- endfor %}
+};
+extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap = {
+{%- for enum in ctrl.enum_values %}
+ { "{{enum.name}}", {{enum.name}} },
+{%- endfor %}
+};
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}, {{ctrl.name}}NameValueMap);
+{% else -%}
+extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}});
+{% endif -%}
+{%- endfor %}
+
+{% if vendor != 'libcamera' %}
+} /* namespace {{vendor}} */
+{% endif -%}
+
+{%- endfor %}
+#endif /* __DOXYGEN__ */
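+
+{#
+   Illustrative expansion sketch (editorial note, not part of the template):
+   for a boolean AeEnable control under the libcamera vendor, the definition
+   block above renders along the lines of
+
+     extern const Control<bool> AeEnable(AE_ENABLE, "AeEnable", "libcamera", <direction>);
+
+   where <direction> stands for the expansion of {{ctrl.direction}}.
+#}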
/**
- * \brief List of all supported libcamera controls
+ * \brief List of all supported libcamera {{mode}}
+{%- if mode == 'controls' %}
+ *
+ * Unless otherwise stated, all controls are bi-directional, i.e. they can be
+ * set through Request::controls() and returned through Request::metadata().
+{%- endif %}
*/
-extern const ControlIdMap controls {
-${controls_map}
+extern const ControlIdMap {{mode}} {
+{%- for vendor, ctrls in controls -%}
+{%- for ctrl in ctrls %}
+ { {{ctrl.namespace}}{{ctrl.name|snake_case|upper}}, &{{ctrl.namespace}}{{ctrl.name}} },
+{%- endfor -%}
+{%- endfor %}
};
-} /* namespace controls */
+} /* namespace {{mode}} */
} /* namespace libcamera */
diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids.yaml
deleted file mode 100644
index 4befec74..00000000
--- a/src/libcamera/control_ids.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1-or-later
-#
-# Copyright (C) 2019, Google Inc.
-#
-%YAML 1.2
----
-controls:
- - AeEnable:
- type: bool
- description: |
- Enable or disable the AE.
-
- \sa ManualExposure
-
- - AeLocked:
- type: bool
- description: |
- Report the lock status of a running AE algorithm.
-
- If the AE algorithm is locked the value shall be set to true, if it's
- converging it shall be set to false. If the AE algorithm is not
- running the control shall not be present in the metadata control list.
-
- \sa AeEnable
-
- - AwbEnable:
- type: bool
- description: |
- Enable or disable the AWB.
-
- \sa ManualGain
-
- - Brightness:
- type: int32_t
- description: Specify a fixed brightness parameter
-
- - Contrast:
- type: int32_t
- description: Specify a fixed contrast parameter
-
- - Saturation:
- type: int32_t
- description: Specify a fixed saturation parameter
-
- - ManualExposure:
- type: int32_t
- description: Specify a fixed exposure time in milli-seconds
-
- - ManualGain:
- type: int32_t
- description: Specify a fixed gain parameter
-
-...
diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml
new file mode 100644
index 00000000..1dfaee0c
--- /dev/null
+++ b/src/libcamera/control_ids_core.yaml
@@ -0,0 +1,1052 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned through Request::metadata().
+vendor: libcamera
+controls:
+ - AeEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AE.
+
+ \sa ExposureTime AnalogueGain
+
+ - AeLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AE algorithm.
+
+ If the AE algorithm is locked the value shall be set to true, if it's
+ converging it shall be set to false. If the AE algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AeEnable
+
+ # AeMeteringMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeMeteringMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a metering mode for the AE algorithm to use.
+
+ The metering modes determine which parts of the image are used to
+ determine the scene brightness. Metering modes may be platform specific
+ and not all metering modes may be supported.
+ enum:
+ - name: MeteringCentreWeighted
+ value: 0
+ description: Centre-weighted metering mode.
+ - name: MeteringSpot
+ value: 1
+ description: Spot metering mode.
+ - name: MeteringMatrix
+ value: 2
+ description: Matrix metering mode.
+ - name: MeteringCustom
+ value: 3
+ description: Custom metering mode.
+
+ # AeConstraintMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeConstraintMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify a constraint mode for the AE algorithm to use.
+
+ The constraint modes determine how the measured scene brightness is
+ adjusted to reach the desired target exposure. Constraint modes may be
+ platform specific, and not all constraint modes may be supported.
+ enum:
+ - name: ConstraintNormal
+ value: 0
+ description: |
+ Default constraint mode.
+
+ This mode aims to balance the exposure of different parts of the
+ image so as to reach a reasonable average level. However, highlights
+ in the image may appear over-exposed and lowlights may appear
+ under-exposed.
+ - name: ConstraintHighlight
+ value: 1
+ description: |
+ Highlight constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ over-exposing the brightest parts (highlights) of an image.
+ Other non-highlight parts of the image may appear under-exposed.
+ - name: ConstraintShadows
+ value: 2
+ description: |
+ Shadows constraint mode.
+
+ This mode adjusts the exposure levels in order to try and avoid
+ under-exposing the dark parts (shadows) of an image. Other normally
+ exposed parts of the image may appear over-exposed.
+ - name: ConstraintCustom
+ value: 3
+ description: |
+ Custom constraint mode.
+
+ # AeExposureMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AeExposureMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify an exposure mode for the AE algorithm to use.
+
+ The exposure modes specify how the desired total exposure is divided
+ between the exposure time and the sensor's analogue gain. They are
+ platform specific, and not all exposure modes may be supported.
+ enum:
+ - name: ExposureNormal
+ value: 0
+ description: Default exposure mode.
+ - name: ExposureShort
+ value: 1
+ description: Exposure mode allowing only short exposure times.
+ - name: ExposureLong
+ value: 2
+ description: Exposure mode allowing long exposure times.
+ - name: ExposureCustom
+ value: 3
+ description: Custom exposure mode.
+
+ - ExposureValue:
+ type: float
+ direction: inout
+ description: |
+ Specify an Exposure Value (EV) parameter.
+
+ The EV parameter will only be applied if the AE algorithm is currently
+ enabled.
+
+ By convention EV adjusts the exposure as log2. For example
+ EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment
+ of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
+
+ \sa AeEnable
+
+ - ExposureTime:
+ type: int32_t
+ direction: inout
+ description: |
+ Exposure time for the frame applied in the sensor device.
+
+ This value is specified in micro-seconds.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa AnalogueGain AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide whether
+ control over which features are automatically adjusted would be better
+ handled through a separate AE mode control.
+
+ - AnalogueGain:
+ type: float
+ direction: inout
+ description: |
+ Analogue gain value applied in the sensor device.
+
+ The value of the control specifies the gain multiplier applied to all
+ colour channels. This value cannot be lower than 1.0.
+
+ Setting this value means that it is now fixed and the AE algorithm may
+ not change it. Setting it back to zero returns it to the control of the
+ AE algorithm.
+
+ \sa ExposureTime AeEnable
+
+ \todo Document the interactions between AeEnable and setting a fixed
+ value for this control. Consider interactions with other AE features,
+ such as aperture and aperture/shutter priority mode, and decide whether
+ control over which features are automatically adjusted would be better
+ handled through a separate AE mode control.
+
+ - AeFlickerMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the flicker avoidance mode for AGC/AEC.
+
+ The flicker mode determines whether, and how, the AGC/AEC algorithm
+ attempts to hide flicker effects caused by the duty cycle of artificial
+ lighting.
+
+ Although implementation dependent, many algorithms for "flicker
+ avoidance" work by restricting this exposure time to integer multiples
+ of the cycle period, wherever possible.
+
+ Implementations may not support all of the flicker modes listed below.
+
+ By default the system will start in FlickerAuto mode if this is
+ supported, otherwise the flicker mode will be set to FlickerOff.
+
+ enum:
+ - name: FlickerOff
+ value: 0
+ description: |
+ No flicker avoidance is performed.
+ - name: FlickerManual
+ value: 1
+ description: |
+ Manual flicker avoidance.
+
+ Suppress flicker effects caused by lighting running with a period
+ specified by the AeFlickerPeriod control.
+ \sa AeFlickerPeriod
+ - name: FlickerAuto
+ value: 2
+ description: |
+ Automatic flicker period detection and avoidance.
+
+ The system will automatically determine the most likely value of
+ flicker period, and avoid flicker of this frequency. Once flicker
+ is being corrected, it is implementation dependent whether the
+ system is still able to detect a change in the flicker period.
+ \sa AeFlickerDetected
+
+ - AeFlickerPeriod:
+ type: int32_t
+ direction: inout
+ description: |
+ Manual flicker period in microseconds.
+
+ This value sets the current flicker period to avoid. It is used when
+ AeFlickerMode is set to FlickerManual.
+
+ To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
+ to 100Hz), or 8333 (120Hz) for 60Hz mains.
+
+ Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
+ set means that no flicker cancellation occurs (until the value of this
+ control is updated).
+
+ Switching to modes other than FlickerManual has no effect on the
+ value of the AeFlickerPeriod control.
+
+ \sa AeFlickerMode
+
+ - AeFlickerDetected:
+ type: int32_t
+ direction: out
+ description: |
+ Flicker period detected in microseconds.
+
+ The value reported here indicates the currently detected flicker
+ period, or zero if no flicker at all is detected.
+
+ When AeFlickerMode is set to FlickerAuto, there may be a period during
+ which the value reported here remains zero. Once a non-zero value is
+ reported, then this is the flicker period that has been detected and is
+ now being cancelled.
+
+ In the case of 50Hz mains flicker, the value would be 10000
+ (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
+
+ It is implementation dependent whether the system can continue to detect
+ flicker of different periods when another frequency is already being
+ cancelled.
+
+ \sa AeFlickerMode
+
+ - Brightness:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed brightness parameter.
+
+ Positive values (up to 1.0) produce brighter images; negative values
+ (up to -1.0) produce darker images and 0.0 leaves pixels unchanged.
+
+ - Contrast:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed contrast parameter.
+
+ Normal contrast is given by the value 1.0; larger values produce images
+ with more contrast.
+
+ - Lux:
+ type: float
+ direction: out
+ description: |
+ Report an estimate of the current illuminance level in lux.
+
+ The Lux control can only be returned in metadata.
+
+ - AwbEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the AWB.
+
+ When AWB is enabled, the algorithm estimates the colour temperature of
+ the scene and computes colour gains and the colour correction matrix
+ automatically. The computed colour temperature, gains and correction
+ matrix are reported in metadata. The corresponding controls are ignored
+ if set in a request.
+
+ When AWB is disabled, the colour temperature, gains and correction
+ matrix are not updated automatically and can be set manually in
+ requests.
+
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+ \sa ColourTemperature
+
+ # AwbMode needs further attention:
+ # - Auto-generate max enum value.
+ # - Better handling of custom types.
+ - AwbMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Specify the range of illuminants to use for the AWB algorithm.
+
+ The modes supported are platform specific, and not all modes may be
+ supported.
+ enum:
+ - name: AwbAuto
+ value: 0
+ description: Search over the whole colour temperature range.
+ - name: AwbIncandescent
+ value: 1
+ description: Incandescent AWB lamp mode.
+ - name: AwbTungsten
+ value: 2
+ description: Tungsten AWB lamp mode.
+ - name: AwbFluorescent
+ value: 3
+ description: Fluorescent AWB lamp mode.
+ - name: AwbIndoor
+ value: 4
+ description: Indoor AWB lighting mode.
+ - name: AwbDaylight
+ value: 5
+ description: Daylight AWB lighting mode.
+ - name: AwbCloudy
+ value: 6
+ description: Cloudy AWB lighting mode.
+ - name: AwbCustom
+ value: 7
+ description: Custom AWB mode.
+
+ - AwbLocked:
+ type: bool
+ direction: out
+ description: |
+ Report the lock status of a running AWB algorithm.
+
+ If the AWB algorithm is locked the value shall be set to true, if it's
+ converging it shall be set to false. If the AWB algorithm is not
+ running the control shall not be present in the metadata control list.
+
+ \sa AwbEnable
+
+ - ColourGains:
+ type: float
+ direction: inout
+ description: |
+ Pair of gain values for the Red and Blue colour channels, in that
+ order.
+
+ ColourGains can only be applied in a Request when the AWB is disabled.
+ If ColourGains is set in a request but ColourTemperature is not, the
+ implementation shall calculate and set the ColourTemperature based on
+ the ColourGains.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [2]
+
+ - ColourTemperature:
+ type: int32_t
+ direction: out
+ description: |
+ ColourTemperature of the frame, in kelvin.
+
+ ColourTemperature can only be applied in a Request when the AWB is
+ disabled.
+
+ If ColourTemperature is set in a request but ColourGains is not, the
+ implementation shall calculate and set the ColourGains based on the
+ given ColourTemperature. If ColourTemperature is set (either directly,
+ or indirectly by setting ColourGains) but ColourCorrectionMatrix is not,
+ the ColourCorrectionMatrix is updated based on the ColourTemperature.
+
+ The ColourTemperature used to process the frame is reported in metadata.
+
+ \sa AwbEnable
+ \sa ColourCorrectionMatrix
+ \sa ColourGains
+
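+ # A usage sketch (illustrative, using the C++ request API): with AWB
+ # disabled, the gains can be set manually on a per-request basis:
+ #
+ #   request->controls().set(controls::AwbEnable, false);
+ #   request->controls().set(controls::ColourGains, { 1.8f, 1.2f });
+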
+ - Saturation:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed saturation parameter.
+
+ Normal saturation is given by the value 1.0; larger values produce more
+ saturated colours; 0.0 produces a greyscale image.
+
+ - SensorBlackLevels:
+ type: int32_t
+ direction: out
+ description: |
+ Reports the sensor black levels used for processing a frame.
+
+ The values are in the order R, Gr, Gb, B. They are returned as numbers
+ out of a 16-bit pixel range (as if pixels ranged from 0 to 65535). The
+ SensorBlackLevels control can only be returned in metadata.
+ size: [4]
+
+ - Sharpness:
+ type: float
+ direction: inout
+ description: |
+ Intensity of the sharpening applied to the image.
+
+ A value of 0.0 means no sharpening. The minimum value means
+ minimal sharpening, and shall be 0.0 unless the camera can't
+ disable sharpening completely. The default value shall give a
+ "reasonable" level of sharpening, suitable for most use cases.
+ The maximum value may apply extremely high levels of sharpening,
+ higher than anyone could reasonably want. Negative values are
+ not allowed. Note also that sharpening is not applied to raw
+ streams.
+
+ - FocusFoM:
+ type: int32_t
+ direction: out
+ description: |
+ Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
+
+ A larger FocusFoM value indicates a more in-focus frame. This singular
+ value may be based on a combination of statistics gathered from
+ multiple focus regions within an image. The number of focus regions and
+ method of combination is platform dependent. In this respect, it is not
+ necessarily aimed at providing a way to implement a focus algorithm by
+ the application, rather an indication of how in-focus a frame is.
+
+ - ColourCorrectionMatrix:
+ type: float
+ direction: inout
+ description: |
+ The 3x3 matrix that converts camera RGB to sRGB within the imaging
+ pipeline.
+
+ This should describe the matrix that is used after pixels have been
+ white-balanced, but before any gamma transformation. The 3x3 matrix is
+ stored in conventional reading order in an array of 9 floating point
+ values.
+
+ ColourCorrectionMatrix can only be applied in a Request when the AWB is
+ disabled.
+
+ \sa AwbEnable
+ \sa ColourTemperature
+ size: [3,3]
+
+ - ScalerCrop:
+ type: Rectangle
+ direction: inout
+ description: |
+ Sets the image portion that will be scaled to form the whole of
+ the final output image.
+
+ The (x,y) location of this rectangle is relative to the
+ PixelArrayActiveAreas that is being used. The units remain native
+ sensor pixels, even if the sensor is being used in a binning or
+ skipping mode.
+
+ This control is only present when the pipeline supports scaling. Its
+ maximum valid value is given by the properties::ScalerCropMaximum
+ property, and the two can be used to implement digital zoom.
+
+ - DigitalGain:
+ type: float
+ direction: inout
+ description: |
+ Digital gain value applied during the processing steps performed
+ on the image as captured from the sensor.
+
+ The global digital gain factor is applied to all the colour channels
+ of the RAW image. Different pipeline models are free to
+ specify how the global gain factor applies to each separate
+ channel.
+
+ If an imaging pipeline applies digital gain in distinct
+ processing steps, this value indicates their total sum.
+ Pipelines are free to decide how to adjust each processing
+ step to respect the received gain factor and shall report
+ their total value in the request metadata.
+
+ - FrameDuration:
+ type: int64_t
+ direction: out
+ description: |
+ The instantaneous frame duration from start of frame exposure to start
+ of next exposure, expressed in microseconds.
+
+ This control is meant to be returned in metadata.
+
+ - FrameDurationLimits:
+ type: int64_t
+ direction: inout
+ description: |
+ The minimum and maximum (in that order) frame duration, expressed in
+ microseconds.
+
+ When provided by applications, the control specifies the sensor frame
+ duration interval the pipeline has to use. This limits the largest
+ exposure time the sensor can use. For example, if a maximum frame
+ duration of 33ms is requested (corresponding to 30 frames per second),
+ the sensor will not be able to raise the exposure time above 33ms.
+ A fixed frame duration is achieved by setting the minimum and maximum
+ values to be the same. Setting both values to 0 reverts to using the
+ camera defaults.
+
+ The maximum frame duration provides the absolute limit to the exposure
+ time computed by the AE algorithm and it overrides any exposure mode
+ setting specified with controls::AeExposureMode. Similarly, when a
+ manual exposure time is set through controls::ExposureTime, it also
+ gets clipped to the limits set by this control. When reported in
+ metadata, the control expresses the minimum and maximum frame durations
+ used after being clipped to the sensor provided frame duration limits.
+
+ \sa AeExposureMode
+ \sa ExposureTime
+
+ \todo Define how to calculate the capture frame rate by
+ defining controls to report additional delays introduced by
+ the capture pipeline or post-processing stages (ie JPEG
+ conversion, frame scaling).
+
+ \todo Provide an explicit definition of default control values, for
+ this and all other controls.
+
+ size: [2]
+
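+ # A usage sketch (illustrative): fixing capture at 30 frames per second by
+ # setting both limits to the same 33333 us frame duration:
+ #
+ #   request->controls().set(controls::FrameDurationLimits,
+ #                           { INT64_C(33333), INT64_C(33333) });
+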
+ - SensorTemperature:
+ type: float
+ direction: out
+ description: |
+ Temperature measure from the camera sensor in Celsius.
+
+ This value is typically obtained by a thermal sensor present on-die or
+ in the camera module. The range of reported temperatures is device
+ dependent.
+
+ The SensorTemperature control will only be returned in metadata if a
+ thermal sensor is present.
+
+ - SensorTimestamp:
+ type: int64_t
+ direction: out
+ description: |
+ The time when the first row of the image sensor active array is exposed.
+
+ The timestamp, expressed in nanoseconds, represents a monotonically
+ increasing counter since the system boot time, as defined by the
+ Linux-specific CLOCK_BOOTTIME clock id.
+
+ The SensorTimestamp control can only be returned in metadata.
+
+ \todo Define how the sensor timestamp has to be used in the reprocessing
+ use case.
+
+ - AfMode:
+ type: int32_t
+ direction: inout
+ description: |
+ The mode of the AF (autofocus) algorithm.
+
+ An implementation may choose not to implement all the modes.
+
+ enum:
+ - name: AfModeManual
+ value: 0
+ description: |
+ The AF algorithm is in manual mode.
+
+ In this mode it will never perform any action nor move the lens of
+ its own accord, but an application can specify the desired lens
+ position using the LensPosition control. The AfState will always
+ report AfStateIdle.
+
+ If the camera is started in AfModeManual, it will move the focus
+ lens to the position specified by the LensPosition control.
+
+ This mode is the recommended default value for the AfMode control.
+ External cameras (as reported by the Location property set to
+ CameraLocationExternal) may use a different default value.
+ - name: AfModeAuto
+ value: 1
+ description: |
+ The AF algorithm is in auto mode.
+
+ In this mode the algorithm will never move the lens or change state
+ unless the AfTrigger control is used. The AfTrigger control can be
+ used to initiate a focus scan, the results of which will be
+ reported by AfState.
+
+ If the autofocus algorithm is moved from AfModeAuto to another mode
+ while a scan is in progress, the scan is cancelled immediately,
+ without waiting for the scan to finish.
+
+ When first entering this mode the AfState will report AfStateIdle.
+ When a trigger control is sent, AfState will report AfStateScanning
+ for a period before spontaneously changing to AfStateFocused or
+ AfStateFailed, depending on the outcome of the scan. It will remain
+ in this state until another scan is initiated by the AfTrigger
+ control. If a scan is cancelled (without changing to another mode),
+ AfState will return to AfStateIdle.
+ - name: AfModeContinuous
+ value: 2
+ description: |
+ The AF algorithm is in continuous mode.
+
+ In this mode the lens can re-start a scan spontaneously at any
+ moment, without any user intervention. The AfState still reports
+ whether the algorithm is currently scanning or not, though the
+ application has no ability to initiate or cancel scans, nor to move
+ the lens for itself.
+
+ However, applications can use the AfPause control to pause the AF
+ algorithm's continuous scanning. This allows video or still images to
+ be captured whilst guaranteeing that the focus is fixed.
+
+ When set to AfModeContinuous, the system will immediately initiate a
+ scan so AfState will report AfStateScanning, and will settle on one
+ of AfStateFocused or AfStateFailed, depending on the scan result.
+
+ - AfRange:
+ type: int32_t
+ direction: inout
+ description: |
+ The range of focus distances that is scanned.
+
+ An implementation may choose not to implement all the options here.
+ enum:
+ - name: AfRangeNormal
+ value: 0
+ description: |
+ A wide range of focus distances is scanned.
+
+ Scanned distances cover all the way from infinity down to close
+ distances, though depending on the implementation, possibly not
+ including the very closest macro positions.
+ - name: AfRangeMacro
+ value: 1
+ description: |
+ Only close distances are scanned.
+ - name: AfRangeFull
+ value: 2
+ description: |
+ The full range of focus distances is scanned.
+
+ This range is similar to AfRangeNormal but includes the very
+ closest macro positions.
+
+ - AfSpeed:
+ type: int32_t
+ direction: inout
+ description: |
+ Determine whether the AF is to move the lens as quickly as possible or
+ more steadily.
+
+ For example, during video recording it may be desirable not to move the
+ lens too abruptly, but when in a preview mode (waiting for a still
+ capture) it may be helpful to move the lens as quickly as is reasonably
+ possible.
+ enum:
+ - name: AfSpeedNormal
+ value: 0
+ description: Move the lens at its usual speed.
+ - name: AfSpeedFast
+ value: 1
+ description: Move the lens more quickly.
+
+ - AfMetering:
+ type: int32_t
+ direction: inout
+ description: |
+ The parts of the image used by the AF algorithm to measure focus.
+ enum:
+ - name: AfMeteringAuto
+ value: 0
+ description: |
+ Let the AF algorithm decide for itself where it will measure focus.
+ - name: AfMeteringWindows
+ value: 1
+ description: |
+ Use the rectangles defined by the AfWindows control to measure focus.
+
+ If no windows are specified the behaviour is platform dependent.
+
+ - AfWindows:
+ type: Rectangle
+ direction: inout
+ description: |
+ The focus windows used by the AF algorithm when AfMetering is set to
+ AfMeteringWindows.
+
+ The units used are pixels within the rectangle returned by the
+ ScalerCropMaximum property.
+
+ In order to be activated, a rectangle must be programmed with non-zero
+ width and height. Internally, these rectangles are intersected with the
+ ScalerCropMaximum rectangle. If the window becomes empty after this
+ operation, then the window is ignored. If all the windows end up being
+ ignored, then the behaviour is platform dependent.
+
+ On platforms that support the ScalerCrop control (for implementing
+ digital zoom, for example), no automatic recalculation or adjustment of
+ AF windows is performed internally if the ScalerCrop is changed. If any
+ window lies outside the output image after the scaler crop has been
+ applied, it is up to the application to recalculate them.
+
+ The details of how the windows are used are platform dependent. We note
+ that when there is more than one AF window, a typical implementation
+ might find the optimal focus position for each one and finally select
+ the window where the focal distance for the objects shown in that part
+ of the image is closest to the camera.
+
+ size: [n]
+
+ - AfTrigger:
+ type: int32_t
+ direction: in
+ description: |
+ Start an autofocus scan.
+
+ This control starts an autofocus scan when AfMode is set to AfModeAuto,
+ and is ignored if AfMode is set to AfModeManual or AfModeContinuous. It
+ can also be used to terminate a scan early.
+
+ enum:
+ - name: AfTriggerStart
+ value: 0
+ description: |
+ Start an AF scan.
+
+ Setting the control to AfTriggerStart is ignored if a scan is in
+ progress.
+ - name: AfTriggerCancel
+ value: 1
+ description: |
+ Cancel an AF scan.
+
+ This does not cause the lens to move anywhere else. Ignored if no
+ scan is in progress.
+
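+ # A usage sketch (illustrative): triggering a one-shot scan in auto mode,
+ # then reading the outcome from the completed request's metadata:
+ #
+ #   request->controls().set(controls::AfMode, controls::AfModeAuto);
+ #   request->controls().set(controls::AfTrigger, controls::AfTriggerStart);
+ #   ... once the request completes ...
+ #   std::optional<int32_t> state = request->metadata().get(controls::AfState);
+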
+ - AfPause:
+ type: int32_t
+ direction: in
+ description: |
+ Pause lens movements when in continuous autofocus mode.
+
+ This control has no effect except when in continuous autofocus mode
+ (AfModeContinuous). It can be used to pause any lens movements while
+ (for example) images are captured. The algorithm remains inactive
+ until it is instructed to resume.
+
+ enum:
+ - name: AfPauseImmediate
+ value: 0
+ description: |
+ Pause the continuous autofocus algorithm immediately.
+
+ The autofocus algorithm is paused whether or not any kind of scan
+ is underway. AfPauseState will subsequently report
+ AfPauseStatePaused. AfState may report any of AfStateScanning,
+ AfStateFocused or AfStateFailed, depending on the algorithm's state
+ when it received this control.
+ - name: AfPauseDeferred
+ value: 1
+ description: |
+ Pause the continuous autofocus algorithm at the end of the scan.
+
+ This is similar to AfPauseImmediate, and if the AfState is
+ currently reporting AfStateFocused or AfStateFailed it will remain
+ in that state and AfPauseState will report AfPauseStatePaused.
+
+ However, if the algorithm is scanning (AfStateScanning),
+ AfPauseState will report AfPauseStatePausing until the scan is
+ finished, at which point AfState will report one of AfStateFocused
+ or AfStateFailed, and AfPauseState will change to
+ AfPauseStatePaused.
+
+ - name: AfPauseResume
+ value: 2
+ description: |
+ Resume continuous autofocus operation.
+
+ The algorithm starts again from exactly where it left off, and
+ AfPauseState will report AfPauseStateRunning.
+
+ - LensPosition:
+ type: float
+ direction: inout
+ description: |
+ Set and report the focus lens position.
+
+ This control instructs the lens to move to a particular position and
+ also reports back the position of the lens for each frame.
+
+ The LensPosition control is ignored unless the AfMode is set to
+ AfModeManual, though the value is reported back unconditionally in all
+ modes.
+
+ This value, which is generally a non-integer, is the reciprocal of the
+ focal distance in metres, also known as dioptres. That is, to set a
+ focal distance D, the lens position LP is given by
+
+ \f$LP = \frac{1\mathrm{m}}{D}\f$
+
+ For example:
+
+ - 0 moves the lens to infinity.
+ - 0.5 moves the lens to focus on objects 2m away.
+ - 2 moves the lens to focus on objects 50cm away.
+ - And larger values will focus the lens closer.
+
+ The default value of the control should indicate a good general
+ position for the lens, often corresponding to the hyperfocal distance
+ (the closest position for which objects at infinity are still
+ acceptably sharp). The minimum will often be zero (meaning infinity),
+ and the maximum value defines the closest focus position.
+
+ \todo Define a property to report the Hyperfocal distance of calibrated
+ lenses.
+
+ - AfState:
+ type: int32_t
+ direction: out
+ description: |
+ The current state of the AF algorithm.
+
+ This control reports the current state of the AF algorithm in
+ conjunction with the reported AfMode value and (in continuous AF mode)
+ the AfPauseState value. The possible state changes are described below,
+ though we note the following state transitions that occur when the
+ AfMode is changed.
+
+ If the AfMode is set to AfModeManual, then the AfState will always
+ report AfStateIdle (even if the lens is subsequently moved). Changing
+ to the AfModeManual state does not initiate any lens movement.
+
+ If the AfMode is set to AfModeAuto then the AfState will report
+ AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent
+ together then AfState will omit AfStateIdle and move straight to
+ AfStateScanning (and start a scan).
+
+ If the AfMode is set to AfModeContinuous then the AfState will
+ initially report AfStateScanning.
+
+ enum:
+ - name: AfStateIdle
+ value: 0
+ description: |
+ The AF algorithm is in manual mode (AfModeManual) or in auto mode
+ (AfModeAuto) and a scan has not yet been triggered, or an
+ in-progress scan was cancelled.
+ - name: AfStateScanning
+ value: 1
+ description: |
+ The AF algorithm is in auto mode (AfModeAuto), and a scan has been
+ started using the AfTrigger control.
+
+ The scan can be cancelled by sending AfTriggerCancel at which point
+ the algorithm will either move back to AfStateIdle or, if the scan
+ actually completes before the cancel request is processed, to one
+ of AfStateFocused or AfStateFailed.
+
+ Alternatively the AF algorithm could be in continuous mode
+ (AfModeContinuous) at which point it may enter this state
+ spontaneously whenever it determines that a rescan is needed.
+ - name: AfStateFocused
+ value: 2
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm believes the image is now in focus.
+ - name: AfStateFailed
+ value: 3
+ description: |
+ The AF algorithm is in auto (AfModeAuto) or continuous
+ (AfModeContinuous) mode and a scan has completed with the result
+ that the algorithm did not find a good focus position.
+
+ - AfPauseState:
+ type: int32_t
+ direction: out
+ description: |
+ Report whether the autofocus is currently running, paused or pausing.
+
+ This control is only applicable in continuous (AfModeContinuous) mode,
+ and reports whether the algorithm is currently running, paused or
+ pausing (that is, will pause as soon as any in-progress scan
+ completes).
+
+ Any change to AfMode will cause AfPauseStateRunning to be reported.
+
+ enum:
+ - name: AfPauseStateRunning
+ value: 0
+ description: |
+ Continuous AF is running and the algorithm may restart a scan
+ spontaneously.
+ - name: AfPauseStatePausing
+ value: 1
+ description: |
+ Continuous AF has been sent an AfPauseDeferred control, and will
+ pause as soon as any in-progress scan completes.
+
+ When the scan completes, the AfPauseState control will report
+ AfPauseStatePaused. No new scans will start spontaneously until
+ the AfPauseResume control is sent.
+ - name: AfPauseStatePaused
+ value: 2
+ description: |
+ Continuous AF is paused.
+
+ No further state changes or lens movements will occur until the
+ AfPauseResume control is sent.
+
+ - HdrMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Set the mode to be used for High Dynamic Range (HDR) imaging.
+
+ HDR techniques typically include multiple exposure, image fusion and
+ tone mapping techniques to improve the dynamic range of the resulting
+ images.
+
+ When using an HDR mode, images are captured with different sets of AGC
+ settings called HDR channels. Channels indicate in particular the type
+ of exposure (short, medium or long) used to capture the raw image,
+ before fusion. Each HDR image is tagged with the corresponding channel
+ using the HdrChannel control.
+
+ \sa HdrChannel
+
+ enum:
+ - name: HdrModeOff
+ value: 0
+ description: |
+ HDR is disabled.
+
+ Metadata for this frame will not include the HdrChannel control.
+ - name: HdrModeMultiExposureUnmerged
+ value: 1
+ description: |
+ Multiple exposures will be generated in an alternating fashion.
+
+ The multiple exposures will not be merged together and will be
+ returned to the application as they are. Each image will be tagged
+ with the correct HDR channel, indicating what kind of exposure it
+ is. The tag should be the same as in the HdrModeMultiExposure case.
+
+ The expectation is that an application using this mode would merge
+ the frames to create HDR images for itself if it requires them.
+ - name: HdrModeMultiExposure
+ value: 2
+ description: |
+ Multiple exposures will be generated and merged to create HDR
+ images.
+
+ Each image will be tagged with the HDR channel (long, medium or
+ short) whose arrival caused this image to be output.
+
+ Systems that use two channels for HDR will return images tagged
+ alternately as the short and long channel. Systems that use three
+ channels for HDR will cycle through the short, medium and long
+ channel before repeating.
+ - name: HdrModeSingleExposure
+ value: 3
+ description: |
+ Multiple frames all at a single exposure will be used to create HDR
+ images.
+
+ These images should be reported as all corresponding to the HDR
+ short channel.
+ - name: HdrModeNight
+ value: 4
+ description: |
+ Multiple frames will be combined to produce "night mode" images.
+
+ It is up to the implementation exactly which HDR channels it uses,
+ and the images will all be tagged accordingly with the correct HDR
+ channel information.
+
+ - HdrChannel:
+ type: int32_t
+ direction: out
+ description: |
+ The HDR channel used to capture the frame.
+
+ This value is reported back to the application so that it can discover
+ whether this capture corresponds to the short or long exposure image
+ (or any other image used by the HDR procedure). An application can
+ monitor the HDR channel to discover when the differently exposed images
+ have arrived.
+
+ This metadata is only available when an HDR mode has been enabled.
+
+ \sa HdrMode
+
+ enum:
+ - name: HdrChannelNone
+ value: 0
+ description: |
+ This image does not correspond to any of the captures used to create
+ an HDR image.
+ - name: HdrChannelShort
+ value: 1
+ description: |
+ This is a short exposure image.
+ - name: HdrChannelMedium
+ value: 2
+ description: |
+ This is a medium exposure image.
+ - name: HdrChannelLong
+ value: 3
+ description: |
+ This is a long exposure image.
+
+ - Gamma:
+ type: float
+ direction: inout
+ description: |
+ Specify a fixed gamma value.
+
+ The default gamma value must be 2.2 which closely mimics sRGB gamma.
+ Note that this is camera gamma, so it is applied as 1.0/gamma.
+
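+ # Worked example (illustrative): with Gamma = 2.2, a linear value L is
+ # encoded as L^(1/2.2), so mid-grey L = 0.18 maps to approximately 0.46.
+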
+ - DebugMetadataEnable:
+ type: bool
+ direction: inout
+ description: |
+ Enable or disable the debug metadata.
+
+...
diff --git a/src/libcamera/control_ids_debug.yaml b/src/libcamera/control_ids_debug.yaml
new file mode 100644
index 00000000..79753271
--- /dev/null
+++ b/src/libcamera/control_ids_debug.yaml
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+%YAML 1.1
+---
+vendor: debug
+controls: []
diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml
new file mode 100644
index 00000000..87e4e02d
--- /dev/null
+++ b/src/libcamera/control_ids_draft.yaml
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+# Unless otherwise stated, all controls are bi-directional, i.e. they can be
+# set through Request::controls() and returned through Request::metadata().
+vendor: draft
+controls:
+ - AePrecaptureTrigger:
+ type: int32_t
+ direction: inout
+ description: |
+ Control for AE metering trigger. Currently identical to
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER.
+
+ Whether the camera device will trigger a precapture metering sequence
+ when it processes this request.
+ enum:
+ - name: AePrecaptureTriggerIdle
+ value: 0
+ description: The trigger is idle.
+ - name: AePrecaptureTriggerStart
+ value: 1
+ description: The pre-capture AE metering is started by the camera.
+ - name: AePrecaptureTriggerCancel
+ value: 2
+ description: |
+ The camera will cancel any active or completed metering sequence.
+ The AE algorithm is reset to its initial state.
+
+ - NoiseReductionMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the noise reduction algorithm mode. Currently
+ identical to ANDROID_NOISE_REDUCTION_MODE.
+
+ Mode of operation for the noise reduction algorithm.
+ enum:
+ - name: NoiseReductionModeOff
+ value: 0
+ description: No noise reduction is applied
+ - name: NoiseReductionModeFast
+ value: 1
+ description: |
+ Noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeHighQuality
+ value: 2
+ description: |
+ High quality noise reduction at the expense of frame rate.
+ - name: NoiseReductionModeMinimal
+ value: 3
+ description: |
+ Minimal noise reduction is applied without reducing the frame rate.
+ - name: NoiseReductionModeZSL
+ value: 4
+ description: |
+ Noise reduction is applied at different levels to different streams.
+
+ - ColorCorrectionAberrationMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the color correction aberration mode. Currently
+ identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+ Mode of operation for the chromatic aberration correction algorithm.
+ enum:
+ - name: ColorCorrectionAberrationOff
+ value: 0
+ description: No aberration correction is applied.
+ - name: ColorCorrectionAberrationFast
+ value: 1
+ description: Aberration correction will not slow down the frame rate.
+ - name: ColorCorrectionAberrationHighQuality
+ value: 2
+ description: |
+ High quality aberration correction which might reduce the frame
+ rate.
+
+ - AeState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AE algorithm state. Currently identical to
+ ANDROID_CONTROL_AE_STATE.
+
+ Current state of the AE algorithm.
+ enum:
+ - name: AeStateInactive
+ value: 0
+ description: The AE algorithm is inactive.
+ - name: AeStateSearching
+ value: 1
+ description: The AE algorithm has not converged yet.
+ - name: AeStateConverged
+ value: 2
+ description: The AE algorithm has converged.
+ - name: AeStateLocked
+ value: 3
+ description: The AE algorithm is locked.
+ - name: AeStateFlashRequired
+ value: 4
+ description: The AE algorithm would need a flash for good results.
+ - name: AeStatePrecapture
+ value: 5
+ description: |
+ The AE algorithm has started a pre-capture metering session.
+ \sa AePrecaptureTrigger
+
+ - AwbState:
+ type: int32_t
+ direction: out
+ description: |
+ Control to report the current AWB algorithm state. Currently identical
+ to ANDROID_CONTROL_AWB_STATE.
+
+ Current state of the AWB algorithm.
+ enum:
+ - name: AwbStateInactive
+ value: 0
+ description: The AWB algorithm is inactive.
+ - name: AwbStateSearching
+ value: 1
+ description: The AWB algorithm has not converged yet.
+ - name: AwbConverged
+ value: 2
+ description: The AWB algorithm has converged.
+ - name: AwbLocked
+ value: 3
+ description: The AWB algorithm is locked.
+
+ - SensorRollingShutterSkew:
+ type: int64_t
+ direction: out
+ description: |
+ Control to report the time between the start of exposure of the first
+ row and the start of exposure of the last row. Currently identical to
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW
+
+ - LensShadingMapMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to report if the lens shading map is available. Currently
+ identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+ enum:
+ - name: LensShadingMapModeOff
+ value: 0
+ description: No lens shading map mode is available.
+ - name: LensShadingMapModeOn
+ value: 1
+ description: The lens shading map mode is available.
+
+ - PipelineDepth:
+ type: int32_t
+ direction: out
+ description: |
+ Specifies the number of pipeline stages the frame went through from when
+ it was exposed to when the final completed result was available to the
+ framework. Always less than or equal to PipelineMaxDepth. Currently
+ identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+ The typical value for this control is 3 as a frame is first exposed,
+ captured and then processed in a single pass through the ISP. Any
+ additional processing step performed after the ISP pass (for example
+ face detection, additional format conversions, etc.) counts as an
+ additional pipeline stage.
+
+ - MaxLatency:
+ type: int32_t
+ direction: out
+ description: |
+ The maximum number of frames that can occur after a request (different
+ from the previous) has been submitted, and before the result's state
+ becomes synchronized. A value of -1 indicates unknown latency, and 0
+ indicates per-frame control. Currently identical to
+ ANDROID_SYNC_MAX_LATENCY.
+
+ - TestPatternMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the test pattern mode. Currently identical to
+ ANDROID_SENSOR_TEST_PATTERN_MODE.
+ enum:
+ - name: TestPatternModeOff
+ value: 0
+ description: |
+ No test pattern mode is used. The camera device returns frames from
+ the image sensor.
+ - name: TestPatternModeSolidColor
+ value: 1
+ description: |
+ Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+ color channel provided in test pattern data.
+ \todo Add control for test pattern data.
+ - name: TestPatternModeColorBars
+ value: 2
+ description: |
+ All pixel data is replaced with an 8-bar color pattern. The vertical
+ bars (left-to-right) are as follows: white, yellow, cyan, green,
+ magenta, red, blue and black. Each bar should take up 1/8 of the
+ sensor pixel array width. When this is not possible, the bar size
+ should be rounded down to the nearest integer and the pattern can
+ repeat on the right side. Each bar's height must always take up the
+ full sensor pixel array height.
+ - name: TestPatternModeColorBarsFadeToGray
+ value: 3
+ description: |
+ The test pattern is similar to TestPatternModeColorBars,
+ except that each bar should start at its specified color at the top
+ and fade to gray at the bottom. Furthermore each bar is further
+ subdivided into a left and right half. The left half should have a
+ smooth gradient, and the right half should have a quantized
+ gradient. In particular, the right half should consist of blocks of
+ the same color, each 1/16th of the active sensor pixel array width. The
+ least significant bits in the quantized gradient should be copied
+ from the most significant bits of the smooth gradient. The height of
+ each bar should always be a multiple of 128. When this is not the
+ case, the pattern should repeat at the bottom of the image.
+ - name: TestPatternModePn9
+ value: 4
+ description: |
+ All pixel data is replaced by a pseudo-random sequence generated
+ from a PN9 512-bit sequence (typically implemented in hardware with
+ a linear feedback shift register). The generator should be reset at
+ the beginning of each frame, and thus each subsequent raw frame with
+ this test pattern should be exactly the same as the last.
+ - name: TestPatternModeCustom1
+ value: 256
+ description: |
+ The first custom test pattern. All custom patterns that are
+ available only on this camera device are at least this numeric
+ value. All of the custom test patterns will be static (that is the
+ raw image must not vary from frame to frame).
+
+ - FaceDetectMode:
+ type: int32_t
+ direction: inout
+ description: |
+ Control to select the face detection mode used by the pipeline.
+
+ Currently identical to ANDROID_STATISTICS_FACE_DETECT_MODE.
+
+ \sa FaceDetectFaceRectangles
+ \sa FaceDetectFaceScores
+ \sa FaceDetectFaceLandmarks
+ \sa FaceDetectFaceIds
+
+ enum:
+ - name: FaceDetectModeOff
+ value: 0
+ description: |
+ Pipeline doesn't perform face detection and doesn't report any
+ control related to face detection.
+ - name: FaceDetectModeSimple
+ value: 1
+ description: |
+ Pipeline performs face detection and reports the
+ FaceDetectFaceRectangles and FaceDetectFaceScores controls for each
+ detected face. FaceDetectFaceLandmarks and FaceDetectFaceIds are
+ optional.
+ - name: FaceDetectModeFull
+ value: 2
+ description: |
+ Pipeline performs face detection and reports all the controls
+ related to face detection including FaceDetectFaceRectangles,
+ FaceDetectFaceScores, FaceDetectFaceLandmarks, and
+ FaceDetectFaceIds for each detected face.
+
+ - FaceDetectFaceRectangles:
+ type: Rectangle
+ direction: out
+ description: |
+ Boundary rectangles of the detected faces. The number of values is
+ the number of detected faces.
+
+ The FaceDetectFaceRectangles control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_RECTANGLES.
+ size: [n]
+
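As a usage sketch combining the FaceDetectMode control above with this metadata output (hypothetical application code; the handler names are placeholders):

#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

void queueWithFaceDetection(libcamera::Request *request)
{
        using namespace libcamera;

        request->controls().set(controls::draft::FaceDetectMode,
                                controls::draft::FaceDetectModeSimple);
}

void requestComplete(libcamera::Request *request)
{
        using namespace libcamera;

        /* One Rectangle per detected face, absent if none were reported. */
        const auto faces =
                request->metadata().get(controls::draft::FaceDetectFaceRectangles);
        if (faces)
                std::cout << faces->size() << " face(s) detected" << std::endl;
}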
+ - FaceDetectFaceScores:
+ type: uint8_t
+ direction: out
+ description: |
+ Confidence score of each of the detected faces. The range of score is
+ [0, 100]. The number of values should be the number of faces reported
+ in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceScores control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_SCORES.
+ size: [n]
+
+ - FaceDetectFaceLandmarks:
+ type: Point
+ direction: out
+ description: |
+ Array of human face landmark coordinates in format [..., left_eye_i,
+ right_eye_i, mouth_i, left_eye_i+1, ...], with i = index of face. The
+ number of values should be 3 * the number of faces reported in
+ FaceDetectFaceRectangles.
+
+ The FaceDetectFaceLandmarks control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_LANDMARKS.
+ size: [n]
+
+ - FaceDetectFaceIds:
+ type: int32_t
+ direction: out
+ description: |
+ Each detected face is given a unique ID that is valid for as long as the
+ face is visible to the camera device. A face that leaves the field of
+ view and later returns may be assigned a new ID. The number of values
+ should be the number of faces reported in FaceDetectFaceRectangles.
+
+ The FaceDetectFaceIds control can only be returned in metadata.
+
+ Currently identical to ANDROID_STATISTICS_FACE_IDS.
+ size: [n]
+
+...
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml
new file mode 100644
index 00000000..7524c5d2
--- /dev/null
+++ b/src/libcamera/control_ids_rpi.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Raspberry Pi (VC4 and PiSP) specific vendor controls
+vendor: rpi
+controls:
+ - StatsOutputEnable:
+ type: bool
+ direction: inout
+ description: |
+ Toggles the Raspberry Pi IPA to output the hardware generated statistics.
+
+ When this control is set to true, the IPA outputs a binary dump of the
+ hardware generated statistics through the Request metadata in the
+ Bcm2835StatsOutput control.
+
+ \sa Bcm2835StatsOutput
+
+ - Bcm2835StatsOutput:
+ type: uint8_t
+ size: [n]
+ direction: out
+ description: |
+ Span of the BCM2835 ISP generated statistics for the current frame.
+
+ This is sent in the Request metadata if StatsOutputEnable is set to
+ true. The statistics struct definition can be found in
+ include/linux/bcm2835-isp.h.
+
+ \sa StatsOutputEnable
+
+ - ScalerCrops:
+ type: Rectangle
+ size: [n]
+ direction: out
+ description: |
+ An array of rectangles, where each element has the same functionality
+ as the ScalerCrop control. This control allows the
+ Raspberry Pi pipeline handler to control individual scaler crops per
+ output stream.
+
+ The order of rectangles passed into the control must match the order of
+ streams configured by the application. The pipeline handler will only
+ configure crop rectangles up to the number of output streams configured.
+ All subsequent rectangles passed into this control are ignored by the
+ pipeline handler.
+
+ If both rpi::ScalerCrops and ScalerCrop controls are present in a
+ ControlList, the latter is discarded, and crops are obtained from this
+ control.
+
+ Note that using different crop rectangles for each output stream with
+ this control is only applicable on the Pi5/PiSP platform. This control
+ should also be considered temporary/draft and will be replaced with
+ official libcamera API support for per-stream controls in the future.
+
+ \sa ScalerCrop
+...
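For illustration, an application could request per-stream crops as follows. This is a sketch only: the rectangles are arbitrary, and one rectangle is supplied per configured output stream, in configuration order, as described above:

#include <libcamera/control_ids.h>
#include <libcamera/geometry.h>
#include <libcamera/request.h>

void setPerStreamCrops(libcamera::Request *request)
{
        using namespace libcamera;

        /* First rectangle applies to the first configured stream, etc. */
        request->controls().set(controls::rpi::ScalerCrops,
                                { Rectangle(0, 0, 1920, 1080),
                                  Rectangle(480, 270, 960, 540) });
}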
diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml
new file mode 100644
index 00000000..6752eb98
--- /dev/null
+++ b/src/libcamera/control_ranges.yaml
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+%YAML 1.1
+---
+# Specifies the control id ranges/offsets for core/draft libcamera and vendor
+# controls and properties.
+ranges:
+ # Core libcamera controls
+ libcamera: 0
+ # Draft designated libcamera controls
+ draft: 10000
+ # Raspberry Pi vendor controls
+ rpi: 20000
+ # Controls for debug metadata
+ debug: 30000
+ # Next range starts at 40000
+
+...
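The effect of these offsets can be sketched with a small helper that classifies a numerical id by the ranges above (illustration only, not part of the generated code):

/* Classify a numerical control id according to the ranges above. */
const char *controlRange(unsigned int id)
{
        if (id >= 40000)
                return "unassigned"; /* next range starts at 40000 */
        if (id >= 30000)
                return "debug";
        if (id >= 20000)
                return "rpi";
        if (id >= 10000)
                return "draft";
        return "libcamera";
}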
diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp
index fcff5e56..17834648 100644
--- a/src/libcamera/control_serializer.cpp
+++ b/src/libcamera/control_serializer.cpp
@@ -2,22 +2,25 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.cpp - Control (de)serializer
+ * Control (de)serializer
*/
-#include "control_serializer.h"
+#include "libcamera/internal/control_serializer.h"
#include <algorithm>
#include <memory>
#include <vector>
-#include <ipa/ipa_controls.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
-#include <libcamera/span.h>
+#include <libcamera/property_ids.h>
+
+#include <libcamera/ipa/ipa_controls.h>
-#include "byte_stream_buffer.h"
-#include "log.h"
+#include "libcamera/internal/byte_stream_buffer.h"
/**
* \file control_serializer.h
@@ -59,6 +62,14 @@ LOG_DEFINE_CATEGORY(Serializer)
* corresponding ControlInfoMap handle in the binary data, and when
* deserializing to retrieve the corresponding ControlInfoMap.
*
+ * As independent ControlSerializer instances are used on both sides of the IPC
+ * boundary, and the two instances operate without a shared point of control,
+ * there is a potential risk of collision of the numerical handles assigned to
+ * each serialized ControlInfoMap. For this reason the control serializer is
+ * initialized with a seed and the handle is incremented by 2, so that instances
+ * initialized with a different seed operate on a separate numerical space,
+ * avoiding any collision risk.
+ *
* In order to perform those tasks, the serializer keeps an internal state that
* needs to be properly populated. This mechanism requires the ControlInfoMap
* corresponding to a ControlList to have been serialized or deserialized
@@ -74,9 +85,45 @@ LOG_DEFINE_CATEGORY(Serializer)
* proceed with care to avoid stale references.
*/
-ControlSerializer::ControlSerializer()
- : serial_(0)
+/**
+ * \enum ControlSerializer::Role
+ * \brief Define the role of the IPC component using the control serializer
+ *
+ * The role of the component that creates the serializer is used to initialize
+ * the handles numerical space.
+ *
+ * \var ControlSerializer::Role::Proxy
+ * \brief The control serializer is used by the IPC Proxy classes
+ *
+ * \var ControlSerializer::Role::Worker
+ * \brief The control serializer is used by the IPC ProxyWorker classes
+ */
+
+/**
+ * \brief Construct a new ControlSerializer
+ * \param[in] role The role of the IPC component using the serializer
+ */
+ControlSerializer::ControlSerializer(Role role)
{
+ /*
+ * Initialize the handle numerical space using the role of the
+ * component that created the instance.
+ *
+ * Instances initialized for a different role will use a different
+ * numerical handle space, avoiding any collision risk when, for example,
+ * two instances of the ControlSerializer class are used at the IPC
+ * boundaries.
+ *
+ * Start counting handles from '1' as '0' is a special value used as
+ * placeholder when serializing lists that do not have an associated
+ * ControlInfoMap (for example a list of libcamera controls::controls).
+ *
+ * \todo This is a temporary hack and should probably be better
+ * engineered, but for the time being it avoids collisions on the handle
+ * value when using IPC.
+ */
+ serialSeed_ = role == Role::Proxy ? 1 : 2;
+ serial_ = serialSeed_;
}
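The resulting handle sequences can be illustrated as follows (sketch only):

ControlSerializer proxy(ControlSerializer::Role::Proxy);   /* assigns 1, 3, 5, ... */
ControlSerializer worker(ControlSerializer::Role::Worker); /* assigns 2, 4, 6, ... */

/*
 * The odd and even sequences never intersect, so handles created on
 * either side of the IPC boundary cannot collide.
 */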
/**
@@ -87,21 +134,22 @@ ControlSerializer::ControlSerializer()
*/
void ControlSerializer::reset()
{
- serial_ = 0;
+ serial_ = serialSeed_;
infoMapHandles_.clear();
infoMaps_.clear();
controlIds_.clear();
+ controlIdMaps_.clear();
}
size_t ControlSerializer::binarySize(const ControlValue &value)
{
- return value.data().size_bytes();
+ return sizeof(ControlType) + value.data().size_bytes();
}
size_t ControlSerializer::binarySize(const ControlInfo &info)
{
- return binarySize(info.min()) + binarySize(info.max());
+ return binarySize(info.min()) + binarySize(info.max()) + binarySize(info.def());
}
/**
@@ -147,6 +195,8 @@ size_t ControlSerializer::binarySize(const ControlList &list)
void ControlSerializer::store(const ControlValue &value,
ByteStreamBuffer &buffer)
{
+ const ControlType type = value.type();
+ buffer.write(&type);
buffer.write(value.data());
}
@@ -154,6 +204,7 @@ void ControlSerializer::store(const ControlInfo &info, ByteStreamBuffer &buffer)
{
store(info.min(), buffer);
store(info.max(), buffer);
+ store(info.def(), buffer);
}
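The serialized layout of a ControlInfo therefore becomes (illustration only):

/*
 * [ControlType][min data] [ControlType][max data] [ControlType][def data]
 *
 * Each ControlValue is now self-describing on the wire, which is what
 * allows loadControlValue() below to drop its explicit type parameter.
 */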
/**
@@ -173,6 +224,12 @@ void ControlSerializer::store(const ControlInfo &info, ByteStreamBuffer &buffer)
int ControlSerializer::serialize(const ControlInfoMap &infoMap,
ByteStreamBuffer &buffer)
{
+ if (isCached(infoMap)) {
+ LOG(Serializer, Debug)
+ << "Skipping already serialized ControlInfoMap";
+ return 0;
+ }
+
/* Compute entries and data required sizes. */
size_t entriesSize = infoMap.size()
* sizeof(struct ipa_control_info_entry);
@@ -180,17 +237,36 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
for (const auto &ctrl : infoMap)
valuesSize += binarySize(ctrl.second);
- /* Prepare the packet header, assign a handle to the ControlInfoMap. */
+ const ControlIdMap *idmap = &infoMap.idmap();
+ enum ipa_controls_id_map_type idMapType;
+ if (idmap == &controls::controls)
+ idMapType = IPA_CONTROL_ID_MAP_CONTROLS;
+ else if (idmap == &properties::properties)
+ idMapType = IPA_CONTROL_ID_MAP_PROPERTIES;
+ else
+ idMapType = IPA_CONTROL_ID_MAP_V4L2;
+
+ /* Prepare the packet header. */
struct ipa_controls_header hdr;
hdr.version = IPA_CONTROLS_FORMAT_VERSION;
- hdr.handle = ++serial_;
+ hdr.handle = serial_;
hdr.entries = infoMap.size();
hdr.size = sizeof(hdr) + entriesSize + valuesSize;
hdr.data_offset = sizeof(hdr) + entriesSize;
+ hdr.id_map_type = idMapType;
buffer.write(&hdr);
/*
+ * Increment the handle for the ControlInfoMap by 2 to keep the handles
+ * numerical space partitioned between instances initialized for a
+ * different role.
+ *
+ * \sa ControlSerializer::Role
+ */
+ serial_ += 2;
+
+ /*
* Serialize all entries.
* \todo Serialize the control name too
*/
@@ -205,6 +281,7 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap,
entry.id = id->id();
entry.type = id->type();
entry.offset = values.offset();
+ entry.direction = static_cast<ControlId::DirectionFlags::Type>(id->direction());
entries.write(&entry);
store(info, values);
@@ -255,6 +332,15 @@ int ControlSerializer::serialize(const ControlList &list,
infoMapHandle = 0;
}
+ const ControlIdMap *idmap = list.idMap();
+ enum ipa_controls_id_map_type idMapType;
+ if (idmap == &controls::controls)
+ idMapType = IPA_CONTROL_ID_MAP_CONTROLS;
+ else if (idmap == &properties::properties)
+ idMapType = IPA_CONTROL_ID_MAP_PROPERTIES;
+ else
+ idMapType = IPA_CONTROL_ID_MAP_V4L2;
+
size_t entriesSize = list.size() * sizeof(struct ipa_control_value_entry);
size_t valuesSize = 0;
for (const auto &ctrl : list)
@@ -267,6 +353,7 @@ int ControlSerializer::serialize(const ControlList &list,
hdr.entries = list.size();
hdr.size = sizeof(hdr) + entriesSize + valuesSize;
hdr.data_offset = sizeof(hdr) + entriesSize;
+ hdr.id_map_type = idMapType;
buffer.write(&hdr);
@@ -295,11 +382,13 @@ int ControlSerializer::serialize(const ControlList &list,
return 0;
}
-ControlValue ControlSerializer::loadControlValue(ControlType type,
- ByteStreamBuffer &buffer,
+ControlValue ControlSerializer::loadControlValue(ByteStreamBuffer &buffer,
bool isArray,
unsigned int count)
{
+ ControlType type;
+ buffer.read(&type);
+
ControlValue value;
value.reserve(type, isArray, count);
@@ -308,16 +397,13 @@ ControlValue ControlSerializer::loadControlValue(ControlType type,
return value;
}
-ControlInfo ControlSerializer::loadControlInfo(ControlType type,
- ByteStreamBuffer &b)
+ControlInfo ControlSerializer::loadControlInfo(ByteStreamBuffer &b)
{
- if (type == ControlTypeString)
- type = ControlTypeInteger32;
-
- ControlValue min = loadControlValue(type, b);
- ControlValue max = loadControlValue(type, b);
+ ControlValue min = loadControlValue(b);
+ ControlValue max = loadControlValue(b);
+ ControlValue def = loadControlValue(b);
- return ControlInfo(min, max);
+ return ControlInfo(min, max, def);
}
/**
@@ -325,7 +411,7 @@ ControlInfo ControlSerializer::loadControlInfo(ControlType type,
* \brief Deserialize an object from a binary buffer
* \param[in] buffer The memory buffer that contains the object
*
- * This method is only valid when specialized for ControlInfoMap or
+ * This function is only valid when specialized for ControlInfoMap or
* ControlList. Any other typename \a T is not supported.
*/
@@ -334,7 +420,7 @@ ControlInfo ControlSerializer::loadControlInfo(ControlType type,
* \param[in] buffer The memory buffer that contains the serialized map
*
* Re-construct a ControlInfoMap from a binary \a buffer containing data
- * serialized using the serialize() method.
+ * serialized using the serialize() function.
*
* \return The deserialized ControlInfoMap
*/
@@ -347,6 +433,12 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
+ auto iter = infoMaps_.find(hdr->handle);
+ if (iter != infoMaps_.end()) {
+ LOG(Serializer, Debug) << "Use cached ControlInfoMap";
+ return iter->second;
+ }
+
if (hdr->version != IPA_CONTROLS_FORMAT_VERSION) {
LOG(Serializer, Error)
<< "Unsupported controls format version "
@@ -354,6 +446,33 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
+ /*
+ * Use the ControlIdMap corresponding to the id map type. If the type
+ * references a globally defined id map (such as controls::controls
+ * or properties::properties), use it. Otherwise, create a local id map
+ * that will be populated with dynamically created ControlId instances
+ * when deserializing individual ControlInfoMap entries.
+ */
+ const ControlIdMap *idMap = nullptr;
+ ControlIdMap *localIdMap = nullptr;
+ switch (hdr->id_map_type) {
+ case IPA_CONTROL_ID_MAP_CONTROLS:
+ idMap = &controls::controls;
+ break;
+ case IPA_CONTROL_ID_MAP_PROPERTIES:
+ idMap = &properties::properties;
+ break;
+ case IPA_CONTROL_ID_MAP_V4L2:
+ controlIdMaps_.emplace_back(std::make_unique<ControlIdMap>());
+ localIdMap = controlIdMaps_.back().get();
+ idMap = localIdMap;
+ break;
+ default:
+ LOG(Serializer, Error)
+ << "Unknown id map type: " << hdr->id_map_type;
+ return {};
+ }
+
ByteStreamBuffer entries = buffer.carveOut(hdr->data_offset - sizeof(*hdr));
ByteStreamBuffer values = buffer.carveOut(hdr->size - hdr->data_offset);
@@ -363,7 +482,6 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
ControlInfoMap::Map ctrls;
-
for (unsigned int i = 0; i < hdr->entries; ++i) {
const struct ipa_control_info_entry *entry =
entries.read<decltype(*entry)>();
@@ -372,13 +490,26 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
return {};
}
- /* Create and cache the individual ControlId. */
ControlType type = static_cast<ControlType>(entry->type);
- /**
- * \todo Find a way to preserve the control name for debugging
- * purpose.
- */
- controlIds_.emplace_back(std::make_unique<ControlId>(entry->id, "", type));
+
+ /* If we're using a local id map, populate it. */
+ if (localIdMap) {
+ ControlId::DirectionFlags flags{
+ static_cast<ControlId::Direction>(entry->direction)
+ };
+
+ /**
+ * \todo Find a way to preserve the control name for
+ * debugging purposes.
+ */
+ controlIds_.emplace_back(std::make_unique<ControlId>(entry->id,
+ "", "local", type,
+ flags));
+ (*localIdMap)[entry->id] = controlIds_.back().get();
+ }
+
+ const ControlId *controlId = idMap->at(entry->id);
+ ASSERT(controlId);
if (entry->offset != values.offset()) {
LOG(Serializer, Error)
@@ -388,15 +519,15 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
}
/* Create and store the ControlInfo. */
- ctrls.emplace(controlIds_.back().get(),
- loadControlInfo(type, values));
+ ctrls.emplace(controlId, loadControlInfo(values));
}
/*
* Create the ControlInfoMap in the cache, and store the map to handle
* association.
*/
- ControlInfoMap &map = infoMaps_[hdr->handle] = std::move(ctrls);
+ infoMaps_[hdr->handle] = ControlInfoMap(std::move(ctrls), *idMap);
+ ControlInfoMap &map = infoMaps_[hdr->handle];
infoMapHandles_[&map] = hdr->handle;
return map;
@@ -407,7 +538,7 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer &
* \param[in] buffer The memory buffer that contains the serialized list
*
* Re-construct a ControlList from a binary \a buffer containing data
- * serialized using the serialize() method.
+ * serialized using the serialize() function.
*
* \return The deserialized ControlList
*/
@@ -436,13 +567,15 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
}
/*
- * Retrieve the ControlInfoMap associated with the ControlList based on
- * its ID. The mapping between infoMap and ID is set up when serializing
- * or deserializing ControlInfoMap. If no mapping is found (which is
- * currently the case for ControlList related to libcamera controls),
- * use the global control::control idmap.
+ * Retrieve the ControlIdMap associated with the ControlList.
+ *
+ * The idmap is either retrieved from the list's ControlInfoMap when
+ * a valid handle has been initialized at serialization time, or by
+ * using the header's id_map_type field for lists that refer to the
+ * globally defined libcamera controls and properties, for which no
+ * ControlInfoMap is available.
*/
- const ControlInfoMap *infoMap;
+ const ControlIdMap *idMap;
if (hdr->handle) {
auto iter = std::find_if(infoMapHandles_.begin(), infoMapHandles_.end(),
[&](decltype(infoMapHandles_)::value_type &entry) {
@@ -454,12 +587,33 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- infoMap = iter->first;
+ const ControlInfoMap *infoMap = iter->first;
+ idMap = &infoMap->idmap();
} else {
- infoMap = nullptr;
+ switch (hdr->id_map_type) {
+ case IPA_CONTROL_ID_MAP_CONTROLS:
+ idMap = &controls::controls;
+ break;
+
+ case IPA_CONTROL_ID_MAP_PROPERTIES:
+ idMap = &properties::properties;
+ break;
+
+ case IPA_CONTROL_ID_MAP_V4L2:
+ default:
+ LOG(Serializer, Fatal)
+ << "A list of V4L2 controls requires an ControlInfoMap";
+ return {};
+ }
}
- ControlList ctrls(infoMap ? infoMap->idmap() : controls::controls);
+ /*
+ * \todo When available, initialize the list with the ControlInfoMap
+ * so that controls can be validated against their limits.
+ * Currently no validation is performed, so it's fine relying on the
+ * idmap only.
+ */
+ ControlList ctrls(*idMap);
for (unsigned int i = 0; i < hdr->entries; ++i) {
const struct ipa_control_value_entry *entry =
@@ -476,13 +630,25 @@ ControlList ControlSerializer::deserialize<ControlList>(ByteStreamBuffer &buffer
return {};
}
- ControlType type = static_cast<ControlType>(entry->type);
ctrls.set(entry->id,
- loadControlValue(type, values, entry->is_array,
- entry->count));
+ loadControlValue(values, entry->is_array, entry->count));
}
return ctrls;
}
+/**
+ * \brief Check if a ControlInfoMap is cached
+ * \param[in] infoMap The ControlInfoMap to check
+ *
+ * The ControlSerializer caches all ControlInfoMaps that it has (de)serialized.
+ * This function checks if \a infoMap is in the cache.
+ *
+ * \return True if \a infoMap is in the cache or false otherwise
+ */
+bool ControlSerializer::isCached(const ControlInfoMap &infoMap)
+{
+ return infoMapHandles_.count(&infoMap);
+}
+
} /* namespace libcamera */
diff --git a/src/libcamera/control_validator.cpp b/src/libcamera/control_validator.cpp
index 8e5cf3c3..93982cff 100644
--- a/src/libcamera/control_validator.cpp
+++ b/src/libcamera/control_validator.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.cpp - Control validator
+ * Control validator
*/
-#include "control_validator.h"
+#include "libcamera/internal/control_validator.h"
/**
* \file control_validator.h
@@ -35,7 +35,7 @@ namespace libcamera {
* \brief Validate a control
* \param[in] id The control ID
*
- * This method validates the control \a id against the object corresponding to
+ * This function validates the control \a id against the object corresponding to
* the validator. It shall at least validate that the control is applicable to
* the object instance, and may perform additional checks.
*
diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp
index 540cc026..70f6f609 100644
--- a/src/libcamera/controls.cpp
+++ b/src/libcamera/controls.cpp
@@ -2,19 +2,19 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.cpp - Control handling
+ * Control handling
*/
#include <libcamera/controls.h>
-#include <iomanip>
#include <sstream>
-#include <string>
#include <string.h>
+#include <string>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include "control_validator.h"
-#include "log.h"
-#include "utils.h"
+#include "libcamera/internal/control_validator.h"
/**
* \file controls.h
@@ -40,7 +40,7 @@
* int32_t exposure = controls->get(controls::ManualExposure);
* \endcode
*
- * The ControlList::get() and ControlList::set() methods automatically deduce
+ * The ControlList::get() and ControlList::set() functions automatically deduce
* the data type based on the control.
*/
@@ -54,10 +54,15 @@ static constexpr size_t ControlValueSize[] = {
[ControlTypeNone] = 0,
[ControlTypeBool] = sizeof(bool),
[ControlTypeByte] = sizeof(uint8_t),
+ [ControlTypeUnsigned16] = sizeof(uint16_t),
+ [ControlTypeUnsigned32] = sizeof(uint32_t),
[ControlTypeInteger32] = sizeof(int32_t),
[ControlTypeInteger64] = sizeof(int64_t),
[ControlTypeFloat] = sizeof(float),
[ControlTypeString] = sizeof(char),
+ [ControlTypeRectangle] = sizeof(Rectangle),
+ [ControlTypeSize] = sizeof(Size),
+ [ControlTypePoint] = sizeof(Point),
};
} /* namespace */
@@ -71,10 +76,14 @@ static constexpr size_t ControlValueSize[] = {
* The control stores a boolean value
* \var ControlTypeByte
* The control stores a byte value as an unsigned 8-bit integer
+ * \var ControlTypeUnsigned16
+ * The control stores an unsigned 16-bit integer value
+ * \var ControlTypeUnsigned32
+ * The control stores an unsigned 32-bit integer value
* \var ControlTypeInteger32
- * The control stores a 32-bit integer value
+ * The control stores a signed 32-bit integer value
* \var ControlTypeInteger64
- * The control stores a 64-bit integer value
+ * The control stores a signed 64-bit integer value
* \var ControlTypeFloat
* The control stores a 32-bit floating point value
* \var ControlTypeString
@@ -227,6 +236,16 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeUnsigned16: {
+ const uint16_t *value = reinterpret_cast<const uint16_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
+ case ControlTypeUnsigned32: {
+ const uint32_t *value = reinterpret_cast<const uint32_t *>(data);
+ str += std::to_string(*value);
+ break;
+ }
case ControlTypeInteger32: {
const int32_t *value = reinterpret_cast<const int32_t *>(data);
str += std::to_string(*value);
@@ -242,6 +261,21 @@ std::string ControlValue::toString() const
str += std::to_string(*value);
break;
}
+ case ControlTypeRectangle: {
+ const Rectangle *value = reinterpret_cast<const Rectangle *>(data);
+ str += value->toString();
+ break;
+ }
+ case ControlTypeSize: {
+ const Size *value = reinterpret_cast<const Size *>(data);
+ str += value->toString();
+ break;
+ }
+ case ControlTypePoint: {
+ const Point *value = reinterpret_cast<const Point *>(data);
+ str += value->toString();
+ break;
+ }
case ControlTypeNone:
case ControlTypeString:
break;
@@ -376,8 +410,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
* \brief Construct a ControlId instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
* \param[in] type The control data type
- */
+ * \param[in] direction The direction of the control, if it can be used in Controls or Metadata
+ * \param[in] size The size of the array control, or 0 if scalar control
+ * \param[in] enumStrMap The map from enum names to values (optional)
+ */
+ControlId::ControlId(unsigned int id, const std::string &name,
+ const std::string &vendor, ControlType type,
+ DirectionFlags direction, std::size_t size,
+ const std::map<std::string, int32_t> &enumStrMap)
+ : id_(id), name_(name), vendor_(vendor), type_(type),
+ direction_(direction), size_(size), enumStrMap_(enumStrMap)
+{
+ for (const auto &pair : enumStrMap_)
+ reverseMap_[pair.second] = pair.first;
+}
/**
* \fn unsigned int ControlId::id() const
@@ -392,12 +440,68 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \fn const std::string &ControlId::vendor() const
+ * \brief Retrieve the vendor name
+ * \return The vendor name, as a string
+ */
+
+/**
* \fn ControlType ControlId::type() const
* \brief Retrieve the control data type
* \return The control data type
*/
/**
+ * \fn DirectionFlags ControlId::direction() const
+ * \brief Return the direction that the control can be used in
+ *
+ * This is similar to \sa isInput() and \sa isOutput(), but returns the
+ * direction flags instead of a boolean for each direction.
+ *
+ * \return The direction flags indicating whether the control can be used as
+ * an input control or as output metadata
+ */
+
+/**
+ * \fn bool ControlId::isInput() const
+ * \brief Determine if the control is available to be used as an input control
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the former.
+ *
+ * \return True if the control can be used as an input control, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isOutput() const
+ * \brief Determine if the control is available to be used in output metadata
+ *
+ * Controls can be used either as input in controls, or as output in metadata.
+ * This function checks if the control is allowed to be used as the latter.
+ *
+ * \return True if the control can be returned in output metadata, false otherwise
+ */
+
+/**
+ * \fn bool ControlId::isArray() const
+ * \brief Determine if the control is an array control
+ * \return True if the control is an array control, false otherwise
+ */
+
+/**
+ * \fn std::size_t ControlId::size() const
+ * \brief Retrieve the size of the control if it is an array control
+ * \return The size of the array control, size_t::max for dynamic extent, or 0
+ * for non-array
+ */
+
+/**
+ * \fn const std::map<int32_t, std::string> &ControlId::enumerators() const
+ * \brief Retrieve the map of enum values to enum names
+ * \return The map of enum values to enum names
+ */
+
+/**
* \fn bool operator==(unsigned int lhs, const ControlId &rhs)
* \brief Compare a ControlId with a control numerical ID
* \param[in] lhs Left-hand side numerical ID
@@ -416,17 +520,33 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
+ * \enum ControlId::Direction
+ * \brief The direction the control is capable of being passed from/to
+ *
+ * \var ControlId::Direction::In
+ * \brief The control can be passed as input in controls
+ *
+ * \var ControlId::Direction::Out
+ * \brief The control can be returned as output in metadata
+ */
+
+/**
+ * \typedef ControlId::DirectionFlags
+ * \brief A wrapper for ControlId::Direction so that it can be used as flags
+ */
+
+/**
* \class Control
* \brief Describe a control and its intrinsic properties
*
* The Control class models a control exposed by an object. Its template type
- * name T refers to the control data type, and allows methods that operate on
- * control values to be defined as template methods using the same type T for
- * the control value. See for instance how the ControlList::get() method
+ * name T refers to the control data type, and allows functions that operate on
+ * control values to be defined as template functions using the same type T for
+ * the control value. See for instance how the ControlList::get() function
* returns a value corresponding to the type of the requested control.
*
- * While this class is the main mean to refer to a control, the control
- * identifying information are stored in the non-template base ControlId class.
+ * While this class is the main means to refer to a control, the control
+ * identifying information is stored in the non-template base ControlId class.
* This allows code that operates on a set of controls of different types to
* reference those controls through a ControlId instead of a Control. For
* instance, the list of controls supported by a camera is exposed as ControlId
@@ -443,10 +563,14 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen
*/
/**
- * \fn Control::Control(unsigned int id, const char *name)
+ * \fn Control::Control(unsigned int id, const char *name, const char *vendor)
* \brief Construct a Control instance
* \param[in] id The control numerical ID
* \param[in] name The control name
+ * \param[in] vendor The vendor name
+ * \param[in] direction The direction of the control, if it can be used in
+ * Controls or Metadata
+ * \param[in] enumStrMap The map from enum names to values (optional)
*
* The control data type is automatically deduced from the template type T.
*/
@@ -480,6 +604,57 @@ ControlInfo::ControlInfo(const ControlValue &min,
}
/**
+ * \brief Construct a ControlInfo from the list of valid values
+ * \param[in] values The control valid values
+ * \param[in] def The control default value
+ *
+ * Construct a ControlInfo from a list of valid values. The ControlInfo
+ * minimum and maximum values are set to the first and last members of the
+ * values list respectively. The default value is set to \a def if provided, or
+ * to the minimum value otherwise.
+ */
+ControlInfo::ControlInfo(Span<const ControlValue> values,
+ const ControlValue &def)
+{
+ min_ = values.front();
+ max_ = values.back();
+ def_ = !def.isNone() ? def : values.front();
+
+ values_.reserve(values.size());
+ for (const ControlValue &value : values)
+ values_.push_back(value);
+}
+
+/**
+ * \brief Construct a boolean ControlInfo with both boolean values
+ * \param[in] values The control valid boolean values (both true and false)
+ * \param[in] def The control default boolean value
+ *
+ * Construct a ControlInfo for a boolean control, where both true and false are
+ * valid values. \a values must be { false, true } (the order is irrelevant).
+ * The minimum value will always be false, and the maximum always true. The
+ * default value is \a def.
+ */
+ControlInfo::ControlInfo(std::set<bool> values, bool def)
+ : min_(false), max_(true), def_(def), values_({ false, true })
+{
+ ASSERT(values.count(def) && values.size() == 2);
+}
+
+/**
+ * \brief Construct a boolean ControlInfo with only one valid value
+ * \param[in] value The control valid boolean value
+ *
+ * Construct a ControlInfo for a boolean control that has only one valid
+ * value. The minimum, maximum, and default values will all be \a value.
+ */
+ControlInfo::ControlInfo(bool value)
+ : min_(value), max_(value), def_(value)
+{
+ values_ = { value };
+}
+
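Taken together with the existing (min, max, def) constructor, these allow concise definitions such as the following sketch (the values are made up):

/* Explicit list of valid values, with the default set to the first entry. */
static const ControlValue states[] = {
        ControlValue(0), ControlValue(1), ControlValue(2)
};
ControlInfo stateInfo(states, states[0]);

/* Boolean control where both values are valid, defaulting to false. */
ControlInfo aeEnable({ false, true }, false);

/* Boolean control locked to a single value. */
ControlInfo alwaysOn(true);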
+/**
* \fn ControlInfo::min()
* \brief Retrieve the minimum value of the control
*
@@ -508,6 +683,17 @@ ControlInfo::ControlInfo(const ControlValue &min,
*/
/**
+ * \fn ControlInfo::values()
+ * \brief Retrieve the list of valid values
+ *
+ * For controls that support a pre-defined number of values, the enumeration of
+ * those is reported through a vector of ControlValue instances accessible with
+ * this function.
+ *
+ * \return A vector of ControlValue representing the control valid values
+ */
+
+/**
* \brief Provide a string representation of the ControlInfo
*/
std::string ControlInfo::toString() const
@@ -546,14 +732,14 @@ std::string ControlInfo::toString() const
*
* The ControlInfoMap class describes controls supported by an object as an
* unsorted map of ControlId pointers to ControlInfo instances. Unlike the
- * standard std::unsorted_map<> class, it is designed the be immutable once
+ * standard std::unordered_map<> class, it is designed to be immutable once
* constructed, and thus only exposes the read accessors of the
 * std::unordered_map<> base class.
*
- * In addition to the features of the standard unsorted map, this class also
- * provides access to the mapped elements using numerical ID keys. It maintains
- * an internal map of numerical ID to ControlId for this purpose, and exposes it
- * through the idmap() method to help construction of ControlList instances.
+ * The class is constructed with a reference to a ControlIdMap. This allows
+ * providing access to the mapped elements using numerical ID keys, in addition
+ * to the features of the standard unsorted map. All ControlId keys in the map
+ * must appear in the ControlIdMap.
*/
/**
@@ -570,24 +756,27 @@ std::string ControlInfo::toString() const
/**
* \brief Construct a ControlInfoMap from an initializer list
* \param[in] init The initializer list
+ * \param[in] idmap The idmap used by the ControlInfoMap
*/
-ControlInfoMap::ControlInfoMap(std::initializer_list<Map::value_type> init)
- : Map(init)
+ControlInfoMap::ControlInfoMap(std::initializer_list<Map::value_type> init,
+ const ControlIdMap &idmap)
+ : Map(init), idmap_(&idmap)
{
- generateIdmap();
+ ASSERT(validate());
}
/**
* \brief Construct a ControlInfoMap from a plain map
* \param[in] info The control info plain map
+ * \param[in] idmap The idmap used by the ControlInfoMap
*
* Construct a new ControlInfoMap and populate its contents with those of
* \a info using move semantics. Upon return the \a info map will be empty.
*/
-ControlInfoMap::ControlInfoMap(Map &&info)
- : Map(std::move(info))
+ControlInfoMap::ControlInfoMap(Map &&info, const ControlIdMap &idmap)
+ : Map(std::move(info)), idmap_(&idmap)
{
- generateIdmap();
+ ASSERT(validate());
}
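A minimal construction sketch against the global controls idmap (the Brightness range shown is illustrative):

ControlInfoMap::Map map;
map[&controls::Brightness] =
        ControlInfo(ControlValue(-1.0f), ControlValue(1.0f), ControlValue(0.0f));

/* Every ControlId key must be present in the idmap passed alongside. */
ControlInfoMap infoMap(std::move(map), controls::controls);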
/**
@@ -597,32 +786,44 @@ ControlInfoMap::ControlInfoMap(Map &&info)
* \return A reference to the ControlInfoMap
*/
-/**
- * \brief Replace the contents with those from the initializer list
- * \param[in] init The initializer list
- * \return A reference to the ControlInfoMap
- */
-ControlInfoMap &ControlInfoMap::operator=(std::initializer_list<Map::value_type> init)
+bool ControlInfoMap::validate()
{
- Map::operator=(init);
- generateIdmap();
- return *this;
-}
+ if (!idmap_)
+ return false;
-/**
- * \brief Move assignment operator from a plain map
- * \param[in] info The control info plain map
- *
- * Populate the map by replacing its contents with those of \a info using move
- * semantics. Upon return the \a info map will be empty.
- *
- * \return A reference to the populated ControlInfoMap
- */
-ControlInfoMap &ControlInfoMap::operator=(Map &&info)
-{
- Map::operator=(std::move(info));
- generateIdmap();
- return *this;
+ for (const auto &ctrl : *this) {
+ const ControlId *id = ctrl.first;
+ auto it = idmap_->find(id->id());
+
+ /*
+ * Make sure all control ids are part of the idmap and verify
+ * the control info matches the expected type.
+ */
+ if (it == idmap_->end() || it->second != id) {
+ LOG(Controls, Error)
+ << "Control " << utils::hex(id->id())
+ << " not in the idmap";
+ return false;
+ }
+
+ /*
+ * For string controls, min and max define the valid
+ * range for the string size, not for the individual
+ * values.
+ */
+ ControlType rangeType = id->type() == ControlTypeString
+ ? ControlTypeInteger32 : id->type();
+ const ControlInfo &info = ctrl.second;
+
+ if (info.min().type() != rangeType) {
+ LOG(Controls, Error)
+ << "Control " << utils::hex(id->id())
+ << " type and info type mismatch";
+ return false;
+ }
+ }
+
+ return true;
}
/**
@@ -632,7 +833,9 @@ ControlInfoMap &ControlInfoMap::operator=(Map &&info)
*/
ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
{
- return at(idmap_.at(id));
+ ASSERT(idmap_);
+
+ return at(idmap_->at(id));
}
/**
@@ -642,7 +845,9 @@ ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id)
*/
const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
{
- return at(idmap_.at(id));
+ ASSERT(idmap_);
+
+ return at(idmap_->at(id));
}
/**
@@ -652,12 +857,15 @@ const ControlInfoMap::mapped_type &ControlInfoMap::at(unsigned int id) const
*/
ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
{
+ if (!idmap_)
+ return 0;
+
/*
* The ControlInfoMap and its idmap have a 1:1 mapping between their
* entries, we can thus just count the matching entries in idmap to
* avoid an additional lookup.
*/
- return idmap_.count(id);
+ return idmap_->count(id);
}
/**
@@ -668,8 +876,11 @@ ControlInfoMap::size_type ControlInfoMap::count(unsigned int id) const
*/
ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
{
- auto iter = idmap_.find(id);
- if (iter == idmap_.end())
+ if (!idmap_)
+ return end();
+
+ auto iter = idmap_->find(id);
+ if (iter == idmap_->end())
return end();
return find(iter->second);
@@ -683,8 +894,11 @@ ControlInfoMap::iterator ControlInfoMap::find(unsigned int id)
*/
ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
{
- auto iter = idmap_.find(id);
- if (iter == idmap_.end())
+ if (!idmap_)
+ return end();
+
+ auto iter = idmap_->find(id);
+ if (iter == idmap_->end())
return end();
return find(iter->second);
@@ -695,45 +909,18 @@ ControlInfoMap::const_iterator ControlInfoMap::find(unsigned int id) const
* \brief Retrieve the ControlId map
*
* Constructing ControlList instances for V4L2 controls requires a ControlIdMap
- * for the V4L2 device that the control list targets. This helper method
+ * for the V4L2 device that the control list targets. This helper function
* returns a suitable idmap for that purpose.
*
* \return The ControlId map
*/
-void ControlInfoMap::generateIdmap()
-{
- idmap_.clear();
-
- for (const auto &ctrl : *this) {
- /*
- * For string controls, min and max define the valid
- * range for the string size, not for the individual
- * values.
- */
- ControlType rangeType = ctrl.first->type() == ControlTypeString
- ? ControlTypeInteger32 : ctrl.first->type();
- const ControlInfo &info = ctrl.second;
-
- if (info.min().type() != rangeType) {
- LOG(Controls, Error)
- << "Control " << utils::hex(ctrl.first->id())
- << " type and info type mismatch";
- idmap_.clear();
- clear();
- return;
- }
-
- idmap_[ctrl.first->id()] = ctrl.first;
- }
-}
-
/**
* \class ControlList
* \brief Associate a list of ControlId with their values for an object
*
* The ControlList class stores values of controls exposed by an object. The
- * lists returned by the Request::controls() and Request::metadata() methods
+ * lists returned by the Request::controls() and Request::metadata() functions
* refer to the camera that the request belongs to.
*
* Control lists are constructed with a map of all the controls supported by
@@ -761,7 +948,8 @@ ControlList::ControlList()
* controls is provided by controls::controls and can be used as the \a idmap
* argument.
*/
-ControlList::ControlList(const ControlIdMap &idmap, ControlValidator *validator)
+ControlList::ControlList(const ControlIdMap &idmap,
+ const ControlValidator *validator)
: validator_(validator), idmap_(&idmap), infoMap_(nullptr)
{
}
@@ -771,7 +959,8 @@ ControlList::ControlList(const ControlIdMap &idmap, ControlValidator *validator)
* \param[in] infoMap The ControlInfoMap for the control list target object
* \param[in] validator The validator (may be null)
*/
-ControlList::ControlList(const ControlInfoMap &infoMap, ControlValidator *validator)
+ControlList::ControlList(const ControlInfoMap &infoMap,
+ const ControlValidator *validator)
: validator_(validator), idmap_(&infoMap.idmap()), infoMap_(&infoMap)
{
}
@@ -830,14 +1019,57 @@ ControlList::ControlList(const ControlInfoMap &infoMap, ControlValidator *valida
*/
/**
- * \brief Check if the list contains a control with the specified \a id
- * \param[in] id The control ID
+ * \enum ControlList::MergePolicy
+ * \brief The policy used by the merge function
*
- * \return True if the list contains a matching control, false otherwise
+ * \var ControlList::MergePolicy::KeepExisting
+ * \brief Existing controls in the target list are kept
+ *
+ * \var ControlList::MergePolicy::OverwriteExisting
+ * \brief Existing controls in the target list are updated
+ */
+
+/**
+ * \brief Merge the \a source into the ControlList
+ * \param[in] source The ControlList to merge into this object
+ * \param[in] policy Controls if existing elements in *this shall be
+ * overwritten
+ *
+ * Merging two control lists copies elements from the \a source and inserts
+ * them in *this. If the \a source contains elements whose key is already
+ * present in *this, then those elements are only overwritten if
+ * \a policy is MergePolicy::OverwriteExisting.
+ *
+ * Only control lists created from the same ControlIdMap or ControlInfoMap may
+ * be merged. Attempting to do otherwise results in undefined behaviour.
+ *
+ * \todo Reimplement or implement an overloaded version which internally uses
+ * std::unordered_map::merge() and accepts a non-const argument.
*/
-bool ControlList::contains(const ControlId &id) const
+void ControlList::merge(const ControlList &source, MergePolicy policy)
{
- return controls_.find(id.id()) != controls_.end();
+ /**
+ * \todo ASSERT that the current and source ControlList are derived
+ * from a compatible ControlIdMap, to prevent undefined behaviour due to
+ * id collisions.
+ *
+ * This cannot currently be a direct pointer comparison due to the
+ * duplication of the ControlIdMaps in the isolated IPA use cases.
+ * Furthermore, manually checking that each entry of the id map is
+ * identical is expensive.
+ * See https://bugs.libcamera.org/show_bug.cgi?id=31 for further details
+ */
+
+ for (const auto &ctrl : source) {
+ if (policy == MergePolicy::KeepExisting && contains(ctrl.first)) {
+ const ControlId *id = idmap_->at(ctrl.first);
+ LOG(Controls, Warning)
+ << "Control " << id->name() << " not overwritten";
+ continue;
+ }
+
+ set(ctrl.first, ctrl.second);
+ }
}
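A usage sketch (the controls chosen are arbitrary, and the policy argument is spelled out explicitly):

ControlList a(controls::controls);
a.set(controls::Brightness, 0.5f);

ControlList b(controls::controls);
b.set(controls::Brightness, -0.2f);
b.set(controls::Contrast, 1.2f);

/* Keeps a's Brightness (0.5) and copies Contrast over from b. */
a.merge(b, ControlList::MergePolicy::KeepExisting);

/* Overwrites Brightness with b's value (-0.2). */
a.merge(b, ControlList::MergePolicy::OverwriteExisting);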
/**
@@ -852,27 +1084,25 @@ bool ControlList::contains(unsigned int id) const
}
/**
- * \fn template<typename T> T ControlList::get(const Control<T> &ctrl) const
+ * \fn ControlList::get(const Control<T> &ctrl) const
* \brief Get the value of control \a ctrl
* \param[in] ctrl The control
*
- * The behaviour is undefined if the control \a ctrl is not present in the
- * list. Use ControlList::contains() to test for the presence of a control in
- * the list before retrieving its value.
- *
- * The control value type shall match the type T, otherwise the behaviour is
- * undefined.
+ * Besides getting the value of a control, this function can also be used to
+ * check if a control is present in the ControlList by converting the returned
+ * std::optional<T> to bool (or calling its has_value() function).
*
- * \return The control value
+ * \return A std::optional<T> containing the control value, or std::nullopt if
+ * the control \a ctrl is not present in the list
*/
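In application code the new return type reads as follows (a sketch; the surrounding request handling is assumed):

const std::optional<int32_t> exposure =
        request->metadata().get(controls::ExposureTime);
if (exposure)
        std::cout << "Exposure time: " << *exposure << " us" << std::endl;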
/**
- * \fn template<typename T, typename V> void ControlList::set(const Control<T> &ctrl, const V &value)
+ * \fn ControlList::set(const Control<T> &ctrl, const V &value)
* \brief Set the control \a ctrl value to \a value
* \param[in] ctrl The control
* \param[in] value The control value
*
- * This method sets the value of a control in the control list. If the control
+ * This function sets the value of a control in the control list. If the control
* is already present in the list, its value is updated, otherwise it is added
* to the list.
*
@@ -881,8 +1111,7 @@ bool ControlList::contains(unsigned int id) const
*/
/**
- * \fn template<typename T, typename V> \
- * void ControlList::set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ * \fn ControlList::set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
* \copydoc ControlList::set(const Control<T> &ctrl, const V &value)
*/
@@ -912,7 +1141,7 @@ const ControlValue &ControlList::get(unsigned int id) const
* \param[in] id The control ID
* \param[in] value The control value
*
- * This method sets the value of a control in the control list. If the control
+ * This function sets the value of a control in the control list. If the control
* is already present in the list, its value is updated, otherwise it is added
* to the list.
*
@@ -938,6 +1167,14 @@ void ControlList::set(unsigned int id, const ControlValue &value)
* associated ControlInfoMap, nullptr is returned in that case.
*/
+/**
+ * \fn ControlList::idMap()
+ * \brief Retrieve the ControlId map used to construct the ControlList
+ * \return The ControlId map used to construct the ControlList. ControlList
+ * instances constructed with the default constructor have no associated
+ * idmap; nullptr is returned in that case.
+ */
+
const ControlValue *ControlList::find(unsigned int id) const
{
const auto iter = controls_.find(id);
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
new file mode 100644
index 00000000..d551b908
--- /dev/null
+++ b/src/libcamera/converter.cpp
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#include "libcamera/internal/converter.h"
+
+#include <algorithm>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+
+/**
+ * \file converter.h
+ * \brief Abstract converter
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Converter)
+
+/**
+ * \class Converter
+ * \brief Abstract Base Class for converter
+ *
+ * The Converter class is an Abstract Base Class defining the interfaces of
+ * converter implementations.
+ *
+ * Converters offer scaling and pixel format conversion services on an input
+ * stream. The converter can output multiple streams with individual conversion
+ * parameters from the same input stream.
+ */
+
+/**
+ * \enum Converter::Feature
+ * \brief Specify the features supported by the converter
+ * \var Converter::Feature::None
+ * \brief No extra features supported by the converter
+ * \var Converter::Feature::InputCrop
+ * \brief Cropping capability at input is supported by the converter
+ */
+
+/**
+ * \typedef Converter::Features
+ * \brief A bitwise combination of features supported by the converter
+ */
+
+/**
+ * \enum Converter::Alignment
+ * \brief The alignment mode specified when adjusting the converter input or
+ * output sizes
+ * \var Converter::Alignment::Down
+ * \brief Adjust the Converter sizes to a smaller valid size
+ * \var Converter::Alignment::Up
+ * \brief Adjust the Converter sizes to a larger valid size
+ */
+
+/**
+ * \brief Construct a Converter instance
+ * \param[in] media The media device implementing the converter
+ * \param[in] features Features flags representing supported features
+ *
+ * This searches for the entity implementing the data streaming function in the
+ * media graph entities and uses its device node as the converter device node.
+ */
+Converter::Converter(MediaDevice *media, Features features)
+{
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto it = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *entity) {
+ return entity->function() == MEDIA_ENT_F_IO_V4L;
+ });
+ if (it == entities.end()) {
+ LOG(Converter, Error)
+ << "No entity suitable for implementing a converter in "
+ << media->driver() << " entities list.";
+ return;
+ }
+
+ deviceNode_ = (*it)->deviceNode();
+ features_ = features;
+}
+
+Converter::~Converter()
+{
+}
+
+/**
+ * \fn Converter::loadConfiguration()
+ * \brief Load converter configuration from file
+ * \param[in] filename The file name path
+ *
+ * Load converter dependent configuration parameters to apply on the hardware.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isValid()
+ * \brief Check if the converter configuration is valid
+ * \return True if the converter is valid, false otherwise
+ */
+
+/**
+ * \fn Converter::formats()
+ * \brief Retrieve the list of supported pixel formats for an input pixel format
+ * \param[in] input Input pixel format to retrieve output pixel format list for
+ * \return The list of supported output pixel formats
+ */
+
+/**
+ * \fn Converter::sizes()
+ * \brief Retrieve the range of minimum and maximum output sizes for an input size
+ * \param[in] input Input stream size to retrieve range for
+ * \return A range of output image sizes
+ */
+
+/**
+ * \fn Converter::adjustInputSize()
+ * \brief Adjust the converter input \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter input stream
+ * \param[in] size The converter input size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter input size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::adjustOutputSize()
+ * \brief Adjust the converter output \a size to a valid value
+ * \param[in] pixFmt The pixel format of the converter output stream
+ * \param[in] size The converter output size to adjust to a valid value
+ * \param[in] align The desired alignment
+ * \return The adjusted converter output size or a null Size if \a size cannot
+ * be adjusted
+ */
+
+/**
+ * \fn Converter::strideAndFrameSize()
+ * \brief Retrieve the output stride and frame size for an input configuration
+ * \param[in] pixelFormat Input stream pixel format
+ * \param[in] size Input stream size
+ * \return A tuple indicating the stride and frame size or an empty tuple on error
+ */
+
+/**
+ * \fn Converter::validateOutput()
+ * \brief Validate and possibly adjust \a cfg to a valid converter output
+ * \param[inout] cfg The StreamConfiguration to validate and adjust
+ * \param[out] adjusted Set to true if \a cfg has been adjusted
+ * \param[in] align The desired alignment
+ * \return 0 if \a cfg is valid or has been adjusted, a negative error code
+ * otherwise if \a cfg cannot be adjusted
+ */
+
+/**
+ * \fn Converter::configure()
+ * \brief Configure a set of output stream conversion from an input stream
+ * \param[in] inputCfg Input stream configuration
+ * \param[out] outputCfgs A list of output stream configurations
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::isConfigured()
+ * \brief Check if a given stream is configured
+ * \param[in] stream The output stream
+ * \return True if the \a stream is configured or false otherwise
+ */
+
+/**
+ * \fn Converter::exportBuffers()
+ * \brief Export buffers from the converter device
+ * \param[in] stream Output stream pointer exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store allocated buffers
+ *
+ * This function operates similarly to V4L2VideoDevice::exportBuffers() on the
+ * output stream indicated by \a stream.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+
+/**
+ * \fn Converter::start()
+ * \brief Start the converter streaming operation
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::stop()
+ * \brief Stop the converter streaming operation
+ */
+
+/**
+ * \fn Converter::queueBuffers()
+ * \brief Queue buffers to converter device
+ * \param[in] input The frame buffer to apply the conversion
+ * \param[out] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs.
+ *
+ * This function queues the \a input frame buffer and, for each output stream
+ * used as a key in the \a outputs map, retrieves the converted frame into the
+ * frame buffer given as the corresponding map value.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
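A call sketch, assuming the map-based signature described above; the stream and buffer pointers are placeholders:

/* Convert one input buffer into two differently configured outputs. */
std::map<const libcamera::Stream *, libcamera::FrameBuffer *> outputs{
        { streamA, bufferA },
        { streamB, bufferB },
};
converter->queueBuffers(inputBuffer, outputs);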
+/**
+ * \fn Converter::setInputCrop()
+ * \brief Set the crop rectangle \a rect for \a stream
+ * \param[in] stream The output stream
+ * \param[inout] rect The crop rectangle to apply and return the rectangle
+ * that is actually applied
+ *
+ * Set the crop rectangle \a rect for \a stream provided the converter supports
+ * cropping. The converter has the Feature::InputCrop flag in this case.
+ *
+ * The underlying hardware can adjust the rectangle supplied by the user
+ * due to hardware constraints. The caller can inspect \a rect to determine the
+ * actual rectangle that has been applied by the converter, after this function
+ * returns.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn Converter::inputCropBounds()
+ * \brief Retrieve the crop bounds of the converter
+ *
+ * Retrieve the minimum and maximum crop bounds of the converter. This can be
+ * used to query the crop bounds before configuring a stream.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \fn Converter::inputCropBounds(const Stream *stream)
+ * \brief Retrieve the crop bounds for \a stream
+ * \param[in] stream The output stream
+ *
+ * Retrieve the minimum and maximum crop bounds for \a stream. The converter
+ * should support cropping (Feature::InputCrop).
+ *
+ * The crop bounds depend on the configuration of the output stream and hence
+ * this function should be called after the \a stream has been configured using
+ * configure().
+ *
+ * When called with an unconfigured \a stream, this function returns a pair of
+ * null rectangles.
+ *
+ * \return A pair containing the minimum and maximum crop bound in that order
+ */
+
+/**
+ * \var Converter::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var Converter::outputBufferReady
+ * \brief A signal emitted on each frame buffer completion of the output queue
+ */
+
+/**
+ * \var Converter::features_
+ * \brief Stores the features supported by the converter
+ */
+
+/**
+ * \fn Converter::deviceNode()
+ * \brief The converter device node attribute accessor
+ * \return The converter device node string
+ */
+
+/**
+ * \fn Converter::features()
+ * \brief Retrieve the features supported by the converter
+ * \return The converter Features flags
+ */
+
+/**
+ * \class ConverterFactoryBase
+ * \brief Base class for converter factories
+ *
+ * The ConverterFactoryBase class is the base of all specializations of the
+ * ConverterFactory class template. It implements the factory registration,
+ * maintains a registry of factories, and provides access to the registered
+ * factories.
+ */
+
+/**
+ * \brief Construct a converter factory base
+ * \param[in] name Name of the converter class
+ * \param[in] compatibles Name aliases of the converter class
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ *
+ * The factory \a name is used as a unique identifier. If the converter
+ * implementation fully relies on a generic framework, the name should be the
+ * same as the framework's. Otherwise, if the implementation is specialized,
+ * the factory name should match the name of the driver implementing the
+ * function.
+ *
+ * The factory \a compatibles holds a list of driver names that implement a
+ * generic subsystem without any customizations.
+ */
+ConverterFactoryBase::ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles)
+ : name_(name), compatibles_(compatibles)
+{
+ registerType(this);
+}
+
+/**
+ * \fn ConverterFactoryBase::compatibles()
+ * \return The list of compatible name aliases of the converter
+ */
+
+/**
+ * \brief Create an instance of the converter corresponding to the media device
+ * \param[in] media The media device to create the converter for
+ *
+ * The converter is created by matching the factory name or any of its
+ * compatible aliases with the media device driver name.
+ *
+ * \return A new instance of the converter subclass corresponding to the media
+ * device, or null if the media device driver name doesn't match anything
+ */
+std::unique_ptr<Converter> ConverterFactoryBase::create(MediaDevice *media)
+{
+ const std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (const ConverterFactoryBase *factory : factories) {
+ const std::vector<std::string> &compatibles = factory->compatibles();
+ auto it = std::find(compatibles.begin(), compatibles.end(), media->driver());
+
+ if (it == compatibles.end() && media->driver() != factory->name_)
+ continue;
+
+ LOG(Converter, Debug)
+ << "Creating converter from "
+ << factory->name_ << " factory with "
+ << (it == compatibles.end() ? "no" : media->driver()) << " alias.";
+
+ std::unique_ptr<Converter> converter = factory->createInstance(media);
+ if (converter->isValid())
+ return converter;
+ }
+
+ return nullptr;
+}
+
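+/*
+ * Sketch of a caller creating a converter for a matched media device (error
+ * handling elided; "m2mMedia" is a hypothetical MediaDevice pointer):
+ *
+ * \code
+ * std::unique_ptr<Converter> converter = ConverterFactoryBase::create(m2mMedia);
+ * if (!converter)
+ * 	return;
+ * \endcode
+ */
+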
+/**
+ * \brief Add a converter factory to the registry
+ * \param[in] factory Factory to use to construct the converter class
+ *
+ * The caller is responsible for guaranteeing the uniqueness of the converter
+ * factory name.
+ */
+void ConverterFactoryBase::registerType(ConverterFactoryBase *factory)
+{
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ factories.push_back(factory);
+}
+
+/**
+ * \brief Retrieve the list of all converter factory names
+ * \return The list of all converter factory names
+ */
+std::vector<std::string> ConverterFactoryBase::names()
+{
+ std::vector<std::string> list;
+
+ std::vector<ConverterFactoryBase *> &factories =
+ ConverterFactoryBase::factories();
+
+ for (ConverterFactoryBase *factory : factories) {
+ list.push_back(factory->name_);
+
+ const auto &compatibles = factory->compatibles();
+ list.insert(list.end(), compatibles.begin(), compatibles.end());
+ }
+
+ return list;
+}
+
+/**
+ * \brief Retrieve the list of all converter factories
+ * \return The list of converter factories
+ */
+std::vector<ConverterFactoryBase *> &ConverterFactoryBase::factories()
+{
+ /*
+	 * The static factories vector is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<ConverterFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \var ConverterFactoryBase::name_
+ * \brief The name of the factory
+ */
+
+/**
+ * \var ConverterFactoryBase::compatibles_
+ * \brief The list holding the factory compatibles
+ */
+
+/**
+ * \class ConverterFactory
+ * \brief Registration of ConverterFactory classes and creation of instances
+ * \param _Converter The converter class type for this factory
+ *
+ * To facilitate discovery and instantiation of Converter classes, the
+ * ConverterFactory class implements auto-registration of converter helpers.
+ * Each Converter subclass shall register itself using the REGISTER_CONVERTER()
+ * macro, which will create a corresponding instance of a ConverterFactory
+ * subclass and register it with the static list of factories.
+ */
+
+/**
+ * \fn ConverterFactory::ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ * \brief Construct a converter factory
+ * \details \copydetails ConverterFactoryBase::ConverterFactoryBase
+ */
+
+/**
+ * \fn ConverterFactory::createInstance() const
+ * \brief Create an instance of the Converter corresponding to the factory
+ * \param[in] media Media device pointer
+ * \return A unique pointer to a newly constructed instance of the Converter
+ * subclass corresponding to the factory
+ */
+
+/**
+ * \def REGISTER_CONVERTER
+ * \brief Register a converter with the Converter factory
+ * \param[in] name Converter name used to register the class
+ * \param[in] converter Class name of Converter derived class to register
+ * \param[in] compatibles List of compatible names
+ *
+ * Register a Converter subclass with the factory and make it available to try
+ * and match converters.
+ */
+
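+/*
+ * A minimal registration sketch, mirroring the pattern used by
+ * V4L2M2MConverter; "MyConverter" and the driver names are hypothetical:
+ *
+ * \code
+ * static std::initializer_list<std::string> myCompatibles = {
+ * 	"my-m2m-driver",
+ * };
+ *
+ * REGISTER_CONVERTER("my-converter", MyConverter, myCompatibles)
+ * \endcode
+ */
+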
+} /* namespace libcamera */
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
new file mode 100644
index 00000000..566f18ce
--- /dev/null
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
+ *
+ * V4L2 M2M Format converter
+ */
+
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
+
+#include <algorithm>
+#include <limits.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/**
+ * \file converter/converter_v4l2_m2m.h
+ * \brief V4L2 M2M based converter
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Converter)
+
+namespace {
+
+int getCropBounds(V4L2VideoDevice *device, Rectangle &minCrop,
+ Rectangle &maxCrop)
+{
+ Rectangle minC;
+ Rectangle maxC;
+
+ /* Find crop bounds */
+ minC.width = 1;
+ minC.height = 1;
+ maxC.width = UINT_MAX;
+ maxC.height = UINT_MAX;
+
+ int ret = device->setSelection(V4L2_SEL_TGT_CROP, &minC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query minimum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ ret = device->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not query maximum selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ /* Reset the crop to its maximum */
+ ret = device->setSelection(V4L2_SEL_TGT_CROP, &maxC);
+ if (ret) {
+ LOG(Converter, Error)
+ << "Could not reset selection crop: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ minCrop = minC;
+ maxCrop = maxC;
+ return 0;
+}
+
+} /* namespace */
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter::V4L2M2MStream
+ */
+
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
+{
+ m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
+
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
+
+ int ret = m2m_->open();
+ if (ret < 0)
+ m2m_.reset();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
+{
+ V4L2PixelFormat videoFormat =
+ m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
+
+ V4L2DeviceFormat format;
+ format.fourcc = videoFormat;
+ format.size = inputCfg.size;
+ format.planesCount = 1;
+ format.planes[0].bpl = inputCfg.stride;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set input format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != inputCfg.size ||
+ format.planes[0].bpl != inputCfg.stride) {
+ LOG(Converter, Error)
+ << "Input format not supported (requested "
+ << inputCfg.size << "-" << videoFormat
+ << ", got " << format << ")";
+ return -EINVAL;
+ }
+
+ /* Set the pixel format and size on the output. */
+ videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
+ format = {};
+ format.fourcc = videoFormat;
+ format.size = outputCfg.size;
+
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set output format: " << strerror(-ret);
+ return ret;
+ }
+
+ if (format.fourcc != videoFormat || format.size != outputCfg.size) {
+ LOG(Converter, Error)
+ << "Output format not supported";
+ return -EINVAL;
+ }
+
+ inputBufferCount_ = inputCfg.bufferCount;
+ outputBufferCount_ = outputCfg.bufferCount;
+
+ if (converter_->features() & Feature::InputCrop) {
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ return m2m_->capture()->exportBuffers(count, buffers);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::start()
+{
+ int ret = m2m_->output()->importBuffers(inputBufferCount_);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->importBuffers(outputBufferCount_);
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->output()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ ret = m2m_->capture()->streamOn();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+
+ return 0;
+}
+
+void V4L2M2MConverter::V4L2M2MStream::stop()
+{
+ m2m_->capture()->streamOff();
+ m2m_->output()->streamOff();
+ m2m_->capture()->releaseBuffers();
+ m2m_->output()->releaseBuffers();
+}
+
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+{
+ int ret = m2m_->output()->queueBuffer(input);
+ if (ret < 0)
+ return ret;
+
+ ret = m2m_->capture()->queueBuffer(output);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int V4L2M2MConverter::V4L2M2MStream::getInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->getSelection(target, rect);
+}
+
+int V4L2M2MConverter::V4L2M2MStream::setInputSelection(unsigned int target, Rectangle *rect)
+{
+ return m2m_->output()->setSelection(target, rect);
+}
+
+std::pair<Rectangle, Rectangle> V4L2M2MConverter::V4L2M2MStream::inputCropBounds()
+{
+ return inputCropBounds_;
+}
+
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
+{
+ return stream_->configuration().toString();
+}
+
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
+{
+ auto it = converter_->queue_.find(buffer);
+ if (it == converter_->queue_.end())
+ return;
+
+ if (!--it->second) {
+ converter_->inputBufferReady.emit(buffer);
+ converter_->queue_.erase(it);
+ }
+}
+
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
+{
+ converter_->outputBufferReady.emit(buffer);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2M2MConverter
+ */
+
+/**
+ * \class libcamera::V4L2M2MConverter
+ * \brief The V4L2 M2M converter implements the converter interface based on
+ * a V4L2 memory-to-memory device.
+ */
+
+/**
+ * \fn V4L2M2MConverter::V4L2M2MConverter
+ * \brief Construct a V4L2M2MConverter instance
+ * \param[in] media The media device implementing the converter
+ */
+
+V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
+ : Converter(media)
+{
+ if (deviceNode().empty())
+ return;
+
+ m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
+ int ret = m2m_->open();
+ if (ret < 0) {
+ m2m_.reset();
+ return;
+ }
+
+ ret = getCropBounds(m2m_->output(), inputCropBounds_.first,
+ inputCropBounds_.second);
+ if (!ret && inputCropBounds_.first != inputCropBounds_.second) {
+ features_ |= Feature::InputCrop;
+
+ LOG(Converter, Info)
+ << "Converter supports cropping on its input";
+ }
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::loadConfiguration
+ * \details \copydetails libcamera::Converter::loadConfiguration
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::isValid
+ * \details \copydetails libcamera::Converter::isValid
+ */
+
+/**
+ * \fn libcamera::V4L2M2MConverter::formats
+ * \details \copydetails libcamera::Converter::formats
+ */
+std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the format on the input side (V4L2 output) of the converter to
+ * enumerate the conversion capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat v4l2Format;
+ v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
+ v4l2Format.size = { 1, 1 };
+
+ int ret = m2m_->output()->setFormat(&v4l2Format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
+ LOG(Converter, Debug)
+ << "Input format " << input << " not supported.";
+ return {};
+ }
+
+ std::vector<PixelFormat> pixelFormats;
+
+ for (const auto &format : m2m_->capture()->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (pixelFormat)
+ pixelFormats.push_back(pixelFormat);
+ }
+
+ return pixelFormats;
+}
+
+/**
+ * \copydoc libcamera::Converter::sizes
+ */
+SizeRange V4L2M2MConverter::sizes(const Size &input)
+{
+ if (!m2m_)
+ return {};
+
+ /*
+ * Set the size on the input side (V4L2 output) of the converter to
+ * enumerate the scaling capabilities on its output (V4L2 capture).
+ */
+ V4L2DeviceFormat format;
+ format.fourcc = V4L2PixelFormat();
+ format.size = input;
+
+ int ret = m2m_->output()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ SizeRange sizes;
+
+ format.size = { 1, 1 };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.min = format.size;
+
+ format.size = { UINT_MAX, UINT_MAX };
+ ret = m2m_->capture()->setFormat(&format);
+ if (ret < 0) {
+ LOG(Converter, Error)
+ << "Failed to set format: " << strerror(-ret);
+ return {};
+ }
+
+ sizes.max = format.size;
+
+ return sizes;
+}
+
+/**
+ * \copydoc libcamera::Converter::strideAndFrameSize
+ */
+std::tuple<unsigned int, unsigned int>
+V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
+ const Size &size)
+{
+ V4L2DeviceFormat format;
+ format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
+ format.size = size;
+
+ int ret = m2m_->capture()->tryFormat(&format);
+ if (ret < 0)
+ return std::make_tuple(0, 0);
+
+ return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustInputSize
+ */
+Size V4L2M2MConverter::adjustInputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->output()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->output()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+/**
+ * \copydoc libcamera::Converter::adjustOutputSize
+ */
+Size V4L2M2MConverter::adjustOutputSize(const PixelFormat &pixFmt,
+ const Size &size, Alignment align)
+{
+ auto formats = m2m_->capture()->formats();
+ V4L2PixelFormat v4l2PixFmt = m2m_->capture()->toV4L2PixelFormat(pixFmt);
+
+ auto it = formats.find(v4l2PixFmt);
+ if (it == formats.end()) {
+ LOG(Converter, Info)
+ << "Unsupported pixel format " << pixFmt;
+ return {};
+ }
+
+ return adjustSizes(size, it->second, align);
+}
+
+Size V4L2M2MConverter::adjustSizes(const Size &cfgSize,
+ const std::vector<SizeRange> &ranges,
+ Alignment align)
+{
+ Size size = cfgSize;
+
+ if (ranges.size() == 1) {
+ /*
+ * The device supports either V4L2_FRMSIZE_TYPE_CONTINUOUS or
+ * V4L2_FRMSIZE_TYPE_STEPWISE.
+ */
+ const SizeRange &range = *ranges.begin();
+
+ size.width = std::clamp(size.width, range.min.width,
+ range.max.width);
+ size.height = std::clamp(size.height, range.min.height,
+ range.max.height);
+
+ /*
+ * Check if any alignment is needed. If the sizes are already
+ * aligned, or the device supports V4L2_FRMSIZE_TYPE_CONTINUOUS
+ * with hStep and vStep equal to 1, we're done here.
+ */
+ int widthR = size.width % range.hStep;
+ int heightR = size.height % range.vStep;
+
+ /* Align up or down according to the caller request. */
+
+ if (widthR != 0)
+ size.width = size.width - widthR
+ + ((align == Alignment::Up) ? range.hStep : 0);
+
+ if (heightR != 0)
+ size.height = size.height - heightR
+ + ((align == Alignment::Up) ? range.vStep : 0);
+ } else {
+		/*
+		 * The device supports V4L2_FRMSIZE_TYPE_DISCRETE, find the
+		 * size closest to the requested output configuration.
+		 *
+		 * The size ranges vector is not ordered, so we sort it first.
+		 * If we align up, start from the largest element.
+		 */
+		std::vector<Size> sizes;
+		sizes.reserve(ranges.size());
+		std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+			       [](const SizeRange &range) { return range.max; });
+ std::sort(sizes.begin(), sizes.end());
+
+ if (align == Alignment::Up)
+ std::reverse(sizes.begin(), sizes.end());
+
+ /*
+ * Return true if s2 is valid according to the desired
+ * alignment: smaller than s1 if we align down, larger than s1
+ * if we align up.
+ */
+ auto nextSizeValid = [](const Size &s1, const Size &s2, Alignment a) {
+ return a == Alignment::Down
+ ? (s1.width > s2.width && s1.height > s2.height)
+ : (s1.width < s2.width && s1.height < s2.height);
+ };
+
+ Size newSize;
+ for (const Size &sz : sizes) {
+ if (!nextSizeValid(size, sz, align))
+ break;
+
+ newSize = sz;
+ }
+
+ if (newSize.isNull()) {
+ LOG(Converter, Error)
+ << "Cannot adjust " << cfgSize
+ << " to a supported converter size";
+ return {};
+ }
+
+ size = newSize;
+ }
+
+ return size;
+}
+
+/**
+ * \copydoc libcamera::Converter::configure
+ */
+int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ int ret = 0;
+
+ streams_.clear();
+
+ for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
+
+ if (!stream->isValid()) {
+ LOG(Converter, Error)
+ << "Failed to create stream " << i;
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = stream->configure(inputCfg, cfg);
+ if (ret < 0)
+ break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
+ }
+
+ if (ret < 0) {
+ streams_.clear();
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::isConfigured
+ */
+bool V4L2M2MConverter::isConfigured(const Stream *stream) const
+{
+ return streams_.find(stream) != streams_.end();
+}
+
+/**
+ * \copydoc libcamera::Converter::exportBuffers
+ */
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
+ return -EINVAL;
+
+ return iter->second->exportBuffers(count, buffers);
+}
+
+/**
+ * \copydoc libcamera::Converter::setInputCrop
+ */
+int V4L2M2MConverter::setInputCrop(const Stream *stream, Rectangle *rect)
+{
+ if (!(features_ & Feature::InputCrop))
+ return -ENOTSUP;
+
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return -EINVAL;
+ }
+
+ return iter->second->setInputSelection(V4L2_SEL_TGT_CROP, rect);
+}
+
+/**
+ * \fn libcamera::V4L2M2MConverter::inputCropBounds()
+ * \copydoc libcamera::Converter::inputCropBounds()
+ */
+
+/**
+ * \copydoc libcamera::Converter::inputCropBounds(const Stream *stream)
+ */
+std::pair<Rectangle, Rectangle>
+V4L2M2MConverter::inputCropBounds(const Stream *stream)
+{
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end()) {
+ LOG(Converter, Error) << "Invalid output stream";
+ return {};
+ }
+
+ return iter->second->inputCropBounds();
+}
+
+/**
+ * \copydoc libcamera::Converter::start
+ */
+int V4L2M2MConverter::start()
+{
+ int ret;
+
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
+ if (ret < 0) {
+ stop();
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::stop
+ */
+void V4L2M2MConverter::stop()
+{
+ for (auto &iter : streams_)
+ iter.second->stop();
+}
+
+/**
+ * \copydoc libcamera::Converter::validateOutput
+ */
+int V4L2M2MConverter::validateOutput(StreamConfiguration *cfg, bool *adjusted,
+ Alignment align)
+{
+ V4L2VideoDevice *capture = m2m_->capture();
+ V4L2VideoDevice::Formats fmts = capture->formats();
+
+ if (adjusted)
+ *adjusted = false;
+
+ PixelFormat fmt = cfg->pixelFormat;
+ V4L2PixelFormat v4l2PixFmt = capture->toV4L2PixelFormat(fmt);
+
+ auto it = fmts.find(v4l2PixFmt);
+ if (it == fmts.end()) {
+ it = fmts.begin();
+ v4l2PixFmt = it->first;
+ cfg->pixelFormat = v4l2PixFmt.toPixelFormat();
+
+ if (adjusted)
+ *adjusted = true;
+
+ LOG(Converter, Info)
+ << "Converter output pixel format adjusted to "
+ << cfg->pixelFormat;
+ }
+
+ const Size cfgSize = cfg->size;
+ cfg->size = adjustSizes(cfgSize, it->second, align);
+
+ if (cfg->size.isNull())
+ return -EINVAL;
+
+ if (cfg->size.width != cfgSize.width ||
+ cfg->size.height != cfgSize.height) {
+ LOG(Converter, Info)
+ << "Converter size adjusted to "
+ << cfg->size;
+ if (adjusted)
+ *adjusted = true;
+ }
+
+ return 0;
+}
+
+/**
+ * \copydoc libcamera::Converter::queueBuffers
+ */
+int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ std::set<FrameBuffer *> outputBufs;
+ int ret;
+
+	/*
+	 * Validate the outputs as a sanity check: at least one output is
+	 * required, all outputs must reference a valid stream, and no two
+	 * streams can reference the same output frame buffer.
+	 */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+
+ outputBufs.insert(buffer);
+ }
+
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
+ /* Queue the input and output buffers to all the streams. */
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Add the input buffer to the queue, with the number of streams as a
+ * reference count. Completion of the input buffer will be signalled by
+ * the stream that releases the last reference.
+ */
+ queue_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(input),
+ std::forward_as_tuple(outputs.size()));
+
+ return 0;
+}
+
+/*
+ * \todo This should be extended to include a Feature::Flag to denote
+ * what each converter supports feature-wise.
+ */
+static std::initializer_list<std::string> compatibles = {
+ "mtk-mdp",
+ "pxp",
+};
+
+REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build
new file mode 100644
index 00000000..af1a80fe
--- /dev/null
+++ b/src/libcamera/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'converter_v4l2_m2m.cpp'
+])
diff --git a/src/libcamera/debug_controls.cpp b/src/libcamera/debug_controls.cpp
new file mode 100644
index 00000000..33960231
--- /dev/null
+++ b/src/libcamera/debug_controls.cpp
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Helper to easily record debug metadata inside libcamera.
+ */
+
+#include "libcamera/internal/debug_controls.h"
+
+#include <libcamera/base/log.h>
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DebugControls)
+
+/**
+ * \file debug_controls.h
+ * \brief Helper to easily record debug metadata inside libcamera
+ */
+
+/**
+ * \class DebugMetadata
+ * \brief Helper to record metadata for later use
+ *
+ * Metadata is a useful tool for debugging the internal state of libcamera. It
+ * has the benefit that it is easy to use and related tooling is readily
+ * available. The difficulty is that the metadata control list is often not
+ * directly available (either because the variable to debug lives inside
+ * process() of an IPA or inside a closed algorithm class with no direct access
+ * to the IPA and therefore the metadata list).
+ *
+ * This class helps in both cases. It allows forwarding the data to a parent,
+ * or alternatively recording the data and copying it to the metadata list at a
+ * later point in time, when that list becomes available. Both mechanisms allow
+ * easy reuse and loose coupling.
+ *
+ * Typical usage is to instantiate a DebugMetadata object in every
+ * class/algorithm where debug metadata shall be recorded (the inner object). If
+ * the IPA doesn't support debug metadata, the object is still usable, but the
+ * debug data gets dropped. If the IPA supports debug metadata, it will either
+ * register a parent DebugMetadata object on the inner object or manually
+ * retrieve the data using enable()/moveEntries().
+ *
+ * The concepts of forwarding to a parent and recording for later retrieval are
+ * mutually exclusive, and the parent takes precedence. E.g. it is not allowed
+ * to enable a DebugMetadata object, log entries to it and later set the
+ * parent.
+ *
+ * This is done to keep the path open for using other means of data transport
+ * (like tracing). For every tracing event a corresponding context needs to be
+ * available at set() time. The parent can be treated as such, and the top
+ * level object (the one where enable() gets called) also lives in a place
+ * where that information is available.
+ */
+
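+/*
+ * A rough usage sketch under the model described above; the control id and
+ * the surrounding variables ("requestControls", "value", "resultMetadata")
+ * are hypothetical:
+ *
+ * \code
+ * DebugMetadata debug;
+ *
+ * debug.enableByControl(requestControls);
+ * debug.set(controls::debug::SomeValue, value);
+ * debug.moveEntries(resultMetadata);
+ * \endcode
+ */
+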
+/**
+ * \fn DebugMetadata::enableByControl()
+ * \brief Enable based on controls::DebugMetadataEnable in the supplied
+ * ControlList
+ * \param[in] controls The supplied ControlList
+ *
+ * This function looks for controls::DebugMetadataEnable and enables or disables
+ * debug metadata handling accordingly.
+ */
+void DebugMetadata::enableByControl(const ControlList &controls)
+{
+ const auto &ctrl = controls.get(controls::DebugMetadataEnable);
+ if (ctrl)
+ enable(*ctrl);
+}
+
+/**
+ * \fn DebugMetadata::enable()
+ * \brief Enable or disable metadata handling
+ * \param[in] enable The enable state
+ *
+ * When \a enable is true, all values passed to set() get cached and can later
+ * be retrieved using moveEntries(). When \a enable is false, the cache gets
+ * cleared and no further metadata is recorded.
+ *
+ * Forwarding to a parent is independent of the enabled state.
+ */
+void DebugMetadata::enable(bool enable)
+{
+ enabled_ = enable;
+ if (!enabled_)
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::setParent()
+ * \brief Set the parent metadata handler to \a parent
+ * \param[in] parent The parent handler
+ *
+ * When a \a parent is set, all further calls to set() are unconditionally
+ * forwarded to that instance.
+ *
+ * The parent can be reset by passing a nullptr.
+ */
+void DebugMetadata::setParent(DebugMetadata *parent)
+{
+ parent_ = parent;
+
+ if (!parent_)
+ return;
+
+ if (!cache_.empty())
+ LOG(DebugControls, Error)
+ << "Controls were recorded before setting a parent."
+ << " These are dropped.";
+
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::moveEntries()
+ * \brief Move all cached entries into control list \a list
+ * \param[in] list The control list
+ *
+ * This function moves all entries into the list specified by \a list. Duplicate
+ * entries in \a list get overwritten.
+ */
+void DebugMetadata::moveEntries(ControlList &list)
+{
+ list.merge(std::move(cache_), ControlList::MergePolicy::OverwriteExisting);
+ cache_.clear();
+}
+
+/**
+ * \fn DebugMetadata::set(const Control<T> &ctrl, const V &value)
+ * \brief Set the value of \a ctrl to \a value
+ * \param[in] ctrl The control to set
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+
+/**
+ * \fn DebugMetadata::set(unsigned int id, const ControlValue &value)
+ * \brief Set the value of control \a id to \a value
+ * \param[in] id The id of the control
+ * \param[in] value The control value
+ *
+ * If a parent is set, the value gets passed there unconditionally. Otherwise it
+ * gets cached if the instance is enabled or dropped silently when disabled.
+ *
+ * \sa enable()
+ */
+void DebugMetadata::set(unsigned int id, const ControlValue &value)
+{
+ if (parent_) {
+ parent_->set(id, value);
+ return;
+ }
+
+ if (!enabled_)
+ return;
+
+ cache_.set(id, value);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/delayed_controls.cpp b/src/libcamera/delayed_controls.cpp
new file mode 100644
index 00000000..94d0a575
--- /dev/null
+++ b/src/libcamera/delayed_controls.cpp
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ */
+
+#include "libcamera/internal/delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(DelayedControls)
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of the buffer depth that the application
+ * needs to maintain for a pipeline to also cover controls. Just as with buffer
+ * depth, if the application keeps the number of requests queued above the
+ * control depth, the controls are guaranteed to take effect for the correct
+ * request. The control depth is determined by the control with the greatest
+ * delay.
+ */
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
+ * the existing vertical blanking specified bounds, but are within the new
+ * blanking bounds.
+ */
+
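+/*
+ * Typical pipeline handler usage, sketched with illustrative delay values;
+ * "device", "request" and "sequence" are hypothetical:
+ *
+ * \code
+ * std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ * 	{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ * 	{ V4L2_CID_EXPOSURE, { 2, false } },
+ * };
+ * DelayedControls delayed(device, params);
+ *
+ * delayed.push(request->controls());	// per queued request
+ * delayed.applyControls(sequence);	// on each frame start event
+ * ControlList applied = delayed.get(sequence);
+ * \endcode
+ */
+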
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters consist of delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of, the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. If it's desired to
+ * mix delayed controls and controls that take effect immediately, the
+ * immediate controls must be listed in the \a controlParams map with a delay
+ * value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(DelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(DelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset();
+}
+
+/**
+ * \brief Reset state machine
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset()
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+
+	/* Retrieve the controls as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+ * Do not mark this control value as updated, it does not need
+		 * to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(DelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(DelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back which controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist.
+ * It's the caller's responsibility not to read sequence numbers so old that
+ * they have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of controls pushed.
+ *
+ * \return The controls at \a sequence number
+ */
+ControlList DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(DelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return out;
+}
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(DelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(DelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(DelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({});
+ }
+
+ device_->setControls(&out);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/device_enumerator.cpp b/src/libcamera/device_enumerator.cpp
index dd17e3e3..ae17862f 100644
--- a/src/libcamera/device_enumerator.cpp
+++ b/src/libcamera/device_enumerator.cpp
@@ -2,17 +2,18 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.cpp - Enumeration and matching
+ * Enumeration and matching
*/
-#include "device_enumerator.h"
-#include "device_enumerator_sysfs.h"
-#include "device_enumerator_udev.h"
+#include "libcamera/internal/device_enumerator.h"
#include <string.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/device_enumerator_sysfs.h"
+#include "libcamera/internal/device_enumerator_udev.h"
+#include "libcamera/internal/media_device.h"
/**
* \file device_enumerator.h
@@ -55,7 +56,7 @@ LOG_DEFINE_CATEGORY(DeviceEnumerator)
* names can be added as match criteria.
*
* Pipeline handlers are recommended to add entities to DeviceMatch as
- * appropriare to ensure that the media device they need can be uniquely
+ * appropriate to ensure that the media device they need can be uniquely
* identified. This is useful when the corresponding kernel driver can produce
* different graphs, for instance as a result of different driver versions or
* hardware configurations, and not all those graphs are suitable for a pipeline
@@ -100,8 +101,14 @@ bool DeviceMatch::match(const MediaDevice *device) const
for (const MediaEntity *entity : device->entities()) {
if (name == entity->name()) {
- found = true;
- break;
+ if (!entity->deviceNode().empty()) {
+ found = true;
+ break;
+ } else {
+ LOG(DeviceEnumerator, Debug)
+ << "Skip " << entity->name()
+ << ": no device node";
+ }
}
}
@@ -160,7 +167,7 @@ std::unique_ptr<DeviceEnumerator> DeviceEnumerator::create()
DeviceEnumerator::~DeviceEnumerator()
{
- for (std::shared_ptr<MediaDevice> media : devices_) {
+ for (const std::shared_ptr<MediaDevice> &media : devices_) {
if (media->busy())
LOG(DeviceEnumerator, Error)
<< "Removing media device " << media->deviceNode()
@@ -228,20 +235,33 @@ std::unique_ptr<MediaDevice> DeviceEnumerator::createDevice(const std::string &d
}
/**
+ * \var DeviceEnumerator::devicesAdded
+ * \brief Notify of new media devices being found
+ *
+ * This signal is emitted when the device enumerator finds new media devices in
+ * the system. It may be emitted for every newly detected device, or once for
+ * multiple devices, at the discretion of the device enumerator. Not all device
+ * enumerator types may support dynamic detection of new devices.
+ */
+
+/**
* \brief Add a media device to the enumerator
* \param[in] media media device instance to add
*
* Store the media device in the internal list for later matching with
* pipeline handlers. \a media shall be created with createDevice() first.
- * This method shall be called after all members of the entities of the
+ * This function shall be called after all members of the entities of the
* media graph have been confirmed to be initialized.
*/
-void DeviceEnumerator::addDevice(std::unique_ptr<MediaDevice> &&media)
+void DeviceEnumerator::addDevice(std::unique_ptr<MediaDevice> media)
{
LOG(DeviceEnumerator, Debug)
<< "Added device " << media->deviceNode() << ": " << media->driver();
devices_.push_back(std::move(media));
+
+ /* \todo To batch multiple additions, emit with a small delay here. */
+ devicesAdded.emit();
}
/**
@@ -274,7 +294,7 @@ void DeviceEnumerator::removeDevice(const std::string &deviceNode)
LOG(DeviceEnumerator, Debug)
<< "Media device for node " << deviceNode << " removed.";
- media->disconnected.emit(media.get());
+ media->disconnected.emit();
}
/**
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index 3446db59..7866885c 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.cpp - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
-#include "device_enumerator_sysfs.h"
+#include "libcamera/internal/device_enumerator_sysfs.h"
#include <dirent.h>
#include <fcntl.h>
@@ -17,8 +17,9 @@
#include <sys/types.h>
#include <unistd.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
namespace libcamera {
@@ -32,7 +33,7 @@ int DeviceEnumeratorSysfs::init()
int DeviceEnumeratorSysfs::enumerate()
{
struct dirent *ent;
- DIR *dir;
+ DIR *dir = nullptr;
static const char * const sysfs_dirs[] = {
"/sys/subsystem/media/devices",
diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp
index 9cbc7e47..4e20a3cc 100644
--- a/src/libcamera/device_enumerator_udev.cpp
+++ b/src/libcamera/device_enumerator_udev.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.cpp - udev-based device enumerator
+ * udev-based device enumerator
*/
-#include "device_enumerator_udev.h"
+#include "libcamera/internal/device_enumerator_udev.h"
#include <algorithm>
#include <fcntl.h>
@@ -13,21 +13,22 @@
#include <list>
#include <map>
#include <string.h>
+#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
-#include <libcamera/event_notifier.h>
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
-#include "log.h"
-#include "media_device.h"
+#include "libcamera/internal/media_device.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(DeviceEnumerator)
DeviceEnumeratorUdev::DeviceEnumeratorUdev()
- : udev_(nullptr)
+ : udev_(nullptr), monitor_(nullptr), notifier_(nullptr)
{
}
@@ -94,7 +95,8 @@ int DeviceEnumeratorUdev::addUdevDevice(struct udev_device *dev)
if (!deps.empty()) {
LOG(DeviceEnumerator, Debug)
<< "Defer media device " << media->deviceNode()
- << " due to " << ret << " missing dependencies";
+ << " due to " << deps.size()
+ << " missing dependencies";
pending_.emplace_back(std::move(media), std::move(deps));
MediaDeviceDeps *mediaDeps = &pending_.back();
@@ -314,6 +316,7 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
* enumerator.
*/
deps->deps_.erase(devnum);
+ devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
@@ -326,21 +329,29 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
return 0;
}
-void DeviceEnumeratorUdev::udevNotify(EventNotifier *notifier)
+void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
- std::string action(udev_device_get_action(dev));
- std::string deviceNode(udev_device_get_devnode(dev));
+ if (!dev) {
+ int err = errno;
+ LOG(DeviceEnumerator, Warning)
+			<< "Ignoring notification received without a device: "
+ << strerror(err);
+ return;
+ }
+
+ std::string_view action(udev_device_get_action(dev));
+ std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
- << action << " device " << udev_device_get_devnode(dev);
+ << action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
- removeDevice(deviceNode);
+ removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp
new file mode 100644
index 00000000..a014c3b4
--- /dev/null
+++ b/src/libcamera/dma_buf_allocator.cpp
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Red Hat Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-buf allocations.
+ */
+
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include <array>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/udmabuf.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/memfd.h>
+#include <libcamera/base/shared_fd.h>
+
+#include <libcamera/framebuffer.h>
+
+/**
+ * \file dma_buf_allocator.cpp
+ * \brief dma-buf allocator
+ */
+
+namespace libcamera {
+
+#ifndef __DOXYGEN__
+struct DmaBufAllocatorInfo {
+ DmaBufAllocator::DmaBufAllocatorFlag type;
+ const char *deviceNodeName;
+};
+#endif
+
+static constexpr std::array<DmaBufAllocatorInfo, 4> providerInfos = { {
+ /*
+ * /dev/dma_heap/linux,cma is the CMA dma-heap. When the cma heap size is
+ * specified on the kernel command line, this gets renamed to "reserved".
+ */
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/linux,cma" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap, "/dev/dma_heap/reserved" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap, "/dev/dma_heap/system" },
+ { DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf, "/dev/udmabuf" },
+} };
+
+LOG_DEFINE_CATEGORY(DmaBufAllocator)
+
+/**
+ * \class DmaBufAllocator
+ * \brief Helper class for dma-buf allocations
+ *
+ * This class wraps a userspace dma-buf provider selected at construction time,
+ * and exposes functions to allocate dma-buffers from this provider.
+ *
+ * Different providers may provide dma-buffers with different properties for
+ * the underlying memory. Which providers are acceptable is specified through
+ * the type argument passed to the DmaBufAllocator() constructor.
+ */
+
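+/*
+ * Allocation sketch; the buffer count and the plane sizes ("ySize", "uvSize")
+ * are illustrative:
+ *
+ * \code
+ * DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap);
+ * if (allocator.isValid()) {
+ * 	std::vector<std::unique_ptr<FrameBuffer>> buffers;
+ * 	allocator.exportBuffers(4, { ySize, uvSize }, &buffers);
+ * }
+ * \endcode
+ */
+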
+/**
+ * \enum DmaBufAllocator::DmaBufAllocatorFlag
+ * \brief Type of the dma-buf provider
+ * \var DmaBufAllocator::CmaHeap
+ * \brief Allocate from a CMA dma-heap, providing physically-contiguous memory
+ * \var DmaBufAllocator::SystemHeap
+ * \brief Allocate from the system dma-heap, using the page allocator
+ * \var DmaBufAllocator::UDmaBuf
+ * \brief Allocate using a memfd + /dev/udmabuf
+ */
+
+/**
+ * \typedef DmaBufAllocator::DmaBufAllocatorFlags
+ * \brief A bitwise combination of DmaBufAllocator::DmaBufAllocatorFlag values
+ */
+
+/**
+ * \brief Construct a DmaBufAllocator of a given type
+ * \param[in] type The type(s) of the dma-buf providers to allocate from
+ *
+ * The dma-buf provider type is selected with the \a type parameter, which
+ * defaults to the CMA heap. If no provider of the given type can be accessed,
+ * the constructed DmaBufAllocator instance is invalid as indicated by
+ * the isValid() function.
+ *
+ * Multiple types can be selected by combining type flags, in which case
+ * the constructed DmaBufAllocator will match one of the types. If multiple
+ * requested types can work on the system, which provider is used is undefined.
+ */
+DmaBufAllocator::DmaBufAllocator(DmaBufAllocatorFlags type)
+{
+ for (const auto &info : providerInfos) {
+ if (!(type & info.type))
+ continue;
+
+ int ret = ::open(info.deviceNodeName, O_RDWR | O_CLOEXEC, 0);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Debug)
+ << "Failed to open " << info.deviceNodeName << ": "
+ << strerror(ret);
+ continue;
+ }
+
+ LOG(DmaBufAllocator, Debug) << "Using " << info.deviceNodeName;
+ providerHandle_ = UniqueFD(ret);
+ type_ = info.type;
+ break;
+ }
+
+ if (!providerHandle_.isValid())
+ LOG(DmaBufAllocator, Error) << "Could not open any dma-buf provider";
+}
+
+/**
+ * \brief Destroy the DmaBufAllocator instance
+ */
+DmaBufAllocator::~DmaBufAllocator() = default;
+
+/**
+ * \fn DmaBufAllocator::isValid()
+ * \brief Check if the DmaBufAllocator instance is valid
+ * \return True if the DmaBufAllocator is valid, false otherwise
+ */
+UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size)
+{
+ /* Size must be a multiple of the page size. Round it up. */
+ std::size_t pageMask = sysconf(_SC_PAGESIZE) - 1;
+ size = (size + pageMask) & ~pageMask;
+
+ /* udmabuf dma-buffers *must* have the F_SEAL_SHRINK seal. */
+ UniqueFD memfd = MemFd::create(name, size, MemFd::Seal::Shrink);
+ if (!memfd.isValid())
+ return {};
+
+ struct udmabuf_create create;
+
+ create.memfd = memfd.get();
+ create.flags = UDMABUF_FLAGS_CLOEXEC;
+ create.offset = 0;
+ create.size = size;
+
+ int ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create);
+ if (ret < 0) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Failed to create dma buf for " << name
+ << ": " << strerror(ret);
+ return {};
+ }
+
+	/* The underlying memfd is kept as a reference in the kernel. */
+ return UniqueFD(ret);
+}
+
+UniqueFD DmaBufAllocator::allocFromHeap(const char *name, std::size_t size)
+{
+ struct dma_heap_allocation_data alloc = {};
+ int ret;
+
+ alloc.len = size;
+ alloc.fd_flags = O_CLOEXEC | O_RDWR;
+
+ ret = ::ioctl(providerHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap allocation failure for " << name;
+ return {};
+ }
+
+ UniqueFD allocFd(alloc.fd);
+ ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name);
+ if (ret < 0) {
+ LOG(DmaBufAllocator, Error)
+ << "dma-heap naming failure for " << name;
+ return {};
+ }
+
+ return allocFd;
+}
+
+/**
+ * \brief Allocate a dma-buf from the DmaBufAllocator
+ * \param[in] name The name to set for the allocated buffer
+ * \param[in] size The size of the buffer to allocate
+ *
+ * Allocates a dma-buf with read/write access.
+ *
+ * If the allocation fails, an invalid UniqueFD is returned.
+ *
+ * \return The UniqueFD of the allocated buffer
+ */
+UniqueFD DmaBufAllocator::alloc(const char *name, std::size_t size)
+{
+ if (!name)
+ return {};
+
+ if (type_ == DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+ return allocFromUDmaBuf(name, size);
+ else
+ return allocFromHeap(name, size);
+}
+
+/**
+ * \brief Allocate and export buffers from the DmaBufAllocator
+ * \param[in] count The number of requested FrameBuffers
+ * \param[in] planeSizes The sizes of planes in each FrameBuffer
+ * \param[out] buffers Array of buffers successfully allocated
+ *
+ * Planes in a FrameBuffer are allocated with a single dma buf.
+ * \todo Add the option to allocate each plane with a dma buf respectively.
+ *
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int DmaBufAllocator::exportBuffers(unsigned int count,
+ const std::vector<unsigned int> &planeSizes,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (unsigned int i = 0; i < count; ++i) {
+ std::unique_ptr<FrameBuffer> buffer =
+ createBuffer("frame-" + std::to_string(i), planeSizes);
+ if (!buffer) {
+ LOG(DmaBufAllocator, Error) << "Unable to create buffer";
+
+ buffers->clear();
+ return -EINVAL;
+ }
+
+ buffers->push_back(std::move(buffer));
+ }
+
+ return count;
+}
+
+std::unique_ptr<FrameBuffer>
+DmaBufAllocator::createBuffer(std::string name,
+ const std::vector<unsigned int> &planeSizes)
+{
+ std::vector<FrameBuffer::Plane> planes;
+
+ unsigned int frameSize = 0, offset = 0;
+ for (auto planeSize : planeSizes)
+ frameSize += planeSize;
+
+ SharedFD fd(alloc(name.c_str(), frameSize));
+ if (!fd.isValid())
+ return nullptr;
+
+ for (auto planeSize : planeSizes) {
+ planes.emplace_back(FrameBuffer::Plane{ fd, offset, planeSize });
+ offset += planeSize;
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
+}
+
+/**
+ * \class DmaSyncer
+ * \brief Helper class for dma-buf's synchronization
+ *
+ * This class ties the userspace synchronization of a dma-buf to an object's
+ * lifetime.
+ *
+ * It's used when the user needs to access a dma-buf with the CPU, typically
+ * mapped with MappedFrameBuffer, so that the buffer is synchronized between
+ * the CPU and the ISP.
+ */
+
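+/*
+ * Scoped synchronization sketch; "plane", "map" and "processPixels" are
+ * hypothetical:
+ *
+ * \code
+ * {
+ * 	DmaSyncer sync(plane.fd, DmaSyncer::SyncType::Read);
+ * 	processPixels(map.planes()[0]);
+ * }	// The end-of-access sync is issued when sync goes out of scope
+ * \endcode
+ */
+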
+/**
+ * \enum DmaSyncer::SyncType
+ * \brief Read and/or write access via the CPU map
+ * \var DmaSyncer::Read
+ * \brief Indicates that the mapped dma-buf will be read by the client via the
+ * CPU map
+ * \var DmaSyncer::Write
+ * \brief Indicates that the mapped dma-buf will be written by the client via
+ * the CPU map
+ * \var DmaSyncer::ReadWrite
+ * \brief Indicates that the mapped dma-buf will be read and written by the
+ * client via the CPU map
+ */
+
+/**
+ * \brief Construct a DmaSyncer with a dma-buf's fd and the access type
+ * \param[in] fd The dma-buf's file descriptor to synchronize
+ * \param[in] type Read and/or write access via the CPU map
+ */
+DmaSyncer::DmaSyncer(SharedFD fd, SyncType type)
+ : fd_(fd)
+{
+ switch (type) {
+ case SyncType::Read:
+ flags_ = DMA_BUF_SYNC_READ;
+ break;
+ case SyncType::Write:
+ flags_ = DMA_BUF_SYNC_WRITE;
+ break;
+ case SyncType::ReadWrite:
+ flags_ = DMA_BUF_SYNC_RW;
+ break;
+ }
+
+ sync(DMA_BUF_SYNC_START);
+}
+
+/**
+ * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+/**
+ * \fn DmaSyncer::operator=(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+DmaSyncer::~DmaSyncer()
+{
+ sync(DMA_BUF_SYNC_END);
+}
+
+void DmaSyncer::sync(uint64_t step)
+{
+ struct dma_buf_sync sync = {
+ .flags = flags_ | step
+ };
+
+ int ret;
+ do {
+ ret = ioctl(fd_.get(), DMA_BUF_IOCTL_SYNC, &sync);
+ } while (ret && (errno == EINTR || errno == EAGAIN));
+
+ if (ret) {
+ ret = errno;
+ LOG(DmaBufAllocator, Error)
+ << "Unable to sync dma fd: " << fd_.get()
+ << ", err: " << strerror(ret)
+ << ", flags: " << sync.flags;
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/event_dispatcher.cpp b/src/libcamera/event_dispatcher.cpp
deleted file mode 100644
index bb4fddff..00000000
--- a/src/libcamera/event_dispatcher.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_dispatcher.cpp - Event dispatcher
- */
-
-#include <libcamera/event_dispatcher.h>
-
-#include "log.h"
-
-/**
- * \file event_dispatcher.h
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Event)
-
-/**
- * \class EventDispatcher
- * \brief Interface to manage the libcamera events and timers
- *
- * The EventDispatcher class allows the integration of the application event
- * loop with libcamera by abstracting how events and timers are managed and
- * processed.
- *
- * To listen to events, libcamera creates EventNotifier instances and registers
- * them with the dispatcher with registerEventNotifier(). The event notifier
- * \ref EventNotifier::activated signal is then emitted by the dispatcher
- * whenever the event is detected.
- *
- * To set timers, libcamera creates Timer instances and registers them with the
- * dispatcher with registerTimer(). The timer \ref Timer::timeout signal is then
- * emitted by the dispatcher when the timer times out.
- */
-
-EventDispatcher::~EventDispatcher()
-{
-}
-
-/**
- * \fn EventDispatcher::registerEventNotifier()
- * \brief Register an event notifier
- * \param[in] notifier The event notifier to register
- *
- * Once the \a notifier is registered with the dispatcher, the dispatcher will
- * emit the notifier \ref EventNotifier::activated signal whenever a
- * corresponding event is detected on the notifier's file descriptor. The event
- * is monitored until the notifier is unregistered with
- * unregisterEventNotifier().
- *
- * Registering multiple notifiers for the same file descriptor and event type is
- * not allowed and results in undefined behaviour.
- */
-
-/**
- * \fn EventDispatcher::unregisterEventNotifier()
- * \brief Unregister an event notifier
- * \param[in] notifier The event notifier to unregister
- *
- * After this function returns the \a notifier is guaranteed not to emit the
- * \ref EventNotifier::activated signal.
- *
- * If the notifier isn't registered, this function performs no operation.
- */
-
-/**
- * \fn EventDispatcher::registerTimer()
- * \brief Register a timer
- * \param[in] timer The timer to register
- *
- * Once the \a timer is registered with the dispatcher, the dispatcher will emit
- * the timer \ref Timer::timeout signal when the timer times out. The timer can
- * be unregistered with unregisterTimer() before it times out, in which case the
- * signal will not be emitted.
- *
- * When the \a timer times out, it is automatically unregistered by the
- * dispatcher and can be registered back as early as from the \ref Timer::timeout
- * signal handlers.
- *
- * Registering the same timer multiple times is not allowed and results in
- * undefined behaviour.
- */
-
-/**
- * \fn EventDispatcher::unregisterTimer()
- * \brief Unregister a timer
- * \param[in] timer The timer to unregister
- *
- * After this function returns the \a timer is guaranteed not to emit the
- * \ref Timer::timeout signal.
- *
- * If the timer isn't registered, this function performs no operation.
- */
-
-/**
- * \fn EventDispatcher::processEvents()
- * \brief Wait for and process pending events
- *
- * This function processes all pending events associated with registered event
- * notifiers and timers and signals the corresponding EventNotifier and Timer
- * objects. If no events are pending, it waits for the first event and processes
- * it before returning.
- */
-
-/**
- * \fn EventDispatcher::interrupt()
- * \brief Interrupt any running processEvents() call as soon as possible
- *
- * Calling this function interrupts any blocking processEvents() call in
- * progress. The processEvents() function will return as soon as possible,
- * after processing pending timers and events. If processEvents() isn't in
- * progress, it will be interrupted immediately the next time it gets called.
- */
-
-} /* namespace libcamera */
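For context on this removal, applications drove the public dispatcher with a loop along the following lines. This is an editorial sketch based on the documentation above; the exitRequested flag is hypothetical.

#include <atomic>

#include <libcamera/event_dispatcher.h>

using namespace libcamera;

/* Wait for and process events until another thread requests exit. */
void eventLoop(EventDispatcher *dispatcher, std::atomic<bool> &exitRequested)
{
	while (!exitRequested)
		dispatcher->processEvents();
}

/*
 * To stop the loop, set exitRequested and call dispatcher->interrupt(),
 * which unblocks a processEvents() call in progress.
 */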
diff --git a/src/libcamera/event_dispatcher_poll.cpp b/src/libcamera/event_dispatcher_poll.cpp
deleted file mode 100644
index 51ac5adf..00000000
--- a/src/libcamera/event_dispatcher_poll.cpp
+++ /dev/null
@@ -1,308 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_dispatcher_poll.cpp - Poll-based event dispatcher
- */
-
-#include "event_dispatcher_poll.h"
-
-#include <algorithm>
-#include <chrono>
-#include <iomanip>
-#include <poll.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/eventfd.h>
-#include <unistd.h>
-
-#include <libcamera/event_notifier.h>
-#include <libcamera/timer.h>
-
-#include "log.h"
-#include "thread.h"
-#include "utils.h"
-
-/**
- * \file event_dispatcher_poll.h
- */
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(Event)
-
-static const char *notifierType(EventNotifier::Type type)
-{
- if (type == EventNotifier::Read)
- return "read";
- if (type == EventNotifier::Write)
- return "write";
- if (type == EventNotifier::Exception)
- return "exception";
-
- return "";
-}
-
-/**
- * \class EventDispatcherPoll
- * \brief A poll-based event dispatcher
- */
-
-EventDispatcherPoll::EventDispatcherPoll()
- : processingEvents_(false)
-{
- /*
- * Create the event fd. Failures are fatal as we can't implement an
- * interruptible dispatcher without the fd.
- */
- eventfd_ = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (eventfd_ < 0)
- LOG(Event, Fatal) << "Unable to create eventfd";
-}
-
-EventDispatcherPoll::~EventDispatcherPoll()
-{
- close(eventfd_);
-}
-
-void EventDispatcherPoll::registerEventNotifier(EventNotifier *notifier)
-{
- EventNotifierSetPoll &set = notifiers_[notifier->fd()];
- EventNotifier::Type type = notifier->type();
-
- if (set.notifiers[type] && set.notifiers[type] != notifier) {
- LOG(Event, Warning)
- << "Ignoring duplicate " << notifierType(type)
- << " notifier for fd " << notifier->fd();
- return;
- }
-
- set.notifiers[type] = notifier;
-}
-
-void EventDispatcherPoll::unregisterEventNotifier(EventNotifier *notifier)
-{
- auto iter = notifiers_.find(notifier->fd());
- if (iter == notifiers_.end())
- return;
-
- EventNotifierSetPoll &set = iter->second;
- EventNotifier::Type type = notifier->type();
-
- if (!set.notifiers[type])
- return;
-
- if (set.notifiers[type] != notifier) {
- LOG(Event, Warning)
- << notifierType(type) << " notifier for fd "
- << notifier->fd() << " is not registered";
- return;
- }
-
- set.notifiers[type] = nullptr;
-
- /*
- * Don't race with event processing if this method is called from an
- * event notifier. The notifiers_ entry will be erased by
- * processEvents().
- */
- if (processingEvents_)
- return;
-
- if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
- notifiers_.erase(iter);
-}
-
-void EventDispatcherPoll::registerTimer(Timer *timer)
-{
- for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
- if ((*iter)->deadline() > timer->deadline()) {
- timers_.insert(iter, timer);
- return;
- }
- }
-
- timers_.push_back(timer);
-}
-
-void EventDispatcherPoll::unregisterTimer(Timer *timer)
-{
- for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
- if (*iter == timer) {
- timers_.erase(iter);
- return;
- }
-
- /*
- * As the timers list is ordered, we can stop as soon as we go
- * past the deadline.
- */
- if ((*iter)->deadline() > timer->deadline())
- break;
- }
-}
-
-void EventDispatcherPoll::processEvents()
-{
- int ret;
-
- Thread::current()->dispatchMessages();
-
- /* Create the pollfd array. */
- std::vector<struct pollfd> pollfds;
- pollfds.reserve(notifiers_.size() + 1);
-
- for (auto notifier : notifiers_)
- pollfds.push_back({ notifier.first, notifier.second.events(), 0 });
-
- pollfds.push_back({ eventfd_, POLLIN, 0 });
-
- /* Wait for events and process notifiers and timers. */
- do {
- ret = poll(&pollfds);
- } while (ret == -1 && errno == EINTR);
-
- if (ret < 0) {
- ret = -errno;
- LOG(Event, Warning) << "poll() failed with " << strerror(-ret);
- } else if (ret > 0) {
- processInterrupt(pollfds.back());
- pollfds.pop_back();
- processNotifiers(pollfds);
- }
-
- processTimers();
-}
-
-void EventDispatcherPoll::interrupt()
-{
- uint64_t value = 1;
- ssize_t ret = write(eventfd_, &value, sizeof(value));
- if (ret != sizeof(value)) {
- if (ret < 0)
- ret = -errno;
- LOG(Event, Error)
- << "Failed to interrupt event dispatcher ("
- << ret << ")";
- }
-}
-
-short EventDispatcherPoll::EventNotifierSetPoll::events() const
-{
- short events = 0;
-
- if (notifiers[EventNotifier::Read])
- events |= POLLIN;
- if (notifiers[EventNotifier::Write])
- events |= POLLOUT;
- if (notifiers[EventNotifier::Exception])
- events |= POLLPRI;
-
- return events;
-}
-
-int EventDispatcherPoll::poll(std::vector<struct pollfd> *pollfds)
-{
- /* Compute the timeout. */
- Timer *nextTimer = !timers_.empty() ? timers_.front() : nullptr;
- struct timespec timeout;
-
- if (nextTimer) {
- utils::time_point now = utils::clock::now();
-
- if (nextTimer->deadline() > now)
- timeout = utils::duration_to_timespec(nextTimer->deadline() - now);
- else
- timeout = { 0, 0 };
-
- LOG(Event, Debug)
- << "timeout " << timeout.tv_sec << "."
- << std::setfill('0') << std::setw(9)
- << timeout.tv_nsec;
- }
-
- return ppoll(pollfds->data(), pollfds->size(),
- nextTimer ? &timeout : nullptr, nullptr);
-}
-
-void EventDispatcherPoll::processInterrupt(const struct pollfd &pfd)
-{
- if (!(pfd.revents & POLLIN))
- return;
-
- uint64_t value;
- ssize_t ret = read(eventfd_, &value, sizeof(value));
- if (ret != sizeof(value)) {
- if (ret < 0)
- ret = -errno;
- LOG(Event, Error)
- << "Failed to process interrupt (" << ret << ")";
- }
-}
-
-void EventDispatcherPoll::processNotifiers(const std::vector<struct pollfd> &pollfds)
-{
- static const struct {
- EventNotifier::Type type;
- short events;
- } events[] = {
- { EventNotifier::Read, POLLIN },
- { EventNotifier::Write, POLLOUT },
- { EventNotifier::Exception, POLLPRI },
- };
-
- processingEvents_ = true;
-
- for (const pollfd &pfd : pollfds) {
- auto iter = notifiers_.find(pfd.fd);
- ASSERT(iter != notifiers_.end());
-
- EventNotifierSetPoll &set = iter->second;
-
- for (const auto &event : events) {
- EventNotifier *notifier = set.notifiers[event.type];
-
- if (!notifier)
- continue;
-
- /*
- * If the file descriptor is invalid, disable the
- * notifier immediately.
- */
- if (pfd.revents & POLLNVAL) {
- LOG(Event, Warning)
- << "Disabling " << notifierType(event.type)
- << " due to invalid file descriptor "
- << pfd.fd;
- unregisterEventNotifier(notifier);
- continue;
- }
-
- if (pfd.revents & event.events)
- notifier->activated.emit(notifier);
- }
-
- /* Erase the notifiers_ entry if it is now empty. */
- if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
- notifiers_.erase(iter);
- }
-
- processingEvents_ = false;
-}
-
-void EventDispatcherPoll::processTimers()
-{
- utils::time_point now = utils::clock::now();
-
- while (!timers_.empty()) {
- Timer *timer = timers_.front();
- if (timer->deadline() > now)
- break;
-
- timers_.pop_front();
- timer->stop();
- timer->timeout.emit(timer);
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/event_notifier.cpp b/src/libcamera/event_notifier.cpp
deleted file mode 100644
index a9be686f..00000000
--- a/src/libcamera/event_notifier.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_notifier.cpp - File descriptor event notifier
- */
-
-#include <libcamera/event_notifier.h>
-
-#include <libcamera/camera_manager.h>
-#include <libcamera/event_dispatcher.h>
-
-#include "message.h"
-#include "thread.h"
-
-/**
- * \file event_notifier.h
- * \brief File descriptor event notifier
- */
-
-namespace libcamera {
-
-/**
- * \class EventNotifier
- * \brief Notify of activity on a file descriptor
- *
- * The EventNotifier models a file descriptor event source that can be
- * monitored. It is created with the file descriptor to be monitored and the
- * type of event, and is enabled by default. It will emit the \ref activated
- * signal whenever an event of the monitored type occurs on the file descriptor.
- *
- * Supported types of events are EventNotifier::Read, EventNotifier::Write and
- * EventNotifier::Exception. The type is specified when constructing the
- * notifier, and can be retrieved using the type() function. To listen to
- * multiple event types on the same file descriptor multiple notifiers must be
- * created.
- *
- * The notifier can be disabled with the setEnabled() function. When the notifier
- * is disabled it ignores events and does not emit the \ref activated signal.
- * The notifier can then be re-enabled with the setEnabled() function.
- *
- * Creating multiple notifiers of the same type for the same file descriptor is
- * not allowed and results in undefined behaviour.
- *
- * Notifier events are detected and dispatched from the
- * EventDispatcher::processEvents() function.
- */
-
-/**
- * \enum EventNotifier::Type
- * Type of file descriptor event to listen for.
- * \var EventNotifier::Read
- * Data is available to be read from the file descriptor
- * \var EventNotifier::Write
- * Data can be written to the file descriptor
- * \var EventNotifier::Exception
- * An exception has occurred on the file descriptor
- */
-
-/**
- * \brief Construct an event notifier with a file descriptor and event type
- * \param[in] fd The file descriptor to monitor
- * \param[in] type The event type to monitor
- * \param[in] parent The parent Object
- */
-EventNotifier::EventNotifier(int fd, Type type, Object *parent)
- : Object(parent), fd_(fd), type_(type), enabled_(false)
-{
- setEnabled(true);
-}
-
-EventNotifier::~EventNotifier()
-{
- setEnabled(false);
-}
-
-/**
- * \fn EventNotifier::type()
- * \brief Retrieve the type of the event being monitored
- * \return The type of the event
- */
-
-/**
- * \fn EventNotifier::fd()
- * \brief Retrieve the file descriptor being monitored
- * \return The file descriptor
- */
-
-/**
- * \fn EventNotifier::enabled()
- * \brief Retrieve the notifier state
- * \return True if the notifier is enabled, or false otherwise
- * \sa setEnabled()
- */
-
-/**
- * \brief Enable or disable the notifier
- * \param[in] enable True to enable the notifier, false to disable it
- *
- * This function enables or disables the notifier. A disabled notifier ignores
- * events and does not emit the \ref activated signal.
- *
- * \context This function is \threadbound.
- */
-void EventNotifier::setEnabled(bool enable)
-{
- if (enabled_ == enable)
- return;
-
- enabled_ = enable;
-
- EventDispatcher *dispatcher = thread()->eventDispatcher();
- if (enable)
- dispatcher->registerEventNotifier(this);
- else
- dispatcher->unregisterEventNotifier(this);
-}
-
-/**
- * \var EventNotifier::activated
- * \brief Signal emitted when the event occurs
- *
- * This signal is emitted when the event \ref type() occurs on the file
- * descriptor monitored by the notifier. The notifier pointer is passed as a
- * parameter.
- */
-
-void EventNotifier::message(Message *msg)
-{
- if (msg->type() == Message::ThreadMoveMessage) {
- if (enabled_) {
- setEnabled(false);
- invokeMethod(&EventNotifier::setEnabled,
- ConnectionTypeQueued, true);
- }
- }
-
- Object::message(msg);
-}
-
-} /* namespace libcamera */
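The usage pattern the removed documentation describes, in a short sketch; class and slot names are illustrative, and the headers are those the removed file itself used.

#include <unistd.h>

#include <libcamera/event_notifier.h>
#include <libcamera/object.h>

using namespace libcamera;

class Reader : public Object
{
public:
	Reader(int fd)
		: notifier_(fd, EventNotifier::Read)
	{
		/* activated is emitted with the notifier passed as parameter. */
		notifier_.activated.connect(this, &Reader::readReady);
	}

private:
	void readReady(EventNotifier *notifier)
	{
		char buf[256];
		read(notifier->fd(), buf, sizeof(buf));
	}

	EventNotifier notifier_;
};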
diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp
new file mode 100644
index 00000000..73299b40
--- /dev/null
+++ b/src/libcamera/fence.cpp
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Synchronization fence
+ */
+
+#include "libcamera/fence.h"
+
+namespace libcamera {
+
+/**
+ * \file fence.h
+ * \brief Definition of the Fence class
+ */
+
+/**
+ * \class Fence
+ * \brief Synchronization primitive to manage resources
+ *
+ * The Fence class models a synchronization primitive that can be used by
+ * applications to explicitly synchronize resource usage, and can be shared by
+ * multiple processes.
+ *
+ * Fences are most commonly used in association with frame buffers. A
+ * FrameBuffer can be associated with a Fence so that the library can wait for
+ * the Fence to be signalled before allowing the camera device to actually
+ * access the memory area described by the FrameBuffer.
+ *
+ * \sa Request::addBuffer()
+ *
+ * By using a fence, applications can synchronize between frame buffer
+ * consumers and producers, for example a display device and a camera, to
+ * guarantee that a new data transfer only happens once the existing frame has
+ * been displayed.
+ *
+ * A Fence can be realized by different event notification primitives, the most
+ * common of which is waiting for read events on a
+ * <a href="https://www.kernel.org/doc/html/latest/driver-api/sync_file.html">kernel sync file</a>.
+ * This is currently the only mechanism supported by libcamera, but others can
+ * be implemented by subclassing this class and adding the corresponding
+ * handling in the core library.
+ *
+ * \internal
+ *
+ * The Fence class is a thin abstraction around a UniqueFD which simply allows
+ * accessing it as a const reference or moving its ownership to the caller.
+ *
+ * The Fence class abstracts the underlying synchronization mechanism in use
+ * and provides other library components with an interface that will not
+ * change when new synchronization primitives are added as fences.
+ *
+ * A Fence is constructed with a UniqueFD whose ownership is moved into the
+ * Fence. A FrameBuffer can be associated with a Fence by passing it to the
+ * Request::addBuffer() function, which will move the Fence into the FrameBuffer
+ * itself. Once a Request is queued to the Camera, a preparation phase
+ * guarantees that before actually applying the Request to the hardware, all the
+ * valid fences of the frame buffers in a Request are correctly signalled. Once
+ * a Fence has completed, the library will release the FrameBuffer fence so that
+ * applications can no longer access it.
+ *
+ * An optional timeout can be started while waiting for a fence to complete. If
+ * waiting on a Fence fails for any reason, the FrameBuffer's fence is not
+ * reset and is made available to applications, which are responsible for
+ * releasing the Fence to correctly close the underlying UniqueFD.
+ *
+ * A failure in waiting for a Fence to complete will result in the Request
+ * completing in a failed state.
+ *
+ * \sa Request::prepare()
+ * \sa PipelineHandler::doQueueRequests()
+ */
+
+/**
+ * \brief Create a Fence
+ * \param[in] fd The fence file descriptor
+ *
+ * The file descriptor ownership is moved to the Fence.
+ */
+Fence::Fence(UniqueFD fd)
+ : fd_(std::move(fd))
+{
+}
+
+/**
+ * \fn Fence::isValid()
+ * \brief Check if a Fence is valid
+ *
+ * A Fence is valid if the file descriptor it wraps is valid.
+ *
+ * \return True if the Fence is valid, false otherwise
+ */
+
+/**
+ * \fn Fence::fd()
+ * \brief Retrieve a constant reference to the file descriptor
+ * \return A const reference to the fence file descriptor
+ */
+
+/**
+ * \fn Fence::release()
+ * \brief Release the ownership of the file descriptor
+ *
+ * Release the ownership of the wrapped file descriptor by returning it to the
+ * caller.
+ *
+ * \return The wrapped UniqueFD
+ */
+
+} /* namespace libcamera */
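A minimal usage sketch of the new class. The camera, stream and buffer are assumed to exist already, and the sync-file file descriptor would come from a producer such as a display driver; only Fence and Request::addBuffer() are taken from this series.

#include <memory>
#include <utility>

#include <libcamera/camera.h>
#include <libcamera/fence.h>
#include <libcamera/request.h>

using namespace libcamera;

void queueWithFence(Camera *camera, Stream *stream, FrameBuffer *buffer,
		    UniqueFD syncFd)
{
	auto fence = std::make_unique<Fence>(std::move(syncFd));

	std::unique_ptr<Request> request = camera->createRequest();
	/*
	 * The Fence is moved into the FrameBuffer; if waiting on it fails,
	 * the fence is handed back and the Request completes in a failed
	 * state.
	 */
	request->addBuffer(stream, buffer, std::move(fence));
	camera->queueRequest(request.get());
}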
diff --git a/src/libcamera/file_descriptor.cpp b/src/libcamera/file_descriptor.cpp
deleted file mode 100644
index 88385476..00000000
--- a/src/libcamera/file_descriptor.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * file_descriptor.cpp - File descriptor wrapper
- */
-
-#include <libcamera/file_descriptor.h>
-
-#include <string.h>
-#include <unistd.h>
-#include <utility>
-
-#include "log.h"
-
-/**
- * \file file_descriptor.h
- * \brief File descriptor wrapper
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(FileDescriptor)
-
-/**
- * \class FileDescriptor
- * \brief RAII-style wrapper for file descriptors
- *
- * The FileDescriptor class provides RAII-style lifetime management of file
- * descriptors with an efficient mechanism for ownership sharing. At its core,
- * an internal Descriptor object wraps a file descriptor (expressed as a signed
- * integer) with an RAII-style interface. The Descriptor is then implicitly
- * shared with all FileDescriptor instances constructed as copies.
- *
- * When constructed from a numerical file descriptor, the FileDescriptor
- * instance duplicates the file descriptor and wraps the duplicate as a
- * Descriptor. The copy constructor and assignment operator create copies that
- * share the Descriptor, while the move versions of those methods additionally
- * make the other FileDescriptor invalid. When the last FileDescriptor that
- * references a Descriptor is destroyed, the file descriptor is closed.
- *
- * The numerical file descriptor is available through the fd() method. As
- * constructing a FileDescriptor from a numerical file descriptor duplicates
- * the file descriptor, the value returned by fd() will be different than the
- * value passed to the constructor. All FileDescriptor instances created as
- * copies of a FileDescriptor will report the same fd() value. Callers can
- * perform operations on the fd(), but shall never close it manually.
- */
-
-/**
- * \brief Create a FileDescriptor wrapping a copy of a given \a fd
- * \param[in] fd File descriptor
- *
- * Constructing a FileDescriptor from a numerical file descriptor duplicates the
- * \a fd and takes ownership of the copy. The original \a fd is left untouched,
- * and the caller is responsible for closing it when appropriate. The duplicated
- * file descriptor will be closed automatically when all FileDescriptor
- * instances that reference it are destroyed.
- *
- * If the \a fd is negative, the FileDescriptor is constructed as invalid and
- * the fd() method will return -1.
- */
-FileDescriptor::FileDescriptor(int fd)
-{
- if (fd < 0)
- return;
-
- fd_ = std::make_shared<Descriptor>(fd);
- if (fd_->fd() < 0)
- fd_.reset();
-}
-
-/**
- * \brief Copy constructor, create a FileDescriptor from a copy of \a other
- * \param[in] other The other FileDescriptor
- *
- * Copying a FileDescriptor implicitly shares ownership of the wrapped file
- * descriptor. The original FileDescriptor is left untouched, and the caller is
- * responsible for destroying it when appropriate. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- */
-FileDescriptor::FileDescriptor(const FileDescriptor &other)
- : fd_(other.fd_)
-{
-}
-
-/**
- * \brief Move constructor, create a FileDescriptor by taking over \a other
- * \param[in] other The other FileDescriptor
- *
- * Moving a FileDescriptor moves the reference to the wrapped descriptor owned
- * by \a other to the new FileDescriptor. The \a other FileDescriptor is
- * invalidated and its fd() method will return -1. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- */
-FileDescriptor::FileDescriptor(FileDescriptor &&other)
- : fd_(std::move(other.fd_))
-{
-}
-
-/**
- * \brief Destroy the FileDescriptor instance
- *
- * Destroying a FileDescriptor instance releases its reference to the wrapped
- * descriptor, if any. When the last instance that references a wrapped
- * descriptor is destroyed, the file descriptor is automatically closed.
- */
-FileDescriptor::~FileDescriptor()
-{
-}
-
-/**
- * \brief Copy assignment operator, replace the wrapped file descriptor with a
- * copy of \a other
- * \param[in] other The other FileDescriptor
- *
- * Copying a FileDescriptor creates a new reference to the wrapped file
- * descriptor owned by \a other. If \a other is invalid, *this will also be
- * invalid. The original FileDescriptor is left untouched, and the caller is
- * responsible for destroying it when appropriate. The wrapped file descriptor
- * will be closed automatically when all FileDescriptor instances that
- * reference it are destroyed.
- *
- * \return A reference to this FileDescriptor
- */
-FileDescriptor &FileDescriptor::operator=(const FileDescriptor &other)
-{
- fd_ = other.fd_;
-
- return *this;
-}
-
-/**
- * \brief Move assignment operator, replace the wrapped file descriptor by
- * taking over \a other
- * \param[in] other The other FileDescriptor
- *
- * Moving a FileDescriptor moves the reference to the wrapped descriptor owned
- * by \a other to the new FileDescriptor. If \a other is invalid, *this will
- * also be invalid. The \a other FileDescriptor is invalidated and its fd()
- * method will return -1. The wrapped file descriptor will be closed
- * automatically when all FileDescriptor instances that reference it are
- * destroyed.
- *
- * \return A reference to this FileDescriptor
- */
-FileDescriptor &FileDescriptor::operator=(FileDescriptor &&other)
-{
- fd_ = std::move(other.fd_);
-
- return *this;
-}
-
-/**
- * \fn FileDescriptor::isValid()
- * \brief Check if the FileDescriptor instance is valid
- * \return True if the FileDescriptor is valid, false otherwise
- */
-
-/**
- * \fn FileDescriptor::fd()
- * \brief Retrieve the numerical file descriptor
- * \return The numerical file descriptor, which may be -1 if the FileDescriptor
- * instance is invalid
- */
-
-/**
- * \brief Duplicate a FileDescriptor
- *
- * Duplicating a FileDescriptor creates a duplicate of the wrapped file
- * descriptor and returns a new FileDescriptor instance that wraps the
- * duplicate. The fd() method of the original and duplicate instances will
- * return different values. The duplicate instance will not be affected by
- * destruction of the original instance or its copies.
- *
- * \return A new FileDescriptor instance wrapping a duplicate of the original
- * file descriptor
- */
-FileDescriptor FileDescriptor::dup() const
-{
- return FileDescriptor(fd());
-}
-
-FileDescriptor::Descriptor::Descriptor(int fd)
-{
- /* Failing to dup() a fd should not happen and is fatal. */
- fd_ = ::dup(fd);
- if (fd_ == -1) {
- int ret = -errno;
- LOG(FileDescriptor, Fatal)
- << "Failed to dup() fd: " << strerror(-ret);
- }
-}
-
-FileDescriptor::Descriptor::~Descriptor()
-{
- if (fd_ != -1)
- close(fd_);
-}
-
-} /* namespace libcamera */
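The sharing and duplication semantics removed here, summarized in a short sketch; the function name and the raw fd are illustrative.

#include <libcamera/file_descriptor.h>

using namespace libcamera;

void sharingExample(int rawFd)
{
	FileDescriptor fd1(rawFd);      /* dup()s rawFd: fd1.fd() != rawFd */
	FileDescriptor fd2(fd1);        /* shares the Descriptor: fd2.fd() == fd1.fd() */
	FileDescriptor fd3 = fd1.dup(); /* new duplicate: fd3.fd() != fd1.fd() */

	/*
	 * The descriptor shared by fd1 and fd2 is closed when the last of
	 * the two is destroyed; fd3's duplicate is unaffected.
	 */
}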
diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp
index 5f6552a4..bfcdfc08 100644
--- a/src/libcamera/formats.cpp
+++ b/src/libcamera/formats.cpp
@@ -2,106 +2,1217 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.cpp - libcamera image formats
+ * libcamera image formats
*/
-#include "formats.h"
+#include "libcamera/internal/formats.h"
-#include <errno.h>
+#include <map>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
/**
- * \file formats.h
- * \brief Types and helper methods to handle libcamera image formats
+ * \file libcamera/internal/formats.h
+ * \brief Types and helper functions to handle libcamera image formats
*/
namespace libcamera {
+LOG_DEFINE_CATEGORY(Formats)
+
+/**
+ * \class PixelFormatInfo
+ * \brief Information about pixel formats
+ *
+ * The PixelFormatInfo class groups together information describing a pixel
+ * format. It facilitates handling of pixel formats by providing data commonly
+ * used in pipeline handlers.
+ *
+ * \var PixelFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the PixelFormat
+ *
+ * \var PixelFormatInfo::format
+ * \brief The PixelFormat described by this instance
+ *
+ * \var PixelFormatInfo::v4l2Formats
+ * \brief The V4L2 pixel formats corresponding to the PixelFormat
+ *
+ * Multiple V4L2 formats may exist for one PixelFormat, as V4L2 defines
+ * separate 4CCs for contiguous and non-contiguous versions of the same image
+ * format.
+ *
+ * \var PixelFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that store pixels with bit padding within words, only the
+ * effective bits are taken into account. For instance, 12-bit Bayer data
+ * stored in two bytes per pixel reports 12, not 16, in this field.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, report 0 in this field.
+ *
+ * \var PixelFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * \var PixelFormatInfo::packed
+ * \brief Tell if multiple pixels are packed in the same bytes
+ *
+ * Packed formats are defined as storing data from multiple pixels in the same
+ * bytes. For instance, 12-bit Bayer data with two pixels stored in three bytes
+ * is packed, while the same data stored with 4 bits of padding in two bytes
+ * per pixel is not packed.
+ *
+ * \var PixelFormatInfo::pixelsPerGroup
+ * \brief The number of pixels in a pixel group
+ *
+ * A pixel group is defined as the minimum number of pixels (including padding)
+ * necessary in a row when the image has only one column of effective pixels.
+ * pixelsPerGroup refers to this value. PixelFormatInfo::Plane::bytesPerGroup,
+ * then, refers to the number of bytes that a pixel group consumes. This
+ * definition of a pixel group allows simple calculation of stride, as
+ * ceil(width / pixelsPerGroup) * bytesPerGroup. These values are determined
+ * only in terms of a row. The ceiling accounts for padding.
+ *
+ * A pixel group has a second constraint, such that the pixel group
+ * (bytesPerGroup and pixelsPerGroup) is the smallest repeatable unit.
+ * What this means is that, for example, in the IPU3 formats, if there is only
+ * one column of effective pixels, it looks like it could fit in 5 bytes
+ * with 3 padding pixels (for a total of 4 pixels over 5 bytes). However, this
+ * unit is not repeatable, as at the 7th group in the same row, the pattern
+ * is broken. Therefore, the pixel group for IPU3 formats must be 25 pixels
+ * over 32 bytes.
+ *
+ * For a simple format like BGR888 this is straightforward: the pixel group
+ * size is 1, 3 bytes are necessary, and there is only one plane with no
+ * (= 1) vertical subsampling. For YUYV, the CbCr pair is shared between two
+ * pixels, so even a single pixel would still need a padded second Y sample;
+ * the pixel group size is therefore 2, and 4 bytes are necessary. YUYV also
+ * has no vertical subsampling. NV12 has a pixel group size of 2 pixels, due
+ * to the CbCr plane. The bytes per group, for both planes, is then 2. The
+ * first plane has no vertical subsampling, but the second plane is subsampled
+ * by a factor of 2.
+ *
+ * The IPU3 raw Bayer formats are single-planar, and have a pixel group size of
+ * 25, consuming 32 bytes, due to the packing pattern being repeated in memory
+ * every 32 bytes. The IPU3 hardware, however, has an additional constraint on
+ * the DMA burst size, requiring lines to be a multiple of 64 bytes. This isn't an
+ * intrinsic property of the formats and is thus not reflected here. It is
+ * instead enforced by the corresponding pipeline handler.
+ *
+ * \var PixelFormatInfo::planes
+ * \brief Information about pixels for each plane
+ *
+ * \sa PixelFormatInfo::Plane
+ */
+
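+/*
+ * Illustrative stride computations from the definitions above (editorial
+ * sketch, not part of this patch):
+ *
+ *   BGR888, width 1280: ceil(1280 / 1) * 3 = 3840 bytes
+ *   NV12 luma plane, width 1280: ceil(1280 / 2) * 2 = 1280 bytes
+ *   SBGGR10_IPU3, width 1280: ceil(1280 / 25) * 32 = 52 * 32 = 1664 bytes
+ */
+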
/**
- * \class ImageFormats
- * \brief Describe V4L2Device and V4L2SubDevice image formats
+ * \enum PixelFormatInfo::ColourEncoding
+ * \brief The colour encoding type
*
- * This class stores a list of image formats, each associated with a
- * corresponding set of image sizes. It is used to describe the formats and
- * sizes supported by a V4L2Device or V4L2Subdevice.
+ * \var PixelFormatInfo::ColourEncodingRGB
+ * \brief RGB colour encoding
*
- * Formats are stored as an integer. When used for a V4L2Device, the image
- * formats are fourcc pixel formats. When used for a V4L2Subdevice they are
- * media bus codes. Both are defined by the V4L2 specification.
+ * \var PixelFormatInfo::ColourEncodingYUV
+ * \brief YUV colour encoding
*
- * Sizes are stored as a list of SizeRange.
+ * \var PixelFormatInfo::ColourEncodingRAW
+ * \brief RAW colour encoding
*/
/**
- * \brief Add a format and corresponding sizes to the description
- * \param[in] format Pixel format or media bus code to describe
- * \param[in] sizes List of supported size ranges for the format
+ * \struct PixelFormatInfo::Plane
+ * \brief Information about a single plane of a pixel format
+ *
+ * \var PixelFormatInfo::Plane::bytesPerGroup
+ * \brief The number of bytes that a pixel group consumes
+ *
+ * \sa PixelFormatInfo::pixelsPerGroup
*
- * \return 0 on success or a negative error code otherwise
- * \retval -EEXIST The format is already described
+ * \var PixelFormatInfo::Plane::verticalSubSampling
+ * \brief Vertical subsampling multiplier
+ *
+ * This value is the ratio between the number of rows of pixels in the frame
+ * to the number of rows of pixels in the plane.
+ */
+
+namespace {
+
+const PixelFormatInfo pixelFormatInfoInvalid{};
+
+const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{
+ /* RGB formats. */
+ { formats::RGB565, {
+ .name = "RGB565",
+ .format = formats::RGB565,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB565_BE, {
+ .name = "RGB565_BE",
+ .format = formats::RGB565_BE,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGR888, {
+ .name = "BGR888",
+ .format = formats::BGR888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB888, {
+ .name = "RGB888",
+ .format = formats::RGB888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XRGB8888, {
+ .name = "XRGB8888",
+ .format = formats::XRGB8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XBGR8888, {
+ .name = "XBGR8888",
+ .format = formats::XBGR8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGBX8888, {
+ .name = "RGBX8888",
+ .format = formats::RGBX8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGRX8888, {
+ .name = "BGRX8888",
+ .format = formats::BGRX8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::ABGR8888, {
+ .name = "ABGR8888",
+ .format = formats::ABGR8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::ARGB8888, {
+ .name = "ARGB8888",
+ .format = formats::ARGB8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGRA8888, {
+ .name = "BGRA8888",
+ .format = formats::BGRA8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGBA8888, {
+ .name = "RGBA8888",
+ .format = formats::RGBA8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGR161616, {
+ .name = "BGR161616",
+ .format = formats::BGR161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_RGB48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGB161616, {
+ .name = "RGB161616",
+ .format = formats::RGB161616,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_BGR48), },
+ .bitsPerPixel = 48,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* YUV packed formats. */
+ { formats::YUYV, {
+ .name = "YUYV",
+ .format = formats::YUYV,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUYV), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::YVYU, {
+ .name = "YVYU",
+ .format = formats::YVYU,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVYU), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::UYVY, {
+ .name = "UYVY",
+ .format = formats::UYVY,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_UYVY), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::VYUY, {
+ .name = "VYUY",
+ .format = formats::VYUY,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_VYUY), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::AVUY8888, {
+ .name = "AVUY8888",
+ .format = formats::AVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::XVUY8888, {
+ .name = "XVUY8888",
+ .format = formats::XVUY8888,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32), },
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* YUV planar formats. */
+ { formats::NV12, {
+ .name = "NV12",
+ .format = formats::NV12,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 2 }, { 0, 0 } }},
+ } },
+ { formats::NV21, {
+ .name = "NV21",
+ .format = formats::NV21,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 2 }, { 0, 0 } }},
+ } },
+ { formats::NV16, {
+ .name = "NV16",
+ .format = formats::NV16,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV61, {
+ .name = "NV61",
+ .format = formats::NV61,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV24, {
+ .name = "NV24",
+ .format = formats::NV24,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV24), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::NV42, {
+ .name = "NV42",
+ .format = formats::NV42,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_NV42), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 2, 1 }, { 0, 0 } }},
+ } },
+ { formats::YUV420, {
+ .name = "YUV420",
+ .format = formats::YUV420,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 2 }, { 1, 2 } }},
+ } },
+ { formats::YVU420, {
+ .name = "YVU420",
+ .format = formats::YVU420,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 2 }, { 1, 2 } }},
+ } },
+ { formats::YUV422, {
+ .name = "YUV422",
+ .format = formats::YUV422,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YVU422, {
+ .name = "YVU422",
+ .format = formats::YVU422,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YUV444, {
+ .name = "YUV444",
+ .format = formats::YUV444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+ { formats::YVU444, {
+ .name = "YVU444",
+ .format = formats::YVU444,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M), },
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 1, 1 }, { 1, 1 } }},
+ } },
+
+ /* Greyscale formats. */
+ { formats::R8, {
+ .name = "R8",
+ .format = formats::R8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_GREY), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R10, {
+ .name = "R10",
+ .format = formats::R10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R10_CSI2P, {
+ .name = "R10_CSI2P",
+ .format = formats::R10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R12_CSI2P, {
+ .name = "R12_CSI2P",
+ .format = formats::R12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R12, {
+ .name = "R12",
+ .format = formats::R12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::R16, {
+ .name = "R16",
+ .format = formats::R16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::MONO_PISP_COMP1, {
+ .name = "MONO_PISP_COMP1",
+ .format = formats::MONO_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = true,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+
+ /* Bayer formats. */
+ { formats::SBGGR8, {
+ .name = "SBGGR8",
+ .format = formats::SBGGR8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG8, {
+ .name = "SGBRG8",
+ .format = formats::SGBRG8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG8, {
+ .name = "SGRBG8",
+ .format = formats::SGRBG8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB8, {
+ .name = "SRGGB8",
+ .format = formats::SRGGB8,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10, {
+ .name = "SBGGR10",
+ .format = formats::SBGGR10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10, {
+ .name = "SGBRG10",
+ .format = formats::SGBRG10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10, {
+ .name = "SGRBG10",
+ .format = formats::SGRBG10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10, {
+ .name = "SRGGB10",
+ .format = formats::SRGGB10,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10_CSI2P, {
+ .name = "SBGGR10_CSI2P",
+ .format = formats::SBGGR10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10_CSI2P, {
+ .name = "SGBRG10_CSI2P",
+ .format = formats::SGBRG10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10_CSI2P, {
+ .name = "SGRBG10_CSI2P",
+ .format = formats::SGRBG10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10_CSI2P, {
+ .name = "SRGGB10_CSI2P",
+ .format = formats::SRGGB10_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR12, {
+ .name = "SBGGR12",
+ .format = formats::SBGGR12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG12, {
+ .name = "SGBRG12",
+ .format = formats::SGBRG12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG12, {
+ .name = "SGRBG12",
+ .format = formats::SGRBG12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB12, {
+ .name = "SRGGB12",
+ .format = formats::SRGGB12,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR12_CSI2P, {
+ .name = "SBGGR12_CSI2P",
+ .format = formats::SBGGR12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG12_CSI2P, {
+ .name = "SGBRG12_CSI2P",
+ .format = formats::SGBRG12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG12_CSI2P, {
+ .name = "SGRBG12_CSI2P",
+ .format = formats::SGRBG12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB12_CSI2P, {
+ .name = "SRGGB12_CSI2P",
+ .format = formats::SRGGB12_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P), },
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14, {
+ .name = "SBGGR14",
+ .format = formats::SBGGR14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14, {
+ .name = "SGBRG14",
+ .format = formats::SGBRG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14, {
+ .name = "SGRBG14",
+ .format = formats::SGRBG14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14, {
+ .name = "SRGGB14",
+ .format = formats::SRGGB14,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR14_CSI2P, {
+ .name = "SBGGR14_CSI2P",
+ .format = formats::SBGGR14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG14_CSI2P, {
+ .name = "SGBRG14_CSI2P",
+ .format = formats::SGBRG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG14_CSI2P, {
+ .name = "SGRBG14_CSI2P",
+ .format = formats::SGRBG14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB14_CSI2P, {
+ .name = "SRGGB14_CSI2P",
+ .format = formats::SRGGB14_CSI2P,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P), },
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 4,
+ .planes = {{ { 7, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR16, {
+ .name = "SBGGR16",
+ .format = formats::SBGGR16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG16, {
+ .name = "SGBRG16",
+ .format = formats::SGBRG16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG16, {
+ .name = "SGRBG16",
+ .format = formats::SGRBG16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB16, {
+ .name = "SRGGB16",
+ .format = formats::SRGGB16,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16), },
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = false,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 4, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SBGGR10_IPU3, {
+ .name = "SBGGR10_IPU3",
+ .format = formats::SBGGR10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ /* \todo remember to double this in the ipu3 pipeline handler */
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGBRG10_IPU3, {
+ .name = "SGBRG10_IPU3",
+ .format = formats::SGBRG10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SGRBG10_IPU3, {
+ .name = "SGRBG10_IPU3",
+ .format = formats::SGRBG10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::SRGGB10_IPU3, {
+ .name = "SRGGB10_IPU3",
+ .format = formats::SRGGB10_IPU3,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10), },
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 25,
+ .planes = {{ { 32, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::BGGR_PISP_COMP1, {
+ .name = "BGGR_PISP_COMP1",
+ .format = formats::BGGR_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GBRG_PISP_COMP1, {
+ .name = "GBRG_PISP_COMP1",
+ .format = formats::GBRG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::GRBG_PISP_COMP1, {
+ .name = "GRBG_PISP_COMP1",
+ .format = formats::GRBG_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ { formats::RGGB_PISP_COMP1, {
+ .name = "RGGB_PISP_COMP1",
+ .format = formats::RGGB_PISP_COMP1,
+ .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB), },
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ .packed = true,
+ .pixelsPerGroup = 2,
+ .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+ /* Compressed formats. */
+ { formats::MJPEG, {
+ .name = "MJPEG",
+ .format = formats::MJPEG,
+ .v4l2Formats = {
+ V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ },
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ .packed = false,
+ .pixelsPerGroup = 1,
+ .planes = {{ { 1, 1 }, { 0, 0 }, { 0, 0 } }},
+ } },
+};
+
+} /* namespace */
+
+/**
+ * \fn bool PixelFormatInfo::isValid() const
+ * \brief Check if the pixel format info is valid
+ * \return True if the pixel format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a pixel format
+ * \param[in] format The pixel format
+ * \return The PixelFormatInfo describing the \a format if known, or an invalid
+ * PixelFormatInfo otherwise
*/
-int ImageFormats::addFormat(unsigned int format, const std::vector<SizeRange> &sizes)
+const PixelFormatInfo &PixelFormatInfo::info(const PixelFormat &format)
{
- if (data_.find(format) != data_.end())
- return -EEXIST;
+ const auto iter = pixelFormatInfo.find(format);
+ if (iter == pixelFormatInfo.end()) {
+ LOG(Formats, Warning)
+ << "Unsupported pixel format "
+ << utils::hex(format.fourcc());
+ return pixelFormatInfoInvalid;
+ }
- data_[format] = sizes;
+ return iter->second;
+}
+
+/**
+ * \brief Retrieve information about a V4L2 pixel format
+ * \param[in] format The V4L2 pixel format
+ * \return The PixelFormatInfo describing the V4L2 \a format if known, or an
+ * invalid PixelFormatInfo otherwise
+ */
+const PixelFormatInfo &PixelFormatInfo::info(const V4L2PixelFormat &format)
+{
+ PixelFormat pixelFormat = format.toPixelFormat(false);
+ if (!pixelFormat.isValid())
+ return pixelFormatInfoInvalid;
+
+ const auto iter = pixelFormatInfo.find(pixelFormat);
+ if (iter == pixelFormatInfo.end())
+ return pixelFormatInfoInvalid;
- return 0;
+ return iter->second;
}
/**
- * \brief Check if the list of devices supported formats is empty
- * \return True if the list of supported formats is empty
+ * \brief Retrieve information about a pixel format
+ * \param[in] name The name of the pixel format
+ * \return The PixelFormatInfo describing the PixelFormat matching the
+ * \a name if known, or an invalid PixelFormatInfo otherwise
*/
-bool ImageFormats::isEmpty() const
+const PixelFormatInfo &PixelFormatInfo::info(const std::string &name)
{
- return data_.empty();
+ for (const auto &info : pixelFormatInfo) {
+ if (info.second.name == name)
+ return info.second;
+ }
+
+ return pixelFormatInfoInvalid;
}
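
As a usage illustration of the lookup overloads above, a minimal sketch; note
that PixelFormatInfo is part of the internal API declared in
"libcamera/internal/formats.h", and the format name is purely illustrative:

#include <string>

#include <libcamera/formats.h>

#include "libcamera/internal/formats.h"

using namespace libcamera;

/* Look up format information by PixelFormat or by name. */
const PixelFormatInfo &byFormat = PixelFormatInfo::info(formats::NV12);
const PixelFormatInfo &byName = PixelFormatInfo::info(std::string("NV12"));

/* Unknown formats yield an invalid PixelFormatInfo instead of an error. */
if (!byFormat.isValid() || !byName.isValid()) {
	/* Handle the unsupported format. */
}
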
/**
- * \brief Retrieve a list of all supported image formats
- * \return List of pixel formats or media bus codes
+ * \brief Compute the stride
+ * \param[in] width The width of the line, in pixels
+ * \param[in] plane The index of the plane whose stride is to be computed
+ * \param[in] align The stride alignment, in bytes
+ *
+ * The stride is the number of bytes necessary to store a full line of a frame,
+ * including padding at the end of the line. This function takes into account
+ * the alignment constraints intrinsic to the format (for instance, the
+ * SGRBG12_CSI2P format stores two 12-bit pixels in 3 bytes, and thus has a
+ * required stride alignment of 3 bytes). Additional alignment constraints may
+ * be specified through the \a align parameter, which will cause the stride to
+ * be rounded up to the next multiple of \a align.
+ *
+ * For multi-planar formats, different planes may have different stride values.
+ * The \a plane parameter selects which plane to compute the stride for.
+ *
+ * \return The number of bytes necessary to store a line, or 0 if the
+ * PixelFormatInfo instance or the \a plane is not valid
*/
-std::vector<unsigned int> ImageFormats::formats() const
+unsigned int PixelFormatInfo::stride(unsigned int width, unsigned int plane,
+ unsigned int align) const
{
- std::vector<unsigned int> formats;
- formats.reserve(data_.size());
+ if (!isValid()) {
+ LOG(Formats, Warning) << "Invalid pixel format, stride is zero";
+ return 0;
+ }
+
+ if (plane >= planes.size() || !planes[plane].bytesPerGroup) {
+ LOG(Formats, Warning) << "Invalid plane index, stride is zero";
+ return 0;
+ }
- /* \todo: Should this be cached instead of computed each time? */
- for (auto const &it : data_)
- formats.push_back(it.first);
+ /* ceil(width / pixelsPerGroup) * bytesPerGroup */
+ unsigned int stride = (width + pixelsPerGroup - 1) / pixelsPerGroup
+ * planes[plane].bytesPerGroup;
- return formats;
+ /* ceil(stride / align) * align */
+ return (stride + align - 1) / align * align;
}
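
To make the computation concrete, a short sketch with illustrative values,
using the same includes as the sketch above; SGRBG12_CSI2P packs one pixel
group of two 12-bit pixels in 3 bytes:

const PixelFormatInfo &raw12 = PixelFormatInfo::info(formats::SGRBG12_CSI2P);

/* ceil(4056 / 2) * 3 = 6084 bytes with the format's intrinsic alignment. */
unsigned int stride = raw12.stride(4056, 0, 1);

/* Rounded up to the next multiple of 32: 6112 bytes. */
unsigned int aligned = raw12.stride(4056, 0, 32);
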
/**
- * \brief Retrieve all sizes for a specific format
- * \param[in] format The pixel format or mbus code
+ * \brief Compute the number of bytes necessary to store a plane of a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] plane The plane index
+ * \param[in] align The stride alignment, in bytes (1 for default alignment)
*
- * Retrieve all size ranges for a specific format. For V4L2Device \a format is a
- * pixel format while for a V4L2Subdevice \a format is a media bus code.
+ * The plane size is computed by multiplying the line stride and the frame
+ * height, taking subsampling and other format characteristics into account.
+ * Stride alignment constraints may be specified through the \a align parameter.
*
- * \return The list of image sizes supported for \a format, or an empty list if
- * the format is not supported
+ * \sa stride()
+ *
+ * \return The number of bytes necessary to store the plane, or 0 if the
+ * PixelFormatInfo instance is not valid or the plane number isn't valid for the
+ * format
*/
-const std::vector<SizeRange> &ImageFormats::sizes(unsigned int format) const
+unsigned int PixelFormatInfo::planeSize(const Size &size, unsigned int plane,
+ unsigned int align) const
{
- static const std::vector<SizeRange> empty;
+ unsigned int stride = PixelFormatInfo::stride(size.width, plane, align);
+ if (!stride)
+ return 0;
+
+ return planeSize(size.height, plane, stride);
+}
- auto const &it = data_.find(format);
- if (it == data_.end())
- return empty;
+/**
+ * \brief Compute the number of bytes necessary to store a plane of a frame
+ * \param[in] height The height of the frame, in pixels
+ * \param[in] plane The plane index
+ * \param[in] stride The plane stride, in bytes
+ *
+ * The plane size is computed by multiplying the line stride and the frame
+ * height, taking subsampling and other format characteristics into account.
+ * The line stride is specified directly through the \a stride parameter.
+ *
+ * \return The number of bytes necessary to store the plane, or 0 if the
+ * PixelFormatInfo instance is not valid or the plane number isn't valid for the
+ * format
+ */
+unsigned int PixelFormatInfo::planeSize(unsigned int height, unsigned int plane,
+ unsigned int stride) const
+{
+ unsigned int vertSubSample = planes[plane].verticalSubSampling;
+ if (!vertSubSample)
+ return 0;
- return it->second;
+ /* stride * ceil(height / verticalSubSampling) */
+ return stride * ((height + vertSubSample - 1) / vertSubSample);
}
/**
- * \brief Retrieve the map that associates formats to image sizes
- * \return The map that associates formats to image sizes
+ * \brief Compute the number of bytes necessary to store a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] align The stride alignment, in bytes (1 for default alignment)
+ *
+ * The frame size is computed by adding the size of all planes, as computed by
+ * planeSize(), using the specified alignment constraints for all planes. For
+ * more complex stride constraints, use the frameSize() overloaded version that
+ * takes an array of stride values.
+ *
+ * \sa planeSize()
+ *
+ * \return The number of bytes necessary to store the frame, or 0 if the
+ * PixelFormatInfo instance is not valid
+ */
+unsigned int PixelFormatInfo::frameSize(const Size &size, unsigned int align) const
+{
+ unsigned int sum = 0;
+
+ for (const auto &[i, plane] : utils::enumerate(planes)) {
+ if (plane.bytesPerGroup == 0)
+ break;
+
+ sum += planeSize(size, i, align);
+ }
+
+ return sum;
+}
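
For instance, a sketch of the per-plane and total sizes of a 1920x1080 NV12
frame; NV12 averages 12 bits per pixel, so the total is width * height * 3 / 2:

const PixelFormatInfo &nv12 = PixelFormatInfo::info(formats::NV12);
const Size full(1920, 1080);

unsigned int luma = nv12.planeSize(full, 0, 1);   /* 1920 * 1080 = 2073600 */
unsigned int chroma = nv12.planeSize(full, 1, 1); /* 1920 * 540 = 1036800 */
unsigned int total = nv12.frameSize(full, 1);     /* 3110400 bytes */
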
+
+/**
+ * \brief Compute the number of bytes necessary to store a frame
+ * \param[in] size The size of the frame, in pixels
+ * \param[in] strides The strides to use for each plane
+ *
+ * This function is an overloaded version that takes custom strides for each
+ * plane, to be used when the device has custom alignment constraints that
+ * can't be described by just an alignment value.
+ *
+ * \return The number of bytes necessary to store the frame, or 0 if the
+ * PixelFormatInfo instance is not valid
+ */
+unsigned int
+PixelFormatInfo::frameSize(const Size &size,
+ const std::array<unsigned int, 3> &strides) const
+{
+ /* stride * ceil(height / verticalSubSampling) */
+ unsigned int sum = 0;
+ for (unsigned int i = 0; i < 3; i++) {
+ unsigned int vertSubSample = planes[i].verticalSubSampling;
+ if (!vertSubSample)
+ continue;
+ sum += strides[i]
+ * ((size.height + vertSubSample - 1) / vertSubSample);
+ }
+
+ return sum;
+}
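
A sketch of this overload, assuming a hypothetical device that imposes a
2048-byte stride on both planes of the same 1920x1080 NV12 frame:

const PixelFormatInfo &nv12 = PixelFormatInfo::info(formats::NV12);
std::array<unsigned int, 3> strides = {{ 2048, 2048, 0 }};

/* 2048 * 1080 + 2048 * 540 = 3317760 bytes. */
unsigned int total = nv12.frameSize(Size(1920, 1080), strides);
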
+
+/**
+ * \brief Retrieve the number of planes represented by the format
+ * \return The number of planes used by the format
*/
-const std::map<unsigned int, std::vector<SizeRange>> &ImageFormats::data() const
+unsigned int PixelFormatInfo::numPlanes() const
{
- return data_;
+ unsigned int count = 0;
+
+ for (const Plane &p : planes) {
+ if (p.bytesPerGroup == 0)
+ break;
+
+ count++;
+ }
+
+ return count;
}
} /* namespace libcamera */
diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml
new file mode 100644
index 00000000..2d54d391
--- /dev/null
+++ b/src/libcamera/formats.yaml
@@ -0,0 +1,212 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2020, Google Inc.
+#
+%YAML 1.1
+---
+formats:
+ - R8:
+ fourcc: DRM_FORMAT_R8
+ - R10:
+ fourcc: DRM_FORMAT_R10
+ - R12:
+ fourcc: DRM_FORMAT_R12
+ - R16:
+ fourcc: DRM_FORMAT_R16
+
+ - RGB565:
+ fourcc: DRM_FORMAT_RGB565
+ - RGB565_BE:
+ fourcc: DRM_FORMAT_RGB565
+ big_endian: true
+
+ - RGB888:
+ fourcc: DRM_FORMAT_RGB888
+ - BGR888:
+ fourcc: DRM_FORMAT_BGR888
+
+ - XRGB8888:
+ fourcc: DRM_FORMAT_XRGB8888
+ - XBGR8888:
+ fourcc: DRM_FORMAT_XBGR8888
+ - RGBX8888:
+ fourcc: DRM_FORMAT_RGBX8888
+ - BGRX8888:
+ fourcc: DRM_FORMAT_BGRX8888
+
+ - ARGB8888:
+ fourcc: DRM_FORMAT_ARGB8888
+ - ABGR8888:
+ fourcc: DRM_FORMAT_ABGR8888
+ - RGBA8888:
+ fourcc: DRM_FORMAT_RGBA8888
+ - BGRA8888:
+ fourcc: DRM_FORMAT_BGRA8888
+
+ - RGB161616:
+ fourcc: DRM_FORMAT_RGB161616
+ - BGR161616:
+ fourcc: DRM_FORMAT_BGR161616
+
+ - YUYV:
+ fourcc: DRM_FORMAT_YUYV
+ - YVYU:
+ fourcc: DRM_FORMAT_YVYU
+ - UYVY:
+ fourcc: DRM_FORMAT_UYVY
+ - VYUY:
+ fourcc: DRM_FORMAT_VYUY
+ - AVUY8888:
+ fourcc: DRM_FORMAT_AVUY8888
+ - XVUY8888:
+ fourcc: DRM_FORMAT_XVUY8888
+
+ - NV12:
+ fourcc: DRM_FORMAT_NV12
+ - NV21:
+ fourcc: DRM_FORMAT_NV21
+ - NV16:
+ fourcc: DRM_FORMAT_NV16
+ - NV61:
+ fourcc: DRM_FORMAT_NV61
+ - NV24:
+ fourcc: DRM_FORMAT_NV24
+ - NV42:
+ fourcc: DRM_FORMAT_NV42
+
+ - YUV420:
+ fourcc: DRM_FORMAT_YUV420
+ - YVU420:
+ fourcc: DRM_FORMAT_YVU420
+ - YUV422:
+ fourcc: DRM_FORMAT_YUV422
+ - YVU422:
+ fourcc: DRM_FORMAT_YVU422
+ - YUV444:
+ fourcc: DRM_FORMAT_YUV444
+ - YVU444:
+ fourcc: DRM_FORMAT_YVU444
+
+ - MJPEG:
+ fourcc: DRM_FORMAT_MJPEG
+
+ - SRGGB8:
+ fourcc: DRM_FORMAT_SRGGB8
+ - SGRBG8:
+ fourcc: DRM_FORMAT_SGRBG8
+ - SGBRG8:
+ fourcc: DRM_FORMAT_SGBRG8
+ - SBGGR8:
+ fourcc: DRM_FORMAT_SBGGR8
+
+ - SRGGB10:
+ fourcc: DRM_FORMAT_SRGGB10
+ - SGRBG10:
+ fourcc: DRM_FORMAT_SGRBG10
+ - SGBRG10:
+ fourcc: DRM_FORMAT_SGBRG10
+ - SBGGR10:
+ fourcc: DRM_FORMAT_SBGGR10
+
+ - SRGGB12:
+ fourcc: DRM_FORMAT_SRGGB12
+ - SGRBG12:
+ fourcc: DRM_FORMAT_SGRBG12
+ - SGBRG12:
+ fourcc: DRM_FORMAT_SGBRG12
+ - SBGGR12:
+ fourcc: DRM_FORMAT_SBGGR12
+
+ - SRGGB14:
+ fourcc: DRM_FORMAT_SRGGB14
+ - SGRBG14:
+ fourcc: DRM_FORMAT_SGRBG14
+ - SGBRG14:
+ fourcc: DRM_FORMAT_SGBRG14
+ - SBGGR14:
+ fourcc: DRM_FORMAT_SBGGR14
+
+ - SRGGB16:
+ fourcc: DRM_FORMAT_SRGGB16
+ - SGRBG16:
+ fourcc: DRM_FORMAT_SGRBG16
+ - SGBRG16:
+ fourcc: DRM_FORMAT_SGBRG16
+ - SBGGR16:
+ fourcc: DRM_FORMAT_SBGGR16
+
+ - R10_CSI2P:
+ fourcc: DRM_FORMAT_R10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - R12_CSI2P:
+ fourcc: DRM_FORMAT_R12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB10_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG10_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG10_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR10_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR10
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB12_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG12_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG12_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR12_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR12
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB14_CSI2P:
+ fourcc: DRM_FORMAT_SRGGB14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGRBG14_CSI2P:
+ fourcc: DRM_FORMAT_SGRBG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SGBRG14_CSI2P:
+ fourcc: DRM_FORMAT_SGBRG14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+ - SBGGR14_CSI2P:
+ fourcc: DRM_FORMAT_SBGGR14
+ mod: MIPI_FORMAT_MOD_CSI2_PACKED
+
+ - SRGGB10_IPU3:
+ fourcc: DRM_FORMAT_SRGGB10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SGRBG10_IPU3:
+ fourcc: DRM_FORMAT_SGRBG10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SGBRG10_IPU3:
+ fourcc: DRM_FORMAT_SGBRG10
+ mod: IPU3_FORMAT_MOD_PACKED
+ - SBGGR10_IPU3:
+ fourcc: DRM_FORMAT_SBGGR10
+ mod: IPU3_FORMAT_MOD_PACKED
+
+ - RGGB_PISP_COMP1:
+ fourcc: DRM_FORMAT_SRGGB16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GRBG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGRBG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - GBRG_PISP_COMP1:
+ fourcc: DRM_FORMAT_SGBRG16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - BGGR_PISP_COMP1:
+ fourcc: DRM_FORMAT_SBGGR16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+ - MONO_PISP_COMP1:
+ fourcc: DRM_FORMAT_R16
+ mod: PISP_FORMAT_MOD_COMPRESS_MODE1
+...
diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp
new file mode 100644
index 00000000..826848f7
--- /dev/null
+++ b/src/libcamera/framebuffer.cpp
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Frame buffer handling
+ */
+
+#include <libcamera/framebuffer.h>
+#include "libcamera/internal/framebuffer.h"
+
+#include <sys/stat.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+/**
+ * \file libcamera/framebuffer.h
+ * \brief Frame buffer handling
+ */
+
+/**
+ * \internal
+ * \file libcamera/internal/framebuffer.h
+ * \brief Internal frame buffer handling support
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Buffer)
+
+/**
+ * \struct FrameMetadata
+ * \brief Metadata related to a captured frame
+ *
+ * The FrameMetadata structure stores all metadata related to a captured frame,
+ * as stored in a FrameBuffer, such as capture status, timestamp and bytes used.
+ */
+
+/**
+ * \enum FrameMetadata::Status
+ * \brief Define the frame completion status
+ * \var FrameMetadata::FrameSuccess
+ * The frame has been captured with success and contains valid data. All fields
+ * of the FrameMetadata structure are valid.
+ * \var FrameMetadata::FrameError
+ * An error occurred during capture of the frame. The frame data may be partly
+ * or fully invalid. The sequence and timestamp fields of the FrameMetadata
+ * structure are valid; the other fields may be invalid.
+ * \var FrameMetadata::FrameCancelled
+ * Capture stopped before the frame completed. The frame data is not valid. All
+ * fields of the FrameMetadata structure but the status field are invalid.
+ */
+
+/**
+ * \struct FrameMetadata::Plane
+ * \brief Per-plane frame metadata
+ *
+ * Frames are stored in memory in one or multiple planes. The
+ * FrameMetadata::Plane structure stores per-plane metadata.
+ */
+
+/**
+ * \var FrameMetadata::Plane::bytesused
+ * \brief Number of bytes occupied by the data in the plane, including line
+ * padding
+ *
+ * This value may vary per frame for compressed formats. For uncompressed
+ * formats it will be constant for all frames, but may be smaller than the
+ * FrameBuffer size.
+ */
+
+/**
+ * \var FrameMetadata::status
+ * \brief Status of the frame
+ *
+ * The validity of other fields of the FrameMetadata structure depends on the
+ * status value.
+ */
+
+/**
+ * \var FrameMetadata::sequence
+ * \brief Frame sequence number
+ *
+ * The sequence number is a monotonically increasing number assigned to the
+ * frames captured by the stream. The value is increased by one for each frame.
+ * Gaps in the sequence numbers indicate dropped frames.
+ */
+
+/**
+ * \var FrameMetadata::timestamp
+ * \brief Time when the frame was captured
+ *
+ * The timestamp is expressed as a number of nanoseconds relative to the system
+ * clock since an unspecified time point.
+ *
+ * \todo Be more precise on what timestamps refer to.
+ */
+
+/**
+ * \fn FrameMetadata::planes()
+ * \copydoc FrameMetadata::planes() const
+ */
+
+/**
+ * \fn FrameMetadata::planes() const
+ * \brief Retrieve the array of per-plane metadata
+ * \return The array of per-plane metadata
+ */
+
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class FrameBuffer::Private
+ * \brief Base class for FrameBuffer private data
+ *
+ * The FrameBuffer::Private class stores all private data associated with a
+ * framebuffer. It implements the d-pointer design pattern to hide core
+ * FrameBuffer data from the public API, and exposes utility functions to
+ * pipeline handlers.
+ */
+
+/**
+ * \brief Construct a FrameBuffer::Private instance
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::Private::Private(const std::vector<Plane> &planes, uint64_t cookie)
+ : planes_(planes), cookie_(cookie), request_(nullptr),
+ isContiguous_(true)
+{
+ metadata_.planes_.resize(planes_.size());
+}
+
+/**
+ * \brief FrameBuffer::Private destructor
+ */
+FrameBuffer::Private::~Private()
+{
+}
+
+/**
+ * \fn FrameBuffer::Private::setRequest()
+ * \brief Set the request this buffer belongs to
+ * \param[in] request Request to set
+ *
+ * For buffers added to requests by applications, this function is called by
+ * Request::addBuffer() or Request::reuse(). For buffers internal to pipeline
+ * handlers, it is called by the pipeline handlers themselves.
+ */
+
+/**
+ * \fn FrameBuffer::Private::isContiguous()
+ * \brief Check if the frame buffer stores planes contiguously in memory
+ *
+ * Multi-planar frame buffers can store their planes contiguously in memory, or
+ * split them into discontiguous memory areas. This function checks in which of
+ * these two categories the frame buffer belongs.
+ *
+ * \return True if the planes are stored contiguously in memory, false otherwise
+ */
+
+/**
+ * \fn FrameBuffer::Private::fence()
+ * \brief Retrieve a const pointer to the Fence
+ *
+ * This function only returns a reference to the fence and does not
+ * change its ownership. The fence is stored in the FrameBuffer and can only be
+ * reset with FrameBuffer::releaseFence() in case the buffer has completed with
+ * error due to a Fence wait failure.
+ *
+ * If a buffer with a Fence completes with errors due to a failure in handling
+ * the fence, applications are responsible for releasing the Fence before
+ * calling Request::addBuffer() again.
+ *
+ * \sa Request::addBuffer()
+ *
+ * \return A const pointer to the Fence if any, nullptr otherwise
+ */
+
+/**
+ * \fn FrameBuffer::Private::setFence()
+ * \brief Move a \a fence in this buffer
+ * \param[in] fence The Fence
+ *
+ * This function associates a Fence with this FrameBuffer. The intended caller
+ * is the Request::addBuffer() function.
+ *
+ * Once a FrameBuffer is associated with a Fence, the FrameBuffer will only be
+ * made available to the hardware device once the Fence has been correctly
+ * signalled.
+ *
+ * \sa Request::prepare()
+ *
+ * If the FrameBuffer completes successfully the core releases the Fence and the
+ * Buffer can be reused immediately. If handling of the Fence fails during the
+ * request preparation, the Fence is not released and is left in the
+ * FrameBuffer. It is the application's responsibility to correctly release
+ * the fence and handle it appropriately before using the buffer again.
+ */
+
+/**
+ * \fn FrameBuffer::Private::cancel()
+ * \brief Marks the buffer as cancelled
+ *
+ * If a buffer is not used by a request, it shall be marked as cancelled to
+ * indicate that the metadata is invalid.
+ */
+
+/**
+ * \fn FrameBuffer::Private::metadata()
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+#endif /* __DOXYGEN_PUBLIC__ */
+
+/**
+ * \class FrameBuffer
+ * \brief Frame buffer data and its associated dynamic metadata
+ *
+ * The FrameBuffer class is the primary interface for applications, IPAs and
+ * pipeline handlers to interact with frame memory. It contains all the static
+ * and dynamic information to manage the whole life cycle of a frame capture,
+ * from buffer creation to consumption.
+ *
+ * The static information describes the memory planes that make a frame. The
+ * planes are specified when creating the FrameBuffer and are expressed as a set
+ * of dmabuf file descriptors, offset and length.
+ *
+ * The dynamic information is grouped in a FrameMetadata instance. It is updated
+ * during the processing of a queued capture request, and is valid from the
+ * completion of the buffer as signaled by Camera::bufferComplete() until the
+ * FrameBuffer is either reused in a new request or deleted.
+ *
+ * The creator of a FrameBuffer (application, IPA or pipeline handler) may
+ * associate to it an integer cookie for any private purpose. The cookie may be
+ * set when creating the FrameBuffer, and updated at any time with setCookie().
+ * The cookie is transparent to the libcamera core and shall only be set by the
+ * creator of the FrameBuffer. This mechanism supplements the Request cookie.
+ */
+
+/**
+ * \struct FrameBuffer::Plane
+ * \brief A memory region to store a single plane of a frame
+ *
+ * Planar pixel formats use multiple memory regions to store the different
+ * colour components of a frame. The Plane structure describes such a memory
+ * region by a dmabuf file descriptor, an offset within the dmabuf and a length.
+ * A FrameBuffer then contains one or multiple planes, depending on the pixel
+ * format of the frames it is meant to store.
+ *
+ * The offset identifies the location of the plane data from the start of the
+ * memory referenced by the dmabuf file descriptor. Multiple planes may be
+ * stored in the same dmabuf, in which case they will reference the same dmabuf
+ * and different offsets. No two planes may overlap, as specified by their
+ * offset and length.
+ *
+ * To support DMA access, planes are associated with dmabuf objects represented
+ * by SharedFD handles. The Plane class doesn't handle mapping of the memory to
+ * the CPU, but applications and IPAs may use the dmabuf file descriptors to map
+ * the plane memory with mmap() and access its contents.
+ *
+ * \todo Specify how an application shall decide whether to use a single or
+ * multiple dmabufs, based on the camera requirements.
+ */
+
+/**
+ * \var FrameBuffer::Plane::kInvalidOffset
+ * \brief Invalid offset value, to identify uninitialized planes
+ */
+
+/**
+ * \var FrameBuffer::Plane::fd
+ * \brief The dmabuf file descriptor
+ */
+
+/**
+ * \var FrameBuffer::Plane::offset
+ * \brief The plane offset in bytes
+ */
+
+/**
+ * \var FrameBuffer::Plane::length
+ * \brief The plane length in bytes
+ */
+
+namespace {
+
+ino_t fileDescriptorInode(const SharedFD &fd)
+{
+ if (!fd.isValid())
+ return 0;
+
+ struct stat st;
+ int ret = fstat(fd.get(), &st);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(Buffer, Fatal)
+ << "Failed to fstat() fd: " << strerror(-ret);
+ return 0;
+ }
+
+ return st.st_ino;
+}
+
+} /* namespace */
+
+/**
+ * \brief Construct a FrameBuffer with an array of planes
+ * \param[in] planes The frame memory planes
+ * \param[in] cookie Cookie
+ */
+FrameBuffer::FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie)
+ : FrameBuffer(std::make_unique<Private>(planes, cookie))
+{
+}
+
+/**
+ * \brief Construct a FrameBuffer with an extensible private class
+ * \param[in] d The extensible private class
+ */
+FrameBuffer::FrameBuffer(std::unique_ptr<Private> d)
+ : Extensible(std::move(d))
+{
+ unsigned int offset = 0;
+ bool isContiguous = true;
+ ino_t inode = 0;
+
+ for (const auto &plane : _d()->planes_) {
+ ASSERT(plane.offset != Plane::kInvalidOffset);
+
+ if (plane.offset != offset) {
+ isContiguous = false;
+ break;
+ }
+
+ /*
+ * Two different dmabuf file descriptors may still refer to the
+ * same dmabuf instance. Check this using inodes.
+ */
+ if (plane.fd != _d()->planes_[0].fd) {
+ if (!inode)
+ inode = fileDescriptorInode(_d()->planes_[0].fd);
+ if (fileDescriptorInode(plane.fd) != inode) {
+ isContiguous = false;
+ break;
+ }
+ }
+
+ offset += plane.length;
+ }
+
+ LOG(Buffer, Debug)
+ << "Buffer is " << (isContiguous ? "" : "not ") << "contiguous";
+
+ _d()->isContiguous_ = isContiguous;
+}
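
As a sketch of the contiguity detection above, consider a creator describing
both planes of a 1920x1080 NV12 frame stored in a single dmabuf. The
allocateDmabuf() helper is hypothetical, standing in for whatever allocator
(a dma-buf heap, a V4L2 device, ...) actually provides the memory:

#include <vector>

#include <libcamera/base/shared_fd.h>

#include <libcamera/framebuffer.h>

using namespace libcamera;

SharedFD fd = allocateDmabuf(3110400); /* hypothetical allocation helper */

std::vector<FrameBuffer::Plane> planes(2);
planes[0].fd = fd;
planes[0].offset = 0;        /* Y plane at the start of the dmabuf */
planes[0].length = 2073600;
planes[1].fd = fd;
planes[1].offset = 2073600;  /* UV plane immediately follows: contiguous */
planes[1].length = 1036800;

FrameBuffer buffer(planes, 0);
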
+
+/**
+ * \brief Retrieve the static plane descriptors
+ * \return Array of plane descriptors
+ */
+const std::vector<FrameBuffer::Plane> &FrameBuffer::planes() const
+{
+ return _d()->planes_;
+}
+
+/**
+ * \brief Retrieve the request this buffer belongs to
+ *
+ * The intended callers of this function are buffer completion handlers that
+ * need to associate a buffer to the request it belongs to.
+ *
+ * A FrameBuffer is associated to a request by Request::addBuffer() and the
+ * association is valid until the buffer completes. The returned request
+ * pointer is valid only during that interval.
+ *
+ * \return The Request the FrameBuffer belongs to, or nullptr if the buffer is
+ * not associated with a request
+ */
+Request *FrameBuffer::request() const
+{
+ return _d()->request_;
+}
+
+/**
+ * \brief Retrieve the dynamic metadata
+ * \return Dynamic metadata for the frame contained in the buffer
+ */
+const FrameMetadata &FrameBuffer::metadata() const
+{
+ return _d()->metadata_;
+}
+
+/**
+ * \brief Retrieve the cookie
+ *
+ * The cookie belongs to the creator of the FrameBuffer, which controls its
+ * lifetime and value.
+ *
+ * \sa setCookie()
+ *
+ * \return The cookie
+ */
+uint64_t FrameBuffer::cookie() const
+{
+ return _d()->cookie_;
+}
+
+/**
+ * \brief Set the cookie
+ * \param[in] cookie Cookie to set
+ *
+ * The cookie belongs to the creator of the FrameBuffer. Its value may be
+ * modified at any time with this function. Applications and IPAs shall not
+ * modify the cookie value of buffers they haven't created themselves. The
+ * libcamera core never modifies the buffer cookie.
+ */
+void FrameBuffer::setCookie(uint64_t cookie)
+{
+ _d()->cookie_ = cookie;
+}
+
+/**
+ * \brief Extract the Fence associated with this FrameBuffer
+ *
+ * This function moves the buffer's fence ownership to the caller.
+ * After the fence has been released, calling this function always returns
+ * nullptr.
+ *
+ * If a buffer with a Fence completes with errors due to a failure in handling
+ * the fence, applications are responsible for releasing the Fence before
+ * calling Request::addBuffer() again.
+ *
+ * \return A unique pointer to the Fence if set, or nullptr if the fence has
+ * been released already
+ */
+std::unique_ptr<Fence> FrameBuffer::releaseFence()
+{
+ return std::move(_d()->fence_);
+}
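
A sketch of a completion handler honouring this contract, assuming the buffer
was queued with a fence through Request::addBuffer():

#include <memory>

#include <libcamera/fence.h>
#include <libcamera/framebuffer.h>

using namespace libcamera;

void onBufferComplete(FrameBuffer *buffer)
{
	if (buffer->metadata().status != FrameMetadata::FrameError)
		return;

	/*
	 * The fence may have failed: take ownership back so the buffer
	 * can safely be added to a new request later.
	 */
	std::unique_ptr<Fence> fence = buffer->releaseFence();
	/* ... inspect or dispose of the fence ... */
}
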
+
+} /* namespace libcamera */
diff --git a/src/libcamera/framebuffer_allocator.cpp b/src/libcamera/framebuffer_allocator.cpp
index a37b564c..3d53bde2 100644
--- a/src/libcamera/framebuffer_allocator.cpp
+++ b/src/libcamera/framebuffer_allocator.cpp
@@ -2,19 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.cpp - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#include <libcamera/framebuffer_allocator.h>
#include <errno.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/log.h>
+
#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>
-#include "log.h"
-#include "pipeline_handler.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file framebuffer_allocator.h
@@ -58,14 +59,11 @@ LOG_DEFINE_CATEGORY(Allocator)
* \param[in] camera The camera
*/
FrameBufferAllocator::FrameBufferAllocator(std::shared_ptr<Camera> camera)
- : camera_(camera)
+ : camera_(std::move(camera))
{
}
-FrameBufferAllocator::~FrameBufferAllocator()
-{
- buffers_.clear();
-}
+FrameBufferAllocator::~FrameBufferAllocator() = default;
/**
* \brief Allocate buffers for a configured stream
@@ -76,7 +74,7 @@ FrameBufferAllocator::~FrameBufferAllocator()
* stopped, and the stream shall be part of the active camera configuration.
*
* Upon successful allocation, the allocated buffers can be retrieved with the
- * buffers() method.
+ * buffers() function.
*
* \return The number of allocated buffers on success or a negative error code
* otherwise
@@ -87,16 +85,22 @@ FrameBufferAllocator::~FrameBufferAllocator()
*/
int FrameBufferAllocator::allocate(Stream *stream)
{
- if (buffers_.count(stream)) {
+ const auto &[it, inserted] = buffers_.try_emplace(stream);
+
+ if (!inserted) {
LOG(Allocator, Error) << "Buffers already allocated for stream";
return -EBUSY;
}
- int ret = camera_->exportFrameBuffers(stream, &buffers_[stream]);
+ int ret = camera_->exportFrameBuffers(stream, &it->second);
if (ret == -EINVAL)
LOG(Allocator, Error)
- << "Stream is not part of " << camera_->name()
+ << "Stream is not part of " << camera_->id()
<< " active configuration";
+
+ if (ret < 0)
+ buffers_.erase(it);
+
return ret;
}
@@ -118,8 +122,6 @@ int FrameBufferAllocator::free(Stream *stream)
if (iter == buffers_.end())
return -EINVAL;
- std::vector<std::unique_ptr<FrameBuffer>> &buffers = iter->second;
- buffers.clear();
buffers_.erase(iter);
return 0;
@@ -136,7 +138,7 @@ int FrameBufferAllocator::free(Stream *stream)
* \brief Retrieve the buffers allocated for a \a stream
* \param[in] stream The stream to retrieve buffers for
*
- * This method shall only be called after successfully allocating buffers for
+ * This function shall only be called after successfully allocating buffers for
* \a stream with allocate(). The returned buffers are valid until free() is
* called for the same stream or the FrameBufferAllocator instance is destroyed.
*
diff --git a/src/libcamera/gen-controls.py b/src/libcamera/gen-controls.py
deleted file mode 100755
index 87c3d52a..00000000
--- a/src/libcamera/gen-controls.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (C) 2019, Google Inc.
-#
-# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-#
-# gen-controls.py - Generate control definitions from YAML
-
-import argparse
-import string
-import sys
-import yaml
-
-
-def snake_case(s):
- return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')
-
-
-def format_description(description):
- description = description.strip('\n').split('\n')
- description[0] = '\\brief ' + description[0]
- return '\n'.join([(line and ' * ' or ' *') + line for line in description])
-
-
-def generate_cpp(controls):
- enum_doc_start_template = string.Template('''/**
- * \\enum ${name}Values
- * \\brief Supported ${name} values''')
- enum_doc_value_template = string.Template(''' * \\var ${name}Values::${value}
-${description}''')
- doc_template = string.Template('''/**
- * \\var ${name}
-${description}
- */''')
- def_template = string.Template('extern const Control<${type}> ${name}(${id_name}, "${name}");')
-
- ctrls_doc = []
- ctrls_def = []
- ctrls_map = []
-
- for ctrl in controls:
- name, ctrl = ctrl.popitem()
- id_name = snake_case(name).upper()
-
- ctrl_type = ctrl['type']
- if ctrl_type == 'string':
- ctrl_type = 'std::string'
- elif ctrl.get('size'):
- ctrl_type = 'Span<const %s>' % ctrl_type
-
- info = {
- 'name': name,
- 'type': ctrl_type,
- 'description': format_description(ctrl['description']),
- 'id_name': id_name,
- }
-
- enum = ctrl.get('enum')
- if enum:
- enum_doc = []
- enum_doc.append(enum_doc_start_template.substitute(info))
-
- for entry in enum:
- value_info = {
- 'name' : name,
- 'value': entry['name'],
- 'description': format_description(entry['description']),
- }
- enum_doc.append(enum_doc_value_template.substitute(value_info))
-
- enum_doc = '\n *\n'.join(enum_doc)
- enum_doc += '\n */'
- ctrls_doc.append(enum_doc)
-
- ctrls_doc.append(doc_template.substitute(info))
- ctrls_def.append(def_template.substitute(info))
- ctrls_map.append('\t{ ' + id_name + ', &' + name + ' },')
-
- return {
- 'controls_doc': '\n\n'.join(ctrls_doc),
- 'controls_def': '\n'.join(ctrls_def),
- 'controls_map': '\n'.join(ctrls_map),
- }
-
-
-def generate_h(controls):
- enum_template_start = string.Template('''enum ${name}Values {''')
- enum_value_template = string.Template('''\t${name} = ${value},''')
- template = string.Template('''extern const Control<${type}> ${name};''')
-
- ctrls = []
- ids = []
- id_value = 1
-
- for ctrl in controls:
- name, ctrl = ctrl.popitem()
- id_name = snake_case(name).upper()
-
- ids.append('\t' + id_name + ' = ' + str(id_value) + ',')
-
- ctrl_type = ctrl['type']
- if ctrl_type == 'string':
- ctrl_type = 'std::string'
- elif ctrl.get('size'):
- ctrl_type = 'Span<const %s>' % ctrl_type
-
- info = {
- 'name': name,
- 'type': ctrl_type,
- }
-
- enum = ctrl.get('enum')
- if enum:
- ctrls.append(enum_template_start.substitute(info))
-
- for entry in enum:
- value_info = {
- 'name': entry['name'],
- 'value': entry['value'],
- }
- ctrls.append(enum_value_template.substitute(value_info))
- ctrls.append("};")
-
- ctrls.append(template.substitute(info))
- id_value += 1
-
- return {'ids': '\n'.join(ids), 'controls': '\n'.join(ctrls)}
-
-
-def fill_template(template, data):
-
- template = open(template, 'rb').read()
- template = template.decode('utf-8')
- template = string.Template(template)
- return template.substitute(data)
-
-
-def main(argv):
-
- # Parse command line arguments
- parser = argparse.ArgumentParser()
- parser.add_argument('-o', dest='output', metavar='file', type=str,
- help='Output file name. Defaults to standard output if not specified.')
- parser.add_argument('input', type=str,
- help='Input file name.')
- parser.add_argument('template', type=str,
- help='Template file name.')
- args = parser.parse_args(argv[1:])
-
- data = open(args.input, 'rb').read()
- controls = yaml.safe_load(data)['controls']
-
- if args.template.endswith('.cpp.in'):
- data = generate_cpp(controls)
- elif args.template.endswith('.h.in'):
- data = generate_h(controls)
- else:
- raise RuntimeError('Unknown template type')
-
- data = fill_template(args.template, data)
-
- if args.output:
- output = open(args.output, 'wb')
- output.write(data.encode('utf-8'))
- output.close()
- else:
- sys.stdout.write(data)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp
index 13f642be..81cc8cd5 100644
--- a/src/libcamera/geometry.cpp
+++ b/src/libcamera/geometry.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.cpp - Geometry-related structures
+ * Geometry-related structures
*/
#include <libcamera/geometry.h>
@@ -10,6 +10,8 @@
#include <sstream>
#include <stdint.h>
+#include <libcamera/base/log.h>
+
/**
* \file geometry.h
* \brief Data structures related to geometric objects
@@ -18,71 +20,85 @@
namespace libcamera {
/**
- * \struct Rectangle
- * \brief Describe a rectangle's position and dimensions
- *
- * Rectangles are used to identify an area of an image. They are specified by
- * the coordinates of top-left corner and their horizontal and vertical size.
+ * \class Point
+ * \brief Describe a point in two-dimensional space
*
- * The measure unit of the rectangle coordinates and size, as well as the
- * reference point from which the Rectangle::x and Rectangle::y displacements
- * refers to, are defined by the context were rectangle is used.
+ * The Point class defines a point in two-dimensional space with integer
+ * precision. The coordinates of a Point may be negative as well as positive.
*/
/**
- * \var Rectangle::x
- * \brief The horizontal coordinate of the rectangle's top-left corner
+ * \fn Point::Point()
+ * \brief Construct a Point with x and y set to 0
*/
/**
- * \var Rectangle::y
- * \brief The vertical coordinate of the rectangle's top-left corner
+ * \fn Point::Point(int xpos, int ypos)
+ * \brief Construct a Point at given \a xpos and \a ypos values
+ * \param[in] xpos The x-coordinate
+ * \param[in] ypos The y-coordinate
*/
/**
- * \var Rectangle::w
- * \brief The distance between the left and right sides
+ * \var Point::x
+ * \brief The x-coordinate of the Point
*/
/**
- * \var Rectangle::h
- * \brief The distance between the top and bottom sides
+ * \var Point::y
+ * \brief The y-coordinate of the Point
*/
/**
- * \brief Assemble and return a string describing the rectangle
- * \return A string describing the Rectangle
+ * \brief Assemble and return a string describing the point
+ * \return A string describing the point
*/
-const std::string Rectangle::toString() const
+const std::string Point::toString() const
{
std::stringstream ss;
-
- ss << "(" << x << "x" << y << ")/" << w << "x" << h;
+ ss << *this;
return ss.str();
}
/**
- * \brief Compare rectangles for equality
- * \return True if the two rectangles are equal, false otherwise
+ * \fn Point Point::operator-() const
+ * \brief Negate a Point by negating both its x and y coordinates
+ * \return The negated point
*/
-bool operator==(const Rectangle &lhs, const Rectangle &rhs)
+
+/**
+ * \brief Compare points for equality
+ * \return True if the two points are equal, false otherwise
+ */
+bool operator==(const Point &lhs, const Point &rhs)
{
- return lhs.x == rhs.x && lhs.y == rhs.y &&
- lhs.w == rhs.w && lhs.h == rhs.h;
+ return lhs.x == rhs.x && lhs.y == rhs.y;
}
/**
- * \fn bool operator!=(const Rectangle &lhs, const Rectangle &rhs)
- * \brief Compare rectangles for inequality
- * \return True if the two rectangles are not equal, false otherwise
+ * \fn bool operator!=(const Point &lhs, const Point &rhs)
+ * \brief Compare points for inequality
+ * \return True if the two points are not equal, false otherwise
*/
/**
- * \struct Size
+ * \brief Insert a text representation of a Point into an output stream
+ * \param[in] out The output stream
+ * \param[in] p The point
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Point &p)
+{
+ out << "(" << p.x << ", " << p.y << ")";
+ return out;
+}
+
+/**
+ * \class Size
* \brief Describe a two-dimensional size
*
- * The Size structure defines a two-dimensional size with integer precision.
+ * The Size class defines a two-dimensional size with integer precision.
*/
/**
@@ -108,12 +124,256 @@ bool operator==(const Rectangle &lhs, const Rectangle &rhs)
*/
/**
+ * \fn bool Size::isNull() const
+ * \brief Check if the size is null
+ * \return True if both the width and height are 0, or false otherwise
+ */
+
+/**
* \brief Assemble and return a string describing the size
* \return A string describing the size
*/
const std::string Size::toString() const
{
- return std::to_string(width) + "x" + std::to_string(height);
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \fn Size::alignDownTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size down horizontally and vertically in place
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ *
+ * This function rounds the width and height down to the nearest multiple of
+ * \a hAlignment and \a vAlignment respectively.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::alignUpTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size up horizontally and vertically in place
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ *
+ * This function rounds the width and height up to the nearest multiple of
+ * \a hAlignment and \a vAlignment respectively.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::boundTo(const Size &bound)
+ * \brief Bound the size to \a bound in place
+ * \param[in] bound The maximum size
+ *
+ * This function sets the width and height to the minimum of this size and the
+ * \a bound size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::expandTo(const Size &expand)
+ * \brief Expand the size to \a expand
+ * \param[in] expand The minimum size
+ *
+ * This function sets the width and height to the maximum of this size and the
+ * \a expand size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::growBy(const Size &margins)
+ * \brief Grow the size by \a margins in place
+ * \param[in] margins The margins to add to the size
+ *
+ * This function adds the width and height of the \a margins size to this size.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::shrinkBy(const Size &margins)
+ * \brief Shrink the size by \a margins in place
+ * \param[in] margins The margins to subtract from the size
+ *
+ * This function subtracts the width and height of the \a margins size from
+ * this size. If the width or height of the size is smaller than that of
+ * \a margins, the result is clamped to 0.
+ *
+ * \return A reference to this object
+ */
+
+/**
+ * \fn Size::alignedDownTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size down horizontally and vertically
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ * \return A Size whose width and height are equal to the width and height of
+ * this size rounded down to the nearest multiple of \a hAlignment and
+ * \a vAlignment respectively
+ */
+
+/**
+ * \fn Size::alignedUpTo(unsigned int hAlignment, unsigned int vAlignment)
+ * \brief Align the size up horizontally and vertically
+ * \param[in] hAlignment Horizontal alignment
+ * \param[in] vAlignment Vertical alignment
+ * \return A Size whose width and height are equal to the width and height of
+ * this size rounded up to the nearest multiple of \a hAlignment and
+ * \a vAlignment respectively
+ */
+
+/**
+ * \fn Size::boundedTo(const Size &bound)
+ * \brief Bound the size to \a bound
+ * \param[in] bound The maximum size
+ * \return A Size whose width and height are the minimum of the width and
+ * height of this size and the \a bound size
+ */
+
+/**
+ * \fn Size::expandedTo(const Size &expand)
+ * \brief Expand the size to \a expand
+ * \param[in] expand The minimum size
+ * \return A Size whose width and height are the maximum of the width and
+ * height of this size and the \a expand size
+ */
+
+/**
+ * \fn Size::grownBy(const Size &margins)
+ * \brief Grow the size by \a margins
+ * \param[in] margins The margins to add to the size
+ * \return A Size whose width and height are the sum of the width and height of
+ * this size and the \a margins size
+ */
+
+/**
+ * \fn Size::shrunkBy(const Size &margins)
+ * \brief Shrink the size by \a margins
+ * \param[in] margins The margins to subtract from the size
+ *
+ * If the width or height of the size is smaller than that of \a margins, the
+ * resulting size has its width or height clamped to 0.
+ *
+ * \return A Size whose width and height are the difference of the width and
+ * height of this size and the \a margins size, clamped to 0
+ */
+
+/**
+ * \brief Bound the size down to match the aspect ratio given by \a ratio
+ * \param[in] ratio The size whose aspect ratio must be matched
+ *
+ * The behaviour of this function is undefined if either the width or the
+ * height of the \a ratio is zero.
+ *
+ * \return A Size whose width and height are equal to the width and height
+ * of this Size aligned down to the aspect ratio of \a ratio
+ */
+Size Size::boundedToAspectRatio(const Size &ratio) const
+{
+ ASSERT(ratio.width && ratio.height);
+
+ uint64_t ratio1 = static_cast<uint64_t>(width) *
+ static_cast<uint64_t>(ratio.height);
+ uint64_t ratio2 = static_cast<uint64_t>(ratio.width) *
+ static_cast<uint64_t>(height);
+
+ if (ratio1 > ratio2)
+ return { static_cast<unsigned int>(ratio2 / ratio.height), height };
+ else
+ return { width, static_cast<unsigned int>(ratio1 / ratio.width) };
+}
+
+/**
+ * \brief Expand the size to match the aspect ratio given by \a ratio
+ * \param[in] ratio The size whose aspect ratio must be matched
+ *
+ * The behaviour of this function is undefined if either the width or the
+ * height of the \a ratio is zero.
+ *
+ * \return A Size whose width and height are equal to the width and height
+ * of this Size expanded up to the aspect ratio of \a ratio
+ */
+Size Size::expandedToAspectRatio(const Size &ratio) const
+{
+ ASSERT(ratio.width && ratio.height);
+
+ uint64_t ratio1 = static_cast<uint64_t>(width) *
+ static_cast<uint64_t>(ratio.height);
+ uint64_t ratio2 = static_cast<uint64_t>(ratio.width) *
+ static_cast<uint64_t>(height);
+
+ if (ratio1 < ratio2)
+ return { static_cast<unsigned int>(ratio2 / ratio.height), height };
+ else
+ return { width, static_cast<unsigned int>(ratio1 / ratio.width) };
+}
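
A worked example of the two aspect-ratio helpers, with illustrative 16:9 and
4:3 sizes (assuming <libcamera/geometry.h> is included):

Size size(1920, 1080);

Size cropped = size.boundedToAspectRatio(Size(4, 3));  /* 1440x1080 */
Size grown = size.expandedToAspectRatio(Size(4, 3));   /* 1920x1440 */
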
+
+/**
+ * \brief Center a rectangle of this size at a given Point
+ * \param[in] center The center point the Rectangle is to have
+ *
+ * A Rectangle of this object's size is positioned so that its center
+ * is at the given Point.
+ *
+ * \return A Rectangle of this size, centered at the given Point.
+ */
+Rectangle Size::centeredTo(const Point &center) const
+{
+ int x = center.x - width / 2;
+ int y = center.y - height / 2;
+
+ return { x, y, width, height };
+}
+
+/**
+ * \brief Scale size up by the given factor
+ * \param[in] factor The factor
+ * \return The scaled Size
+ */
+Size Size::operator*(float factor) const
+{
+ return Size(width * factor, height * factor);
+}
+
+/**
+ * \brief Scale size down by the given factor
+ * \param[in] factor The factor
+ * \return The scaled Size
+ */
+Size Size::operator/(float factor) const
+{
+ return Size(width / factor, height / factor);
+}
+
+/**
+ * \brief Scale this size up by the given factor in place
+ * \param[in] factor The factor
+ * \return A reference to this object
+ */
+Size &Size::operator*=(float factor)
+{
+ width *= factor;
+ height *= factor;
+ return *this;
+}
+
+/**
+ * \brief Scale this size down by the given factor in place
+ * \param[in] factor The factor
+ * \return A reference to this object
+ */
+Size &Size::operator/=(float factor)
+{
+ width /= factor;
+ height /= factor;
+ return *this;
}
/**
@@ -183,7 +443,19 @@ bool operator<(const Size &lhs, const Size &rhs)
*/
/**
- * \struct SizeRange
+ * \brief Insert a text representation of a Size into an output stream
+ * \param[in] out The output stream
+ * \param[in] s The size
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Size &s)
+{
+ out << s.width << "x" << s.height;
+ return out;
+}
+
+/**
+ * \class SizeRange
* \brief Describe a range of sizes
*
* A SizeRange describes a range of sizes included in the [min, max] interval
@@ -282,9 +554,7 @@ bool SizeRange::contains(const Size &size) const
std::string SizeRange::toString() const
{
std::stringstream ss;
-
- ss << "(" << min.toString() << ")-(" << max.toString() << ")/(+"
- << hStep << ",+" << vStep << ")";
+ ss << *this;
return ss.str();
}
@@ -304,4 +574,344 @@ bool operator==(const SizeRange &lhs, const SizeRange &rhs)
* \return True if the two size ranges are not equal, false otherwise
*/
+/**
+ * \brief Insert a text representation of a SizeRange into an output stream
+ * \param[in] out The output stream
+ * \param[in] sr The size range
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const SizeRange &sr)
+{
+ out << "(" << sr.min << ")-(" << sr.max << ")/(+"
+ << sr.hStep << ",+" << sr.vStep << ")";
+
+ return out;
+}
+
+/**
+ * \class Rectangle
+ * \brief Describe a rectangle's position and dimensions
+ *
+ * Rectangles are used to identify an area of an image. They are specified by
+ * the coordinates of top-left corner and their horizontal and vertical size.
+ * By convention, the top-left corner is defined as the corner with the lowest
+ * x and y coordinates, regardless of the origin and direction of the axes.
+ *
+ * The measurement unit of the rectangle coordinates and size, as well as the
+ * reference point from which the Rectangle::x and Rectangle::y displacements
+ * are measured, are defined by the context where the rectangle is used.
+ */
+
+/**
+ * \fn Rectangle::Rectangle()
+ * \brief Construct a Rectangle with all coordinates set to 0
+ */
+
+/**
+ * \fn Rectangle::Rectangle(int x, int y, const Size &size)
+ * \brief Construct a Rectangle with the given position and size
+ * \param[in] x The horizontal coordinate of the top-left corner
+ * \param[in] y The vertical coordinate of the top-left corner
+ * \param[in] size The size
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \fn Rectangle::Rectangle(int x, int y, unsigned int width, unsigned int height)
+ * \brief Construct a Rectangle with the given position and size
+ * \param[in] x The horizontal coordinate of the top-left corner
+ * \param[in] y The vertical coordinate of the top-left corner
+ * \param[in] width The width
+ * \param[in] height The height
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \fn Rectangle::Rectangle(const Size &size)
+ * \brief Construct a Rectangle of \a size with its top left corner located
+ * at (0,0)
+ * \param[in] size The desired Rectangle size
+ */
+
+/**
+ * \fn Rectangle::Rectangle(const Point &point1, const Point &point2)
+ * \brief Construct a Rectangle from two opposite corners
+ * \param[in] point1 One of corners of the rectangle
+ * \param[in] point2 The opposite corner of \a point1
+ */
+
+/**
+ * \var Rectangle::x
+ * \brief The horizontal coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \var Rectangle::y
+ * \brief The vertical coordinate of the rectangle's top-left corner
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ */
+
+/**
+ * \var Rectangle::width
+ * \brief The distance between the left and right sides
+ */
+
+/**
+ * \var Rectangle::height
+ * \brief The distance between the top and bottom sides
+ */
+
+/**
+ * \fn bool Rectangle::isNull() const
+ * \brief Check if the rectangle is null
+ * \return True if both the width and height are 0, or false otherwise
+ */
+
+/**
+ * \brief Assemble and return a string describing the rectangle
+ * \return A string describing the Rectangle
+ */
+const std::string Rectangle::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Retrieve the center point of this rectangle
+ * \return The center Point
+ */
+Point Rectangle::center() const
+{
+ return { x + static_cast<int>(width / 2), y + static_cast<int>(height / 2) };
+}
+
+/**
+ * \fn Size Rectangle::size() const
+ * \brief Retrieve the size of this rectangle
+ * \return The Rectangle size
+ */
+
+/**
+ * \fn Point Rectangle::topLeft() const
+ * \brief Retrieve the coordinates of the top left corner of this Rectangle
+ *
+ * The rectangle's top-left corner is the point with the smaller x and y values.
+ *
+ * \return The Rectangle's top left corner
+ */
+
+/**
+ * \brief Apply a non-uniform rational scaling in place to this Rectangle
+ * \param[in] numerator The numerators of the x and y scaling factors
+ * \param[in] denominator The denominators of the x and y scaling factors
+ *
+ * A non-uniform scaling is applied in place such that the resulting x
+ * coordinates are multiplied by numerator.width / denominator.width,
+ * and similarly for the y coordinates (using height in place of width).
+ *
+ * \return A reference to this object
+ */
+Rectangle &Rectangle::scaleBy(const Size &numerator, const Size &denominator)
+{
+ x = static_cast<int64_t>(x) * numerator.width / denominator.width;
+ y = static_cast<int64_t>(y) * numerator.height / denominator.height;
+ width = static_cast<uint64_t>(width) * numerator.width / denominator.width;
+ height = static_cast<uint64_t>(height) * numerator.height / denominator.height;
+
+ return *this;
+}
+
+/**
+ * \brief Translate this Rectangle in place by the given Point
+ * \param[in] point The amount to translate the Rectangle by
+ *
+ * The Rectangle is translated in the x-direction by the point's x coordinate
+ * and in the y-direction by the point's y coordinate.
+ *
+ * \return A reference to this object
+ */
+Rectangle &Rectangle::translateBy(const Point &point)
+{
+ x += point.x;
+ y += point.y;
+
+ return *this;
+}
+
+/**
+ * \brief Calculate the intersection of this Rectangle with another
+ * \param[in] bound The Rectangle that is intersected with this Rectangle
+ *
+ * This function calculates the standard intersection of two rectangles. If the
+ * rectangles do not overlap in either the x or y direction, then the size
+ * of that dimension in the result (its width or height) is set to zero. Even
+ * when one dimension is set to zero, note that the other dimension may still
+ * have a positive value if there was some overlap.
+ *
+ * \return A Rectangle that is the intersection of the input rectangles
+ */
+Rectangle Rectangle::boundedTo(const Rectangle &bound) const
+{
+ int topLeftX = std::max(x, bound.x);
+ int topLeftY = std::max(y, bound.y);
+ int bottomRightX = std::min<int>(x + width, bound.x + bound.width);
+ int bottomRightY = std::min<int>(y + height, bound.y + bound.height);
+
+ unsigned int newWidth = std::max(bottomRightX - topLeftX, 0);
+ unsigned int newHeight = std::max(bottomRightY - topLeftY, 0);
+
+ return { topLeftX, topLeftY, newWidth, newHeight };
+}
+
+/**
+ * \brief Enclose a Rectangle so as not to exceed another Rectangle
+ * \param[in] boundary The limit that the returned Rectangle will not exceed
+ *
+ * The Rectangle is modified so that it does not exceed the given \a boundary.
+ * This process involves translating the Rectangle if any of its edges
+ * lie beyond \a boundary, so that those edges then lie along the boundary
+ * instead.
+ *
+ * If either width or height are larger than \a boundary, then the returned
+ * Rectangle is clipped to be no larger. But other than this, the
+ * Rectangle is not clipped or reduced in size, merely translated.
+ *
+ * Note that this is not a conventional Rectangle intersection function
+ * which is provided by boundedTo().
+ *
+ * \return A Rectangle that does not extend beyond a boundary Rectangle
+ */
+Rectangle Rectangle::enclosedIn(const Rectangle &boundary) const
+{
+ /* We can't be bigger than the boundary rectangle. */
+ Rectangle result = boundedTo(Rectangle{ x, y, boundary.size() });
+
+ result.x = std::clamp<int>(result.x, boundary.x,
+ boundary.x + boundary.width - result.width);
+ result.y = std::clamp<int>(result.y, boundary.y,
+ boundary.y + boundary.height - result.height);
+
+ return result;
+}
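
To contrast boundedTo() with enclosedIn(), a sketch with illustrative values
where a crop window overflows the right and bottom edges of a 4056x3040
active area:

Rectangle activeArea(0, 0, 4056, 3040);
Rectangle crop(3500, 2500, 1000, 800);

/* Intersection: the overflowing part is clipped away. */
Rectangle clipped = crop.boundedTo(activeArea);  /* (3500, 2500)/556x540 */

/* Translation: the window keeps its size but is moved inside. */
Rectangle inside = crop.enclosedIn(activeArea);  /* (3056, 2240)/1000x800 */
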
+
+/**
+ * \brief Apply a non-uniform rational scaling to this Rectangle
+ * \param[in] numerator The numerators of the x and y scaling factors
+ * \param[in] denominator The denominators of the x and y scaling factors
+ *
+ * A non-uniform scaling is applied such that the resulting x
+ * coordinates are multiplied by numerator.width / denominator.width,
+ * and similarly for the y coordinates (using height in place of width).
+ *
+ * \return The non-uniformly scaled Rectangle
+ */
+Rectangle Rectangle::scaledBy(const Size &numerator, const Size &denominator) const
+{
+ int scaledX = static_cast<int64_t>(x) * numerator.width / denominator.width;
+ int scaledY = static_cast<int64_t>(y) * numerator.height / denominator.height;
+ unsigned int scaledWidth = static_cast<uint64_t>(width) * numerator.width / denominator.width;
+ unsigned int scaledHeight = static_cast<uint64_t>(height) * numerator.height / denominator.height;
+
+ return { scaledX, scaledY, scaledWidth, scaledHeight };
+}
+
+/**
+ * \brief Translate a Rectangle by the given amounts
+ * \param[in] point The amount to translate the Rectangle by
+ *
+ * The Rectangle is translated in the x-direction by the point's x coordinate
+ * and in the y-direction by the point's y coordinate.
+ *
+ * \return The translated Rectangle
+ */
+Rectangle Rectangle::translatedBy(const Point &point) const
+{
+ return { x + point.x, y + point.y, width, height };
+}
+
+/**
+ * \brief Transform a Rectangle from one reference rectangle to another
+ * \param[in] source The \a source reference rectangle
+ * \param[in] destination The \a destination reference rectangle
+ *
+ * The \a source and \a destination parameters describe two rectangles defined
+ * in different reference systems. The Rectangle is translated from the source
+ * reference system into the destination reference system.
+ *
+ * The typical use case for this function is to translate a selection
+ * rectangle specified in one reference system, for example the sensor's
+ * pixel array, into the same rectangle re-scaled and translated into a
+ * different reference system, for example the output frame on which the
+ * selection rectangle is applied.
+ *
+ * For example, consider a sensor with a resolution of 4040x2360 pixels, and
+ * assume that a rectangle of (100, 100)/3840x2160 (sensorReference) in sensor
+ * coordinates is mapped to a rectangle of (0, 0)/1920x1080 (displayReference)
+ * in display coordinates. This function can be used to transform an arbitrary
+ * rectangle from display coordinates to sensor coordinates or vice versa:
+ *
+ * \code{.cpp}
+ * Rectangle sensorReference(100, 100, 3840, 2160);
+ * Rectangle displayReference(0, 0, 1920, 1080);
+ *
+ * // Bottom right quarter in sensor coordinates
+ * Rectangle sensorRect(2020, 1180, 1920, 1080);
+ * Rectangle displayRect = sensorRect.transformedBetween(sensorReference, displayReference);
+ * // displayRect is now (960, 540)/960x540
+ *
+ * // Transformation back to sensor coordinates
+ * sensorRect = displayRect.transformedBetween(displayReference, sensorReference);
+ * \endcode
+ *
+ * \return The Rectangle translated and scaled into the \a destination
+ * reference system
+ */
+Rectangle Rectangle::transformedBetween(const Rectangle &source,
+ const Rectangle &destination) const
+{
+ Rectangle r;
+ double sx = static_cast<double>(destination.width) / source.width;
+ double sy = static_cast<double>(destination.height) / source.height;
+
+ r.x = static_cast<int>((x - source.x) * sx) + destination.x;
+ r.y = static_cast<int>((y - source.y) * sy) + destination.y;
+ r.width = static_cast<int>(width * sx);
+ r.height = static_cast<int>(height * sy);
+
+ return r;
+}
+
+/**
+ * \brief Compare rectangles for equality
+ * \return True if the two rectangles are equal, false otherwise
+ */
+bool operator==(const Rectangle &lhs, const Rectangle &rhs)
+{
+ return lhs.x == rhs.x && lhs.y == rhs.y &&
+ lhs.width == rhs.width && lhs.height == rhs.height;
+}
+
+/**
+ * \fn bool operator!=(const Rectangle &lhs, const Rectangle &rhs)
+ * \brief Compare rectangles for inequality
+ * \return True if the two rectangles are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a Rectangle into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The rectangle
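+ *
+ * The rectangle is written in the form "(x, y)/widthxheight". For example:
+ *
+ * \code{.cpp}
+ * Rectangle r(100, 100, 1920, 1080);
+ * std::cout << r;  // prints "(100, 100)/1920x1080"
+ * \endcode
+ *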
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Rectangle &r)
+{
+ out << "(" << r.x << ", " << r.y << ")/" << r.width << "x" << r.height;
+ return out;
+}
+
} /* namespace libcamera */
diff --git a/src/libcamera/include/byte_stream_buffer.h b/src/libcamera/include/byte_stream_buffer.h
deleted file mode 100644
index b3aaa8b9..00000000
--- a/src/libcamera/include/byte_stream_buffer.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * byte_stream_buffer.h - Byte stream buffer
- */
-#ifndef __LIBCAMERA_BYTE_STREAM_BUFFER_H__
-#define __LIBCAMERA_BYTE_STREAM_BUFFER_H__
-
-#include <stddef.h>
-#include <stdint.h>
-#include <type_traits>
-
-#include <libcamera/span.h>
-
-namespace libcamera {
-
-class ByteStreamBuffer
-{
-public:
- ByteStreamBuffer(const uint8_t *base, size_t size);
- ByteStreamBuffer(uint8_t *base, size_t size);
- ByteStreamBuffer(ByteStreamBuffer &&other);
- ByteStreamBuffer &operator=(ByteStreamBuffer &&other);
-
- const uint8_t *base() const { return base_; }
- uint32_t offset() const { return (write_ ? write_ : read_) - base_; }
- size_t size() const { return size_; }
- bool overflow() const { return overflow_; }
-
- ByteStreamBuffer carveOut(size_t size);
- int skip(size_t size);
-
- template<typename T>
- int read(T *t)
- {
- return read(reinterpret_cast<uint8_t *>(t), sizeof(*t));
- }
-
- template<typename T>
- int read(const Span<T> &data)
- {
- return read(reinterpret_cast<uint8_t *>(data.data()),
- data.size_bytes());
- }
-
- template<typename T>
- const std::remove_reference_t<T> *read(size_t count = 1)
- {
- using return_type = const std::remove_reference_t<T> *;
- return reinterpret_cast<return_type>(read(sizeof(T), count));
- }
-
- template<typename T>
- int write(const T *t)
- {
- return write(reinterpret_cast<const uint8_t *>(t), sizeof(*t));
- }
-
- template<typename T>
- int write(const Span<T> &data)
- {
- return write(reinterpret_cast<const uint8_t *>(data.data()),
- data.size_bytes());
- }
-
-private:
- ByteStreamBuffer(const ByteStreamBuffer &other) = delete;
- ByteStreamBuffer &operator=(const ByteStreamBuffer &other) = delete;
-
- void setOverflow();
-
- int read(uint8_t *data, size_t size);
- const uint8_t *read(size_t size, size_t count);
- int write(const uint8_t *data, size_t size);
-
- ByteStreamBuffer *parent_;
-
- const uint8_t *base_;
- size_t size_;
- bool overflow_;
-
- const uint8_t *read_;
- uint8_t *write_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_BYTE_STREAM_BUFFER_H__ */
diff --git a/src/libcamera/include/camera_controls.h b/src/libcamera/include/camera_controls.h
deleted file mode 100644
index 265c1fe3..00000000
--- a/src/libcamera/include/camera_controls.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_controls.h - Camera controls
- */
-#ifndef __LIBCAMERA_CAMERA_CONTROLS_H__
-#define __LIBCAMERA_CAMERA_CONTROLS_H__
-
-#include "control_validator.h"
-
-namespace libcamera {
-
-class Camera;
-
-class CameraControlValidator final : public ControlValidator
-{
-public:
- CameraControlValidator(Camera *camera);
-
- const std::string &name() const override;
- bool validate(unsigned int id) const override;
-
-private:
- Camera *camera_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CAMERA_CONTROLS_H__ */
diff --git a/src/libcamera/include/camera_sensor.h b/src/libcamera/include/camera_sensor.h
deleted file mode 100644
index 99cff981..00000000
--- a/src/libcamera/include/camera_sensor.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * camera_sensor.h - A camera sensor
- */
-#ifndef __LIBCAMERA_CAMERA_SENSOR_H__
-#define __LIBCAMERA_CAMERA_SENSOR_H__
-
-#include <string>
-#include <vector>
-
-#include <libcamera/controls.h>
-#include <libcamera/geometry.h>
-
-#include "log.h"
-
-namespace libcamera {
-
-class MediaEntity;
-class V4L2Subdevice;
-
-struct V4L2SubdeviceFormat;
-
-class CameraSensor : protected Loggable
-{
-public:
- explicit CameraSensor(const MediaEntity *entity);
- ~CameraSensor();
-
- CameraSensor(const CameraSensor &) = delete;
- CameraSensor &operator=(const CameraSensor &) = delete;
-
- int init();
-
- const MediaEntity *entity() const { return entity_; }
- const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
- const std::vector<Size> &sizes() const { return sizes_; }
- const Size &resolution() const;
-
- V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
- const Size &size) const;
- int setFormat(V4L2SubdeviceFormat *format);
-
- const ControlInfoMap &controls() const;
- int getControls(ControlList *ctrls);
- int setControls(ControlList *ctrls);
-
- const ControlList &properties() const { return properties_; }
-
-protected:
- std::string logPrefix() const;
-
-private:
- const MediaEntity *entity_;
- V4L2Subdevice *subdev_;
-
- std::vector<unsigned int> mbusCodes_;
- std::vector<Size> sizes_;
-
- ControlList properties_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CAMERA_SENSOR_H__ */
diff --git a/src/libcamera/include/control_serializer.h b/src/libcamera/include/control_serializer.h
deleted file mode 100644
index 99bacd92..00000000
--- a/src/libcamera/include/control_serializer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * control_serializer.h - Control (de)serializer
- */
-#ifndef __LIBCAMERA_CONTROL_SERIALIZER_H__
-#define __LIBCAMERA_CONTROL_SERIALIZER_H__
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <libcamera/controls.h>
-
-namespace libcamera {
-
-class ByteStreamBuffer;
-
-class ControlSerializer
-{
-public:
- ControlSerializer();
-
- void reset();
-
- static size_t binarySize(const ControlInfoMap &infoMap);
- static size_t binarySize(const ControlList &list);
-
- int serialize(const ControlInfoMap &infoMap, ByteStreamBuffer &buffer);
- int serialize(const ControlList &list, ByteStreamBuffer &buffer);
-
- template<typename T>
- T deserialize(ByteStreamBuffer &buffer);
-
-private:
- static size_t binarySize(const ControlValue &value);
- static size_t binarySize(const ControlInfo &info);
-
- static void store(const ControlValue &value, ByteStreamBuffer &buffer);
- static void store(const ControlInfo &info, ByteStreamBuffer &buffer);
-
- ControlValue loadControlValue(ControlType type, ByteStreamBuffer &buffer,
- bool isArray = false, unsigned int count = 1);
- ControlInfo loadControlInfo(ControlType type, ByteStreamBuffer &buffer);
-
- unsigned int serial_;
- std::vector<std::unique_ptr<ControlId>> controlIds_;
- std::map<unsigned int, ControlInfoMap> infoMaps_;
- std::map<const ControlInfoMap *, unsigned int> infoMapHandles_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CONTROL_SERIALIZER_H__ */
diff --git a/src/libcamera/include/control_validator.h b/src/libcamera/include/control_validator.h
deleted file mode 100644
index f1c9110b..00000000
--- a/src/libcamera/include/control_validator.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * control_validator.h - Control validator
- */
-#ifndef __LIBCAMERA_CONTROL_VALIDATOR_H__
-#define __LIBCAMERA_CONTROL_VALIDATOR_H__
-
-#include <string>
-
-namespace libcamera {
-
-class ControlId;
-
-class ControlValidator
-{
-public:
- virtual ~ControlValidator() {}
-
- virtual const std::string &name() const = 0;
- virtual bool validate(unsigned int id) const = 0;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_CONTROL_VALIDATOR_H__ */
diff --git a/src/libcamera/include/device_enumerator.h b/src/libcamera/include/device_enumerator.h
deleted file mode 100644
index 433e357a..00000000
--- a/src/libcamera/include/device_enumerator.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * device_enumerator.h - API to enumerate and find media devices
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_H__
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-namespace libcamera {
-
-class MediaDevice;
-
-class DeviceMatch
-{
-public:
- DeviceMatch(const std::string &driver);
-
- void add(const std::string &entity);
-
- bool match(const MediaDevice *device) const;
-
-private:
- std::string driver_;
- std::vector<std::string> entities_;
-};
-
-class DeviceEnumerator
-{
-public:
- static std::unique_ptr<DeviceEnumerator> create();
-
- virtual ~DeviceEnumerator();
-
- virtual int init() = 0;
- virtual int enumerate() = 0;
-
- std::shared_ptr<MediaDevice> search(const DeviceMatch &dm);
-
-protected:
- std::unique_ptr<MediaDevice> createDevice(const std::string &deviceNode);
- void addDevice(std::unique_ptr<MediaDevice> &&media);
- void removeDevice(const std::string &deviceNode);
-
-private:
- std::vector<std::shared_ptr<MediaDevice>> devices_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_H__ */
diff --git a/src/libcamera/include/device_enumerator_sysfs.h b/src/libcamera/include/device_enumerator_sysfs.h
deleted file mode 100644
index 5a5c9b0f..00000000
--- a/src/libcamera/include/device_enumerator_sysfs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * device_enumerator_sysfs.h - sysfs-based device enumerator
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__
-
-#include <memory>
-#include <string>
-
-#include "device_enumerator.h"
-
-class MediaDevice;
-
-namespace libcamera {
-
-class DeviceEnumeratorSysfs final : public DeviceEnumerator
-{
-public:
- int init();
- int enumerate();
-
-private:
- int populateMediaDevice(MediaDevice *media);
- std::string lookupDeviceNode(int major, int minor);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_SYSFS_H__ */
diff --git a/src/libcamera/include/device_enumerator_udev.h b/src/libcamera/include/device_enumerator_udev.h
deleted file mode 100644
index fdce4520..00000000
--- a/src/libcamera/include/device_enumerator_udev.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018-2019, Google Inc.
- *
- * device_enumerator_udev.h - udev-based device enumerator
- */
-#ifndef __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__
-#define __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <sys/types.h>
-
-#include "device_enumerator.h"
-
-struct udev;
-struct udev_device;
-struct udev_monitor;
-
-namespace libcamera {
-
-class EventNotifier;
-class MediaDevice;
-class MediaEntity;
-
-class DeviceEnumeratorUdev : public DeviceEnumerator
-{
-public:
- DeviceEnumeratorUdev();
- ~DeviceEnumeratorUdev();
-
- int init() final;
- int enumerate() final;
-
-private:
- struct udev *udev_;
- struct udev_monitor *monitor_;
- EventNotifier *notifier_;
-
- using DependencyMap = std::map<dev_t, std::list<MediaEntity *>>;
-
- struct MediaDeviceDeps {
- MediaDeviceDeps(std::unique_ptr<MediaDevice> &&media,
- DependencyMap &&deps)
- : media_(std::move(media)), deps_(std::move(deps))
- {
- }
-
- bool operator==(const MediaDeviceDeps &other) const
- {
- return media_ == other.media_;
- }
-
- std::unique_ptr<MediaDevice> media_;
- DependencyMap deps_;
- };
-
- std::set<dev_t> orphans_;
- std::list<MediaDeviceDeps> pending_;
- std::map<dev_t, MediaDeviceDeps *> devMap_;
-
- int addUdevDevice(struct udev_device *dev);
- int populateMediaDevice(MediaDevice *media, DependencyMap *deps);
- std::string lookupDeviceNode(dev_t devnum);
-
- int addV4L2Device(dev_t devnum);
- void udevNotify(EventNotifier *notifier);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_DEVICE_ENUMERATOR_UDEV_H__ */
diff --git a/src/libcamera/include/event_dispatcher_poll.h b/src/libcamera/include/event_dispatcher_poll.h
deleted file mode 100644
index 1f073861..00000000
--- a/src/libcamera/include/event_dispatcher_poll.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * event_dispatcher_poll.h - Poll-based event dispatcher
- */
-#ifndef __LIBCAMERA_EVENT_DISPATCHER_POLL_H__
-#define __LIBCAMERA_EVENT_DISPATCHER_POLL_H__
-
-#include <list>
-#include <map>
-#include <vector>
-
-#include <libcamera/event_dispatcher.h>
-
-struct pollfd;
-
-namespace libcamera {
-
-class EventNotifier;
-class Timer;
-
-class EventDispatcherPoll final : public EventDispatcher
-{
-public:
- EventDispatcherPoll();
- ~EventDispatcherPoll();
-
- void registerEventNotifier(EventNotifier *notifier);
- void unregisterEventNotifier(EventNotifier *notifier);
-
- void registerTimer(Timer *timer);
- void unregisterTimer(Timer *timer);
-
- void processEvents();
- void interrupt();
-
-private:
- struct EventNotifierSetPoll {
- short events() const;
- EventNotifier *notifiers[3];
- };
-
- std::map<int, EventNotifierSetPoll> notifiers_;
- std::list<Timer *> timers_;
- int eventfd_;
-
- bool processingEvents_;
-
- int poll(std::vector<struct pollfd> *pollfds);
- void processInterrupt(const struct pollfd &pfd);
- void processNotifiers(const std::vector<struct pollfd> &pollfds);
- void processTimers();
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_EVENT_DISPATCHER_POLL_H__ */
diff --git a/src/libcamera/include/formats.h b/src/libcamera/include/formats.h
deleted file mode 100644
index f43bc8c0..00000000
--- a/src/libcamera/include/formats.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * formats.h - libcamera image formats
- */
-
-#ifndef __LIBCAMERA_FORMATS_H__
-#define __LIBCAMERA_FORMATS_H__
-
-#include <map>
-#include <vector>
-
-#include <libcamera/geometry.h>
-
-namespace libcamera {
-
-class ImageFormats
-{
-public:
- int addFormat(unsigned int format, const std::vector<SizeRange> &sizes);
-
- bool isEmpty() const;
- std::vector<unsigned int> formats() const;
- const std::vector<SizeRange> &sizes(unsigned int format) const;
- const std::map<unsigned int, std::vector<SizeRange>> &data() const;
-
-private:
- std::map<unsigned int, std::vector<SizeRange>> data_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_FORMATS_H__ */
diff --git a/src/libcamera/include/ipa_context_wrapper.h b/src/libcamera/include/ipa_context_wrapper.h
deleted file mode 100644
index c9e19412..00000000
--- a/src/libcamera/include/ipa_context_wrapper.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_context_wrapper.h - Image Processing Algorithm context wrapper
- */
-#ifndef __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__
-#define __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__
-
-#include <ipa/ipa_interface.h>
-
-#include "control_serializer.h"
-
-namespace libcamera {
-
-class IPAContextWrapper final : public IPAInterface
-{
-public:
- IPAContextWrapper(struct ipa_context *context);
- ~IPAContextWrapper();
-
- int init() override;
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override;
-
- void mapBuffers(const std::vector<IPABuffer> &buffers) override;
- void unmapBuffers(const std::vector<unsigned int> &ids) override;
-
- virtual void processEvent(const IPAOperationData &data) override;
-
-private:
- static void queue_frame_action(void *ctx, unsigned int frame,
- struct ipa_operation_data &data);
- static const struct ipa_callback_ops callbacks_;
-
- void doQueueFrameAction(unsigned int frame,
- const IPAOperationData &data);
-
- struct ipa_context *ctx_;
- IPAInterface *intf_;
-
- ControlSerializer serializer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_CONTEXT_WRAPPER_H__ */
diff --git a/src/libcamera/include/ipa_manager.h b/src/libcamera/include/ipa_manager.h
deleted file mode 100644
index 467658e4..00000000
--- a/src/libcamera/include/ipa_manager.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_manager.h - Image Processing Algorithm module manager
- */
-#ifndef __LIBCAMERA_IPA_MANAGER_H__
-#define __LIBCAMERA_IPA_MANAGER_H__
-
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "ipa_module.h"
-#include "pipeline_handler.h"
-
-namespace libcamera {
-
-class IPAManager
-{
-public:
- static IPAManager *instance();
-
- std::unique_ptr<IPAInterface> createIPA(PipelineHandler *pipe,
- uint32_t maxVersion,
- uint32_t minVersion);
-
-private:
- std::vector<IPAModule *> modules_;
-
- IPAManager();
- ~IPAManager();
-
- void parseDir(const char *libDir, unsigned int maxDepth,
- std::vector<std::string> &files);
- unsigned int addDir(const char *libDir, unsigned int maxDepth = 0);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_MANAGER_H__ */
diff --git a/src/libcamera/include/ipa_module.h b/src/libcamera/include/ipa_module.h
deleted file mode 100644
index 2028b76a..00000000
--- a/src/libcamera/include/ipa_module.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_module.h - Image Processing Algorithm module
- */
-#ifndef __LIBCAMERA_IPA_MODULE_H__
-#define __LIBCAMERA_IPA_MODULE_H__
-
-#include <string>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "pipeline_handler.h"
-
-namespace libcamera {
-
-class IPAModule
-{
-public:
- explicit IPAModule(const std::string &libPath);
- ~IPAModule();
-
- bool isValid() const;
-
- const struct IPAModuleInfo &info() const;
- const std::string &path() const;
-
- bool load();
-
- struct ipa_context *createContext();
-
- bool match(PipelineHandler *pipe,
- uint32_t minVersion, uint32_t maxVersion) const;
-
- bool isOpenSource() const;
-
-private:
- struct IPAModuleInfo info_;
-
- std::string libPath_;
- bool valid_;
- bool loaded_;
-
- void *dlHandle_;
- typedef struct ipa_context *(*IPAIntfFactory)(void);
- IPAIntfFactory ipaCreate_;
-
- int loadIPAModuleInfo();
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_MODULE_H__ */
diff --git a/src/libcamera/include/ipa_proxy.h b/src/libcamera/include/ipa_proxy.h
deleted file mode 100644
index e696551a..00000000
--- a/src/libcamera/include/ipa_proxy.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy.h - Image Processing Algorithm proxy
- */
-#ifndef __LIBCAMERA_IPA_PROXY_H__
-#define __LIBCAMERA_IPA_PROXY_H__
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-
-#include "ipa_module.h"
-
-namespace libcamera {
-
-class IPAProxy : public IPAInterface
-{
-public:
- IPAProxy();
- ~IPAProxy();
-
- bool isValid() const { return valid_; }
-
-protected:
- std::string resolvePath(const std::string &file) const;
-
- bool valid_;
-};
-
-class IPAProxyFactory
-{
-public:
- IPAProxyFactory(const char *name);
- virtual ~IPAProxyFactory() {}
-
- virtual std::unique_ptr<IPAProxy> create(IPAModule *ipam) = 0;
-
- const std::string &name() const { return name_; }
-
- static void registerType(IPAProxyFactory *factory);
- static std::vector<IPAProxyFactory *> &factories();
-
-private:
- std::string name_;
-};
-
-#define REGISTER_IPA_PROXY(proxy) \
-class proxy##Factory final : public IPAProxyFactory \
-{ \
-public: \
- proxy##Factory() : IPAProxyFactory(#proxy) {} \
- std::unique_ptr<IPAProxy> create(IPAModule *ipam) \
- { \
- return std::make_unique<proxy>(ipam); \
- } \
-}; \
-static proxy##Factory global_##proxy##Factory;
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPA_PROXY_H__ */
diff --git a/src/libcamera/include/ipc_unixsocket.h b/src/libcamera/include/ipc_unixsocket.h
deleted file mode 100644
index 820d0561..00000000
--- a/src/libcamera/include/ipc_unixsocket.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipc_unixsocket.h - IPC mechanism based on Unix sockets
- */
-
-#ifndef __LIBCAMERA_IPC_UNIXSOCKET_H__
-#define __LIBCAMERA_IPC_UNIXSOCKET_H__
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <vector>
-
-#include <libcamera/event_notifier.h>
-
-namespace libcamera {
-
-class IPCUnixSocket
-{
-public:
- struct Payload {
- std::vector<uint8_t> data;
- std::vector<int32_t> fds;
- };
-
- IPCUnixSocket();
- ~IPCUnixSocket();
-
- int create();
- int bind(int fd);
- void close();
- bool isBound() const;
-
- int send(const Payload &payload);
- int receive(Payload *payload);
-
- Signal<IPCUnixSocket *> readyRead;
-
-private:
- struct Header {
- uint32_t data;
- uint8_t fds;
- };
-
- int sendData(const void *buffer, size_t length, const int32_t *fds, unsigned int num);
- int recvData(void *buffer, size_t length, int32_t *fds, unsigned int num);
-
- void dataNotifier(EventNotifier *notifier);
-
- int fd_;
- bool headerReceived_;
- struct Header header_;
- EventNotifier *notifier_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_IPC_UNIXSOCKET_H__ */
diff --git a/src/libcamera/include/log.h b/src/libcamera/include/log.h
deleted file mode 100644
index ee0b4069..00000000
--- a/src/libcamera/include/log.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * log.h - Logging infrastructure
- */
-#ifndef __LIBCAMERA_LOG_H__
-#define __LIBCAMERA_LOG_H__
-
-#include <chrono>
-#include <sstream>
-
-#include "utils.h"
-
-namespace libcamera {
-
-enum LogSeverity {
- LogInvalid = -1,
- LogDebug = 0,
- LogInfo,
- LogWarning,
- LogError,
- LogFatal,
-};
-
-class LogCategory
-{
-public:
- explicit LogCategory(const char *name);
- ~LogCategory();
-
- const char *name() const { return name_; }
- LogSeverity severity() const { return severity_; }
- void setSeverity(LogSeverity severity);
-
- static const LogCategory &defaultCategory();
-
-private:
- const char *name_;
- LogSeverity severity_;
-};
-
-#define LOG_DECLARE_CATEGORY(name) \
-extern const LogCategory &_LOG_CATEGORY(name)();
-
-#define LOG_DEFINE_CATEGORY(name) \
-const LogCategory &_LOG_CATEGORY(name)() \
-{ \
- static LogCategory category(#name); \
- return category; \
-}
-
-class LogMessage
-{
-public:
- LogMessage(const char *fileName, unsigned int line,
- LogSeverity severity);
- LogMessage(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity);
- LogMessage(const LogMessage &) = delete;
- LogMessage(LogMessage &&);
- ~LogMessage();
-
- std::ostream &stream() { return msgStream_; }
-
- const utils::time_point &timestamp() const { return timestamp_; }
- LogSeverity severity() const { return severity_; }
- const LogCategory &category() const { return category_; }
- const std::string &fileInfo() const { return fileInfo_; }
- const std::string msg() const { return msgStream_.str(); }
-
-private:
- void init(const char *fileName, unsigned int line);
-
- std::ostringstream msgStream_;
- const LogCategory &category_;
- LogSeverity severity_;
- utils::time_point timestamp_;
- std::string fileInfo_;
-};
-
-class Loggable
-{
-public:
- virtual ~Loggable();
-
-protected:
- virtual std::string logPrefix() const = 0;
-
- LogMessage _log(const char *file, unsigned int line,
- LogSeverity severity) const;
- LogMessage _log(const char *file, unsigned int line,
- const LogCategory &category,
- LogSeverity severity) const;
-};
-
-LogMessage _log(const char *file, unsigned int line, LogSeverity severity);
-LogMessage _log(const char *file, unsigned int line,
- const LogCategory &category, LogSeverity severity);
-
-#ifndef __DOXYGEN__
-#define _LOG_CATEGORY(name) logCategory##name
-
-#define _LOG1(severity) \
- _log(__FILE__, __LINE__, Log##severity).stream()
-#define _LOG2(category, severity) \
- _log(__FILE__, __LINE__, _LOG_CATEGORY(category)(), Log##severity).stream()
-
-/*
- * Expand the LOG() macro to _LOG1() or _LOG2() based on the number of
- * arguments.
- */
-#define _LOG_MACRO(_1, _2, NAME, ...) NAME
-#define LOG(...) _LOG_MACRO(__VA_ARGS__, _LOG2, _LOG1)(__VA_ARGS__)
-#else /* __DOXYGEN___ */
-#define LOG(category, severity)
-#endif /* __DOXYGEN__ */
-
-#ifndef NDEBUG
-#define ASSERT(condition) static_cast<void>(({ \
- if (!(condition)) \
- LOG(Fatal) << "assertion \"" #condition "\" failed"; \
-}))
-#else
-#define ASSERT(condition) static_cast<void>(false && (condition))
-#endif
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_LOG_H__ */
diff --git a/src/libcamera/include/media_device.h b/src/libcamera/include/media_device.h
deleted file mode 100644
index 44a59e70..00000000
--- a/src/libcamera/include/media_device.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * media_device.h - Media device handler
- */
-#ifndef __LIBCAMERA_MEDIA_DEVICE_H__
-#define __LIBCAMERA_MEDIA_DEVICE_H__
-
-#include <map>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-#include <libcamera/signal.h>
-
-#include "log.h"
-#include "media_object.h"
-
-namespace libcamera {
-
-class MediaDevice : protected Loggable
-{
-public:
- MediaDevice(const std::string &deviceNode);
- ~MediaDevice();
-
- bool acquire();
- void release();
- bool busy() const { return acquired_; }
-
- bool lock();
- void unlock();
-
- int populate();
- bool valid() const { return valid_; }
-
- const std::string driver() const { return driver_; }
- const std::string deviceNode() const { return deviceNode_; }
- const std::string model() const { return model_; }
-
- const std::vector<MediaEntity *> &entities() const { return entities_; }
- MediaEntity *getEntityByName(const std::string &name) const;
-
- MediaLink *link(const std::string &sourceName, unsigned int sourceIdx,
- const std::string &sinkName, unsigned int sinkIdx);
- MediaLink *link(const MediaEntity *source, unsigned int sourceIdx,
- const MediaEntity *sink, unsigned int sinkIdx);
- MediaLink *link(const MediaPad *source, const MediaPad *sink);
- int disableLinks();
-
- Signal<MediaDevice *> disconnected;
-
-protected:
- std::string logPrefix() const;
-
-private:
- std::string driver_;
- std::string deviceNode_;
- std::string model_;
- unsigned int version_;
-
- int fd_;
- bool valid_;
- bool acquired_;
- bool lockOwner_;
-
- int open();
- void close();
-
- std::map<unsigned int, MediaObject *> objects_;
- MediaObject *object(unsigned int id);
- bool addObject(MediaObject *object);
- void clear();
-
- std::vector<MediaEntity *> entities_;
-
- struct media_v2_interface *findInterface(const struct media_v2_topology &topology,
- unsigned int entityId);
- bool populateEntities(const struct media_v2_topology &topology);
- bool populatePads(const struct media_v2_topology &topology);
- bool populateLinks(const struct media_v2_topology &topology);
- void fixupEntityFlags(struct media_v2_entity *entity);
-
- friend int MediaLink::setEnabled(bool enable);
- int setupLink(const MediaLink *link, unsigned int flags);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MEDIA_DEVICE_H__ */
diff --git a/src/libcamera/include/media_object.h b/src/libcamera/include/media_object.h
deleted file mode 100644
index 748eafdc..00000000
--- a/src/libcamera/include/media_object.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * media_object.h - Media Device objects: entities, pads and links.
- */
-#ifndef __LIBCAMERA_MEDIA_OBJECT_H__
-#define __LIBCAMERA_MEDIA_OBJECT_H__
-
-#include <string>
-#include <vector>
-
-#include <linux/media.h>
-
-namespace libcamera {
-
-class MediaDevice;
-class MediaEntity;
-class MediaPad;
-
-class MediaObject
-{
-public:
- MediaDevice *device() { return dev_; }
- unsigned int id() const { return id_; }
-
-protected:
- friend class MediaDevice;
-
- MediaObject(MediaDevice *dev, unsigned int id)
- : dev_(dev), id_(id)
- {
- }
- virtual ~MediaObject() {}
-
- MediaDevice *dev_;
- unsigned int id_;
-};
-
-class MediaLink : public MediaObject
-{
-public:
- MediaPad *source() const { return source_; }
- MediaPad *sink() const { return sink_; }
- unsigned int flags() const { return flags_; }
- int setEnabled(bool enable);
-
-private:
- friend class MediaDevice;
-
- MediaLink(const struct media_v2_link *link,
- MediaPad *source, MediaPad *sink);
- MediaLink(const MediaLink &) = delete;
- ~MediaLink() {}
-
- MediaPad *source_;
- MediaPad *sink_;
- unsigned int flags_;
-};
-
-class MediaPad : public MediaObject
-{
-public:
- unsigned int index() const { return index_; }
- MediaEntity *entity() const { return entity_; }
- unsigned int flags() const { return flags_; }
- const std::vector<MediaLink *> &links() const { return links_; }
-
- void addLink(MediaLink *link);
-
-private:
- friend class MediaDevice;
-
- MediaPad(const struct media_v2_pad *pad, MediaEntity *entity);
- MediaPad(const MediaPad &) = delete;
- ~MediaPad();
-
- unsigned int index_;
- MediaEntity *entity_;
- unsigned int flags_;
-
- std::vector<MediaLink *> links_;
-};
-
-class MediaEntity : public MediaObject
-{
-public:
- const std::string &name() const { return name_; }
- unsigned int function() const { return function_; }
- unsigned int flags() const { return flags_; }
- const std::string &deviceNode() const { return deviceNode_; }
- unsigned int deviceMajor() const { return major_; }
- unsigned int deviceMinor() const { return minor_; }
-
- const std::vector<MediaPad *> &pads() const { return pads_; }
-
- const MediaPad *getPadByIndex(unsigned int index) const;
- const MediaPad *getPadById(unsigned int id) const;
-
- int setDeviceNode(const std::string &deviceNode);
-
-private:
- friend class MediaDevice;
-
- MediaEntity(MediaDevice *dev, const struct media_v2_entity *entity,
- unsigned int major = 0, unsigned int minor = 0);
- MediaEntity(const MediaEntity &) = delete;
- ~MediaEntity();
-
- std::string name_;
- unsigned int function_;
- unsigned int flags_;
- std::string deviceNode_;
- unsigned int major_;
- unsigned int minor_;
-
- std::vector<MediaPad *> pads_;
-
- void addPad(MediaPad *pad);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MEDIA_OBJECT_H__ */
diff --git a/src/libcamera/include/meson.build b/src/libcamera/include/meson.build
deleted file mode 100644
index 17e2bed9..00000000
--- a/src/libcamera/include/meson.build
+++ /dev/null
@@ -1,30 +0,0 @@
-libcamera_headers = files([
- 'byte_stream_buffer.h',
- 'camera_controls.h',
- 'camera_sensor.h',
- 'control_serializer.h',
- 'control_validator.h',
- 'device_enumerator.h',
- 'device_enumerator_sysfs.h',
- 'device_enumerator_udev.h',
- 'event_dispatcher_poll.h',
- 'formats.h',
- 'ipa_context_wrapper.h',
- 'ipa_manager.h',
- 'ipa_module.h',
- 'ipa_proxy.h',
- 'ipc_unixsocket.h',
- 'log.h',
- 'media_device.h',
- 'media_object.h',
- 'message.h',
- 'pipeline_handler.h',
- 'process.h',
- 'semaphore.h',
- 'thread.h',
- 'utils.h',
- 'v4l2_controls.h',
- 'v4l2_device.h',
- 'v4l2_subdevice.h',
- 'v4l2_videodevice.h',
-])
diff --git a/src/libcamera/include/message.h b/src/libcamera/include/message.h
deleted file mode 100644
index 8e8b013d..00000000
--- a/src/libcamera/include/message.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * message.h - Message queue support
- */
-#ifndef __LIBCAMERA_MESSAGE_H__
-#define __LIBCAMERA_MESSAGE_H__
-
-#include <atomic>
-
-#include <libcamera/bound_method.h>
-
-namespace libcamera {
-
-class BoundMethodBase;
-class Object;
-class Semaphore;
-class Thread;
-
-class Message
-{
-public:
- enum Type {
- None = 0,
- InvokeMessage = 1,
- ThreadMoveMessage = 2,
- UserMessage = 1000,
- };
-
- Message(Type type);
- virtual ~Message();
-
- Type type() const { return type_; }
- Object *receiver() const { return receiver_; }
-
- static Type registerMessageType();
-
-private:
- friend class Thread;
-
- Type type_;
- Object *receiver_;
-
- static std::atomic_uint nextUserType_;
-};
-
-class InvokeMessage : public Message
-{
-public:
- InvokeMessage(BoundMethodBase *method,
- std::shared_ptr<BoundMethodPackBase> pack,
- Semaphore *semaphore = nullptr,
- bool deleteMethod = false);
- ~InvokeMessage();
-
- Semaphore *semaphore() const { return semaphore_; }
-
- void invoke();
-
-private:
- BoundMethodBase *method_;
- std::shared_ptr<BoundMethodPackBase> pack_;
- Semaphore *semaphore_;
- bool deleteMethod_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_MESSAGE_H__ */
diff --git a/src/libcamera/include/pipeline_handler.h b/src/libcamera/include/pipeline_handler.h
deleted file mode 100644
index 3fcfeda4..00000000
--- a/src/libcamera/include/pipeline_handler.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * pipeline_handler.h - Pipeline handler infrastructure
- */
-#ifndef __LIBCAMERA_PIPELINE_HANDLER_H__
-#define __LIBCAMERA_PIPELINE_HANDLER_H__
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <sys/types.h>
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <libcamera/controls.h>
-#include <libcamera/object.h>
-#include <libcamera/stream.h>
-
-namespace libcamera {
-
-class Camera;
-class CameraConfiguration;
-class CameraManager;
-class DeviceEnumerator;
-class DeviceMatch;
-class FrameBuffer;
-class MediaDevice;
-class PipelineHandler;
-class Request;
-
-class CameraData
-{
-public:
- explicit CameraData(PipelineHandler *pipe)
- : pipe_(pipe)
- {
- }
- virtual ~CameraData() {}
-
- Camera *camera_;
- PipelineHandler *pipe_;
- std::list<Request *> queuedRequests_;
- ControlInfoMap controlInfo_;
- ControlList properties_;
- std::unique_ptr<IPAInterface> ipa_;
-
-private:
- CameraData(const CameraData &) = delete;
- CameraData &operator=(const CameraData &) = delete;
-};
-
-class PipelineHandler : public std::enable_shared_from_this<PipelineHandler>,
- public Object
-{
-public:
- PipelineHandler(CameraManager *manager);
- virtual ~PipelineHandler();
-
- virtual bool match(DeviceEnumerator *enumerator) = 0;
- MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
- const DeviceMatch &dm);
-
- bool lock();
- void unlock();
-
- const ControlInfoMap &controls(Camera *camera);
- const ControlList &properties(Camera *camera);
-
- virtual CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) = 0;
- virtual int configure(Camera *camera, CameraConfiguration *config) = 0;
-
- virtual int exportFrameBuffers(Camera *camera, Stream *stream,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
-
- virtual int start(Camera *camera) = 0;
- virtual void stop(Camera *camera) = 0;
-
- int queueRequest(Camera *camera, Request *request);
-
- bool completeBuffer(Camera *camera, Request *request,
- FrameBuffer *buffer);
- void completeRequest(Camera *camera, Request *request);
-
- const char *name() const { return name_; }
-
-protected:
- void registerCamera(std::shared_ptr<Camera> camera,
- std::unique_ptr<CameraData> data, dev_t devnum = 0);
- void hotplugMediaDevice(MediaDevice *media);
-
- virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
-
- CameraData *cameraData(const Camera *camera);
-
- CameraManager *manager_;
-
-private:
- void mediaDeviceDisconnected(MediaDevice *media);
- virtual void disconnect();
-
- std::vector<std::shared_ptr<MediaDevice>> mediaDevices_;
- std::vector<std::weak_ptr<Camera>> cameras_;
- std::map<const Camera *, std::unique_ptr<CameraData>> cameraData_;
-
- const char *name_;
-
- friend class PipelineHandlerFactory;
-};
-
-class PipelineHandlerFactory
-{
-public:
- PipelineHandlerFactory(const char *name);
- virtual ~PipelineHandlerFactory() {}
-
- std::shared_ptr<PipelineHandler> create(CameraManager *manager);
-
- const std::string &name() const { return name_; }
-
- static void registerType(PipelineHandlerFactory *factory);
- static std::vector<PipelineHandlerFactory *> &factories();
-
-private:
- virtual PipelineHandler *createInstance(CameraManager *manager) = 0;
-
- std::string name_;
-};
-
-#define REGISTER_PIPELINE_HANDLER(handler) \
-class handler##Factory final : public PipelineHandlerFactory \
-{ \
-public: \
- handler##Factory() : PipelineHandlerFactory(#handler) {} \
- \
-private: \
- PipelineHandler *createInstance(CameraManager *manager) \
- { \
- return new handler(manager); \
- } \
-}; \
-static handler##Factory global_##handler##Factory;
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_PIPELINE_HANDLER_H__ */
diff --git a/src/libcamera/include/process.h b/src/libcamera/include/process.h
deleted file mode 100644
index d322fce1..00000000
--- a/src/libcamera/include/process.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * process.h - Process object
- */
-#ifndef __LIBCAMERA_PROCESS_H__
-#define __LIBCAMERA_PROCESS_H__
-
-#include <string>
-#include <vector>
-
-#include <libcamera/event_notifier.h>
-
-namespace libcamera {
-
-class Process final
-{
-public:
- enum ExitStatus {
- NotExited,
- NormalExit,
- SignalExit,
- };
-
- Process();
- ~Process();
-
- int start(const std::string &path,
- const std::vector<std::string> &args = std::vector<std::string>(),
- const std::vector<int> &fds = std::vector<int>());
-
- ExitStatus exitStatus() const { return exitStatus_; }
- int exitCode() const { return exitCode_; }
-
- void kill();
-
- Signal<Process *, enum ExitStatus, int> finished;
-
-private:
- void closeAllFdsExcept(const std::vector<int> &fds);
- int isolate();
- void died(int wstatus);
-
- pid_t pid_;
- bool running_;
- enum ExitStatus exitStatus_;
- int exitCode_;
-
- friend class ProcessManager;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_PROCESS_H__ */
diff --git a/src/libcamera/include/semaphore.h b/src/libcamera/include/semaphore.h
deleted file mode 100644
index c6b28653..00000000
--- a/src/libcamera/include/semaphore.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * semaphore.h - General-purpose counting semaphore
- */
-#ifndef __LIBCAMERA_SEMAPHORE_H__
-#define __LIBCAMERA_SEMAPHORE_H__
-
-#include <condition_variable>
-
-#include "thread.h"
-
-namespace libcamera {
-
-class Semaphore
-{
-public:
- Semaphore(unsigned int n = 0);
-
- unsigned int available();
- void acquire(unsigned int n = 1);
- bool tryAcquire(unsigned int n = 1);
- void release(unsigned int n = 1);
-
-private:
- Mutex mutex_;
- std::condition_variable cv_;
- unsigned int available_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_SEMAPHORE_H__ */
diff --git a/src/libcamera/include/thread.h b/src/libcamera/include/thread.h
deleted file mode 100644
index d700f111..00000000
--- a/src/libcamera/include/thread.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * thread.h - Thread support
- */
-#ifndef __LIBCAMERA_THREAD_H__
-#define __LIBCAMERA_THREAD_H__
-
-#include <memory>
-#include <mutex>
-#include <sys/types.h>
-#include <thread>
-
-#include <libcamera/signal.h>
-
-#include "utils.h"
-
-namespace libcamera {
-
-class EventDispatcher;
-class Message;
-class Object;
-class ThreadData;
-class ThreadMain;
-
-using Mutex = std::mutex;
-using MutexLocker = std::unique_lock<std::mutex>;
-
-class Thread
-{
-public:
- Thread();
- virtual ~Thread();
-
- void start();
- void exit(int code = 0);
- bool wait(utils::duration duration = utils::duration::max());
-
- bool isRunning();
-
- Signal<Thread *> finished;
-
- static Thread *current();
- static pid_t currentId();
-
- EventDispatcher *eventDispatcher();
- void setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher);
-
- void dispatchMessages();
-
-protected:
- int exec();
- virtual void run();
-
-private:
- void startThread();
- void finishThread();
-
- void postMessage(std::unique_ptr<Message> msg, Object *receiver);
- void removeMessages(Object *receiver);
-
- friend class Object;
- friend class ThreadData;
- friend class ThreadMain;
-
- void moveObject(Object *object);
- void moveObject(Object *object, ThreadData *currentData,
- ThreadData *targetData);
-
- std::thread thread_;
- ThreadData *data_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_THREAD_H__ */
diff --git a/src/libcamera/include/utils.h b/src/libcamera/include/utils.h
deleted file mode 100644
index cfa620f2..00000000
--- a/src/libcamera/include/utils.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * utils.h - Miscellaneous utility functions
- */
-#ifndef __LIBCAMERA_UTILS_H__
-#define __LIBCAMERA_UTILS_H__
-
-#include <algorithm>
-#include <chrono>
-#include <memory>
-#include <ostream>
-#include <string>
-#include <string.h>
-#include <sys/time.h>
-
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-
-#ifndef __DOXYGEN__
-
-/* uClibc and uClibc-ng don't provide O_TMPFILE */
-#ifndef O_TMPFILE
-#define O_TMPFILE (020000000 | O_DIRECTORY)
-#endif
-
-#endif
-
-namespace libcamera {
-
-namespace utils {
-
-const char *basename(const char *path);
-
-char *secure_getenv(const char *name);
-std::string dirname(const std::string &path);
-
-template<class InputIt1, class InputIt2>
-unsigned int set_overlap(InputIt1 first1, InputIt1 last1,
- InputIt2 first2, InputIt2 last2)
-{
- unsigned int count = 0;
-
- while (first1 != last1 && first2 != last2) {
- if (*first1 < *first2) {
- ++first1;
- } else {
- if (!(*first2 < *first1))
- count++;
- ++first2;
- }
- }
-
- return count;
-}
-
-/* C++11 doesn't provide std::clamp */
-template <typename T>
-const T& clamp(const T& v, const T& lo, const T& hi)
-{
- return std::max(lo, std::min(v, hi));
-}
-
-using clock = std::chrono::steady_clock;
-using duration = std::chrono::steady_clock::duration;
-using time_point = std::chrono::steady_clock::time_point;
-
-struct timespec duration_to_timespec(const duration &value);
-std::string time_point_to_string(const time_point &time);
-
-#ifndef __DOXYGEN__
-struct _hex {
- uint64_t v;
- unsigned int w;
-};
-
-std::basic_ostream<char, std::char_traits<char>> &
-operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h);
-#endif
-
-template<typename T>
-_hex hex(T value, unsigned int width = 0);
-
-#ifndef __DOXYGEN__
-template<>
-inline _hex hex<int32_t>(int32_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 8 };
-}
-
-template<>
-inline _hex hex<uint32_t>(uint32_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 8 };
-}
-
-template<>
-inline _hex hex<int64_t>(int64_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 16 };
-}
-
-template<>
-inline _hex hex<uint64_t>(uint64_t value, unsigned int width)
-{
- return { static_cast<uint64_t>(value), width ? width : 16 };
-}
-#endif
-
-size_t strlcpy(char *dst, const char *src, size_t size);
-
-namespace details {
-
-class StringSplitter
-{
-public:
- StringSplitter(const std::string &str, const std::string &delim);
-
- class iterator
- {
- public:
- iterator(const StringSplitter *ss, std::string::size_type pos);
-
- iterator &operator++();
- std::string operator*() const;
- bool operator!=(const iterator &other) const;
-
- private:
- const StringSplitter *ss_;
- std::string::size_type pos_;
- std::string::size_type next_;
- };
-
- iterator begin() const;
- iterator end() const;
-
-private:
- std::string str_;
- std::string delim_;
-};
-
-} /* namespace details */
-
-details::StringSplitter split(const std::string &str, const std::string &delim);
-
-std::string libcameraBuildPath();
-
-} /* namespace utils */
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_UTILS_H__ */
diff --git a/src/libcamera/include/v4l2_controls.h b/src/libcamera/include/v4l2_controls.h
deleted file mode 100644
index cffe9efd..00000000
--- a/src/libcamera/include/v4l2_controls.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_controls.h - V4L2 Controls Support
- */
-
-#ifndef __LIBCAMERA_V4L2_CONTROLS_H__
-#define __LIBCAMERA_V4L2_CONTROLS_H__
-
-#include <linux/videodev2.h>
-
-#include <libcamera/controls.h>
-
-namespace libcamera {
-
-class V4L2ControlId : public ControlId
-{
-public:
- V4L2ControlId(const struct v4l2_query_ext_ctrl &ctrl);
-};
-
-class V4L2ControlInfo : public ControlInfo
-{
-public:
- V4L2ControlInfo(const struct v4l2_query_ext_ctrl &ctrl);
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_CONTROLS_H__ */
diff --git a/src/libcamera/include/v4l2_device.h b/src/libcamera/include/v4l2_device.h
deleted file mode 100644
index ce8edd98..00000000
--- a/src/libcamera/include/v4l2_device.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_device.h - Common base for V4L2 video devices and subdevices
- */
-#ifndef __LIBCAMERA_V4L2_DEVICE_H__
-#define __LIBCAMERA_V4L2_DEVICE_H__
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include <linux/videodev2.h>
-
-#include "log.h"
-#include "v4l2_controls.h"
-
-namespace libcamera {
-
-class V4L2Device : protected Loggable
-{
-public:
- void close();
- bool isOpen() const { return fd_ != -1; }
-
- const ControlInfoMap &controls() const { return controls_; }
-
- int getControls(ControlList *ctrls);
- int setControls(ControlList *ctrls);
-
- const std::string &deviceNode() const { return deviceNode_; }
-
-protected:
- V4L2Device(const std::string &deviceNode);
- ~V4L2Device();
-
- int open(unsigned int flags);
- int setFd(int fd);
-
- int ioctl(unsigned long request, void *argp);
-
- int fd() { return fd_; }
-
-private:
- void listControls();
- void updateControls(ControlList *ctrls,
- const struct v4l2_ext_control *v4l2Ctrls,
- unsigned int count);
-
- std::map<unsigned int, struct v4l2_query_ext_ctrl> controlInfo_;
- std::vector<std::unique_ptr<V4L2ControlId>> controlIds_;
- ControlInfoMap controls_;
- std::string deviceNode_;
- int fd_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_DEVICE_H__ */
diff --git a/src/libcamera/include/v4l2_subdevice.h b/src/libcamera/include/v4l2_subdevice.h
deleted file mode 100644
index 9c077674..00000000
--- a/src/libcamera/include/v4l2_subdevice.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_subdevice.h - V4L2 Subdevice
- */
-#ifndef __LIBCAMERA_V4L2_SUBDEVICE_H__
-#define __LIBCAMERA_V4L2_SUBDEVICE_H__
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include <libcamera/geometry.h>
-
-#include "formats.h"
-#include "log.h"
-#include "media_object.h"
-#include "v4l2_device.h"
-
-namespace libcamera {
-
-class MediaDevice;
-
-struct V4L2SubdeviceFormat {
- uint32_t mbus_code;
- Size size;
-
- const std::string toString() const;
-};
-
-class V4L2Subdevice : public V4L2Device
-{
-public:
- explicit V4L2Subdevice(const MediaEntity *entity);
- V4L2Subdevice(const V4L2Subdevice &) = delete;
- V4L2Subdevice &operator=(const V4L2Subdevice &) = delete;
- ~V4L2Subdevice();
-
- int open();
-
- const MediaEntity *entity() const { return entity_; }
-
- int setCrop(unsigned int pad, Rectangle *rect);
- int setCompose(unsigned int pad, Rectangle *rect);
-
- ImageFormats formats(unsigned int pad);
-
- int getFormat(unsigned int pad, V4L2SubdeviceFormat *format);
- int setFormat(unsigned int pad, V4L2SubdeviceFormat *format);
-
- static V4L2Subdevice *fromEntityName(const MediaDevice *media,
- const std::string &entity);
-
-protected:
- std::string logPrefix() const;
-
-private:
- std::vector<unsigned int> enumPadCodes(unsigned int pad);
- std::vector<SizeRange> enumPadSizes(unsigned int pad,
- unsigned int code);
-
- int setSelection(unsigned int pad, unsigned int target,
- Rectangle *rect);
-
- const MediaEntity *entity_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_SUBDEVICE_H__ */
diff --git a/src/libcamera/include/v4l2_videodevice.h b/src/libcamera/include/v4l2_videodevice.h
deleted file mode 100644
index 7d7c4a9e..00000000
--- a/src/libcamera/include/v4l2_videodevice.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_videodevice.h - V4L2 Video Device
- */
-#ifndef __LIBCAMERA_V4L2_VIDEODEVICE_H__
-#define __LIBCAMERA_V4L2_VIDEODEVICE_H__
-
-#include <atomic>
-#include <memory>
-#include <stdint.h>
-#include <string>
-#include <vector>
-
-#include <linux/videodev2.h>
-
-#include <libcamera/buffer.h>
-#include <libcamera/geometry.h>
-#include <libcamera/pixelformats.h>
-#include <libcamera/signal.h>
-
-#include "formats.h"
-#include "log.h"
-#include "v4l2_device.h"
-
-namespace libcamera {
-
-class EventNotifier;
-class FileDescriptor;
-class MediaDevice;
-class MediaEntity;
-
-struct V4L2Capability final : v4l2_capability {
- const char *driver() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::driver);
- }
- const char *card() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::card);
- }
- const char *bus_info() const
- {
- return reinterpret_cast<const char *>(v4l2_capability::bus_info);
- }
- unsigned int device_caps() const
- {
- return capabilities & V4L2_CAP_DEVICE_CAPS
- ? v4l2_capability::device_caps
- : v4l2_capability::capabilities;
- }
- bool isMultiplanar() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_VIDEO_M2M_MPLANE);
- }
- bool isCapture() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_META_CAPTURE);
- }
- bool isOutput() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_META_OUTPUT);
- }
- bool isVideo() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE);
- }
- bool isM2M() const
- {
- return device_caps() & (V4L2_CAP_VIDEO_M2M |
- V4L2_CAP_VIDEO_M2M_MPLANE);
- }
- bool isMeta() const
- {
- return device_caps() & (V4L2_CAP_META_CAPTURE |
- V4L2_CAP_META_OUTPUT);
- }
- bool isVideoCapture() const
- {
- return isVideo() && isCapture();
- }
- bool isVideoOutput() const
- {
- return isVideo() && isOutput();
- }
- bool isMetaCapture() const
- {
- return isMeta() && isCapture();
- }
- bool isMetaOutput() const
- {
- return isMeta() && isOutput();
- }
- bool hasStreaming() const
- {
- return device_caps() & V4L2_CAP_STREAMING;
- }
-};
-
-class V4L2BufferCache
-{
-public:
- V4L2BufferCache(unsigned int numEntries);
- V4L2BufferCache(const std::vector<std::unique_ptr<FrameBuffer>> &buffers);
- ~V4L2BufferCache();
-
- int get(const FrameBuffer &buffer);
- void put(unsigned int index);
-
-private:
- class Entry
- {
- public:
- Entry();
- Entry(bool free, uint64_t lastUsed, const FrameBuffer &buffer);
-
- bool operator==(const FrameBuffer &buffer) const;
-
- bool free;
- uint64_t lastUsed;
-
- private:
- struct Plane {
- Plane(const FrameBuffer::Plane &plane)
- : fd(plane.fd.fd()), length(plane.length)
- {
- }
-
- int fd;
- unsigned int length;
- };
-
- std::vector<Plane> planes_;
- };
-
- std::atomic<uint64_t> lastUsedCounter_;
- std::vector<Entry> cache_;
- /* \todo Expose the miss counter through an instrumentation API. */
- unsigned int missCounter_;
-};
-
-class V4L2PixelFormat
-{
-public:
- V4L2PixelFormat()
- : fourcc_(0)
- {
- }
-
- explicit V4L2PixelFormat(uint32_t fourcc)
- : fourcc_(fourcc)
- {
- }
-
- bool isValid() const { return fourcc_ != 0; }
- uint32_t fourcc() const { return fourcc_; }
- operator uint32_t() const { return fourcc_; }
-
- std::string toString() const;
-
-private:
- uint32_t fourcc_;
-};
-
-class V4L2DeviceFormat
-{
-public:
- V4L2PixelFormat fourcc;
- Size size;
-
- struct {
- uint32_t size;
- uint32_t bpl;
- } planes[3];
- unsigned int planesCount;
-
- const std::string toString() const;
-};
-
-class V4L2VideoDevice : public V4L2Device
-{
-public:
- explicit V4L2VideoDevice(const std::string &deviceNode);
- explicit V4L2VideoDevice(const MediaEntity *entity);
- V4L2VideoDevice(const V4L2VideoDevice &) = delete;
- ~V4L2VideoDevice();
-
- V4L2VideoDevice &operator=(const V4L2VideoDevice &) = delete;
-
- int open();
- int open(int handle, enum v4l2_buf_type type);
- void close();
-
- const char *driverName() const { return caps_.driver(); }
- const char *deviceName() const { return caps_.card(); }
- const char *busName() const { return caps_.bus_info(); }
-
- int getFormat(V4L2DeviceFormat *format);
- int setFormat(V4L2DeviceFormat *format);
- std::map<V4L2PixelFormat, std::vector<SizeRange>> formats();
-
- int setCrop(Rectangle *rect);
- int setCompose(Rectangle *rect);
-
- int allocateBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- int exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- int importBuffers(unsigned int count);
- int releaseBuffers();
-
- int queueBuffer(FrameBuffer *buffer);
- Signal<FrameBuffer *> bufferReady;
-
- int streamOn();
- int streamOff();
-
- static V4L2VideoDevice *fromEntityName(const MediaDevice *media,
- const std::string &entity);
-
- static PixelFormat toPixelFormat(V4L2PixelFormat v4l2Fourcc);
- V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat);
- static V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar);
-
-protected:
- std::string logPrefix() const;
-
-private:
- int getFormatMeta(V4L2DeviceFormat *format);
- int setFormatMeta(V4L2DeviceFormat *format);
-
- int getFormatMultiplane(V4L2DeviceFormat *format);
- int setFormatMultiplane(V4L2DeviceFormat *format);
-
- int getFormatSingleplane(V4L2DeviceFormat *format);
- int setFormatSingleplane(V4L2DeviceFormat *format);
-
- std::vector<V4L2PixelFormat> enumPixelformats();
- std::vector<SizeRange> enumSizes(V4L2PixelFormat pixelFormat);
-
- int setSelection(unsigned int target, Rectangle *rect);
-
- int requestBuffers(unsigned int count, enum v4l2_memory memoryType);
- int createBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers);
- std::unique_ptr<FrameBuffer> createBuffer(unsigned int index);
- FileDescriptor exportDmabufFd(unsigned int index, unsigned int plane);
-
- void bufferAvailable(EventNotifier *notifier);
- FrameBuffer *dequeueBuffer();
-
- V4L2Capability caps_;
-
- enum v4l2_buf_type bufferType_;
- enum v4l2_memory memoryType_;
-
- V4L2BufferCache *cache_;
- std::map<unsigned int, FrameBuffer *> queuedBuffers_;
-
- EventNotifier *fdEvent_;
-};
-
-class V4L2M2MDevice
-{
-public:
- V4L2M2MDevice(const std::string &deviceNode);
- ~V4L2M2MDevice();
-
- int open();
- void close();
-
- V4L2VideoDevice *output() { return output_; }
- V4L2VideoDevice *capture() { return capture_; }
-
-private:
- std::string deviceNode_;
-
- V4L2VideoDevice *output_;
- V4L2VideoDevice *capture_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_V4L2_VIDEODEVICE_H__ */
diff --git a/src/libcamera/ipa/meson.build b/src/libcamera/ipa/meson.build
new file mode 100644
index 00000000..ef73b3f9
--- /dev/null
+++ b/src/libcamera/ipa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_ipa_interfaces = []
+
+foreach file : ipa_mojom_files
+ # {pipeline}_ipa_interface.cpp
+ libcamera_ipa_interfaces += \
+ custom_target(input : file,
+ output : '@BASENAME@_ipa_interface.cpp',
+ command : [
+ mojom_docs_extractor,
+ '-o', '@OUTPUT@', '@INPUT@'
+ ])
+endforeach
diff --git a/src/libcamera/ipa_context_wrapper.cpp b/src/libcamera/ipa_context_wrapper.cpp
deleted file mode 100644
index 946a2fd8..00000000
--- a/src/libcamera/ipa_context_wrapper.cpp
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_context_wrapper.cpp - Image Processing Algorithm context wrapper
- */
-
-#include "ipa_context_wrapper.h"
-
-#include <vector>
-
-#include <libcamera/controls.h>
-
-#include "byte_stream_buffer.h"
-#include "utils.h"
-
-/**
- * \file ipa_context_wrapper.h
- * \brief Image Processing Algorithm context wrapper
- */
-
-namespace libcamera {
-
-/**
- * \class IPAContextWrapper
- * \brief Wrap an ipa_context and expose it as an IPAInterface
- *
- * The IPAContextWrapper class wraps an ipa_context, provided by an IPA module, and
- * exposes an IPAInterface. This mechanism is used for IPAs that are not
- * isolated in a separate process to allow direct calls from pipeline handlers
- * using the IPAInterface API instead of the lower-level ipa_context API.
- *
- * The IPAInterface methods are converted to the ipa_context API by translating
- * all C++ arguments into plain C structures or byte arrays that contain no
- * pointer, as required by the ipa_context API.
- */
-
-/**
- * \brief Construct an IPAContextWrapper instance that wraps the \a context
- * \param[in] context The IPA module context
- *
- * Ownership of the \a context is passed to the IPAContextWrapper. The context remains
- * valid for the whole lifetime of the wrapper and is destroyed automatically
- * with it.
- */
-IPAContextWrapper::IPAContextWrapper(struct ipa_context *context)
- : ctx_(context), intf_(nullptr)
-{
- if (!ctx_)
- return;
-
- bool forceCApi = !!utils::secure_getenv("LIBCAMERA_IPA_FORCE_C_API");
-
- if (!forceCApi && ctx_ && ctx_->ops->get_interface) {
- intf_ = reinterpret_cast<IPAInterface *>(ctx_->ops->get_interface(ctx_));
- intf_->queueFrameAction.connect(this, &IPAContextWrapper::doQueueFrameAction);
- return;
- }
-
- ctx_->ops->register_callbacks(ctx_, &IPAContextWrapper::callbacks_,
- this);
-}
-
-IPAContextWrapper::~IPAContextWrapper()
-{
- if (!ctx_)
- return;
-
- ctx_->ops->destroy(ctx_);
-}
-
-int IPAContextWrapper::init()
-{
- if (intf_)
- return intf_->init();
-
- if (!ctx_)
- return 0;
-
- ctx_->ops->init(ctx_);
-
- return 0;
-}
-
-void IPAContextWrapper::configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls)
-{
- if (intf_)
- return intf_->configure(streamConfig, entityControls);
-
- if (!ctx_)
- return;
-
- serializer_.reset();
-
- /* Translate the IPA stream configurations map. */
- struct ipa_stream c_streams[streamConfig.size()];
-
- unsigned int i = 0;
- for (const auto &stream : streamConfig) {
- struct ipa_stream *c_stream = &c_streams[i];
- unsigned int id = stream.first;
- const IPAStream &ipaStream = stream.second;
-
- c_stream->id = id;
- c_stream->pixel_format = ipaStream.pixelFormat;
- c_stream->width = ipaStream.size.width;
- c_stream->height = ipaStream.size.height;
-
- ++i;
- }
-
- /* Translate the IPA entity controls map. */
- struct ipa_control_info_map c_info_maps[entityControls.size()];
- std::vector<std::vector<uint8_t>> data(entityControls.size());
-
- i = 0;
- for (const auto &info : entityControls) {
- struct ipa_control_info_map &c_info_map = c_info_maps[i];
- unsigned int id = info.first;
- const ControlInfoMap &infoMap = info.second;
-
- size_t infoMapSize = serializer_.binarySize(infoMap);
- data[i].resize(infoMapSize);
- ByteStreamBuffer byteStream(data[i].data(), data[i].size());
- serializer_.serialize(infoMap, byteStream);
-
- c_info_map.id = id;
- c_info_map.data = byteStream.base();
- c_info_map.size = byteStream.size();
-
- ++i;
- }
-
- ctx_->ops->configure(ctx_, c_streams, streamConfig.size(),
- c_info_maps, entityControls.size());
-}
-
-void IPAContextWrapper::mapBuffers(const std::vector<IPABuffer> &buffers)
-{
- if (intf_)
- return intf_->mapBuffers(buffers);
-
- if (!ctx_)
- return;
-
- struct ipa_buffer c_buffers[buffers.size()];
-
- for (unsigned int i = 0; i < buffers.size(); ++i) {
- struct ipa_buffer &c_buffer = c_buffers[i];
- const IPABuffer &buffer = buffers[i];
- const std::vector<FrameBuffer::Plane> &planes = buffer.planes;
-
- c_buffer.id = buffer.id;
- c_buffer.num_planes = planes.size();
-
- for (unsigned int j = 0; j < planes.size(); ++j) {
- const FrameBuffer::Plane &plane = planes[j];
- c_buffer.planes[j].dmabuf = plane.fd.fd();
- c_buffer.planes[j].length = plane.length;
- }
- }
-
- ctx_->ops->map_buffers(ctx_, c_buffers, buffers.size());
-}
-
-void IPAContextWrapper::unmapBuffers(const std::vector<unsigned int> &ids)
-{
- if (intf_)
- return intf_->unmapBuffers(ids);
-
- if (!ctx_)
- return;
-
- ctx_->ops->unmap_buffers(ctx_, ids.data(), ids.size());
-}
-
-void IPAContextWrapper::processEvent(const IPAOperationData &data)
-{
- if (intf_)
- return intf_->processEvent(data);
-
- if (!ctx_)
- return;
-
- struct ipa_operation_data c_data;
- c_data.operation = data.operation;
- c_data.data = data.data.data();
- c_data.num_data = data.data.size();
-
- struct ipa_control_list control_lists[data.controls.size()];
- c_data.lists = control_lists;
- c_data.num_lists = data.controls.size();
-
- std::size_t listsSize = 0;
- for (const auto &list : data.controls)
- listsSize += serializer_.binarySize(list);
-
- std::vector<uint8_t> binaryData(listsSize);
- ByteStreamBuffer byteStreamBuffer(binaryData.data(), listsSize);
-
- unsigned int i = 0;
- for (const auto &list : data.controls) {
- struct ipa_control_list &c_list = control_lists[i];
- c_list.size = serializer_.binarySize(list);
- ByteStreamBuffer b = byteStreamBuffer.carveOut(c_list.size);
-
- serializer_.serialize(list, b);
-
- c_list.data = b.base();
-
- ++i;
- }
-
- ctx_->ops->process_event(ctx_, &c_data);
-}
-
-void IPAContextWrapper::doQueueFrameAction(unsigned int frame,
- const IPAOperationData &data)
-{
- IPAInterface::queueFrameAction.emit(frame, data);
-}
-
-void IPAContextWrapper::queue_frame_action(void *ctx, unsigned int frame,
- struct ipa_operation_data &data)
-{
- IPAContextWrapper *_this = static_cast<IPAContextWrapper *>(ctx);
- IPAOperationData opData;
-
- opData.operation = data.operation;
- for (unsigned int i = 0; i < data.num_data; ++i)
- opData.data.push_back(data.data[i]);
-
- for (unsigned int i = 0; i < data.num_lists; ++i) {
- const struct ipa_control_list &c_list = data.lists[i];
- ByteStreamBuffer b(c_list.data, c_list.size);
- opData.controls.push_back(_this->serializer_.deserialize<ControlList>(b));
- }
-
- _this->doQueueFrameAction(frame, opData);
-}
-
-#ifndef __DOXYGEN__
-/*
- * This construct confuses Doxygen and makes it believe that all members of
- * the operations structure are members of IPAContextWrapper. It must thus be
- * hidden.
- */
-const struct ipa_callback_ops IPAContextWrapper::callbacks_ = {
- .queue_frame_action = &IPAContextWrapper::queue_frame_action,
-};
-#endif
-
-} /* namespace libcamera */
diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp
index b1d14190..12d92ebe 100644
--- a/src/libcamera/ipa_controls.cpp
+++ b/src/libcamera/ipa_controls.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.cpp - IPA control handling
+ * IPA control handling
*/
-#include <ipa/ipa_controls.h>
+#include <libcamera/ipa/ipa_controls.h>
/**
* \file ipa_controls.h
@@ -108,17 +108,19 @@
* +-------------------------+ .
* / | ... | | entry[n].offset
* | +-------------------------+ <-----´
- * Data | | minimum value (#n) | \
- * section | +-------------------------+ | Entry #n
- * | | maximum value (#n) | /
+ * | | minimum value (#n) | \
+ * Data | +-------------------------+ |
+ * section | | maximum value (#n) | | Entry #n
+ * | +-------------------------+ |
+ * | | default value (#n) | /
* | +-------------------------+
* \ | ... |
* +-------------------------+
* ~~~~
*
- * The minimum and maximum value are stored in the platform's native data
- * format. The ipa_control_info_entry::offset field stores the offset from the
- * beginning of the data section to the info data.
+ * The minimum, maximum and default values are stored in the platform's native
+ * data format. The ipa_control_info_entry::offset field stores the offset from
+ * the beginning of the data section to the info data.
*
* Info data in the data section shall be stored in the same order as the
* entries array, shall be aligned to a multiple of 8 bytes, and shall be
@@ -129,12 +131,41 @@
* shall be ignored when parsing the packet.
*/
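
The layout described above reduces info-data lookup to plain pointer
arithmetic. As a rough sketch only (assuming a const void *packet pointing at
a complete, validated packet, an entry index n, the field names from
ipa_controls.h, and the entries array immediately following the header):

    /* Locate the info data for entry n in a serialized control info packet. */
    const auto *hdr = reinterpret_cast<const struct ipa_controls_header *>(packet);
    const auto *entries =
        reinterpret_cast<const struct ipa_control_info_entry *>(hdr + 1);
    const uint8_t *data = static_cast<const uint8_t *>(packet) + hdr->data_offset;
    const uint8_t *info = data + entries[n].offset;
    /* info now points at the minimum, maximum and default values for entry n. */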
+namespace libcamera {
+
/**
* \def IPA_CONTROLS_FORMAT_VERSION
* \brief The current control serialization format version
*/
/**
+ * \var ipa_controls_id_map_type
+ * \brief Enumerates the different control id map types
+ *
+ * Each ControlInfoMap and ControlList refers to a control id map that
+ * associates ControlId references with numerical identifiers. During
+ * serialization the raw pointers to the ControlId instances cannot be
+ * transported on the wire, so their numerical ids are used to identify them in
+ * the serialized data buffer. At deserialization time each numerical id must
+ * be associated back with the ControlId instance it represents. This
+ * enumeration describes which ControlIdMap shall be used to perform that
+ * lookup.
+ *
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_CONTROLS
+ * \brief The numerical control identifiers are resolved to a ControlId * using
+ * the global controls::controls id map
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_PROPERTIES
+ * \brief The numerical control identifiers are resolved to a ControlId * using
+ * the global properties::properties id map
+ * \var ipa_controls_id_map_type::IPA_CONTROL_ID_MAP_V4L2
+ * \brief ControlId instances for V4L2-defined controls are created by the
+ * video device that enumerates them, and are not available across IPC
+ * boundaries. The deserializer shall create new ControlId instances for them
+ * and store them in a dedicated ControlIdMap. Only lookup by numerical id can
+ * be performed on a deserialized ControlInfoMap that represents V4L2 controls.
+ */
+
+/**
* \struct ipa_controls_header
* \brief Serialized control packet header
* \var ipa_controls_header::version
@@ -149,6 +180,8 @@
* The total packet size in bytes
* \var ipa_controls_header::data_offset
* Offset in bytes from the beginning of the packet of the data section start
+ * \var ipa_controls_header::id_map_type
+ * The id map type as defined by the ipa_controls_id_map_type enumeration
* \var ipa_controls_header::reserved
* Reserved for future extensions
*/
@@ -187,9 +220,15 @@ static_assert(sizeof(ipa_control_value_entry) == 16,
* \var ipa_control_info_entry::offset
* The offset in bytes from the beginning of the data section to the control
* info data (shall be a multiple of 8 bytes)
+ * \var ipa_control_info_entry::direction
+ * The directions in which the control is allowed to be sent. This is a flags
+ * value, where 0x1 signifies input (as controls), and 0x2 signifies output (as
+ * metadata). \sa ControlId::Direction
* \var ipa_control_info_entry::padding
* Padding bytes (shall be set to 0)
*/
static_assert(sizeof(ipa_control_info_entry) == 16,
"Invalid ABI size change for struct ipa_control_info_entry");
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp
new file mode 100644
index 00000000..2189a246
--- /dev/null
+++ b/src/libcamera/ipa_data_serializer.cpp
@@ -0,0 +1,626 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm data serializer
+ */
+
+#include "libcamera/internal/ipa_data_serializer.h"
+
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/byte_stream_buffer.h"
+
+/**
+ * \file ipa_data_serializer.h
+ * \brief IPA Data Serializer
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPADataSerializer)
+
+/**
+ * \class IPADataSerializer
+ * \brief IPA Data Serializer
+ *
+ * Static template class that provides functions for serializing and
+ * deserializing IPA data.
+ *
+ * \todo Switch to Span instead of byte and fd vector
+ *
+ * \todo Harden the vector and map deserializer
+ *
+ * \todo For SharedFDs, instead of storing a validity flag, store an
+ * index into the fd array. This will allow us to use views instead of copying.
+ */
+
+namespace {
+
+/**
+ * \fn template<typename T> void appendPOD(std::vector<uint8_t> &vec, T val)
+ * \brief Append POD to end of byte vector, in little-endian order
+ * \tparam T Type of POD to append
+ * \param[in] vec Byte vector to append to
+ * \param[in] val Value to append
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ */
+
+/**
+ * \fn template<typename T> T readPOD(std::vector<uint8_t>::iterator it, size_t pos,
+ * std::vector<uint8_t>::iterator end)
+ * \brief Read POD from byte vector, in little-endian order
+ * \tparam T Type of POD to read
+ * \param[in] it Iterator of byte vector to read from
+ * \param[in] pos Index in byte vector to read from
+ * \param[in] end Iterator marking end of byte vector
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ *
+ * If \a pos plus the byte-width of the desired POD is past \a end, a fatal
+ * error will occur, as it means there is insufficient data for
+ * deserialization, which should never happen.
+ *
+ * \return The POD read from \a it at index \a pos
+ */
+
+/**
+ * \fn template<typename T> T readPOD(std::vector<uint8_t> &vec, size_t pos)
+ * \brief Read POD from byte vector, in little-endian order
+ * \tparam T Type of POD to read
+ * \param[in] vec Byte vector to read from
+ * \param[in] pos Index in vec to start reading from
+ *
+ * This function is meant to be used by the IPA data serializer, and the
+ * generated IPA proxies.
+ *
+ * If \a pos plus the byte-width of the desired POD is past the end of
+ * \a vec, a fatal error will occur, as it means there is insufficient data
+ * for deserialization, which should never happen.
+ *
+ * \return The POD read from \a vec at index \a pos
+ */
+
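
As a rough illustration of the contract documented above (the real definitions
live in ipa_data_serializer.h; the memcpy-based bodies below assume a
little-endian host and are illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    /* Append a POD value to the end of a byte vector. */
    template<typename T>
    void appendPOD(std::vector<uint8_t> &vec, T val)
    {
        const uint8_t *bytes = reinterpret_cast<const uint8_t *>(&val);
        vec.insert(vec.end(), bytes, bytes + sizeof(T));
    }

    /* Read a POD value back; pos + sizeof(T) must not exceed vec.size(). */
    template<typename T>
    T readPOD(const std::vector<uint8_t> &vec, std::size_t pos)
    {
        T val;
        std::memcpy(&val, vec.data() + pos, sizeof(T));
        return val;
    }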
+} /* namespace */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::serialize(
+ * T data,
+ * ControlSerializer *cs = nullptr)
+ * \brief Serialize an object into byte vector and fd vector
+ * \tparam T Type of object to serialize
+ * \param[in] data Object to serialize
+ * \param[in] cs ControlSerializer
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return Tuple of byte vector and fd vector, that is the serialized form
+ * of \a data
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * const std::vector<uint8_t> &data,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] data Byte vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() can be used if the object type \a T and its
+ * members don't have any SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * std::vector<uint8_t>::const_iterator dataBegin,
+ * std::vector<uint8_t>::const_iterator dataEnd,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] dataBegin Begin iterator of byte vector to deserialize from
+ * \param[in] dataEnd End iterator of byte vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() can be used if the object type \a T and its
+ * members don't have any SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * const std::vector<uint8_t> &data,
+ * const std::vector<SharedFD> &fds,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector and fd vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] data Byte vector to deserialize from
+ * \param[in] fds Fd vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() (or the iterator version) must be used if
+ * the object type \a T or its members contain SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+/**
+ * \fn template<typename T> IPADataSerializer<T>::deserialize(
+ * std::vector<uint8_t>::const_iterator dataBegin,
+ * std::vector<uint8_t>::const_iterator dataEnd,
+ * std::vector<SharedFD>::const_iterator fdsBegin,
+ * std::vector<SharedFD>::const_iterator fdsEnd,
+ * ControlSerializer *cs = nullptr)
+ * \brief Deserialize byte vector and fd vector into an object
+ * \tparam T Type of object to deserialize to
+ * \param[in] dataBegin Begin iterator of byte vector to deserialize from
+ * \param[in] dataEnd End iterator of byte vector to deserialize from
+ * \param[in] fdsBegin Begin iterator of fd vector to deserialize from
+ * \param[in] fdsEnd End iterator of fd vector to deserialize from
+ * \param[in] cs ControlSerializer
+ *
+ * This version of deserialize() (or the vector version) must be used if
+ * the object type \a T or its members contain SharedFD.
+ *
+ * \a cs is only necessary if the object type \a T or its members contain
+ * ControlList or ControlInfoMap.
+ *
+ * \return The deserialized object
+ */
+
+#ifndef __DOXYGEN__
+
+#define DEFINE_POD_SERIALIZER(type) \
+ \
+template<> \
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>> \
+IPADataSerializer<type>::serialize(const type &data, \
+ [[maybe_unused]] ControlSerializer *cs) \
+{ \
+ std::vector<uint8_t> dataVec; \
+ dataVec.reserve(sizeof(type)); \
+ appendPOD<type>(dataVec, data); \
+ \
+ return { dataVec, {} }; \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
+ std::vector<uint8_t>::const_iterator dataEnd, \
+ [[maybe_unused]] ControlSerializer *cs) \
+{ \
+ return readPOD<type>(dataBegin, 0, dataEnd); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(data.cbegin(), data.end(), cs); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
+ [[maybe_unused]] const std::vector<SharedFD> &fds, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(data.cbegin(), data.end(), cs); \
+} \
+ \
+template<> \
+type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
+ std::vector<uint8_t>::const_iterator dataEnd, \
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin, \
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd, \
+ ControlSerializer *cs) \
+{ \
+ return deserialize(dataBegin, dataEnd, cs); \
+}
+
+DEFINE_POD_SERIALIZER(bool)
+DEFINE_POD_SERIALIZER(uint8_t)
+DEFINE_POD_SERIALIZER(uint16_t)
+DEFINE_POD_SERIALIZER(uint32_t)
+DEFINE_POD_SERIALIZER(uint64_t)
+DEFINE_POD_SERIALIZER(int8_t)
+DEFINE_POD_SERIALIZER(int16_t)
+DEFINE_POD_SERIALIZER(int32_t)
+DEFINE_POD_SERIALIZER(int64_t)
+DEFINE_POD_SERIALIZER(float)
+DEFINE_POD_SERIALIZER(double)
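
A hedged usage sketch of the specializations generated above (the variable
names and the value 42 are illustrative):

    /* Round-trip a uint32_t through the generated POD serializer. */
    auto [data, fds] = IPADataSerializer<uint32_t>::serialize(42);
    uint32_t value = IPADataSerializer<uint32_t>::deserialize(data);
    /* value == 42; fds stays empty, as plain PODs carry no file descriptors. */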
+
+/*
+ * Strings are serialized simply as their raw bytes, {string.cbegin(), string.end()}.
+ * The size of the string is recorded by the container (struct, vector, map, or
+ * function parameter serdes).
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<std::string>::serialize(const std::string &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { { data.cbegin(), data.end() }, {} };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { data.cbegin(), data.cend() };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { dataBegin, dataEnd };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { data.cbegin(), data.cend() };
+}
+
+template<>
+std::string
+IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return { dataBegin, dataEnd };
+}
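
A similar sketch for strings; since no length field is written, the enclosing
container is responsible for delimiting the bytes (example value illustrative):

    auto [data, fds] = IPADataSerializer<std::string>::serialize("libcamera");
    /* data holds the nine raw bytes of the string, and fds is empty. */
    std::string str = IPADataSerializer<std::string>::deserialize(data);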
+
+/*
+ * ControlList is serialized as:
+ *
+ * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
+ * 4 bytes - uint32_t Size of serialized ControlList, in bytes
+ * X bytes - Serialized ControlInfoMap (using ControlSerializer)
+ * X bytes - Serialized ControlList (using ControlSerializer)
+ *
+ * If data.infoMap() is nullptr, then the default controls::controls will
+ * be used. The serialized ControlInfoMap will have zero length.
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<ControlList>::serialize(const ControlList &data, ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for serialization of ControlList";
+
+ size_t size;
+ std::vector<uint8_t> infoData;
+ int ret;
+
+ /*
+ * \todo Revisit this opportunistic serialization of the
+ * ControlInfoMap, as it could be fragile
+ */
+ if (data.infoMap() && !cs->isCached(*data.infoMap())) {
+ size = cs->binarySize(*data.infoMap());
+ infoData.resize(size);
+ ByteStreamBuffer buffer(infoData.data(), infoData.size());
+ ret = cs->serialize(*data.infoMap(), buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlList's ControlInfoMap";
+ return { {}, {} };
+ }
+ }
+
+ size = cs->binarySize(data);
+ std::vector<uint8_t> listData(size);
+ ByteStreamBuffer buffer(listData.data(), listData.size());
+ ret = cs->serialize(data, buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlList";
+ return { {}, {} };
+ }
+
+ std::vector<uint8_t> dataVec;
+ dataVec.reserve(8 + infoData.size() + listData.size());
+ appendPOD<uint32_t>(dataVec, infoData.size());
+ appendPOD<uint32_t>(dataVec, listData.size());
+ dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());
+ dataVec.insert(dataVec.end(), listData.begin(), listData.end());
+
+ return { dataVec, {} };
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for deserialization of ControlList";
+
+ if (std::distance(dataBegin, dataEnd) < 8)
+ return {};
+
+ uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+ uint32_t listDataSize = readPOD<uint32_t>(dataBegin, 4, dataEnd);
+
+ std::vector<uint8_t>::const_iterator it = dataBegin + 8;
+
+ if (infoDataSize + listDataSize < infoDataSize ||
+ static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize + listDataSize)
+ return {};
+
+ if (infoDataSize > 0) {
+ ByteStreamBuffer buffer(&*it, infoDataSize);
+ ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);
+ /* It's fine if map is empty. */
+ if (buffer.overflow()) {
+ LOG(IPADataSerializer, Error)
+ << "Failed to deserialize ControlList's ControlInfoMap: buffer overflow";
+ return ControlList();
+ }
+ }
+
+ it += infoDataSize;
+ ByteStreamBuffer buffer(&*it, listDataSize);
+ ControlList list = cs->deserialize<ControlList>(buffer);
+ if (buffer.overflow())
+ LOG(IPADataSerializer, Error) << "Failed to deserialize ControlList: buffer overflow";
+
+ return list;
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlList
+IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ ControlSerializer *cs)
+{
+ return deserialize(dataBegin, dataEnd, cs);
+}
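
A minimal round-trip sketch for ControlList, assuming a ControlSerializer
instance cs shared between the serializing and deserializing sides (its
construction and the contents of list are elided here):

    auto [data, fds] = IPADataSerializer<ControlList>::serialize(list, &cs);
    /* data = 8-byte size header, serialized ControlInfoMap, serialized list. */
    ControlList restored = IPADataSerializer<ControlList>::deserialize(data, &cs);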
+
+/*
+ * const ControlInfoMap is serialized as:
+ *
+ * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
+ * X bytes - Serialized ControlInfoMap (using ControlSerializer)
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<ControlInfoMap>::serialize(const ControlInfoMap &map,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for serialization of ControlInfoMap";
+
+ size_t size = cs->binarySize(map);
+ std::vector<uint8_t> infoData(size);
+ ByteStreamBuffer buffer(infoData.data(), infoData.size());
+ int ret = cs->serialize(map, buffer);
+
+ if (ret < 0 || buffer.overflow()) {
+ LOG(IPADataSerializer, Error) << "Failed to serialize ControlInfoMap";
+ return { {}, {} };
+ }
+
+ std::vector<uint8_t> dataVec;
+ appendPOD<uint32_t>(dataVec, infoData.size());
+ dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());
+
+ return { dataVec, {} };
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ ControlSerializer *cs)
+{
+ if (!cs)
+ LOG(IPADataSerializer, Fatal)
+ << "ControlSerializer not provided for deserialization of ControlInfoMap";
+
+ if (std::distance(dataBegin, dataEnd) < 4)
+ return {};
+
+ uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+
+ std::vector<uint8_t>::const_iterator it = dataBegin + 4;
+
+ if (static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize)
+ return {};
+
+ ByteStreamBuffer buffer(&*it, infoDataSize);
+ ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);
+
+ return map;
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
+ [[maybe_unused]] const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), cs);
+}
+
+template<>
+ControlInfoMap
+IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ ControlSerializer *cs)
+{
+ return deserialize(dataBegin, dataEnd, cs);
+}
+
+/*
+ * SharedFD instances are serialized into four bytes that tell whether the
+ * SharedFD is valid or not. If it is valid, then on serialization the fd is
+ * appended to the fd vector, and on deserialization the fd vector
+ * const_iterator is expected to point to a valid fd.
+ *
+ * The validity flag is necessary so that we don't send a -1 fd over sendmsg(). It
+ * also allows us to simply send the entire fd vector into the deserializer
+ * and it will be recursively consumed as necessary.
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<SharedFD>::serialize(const SharedFD &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ std::vector<uint8_t> dataVec;
+ std::vector<SharedFD> fdVec;
+
+ /*
+ * Store as uint32_t to prepare for conversion from validity flag
+ * to index, and for alignment.
+ */
+ appendPOD<uint32_t>(dataVec, data.isValid());
+
+ if (data.isValid())
+ fdVec.push_back(data);
+
+ return { dataVec, fdVec };
+}
+
+template<>
+SharedFD IPADataSerializer<SharedFD>::deserialize([[maybe_unused]] std::vector<uint8_t>::const_iterator dataBegin,
+ [[maybe_unused]] std::vector<uint8_t>::const_iterator dataEnd,
+ std::vector<SharedFD>::const_iterator fdsBegin,
+ std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ ASSERT(std::distance(dataBegin, dataEnd) >= 4);
+
+ uint32_t valid = readPOD<uint32_t>(dataBegin, 0, dataEnd);
+
+ ASSERT(!(valid && std::distance(fdsBegin, fdsEnd) < 1));
+
+ return valid ? *fdsBegin : SharedFD();
+}
+
+template<>
+SharedFD IPADataSerializer<SharedFD>::deserialize(const std::vector<uint8_t> &data,
+ const std::vector<SharedFD> &fds,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end());
+}
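
A sketch of the round-trip described above; the open() call is only an example
of obtaining a valid fd:

    SharedFD fd(open("/dev/null", O_RDONLY));
    auto [data, fds] = IPADataSerializer<SharedFD>::serialize(fd);
    /* data holds the 4-byte validity flag; fds has one entry when fd is valid. */
    SharedFD restored = IPADataSerializer<SharedFD>::deserialize(data, fds);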
+
+/*
+ * FrameBuffer::Plane is serialized as:
+ *
+ * 4 bytes - SharedFD
+ * 4 bytes - uint32_t Offset
+ * 4 bytes - uint32_t Length
+ */
+template<>
+std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+IPADataSerializer<FrameBuffer::Plane>::serialize(const FrameBuffer::Plane &data,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ std::vector<uint8_t> dataVec;
+ std::vector<SharedFD> fdsVec;
+
+ std::vector<uint8_t> fdBuf;
+ std::vector<SharedFD> fdFds;
+ std::tie(fdBuf, fdFds) =
+ IPADataSerializer<SharedFD>::serialize(data.fd);
+ dataVec.insert(dataVec.end(), fdBuf.begin(), fdBuf.end());
+ fdsVec.insert(fdsVec.end(), fdFds.begin(), fdFds.end());
+
+ appendPOD<uint32_t>(dataVec, data.offset);
+ appendPOD<uint32_t>(dataVec, data.length);
+
+ return { dataVec, fdsVec };
+}
+
+template<>
+FrameBuffer::Plane
+IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs)
+{
+ FrameBuffer::Plane ret;
+
+ ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4,
+ fdsBegin, fdsBegin + 1);
+ ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd);
+ ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd);
+
+ return ret;
+}
+
+template<>
+FrameBuffer::Plane
+IPADataSerializer<FrameBuffer::Plane>::deserialize(const std::vector<uint8_t> &data,
+ const std::vector<SharedFD> &fds,
+ ControlSerializer *cs)
+{
+ return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end(), cs);
+}
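
And a corresponding sketch for a plane: per the layout above, the byte payload
is 12 bytes and at most one fd travels in the fd vector (field values
illustrative):

    FrameBuffer::Plane plane;
    plane.fd = fd;
    plane.offset = 0;
    plane.length = 4096;

    auto [data, fds] = IPADataSerializer<FrameBuffer::Plane>::serialize(plane);
    FrameBuffer::Plane out =
        IPADataSerializer<FrameBuffer::Plane>::deserialize(data, fds);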
+
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/ipa_interface.cpp b/src/libcamera/ipa_interface.cpp
index 5959e7de..a9dc54ad 100644
--- a/src/libcamera/ipa_interface.cpp
+++ b/src/libcamera/ipa_interface.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.cpp - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
-#include <ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_interface.h>
/**
* \file ipa_interface.h
@@ -15,273 +15,51 @@
* an Image Processing Algorithm (IPA) module. An IPA module is developed for a
* specific pipeline handler and each pipeline handler may be compatible with
* multiple IPA implementations, both open and closed source. To support this,
- * libcamera communicates with IPA modules through a standard plain C interface.
+ * libcamera communicates with IPA modules through a per-pipeline C++ interface.
*
- * IPA modules shall expose a public function named ipaCreate() with the
- * following prototype.
+ * IPA modules shall provide an ipaCreate() function exported as a public C
+ * symbol with the following prototype:
*
* \code{.c}
- * struct ipa_context *ipaCreate();
+ * IPAInterface *ipaCreate();
* \endcode
*
- * The ipaCreate() function creates an instance of an IPA context, which models
+ * The ipaCreate() function creates an instance of an IPA interface, which models
* a context of execution for the IPA. IPA modules shall support creating one
* context per camera, as required by their associated pipeline handler.
*
- * The IPA module context operations are defined in the struct ipa_context_ops.
- * They model a low-level interface to configure the IPA, notify it of events,
- * and receive IPA actions through callbacks. An IPA module stores a pointer to
- * the operations corresponding to its context in the ipa_context::ops field.
- * That pointer is immutable for the lifetime of the context, and may differ
- * between different contexts created by the same IPA module.
+ * The IPA module interface operations are defined in the mojom file
+ * corresponding to the pipeline handler, in
+ * include/libcamera/ipa/{pipeline_name}.mojom.
*
- * The IPA interface defines base data types and functions to exchange data. On
- * top of this, each pipeline handler is responsible for defining the set of
- * events and actions used to communicate with their IPA. These are collectively
- * referred to as IPA operations and define the pipeline handler-specific IPA
- * protocol. Each operation defines the data that it carries, and how that data
- * is encoded in the ipa_context_ops functions arguments.
+ * The IPA interface is specific to each pipeline handler. The pipeline handlers
+ * define a set of operations used to communicate with their IPA modules. The
+ * operations, along with the data structures they use, are collectively
+ * referred to as the IPA protocol.
+ *
+ * The IPA protocol is defined using the
+ * <a href="https://chromium.googlesource.com/chromium/src/+/master/mojo/public/tools/bindings/README.md">Mojo interface definition language</a>,
+ * in a Mojo module file stored in include/libcamera/ipa/{pipeline_name}.mojom.
+ * The Mojo module contains two Mojo interfaces: IPAInterface defines the
+ * operations exposed by the IPA and called by the pipeline handler, and
+ * IPAEventInterface defines the events generated by the IPA and received by the
+ * pipeline handler.
*
* \todo Add reference to how pipelines shall document their protocol.
*
* IPAs can be isolated in a separate process. This implies that arguments to
- * the IPA interface functions may need to be transferred over IPC. All
- * arguments use Plain Old Data types and are documented either in the form of C
- * data types, or as a textual description of byte arrays for types that can't
- * be expressed using C data types (such as arrays of mixed data types). IPA
- * modules can thus use the C API without calling into libcamera to access the
- * data passed to the IPA context operations.
+ * the IPA interface functions may need to be transferred over IPC. An IPA
+ * proxy is auto-generated based on the mojom file, which abstracts away the
+ * (de)serialization from the pipeline handler and the IPA implementation. Thus
+ * any C++ structure that is defined in the mojom file, or the C++ libcamera
+ * objects that are listed in core.mojom, can be used directly.
*
* Due to IPC, synchronous communication between pipeline handlers and IPAs can
- * be costly. For that reason, the interface operates asynchronously. This
- * implies that methods don't return a status, and that all methods may copy
- * their arguments.
- *
- * The IPAInterface class is a C++ representation of the ipa_context_ops, using
- * C++ data classes provided by libcamera. This is the API exposed to pipeline
- * handlers to communicate with IPA modules. IPA modules may use the
- * IPAInterface API internally if they want to benefit from the data and helper
- * classes offered by libcamera.
- *
- * When an IPA module is loaded directly into the libcamera process and uses
- * the IPAInterface API internally, short-circuiting the path to the
- * ipa_context_ops and back to IPAInterface is desirable. To support this, IPA
- * modules may implement the ipa_context_ops::get_interface function to return a
- * pointer to their internal IPAInterface.
- */
-
-/**
- * \struct ipa_context
- * \brief IPA module context of execution
- *
- * This structure models a context of execution for an IPA module. It is
- * instantiated by the IPA module ipaCreate() function. IPA modules allocate
- * context instances in an implementation-defined way, contexts shall thus be
- * destroyed using the ipa_operation::destroy function only.
- *
- * The ipa_context structure provides a pointer to the IPA context operations.
- * It shall otherwise be treated as a constant black-box cookie and passed
- * unmodified to the functions defined in struct ipa_context_ops.
- *
- * IPA modules are expected to extend struct ipa_context by inheriting from it,
- * either through structure embedding to model inheritance in plain C, or
- * through C++ class inheritance. A simple example of the latter is available
- * in the IPAContextWrapper class implementation.
- *
- * \var ipa_context::ops
- * \brief The IPA context operations
- */
-
-/**
- * \struct ipa_stream
- * \brief Stream information for the IPA context operations
- *
- * \var ipa_stream::id
- * \brief Identifier for the stream, defined by the IPA protocol
- *
- * \var ipa_stream::pixel_format
- * \brief The stream pixel format, as defined by the PixelFormat class
- *
- * \var ipa_stream::width
- * \brief The stream width in pixels
- *
- * \var ipa_stream::height
- * \brief The stream height in pixels
- */
-
-/**
- * \struct ipa_control_info_map
- * \brief ControlInfoMap description for the IPA context operations
- *
- * \var ipa_control_info_map::id
- * \brief Identifier for the ControlInfoMap, defined by the IPA protocol
- *
- * \var ipa_control_info_map::data
- * \brief Pointer to a control packet for the ControlInfoMap
- * \sa ipa_controls.h
- *
- * \var ipa_control_info_map::size
- * \brief The size of the control packet in bytes
- */
-
-/**
- * \struct ipa_buffer_plane
- * \brief A plane for an ipa_buffer
- *
- * \var ipa_buffer_plane::dmabuf
- * \brief The dmabuf file descriptor for the plane (-1 for unused planes)
- *
- * \var ipa_buffer_plane::length
- * \brief The plane length in bytes (0 for unused planes)
- */
-
-/**
- * \struct ipa_buffer
- * \brief Buffer information for the IPA context operations
- *
- * \var ipa_buffer::id
- * \brief The buffer unique ID (see \ref libcamera::IPABuffer::id)
- *
- * \var ipa_buffer::num_planes
- * \brief The number of used planes in the ipa_buffer::planes array
- *
- * \var ipa_buffer::planes
- * \brief The buffer planes (up to 3)
- */
-
-/**
- * \struct ipa_control_list
- * \brief ControlList description for the IPA context operations
- *
- * \var ipa_control_list::data
- * \brief Pointer to a control packet for the ControlList
- * \sa ipa_controls.h
- *
- * \var ipa_control_list::size
- * \brief The size of the control packet in bytes
- */
-
-/**
- * \struct ipa_operation_data
- * \brief IPA operation data for the IPA context operations
- * \sa libcamera::IPAOperationData
- *
- * \var ipa_operation_data::operation
- * \brief IPA protocol operation
- *
- * \var ipa_operation_data::data
- * \brief Pointer to the operation data array
- *
- * \var ipa_operation_data::num_data
- * \brief Number of entries in the ipa_operation_data::data array
- *
- * \var ipa_operation_data::lists
- * \brief Pointer to an array of ipa_control_list
- *
- * \var ipa_operation_data::num_lists
- * \brief Number of entries in the ipa_control_list array
- */
-
-/**
- * \struct ipa_callback_ops
- * \brief IPA context operations as a set of function pointers
- */
-
-/**
- * \var ipa_callback_ops::queue_frame_action
- * \brief Queue an action associated with a frame to the pipeline handler
- * \param[in] cb_ctx The callback context registered with
- * ipa_context_ops::register_callbacks
- * \param[in] frame The frame number
- *
- * \sa libcamera::IPAInterface::queueFrameAction
- */
-
-/**
- * \struct ipa_context_ops
- * \brief IPA context operations as a set of function pointers
- *
- * To allow for isolation of IPA modules in separate processes, the functions
- * defined in the ipa_context_ops structure return only data related to the
- * libcamera side of the operations. In particular, error related to the
- * libcamera side of the IPC may be returned. Data returned by the IPA,
- * including status information, shall be provided through callbacks from the
- * IPA to libcamera.
- */
-
-/**
- * \var ipa_context_ops::destroy
- * \brief Destroy the IPA context created by the module's ipaCreate() function
- * \param[in] ctx The IPA context
- */
-
-/**
- * \var ipa_context_ops::get_interface
- * \brief Retrieve the IPAInterface implemented by the ipa_context (optional)
- * \param[in] ctx The IPA context
- *
- * IPA modules may implement this function to expose their internal
- * IPAInterface, if any. When implemented, libcamera may at its sole discretion
- * call it and then bypass the ipa_context_ops API by calling the IPAInterface
- * methods directly. IPA modules shall still implement and support the full
- * ipa_context_ops API.
- */
-
-/**
- * \var ipa_context_ops::init
- * \brief Initialise the IPA context
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::init()
- */
-
-/**
- * \var ipa_context_ops::register_callbacks
- * \brief Register callback operation from the IPA to the pipeline handler
- * \param[in] ctx The IPA context
- * \param[in] callback The IPA callback operations
- * \param[in] cb_ctx The callback context, passed to all callback operations
- */
-
-/**
- * \var ipa_context_ops::configure
- * \brief Configure the IPA stream and sensor settings
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::configure()
- */
-
-/**
- * \var ipa_context_ops::map_buffers
- * \brief Map buffers shared between the pipeline handler and the IPA
- * \param[in] ctx The IPA context
- * \param[in] buffers The buffers to map
- * \param[in] num_buffers The number of entries in the \a buffers array
- *
- * The dmabuf file descriptors provided in \a buffers are borrowed from the
- * caller and are only guaranteed to be valid during the map_buffers() call.
- * Should the callee need to store a copy of the file descriptors, it shall
- * duplicate them first with ::%dup().
- *
- * \sa libcamera::IPAInterface::mapBuffers()
- */
-
-/**
- * \var ipa_context_ops::unmap_buffers
- * \brief Unmap buffers shared by the pipeline to the IPA
- * \param[in] ctx The IPA context
- * \param[in] ids The IDs of the buffers to unmap
- * \param[in] num_buffers The number of entries in the \a ids array
- *
- * \sa libcamera::IPAInterface::unmapBuffers()
- */
-
-/**
- * \var ipa_context_ops::process_event
- * \brief Process an event from the pipeline handler
- * \param[in] ctx The IPA context
- *
- * \sa libcamera::IPAInterface::processEvent()
+ * be costly. For that reason, functions that cannot afford the high cost
+ * should be marked as [async] in the mojom file, and they will operate
+ * asynchronously. This implies that these functions don't return a status, and
+ * that all functions may copy their arguments. Synchronous functions are still
+ * allowed, but should be used with caution.
*/
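
The documentation above requires modules to export ipaCreate() as a public C
symbol; a skeletal entry point could look like the following, where
MyPipelineIPA is a placeholder for a class implementing the pipeline's
mojom-generated interface:

    extern "C" {
    libcamera::IPAInterface *ipaCreate()
    {
        /* One interface instance models one context of execution. */
        return new MyPipelineIPA();
    }
    }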
/**
@@ -289,213 +67,38 @@
* \brief Entry point to the IPA modules
*
* This function is the entry point to the IPA modules. It is implemented by
- * every IPA module, and called by libcamera to create a new IPA context.
+ * every IPA module, and called by libcamera to create a new IPA interface
+ * instance.
*
- * \return A newly created IPA context
+ * \return A newly created IPA interface instance
*/
namespace libcamera {
/**
- * \struct IPAStream
- * \brief Stream configuration for the IPA interface
- *
- * The IPAStream structure stores stream configuration parameters needed by the
- * IPAInterface::configure() method. It mirrors the StreamConfiguration class
- * that is not suitable for this purpose due to not being serializable.
- */
-
-/**
- * \var IPAStream::pixelFormat
- * \brief The stream pixel format
- */
-
-/**
- * \var IPAStream::size
- * \brief The stream size in pixels
- */
-
-/**
- * \struct IPABuffer
- * \brief Buffer information for the IPA interface
- *
- * The IPABuffer structure associates buffer memory with a unique ID. It is
- * used to map buffers to the IPA with IPAInterface::mapBuffers(), after which
- * buffers will be identified by their ID in the IPA interface.
- */
-
-/**
- * \var IPABuffer::id
- * \brief The buffer unique ID
- *
- * Buffers mapped to the IPA are identified by numerical unique IDs. The IDs
- * are chosen by the pipeline handler to fulfil the following constraints:
- *
- * - IDs shall be positive integers different than zero
- * - IDs shall be unique among all mapped buffers
- *
- * When buffers are unmapped with IPAInterface::unmapBuffers() their IDs are
- * freed and may be reused for new buffer mappings.
- */
-
-/**
- * \var IPABuffer::planes
- * \brief The buffer planes description
- *
- * Stores the dmabuf handle and length for each plane of the buffer.
- */
-
-/**
- * \struct IPAOperationData
- * \brief Parameters for IPA operations
- *
- * The IPAOperationData structure carries parameters for the IPA operations
- * performed through the IPAInterface::processEvent() method and the
- * IPAInterface::queueFrameAction signal.
- */
-
-/**
- * \var IPAOperationData::operation
- * \brief IPA protocol operation
- *
- * The operation field describes which operation the receiver shall perform. It
- * defines, through the IPA protocol, how the other fields of the structure are
- * interpreted. The protocol freely assigns numerical values to operations.
- */
-
-/**
- * \var IPAOperationData::data
- * \brief Operation integer data
- *
- * The interpretation and position of different values in the array are defined
- * by the IPA protocol.
- */
-
-/**
- * \var IPAOperationData::controls
- * \brief Operation controls data
- *
- * The interpretation and position of different values in the array are defined
- * by the IPA protocol.
- */
-
-/**
* \class IPAInterface
* \brief C++ Interface for IPA implementation
*
- * This pure virtual class defines a C++ API corresponding to the ipa_context,
- * ipa_context_ops and ipa_callback_ops API. It is used by pipeline handlers to
- * interact with IPA modules, and may be used internally in IPA modules if
- * desired to benefit from the data and helper classes provided by libcamera.
- *
- * Functions defined in the ipa_context_ops structure are mapped to IPAInterface
- * methods, while functions defined in the ipa_callback_ops are mapped to
- * IPAInterface signals. As with the C API, the IPA C++ interface uses
- * serializable data types only. It reuses structures defined by the C API, or
- * defines corresponding classes using C++ containers when required.
+ * This pure virtual class defines a skeletal C++ API for IPA modules.
+ * Specializations of this class must be defined in a mojom file in
+ * include/libcamera/ipa/ (see the IPA Writers Guide for details
+ * on how to do so).
*
- * Due to process isolation all arguments to the IPAInterface methods and
- * signals may need to be transferred over IPC. The class thus uses serializable
- * data types only. The IPA C++ interface defines custom data structures that
- * mirror core libcamera structures when the latter are not suitable, such as
- * IPAStream to carry StreamConfiguration data.
+ * Due to process isolation all arguments to the IPAInterface member functions
+ * and signals may need to be transferred over IPC. The class thus uses
+ * serializable data types only. The IPA C++ interface defines custom data
+ * structures that mirror core libcamera structures when the latter are not
+ * suitable, such as IPAStream to carry StreamConfiguration data.
*
- * As for the functions defined in struct ipa_context_ops, the methods defined
- * by this class shall not return data from the IPA.
+ * Custom data structures may also be defined in the mojom file, in which case
+ * the (de)serialization will automatically be generated. If any other libcamera
+ * structures are to be used as parameters, then a (de)serializer for them must
+ * be implemented in IPADataSerializer.
*
- * The pipeline handler shall use the IPAManager to locate a compatible
+ * The pipeline handlers shall use the IPAManager to locate a compatible
* IPAInterface. The interface may then be used to interact with the IPA module.
- */
-
-/**
- * \fn IPAInterface::init()
- * \brief Initialise the IPAInterface
- */
-
-/**
- * \fn IPAInterface::configure()
- * \brief Configure the IPA stream and sensor settings
- * \param[in] streamConfig Configuration of all active streams
- * \param[in] entityControls Controls provided by the pipeline entities
- *
- * This method shall be called when the camera is started to inform the IPA of
- * the camera's streams and the sensor settings. The meaning of the numerical
- * keys in the \a streamConfig and \a entityControls maps is defined by the IPA
- * protocol.
- */
-
-/**
- * \fn IPAInterface::mapBuffers()
- * \brief Map buffers shared between the pipeline handler and the IPA
- * \param[in] buffers List of buffers to map
- *
- * This method informs the IPA module of memory buffers set up by the pipeline
- * handler that the IPA needs to access. It provides dmabuf file handles for
- * each buffer, and associates the buffers with unique numerical IDs.
- *
- * IPAs shall map the dmabuf file handles to their address space and keep a
- * cache of the mappings, indexed by the buffer numerical IDs. The IDs are used
- * in all other IPA interface methods to refer to buffers, including the
- * unmapBuffers() method.
- *
- * All buffers that the pipeline handler wishes to share with an IPA shall be
- * mapped with this method. Buffers may be mapped all at once with a single
- * call, or mapped and unmapped dynamically at runtime, depending on the IPA
- * protocol. Regardless of the protocol, all buffers mapped at a given time
- * shall have unique numerical IDs.
- *
- * The numerical IDs have no meaning defined by the IPA interface, and IPA
- * protocols shall not give them any specific meaning either. They should be
- * treated as opaque handles by IPAs, with the only exception that ID zero is
- * invalid.
- *
- * \sa unmapBuffers()
- *
- * \todo Provide a generic implementation of mapBuffers and unmapBuffers for
- * IPAs
- */
-
-/**
- * \fn IPAInterface::unmapBuffers()
- * \brief Unmap buffers shared by the pipeline to the IPA
- * \param[in] ids List of buffer IDs to unmap
- *
- * This method removes mappings set up with mapBuffers(). Buffers may be
- * unmapped all at once with a single call, or selectively at runtime, depending
- * on the IPA protocol. Numerical IDs of unmapped buffers may be reused when
- * mapping new buffers.
- *
- * \sa mapBuffers()
- */
-
-/**
- * \fn IPAInterface::processEvent()
- * \brief Process an event from the pipeline handler
- * \param[in] data IPA operation data
- *
- * This operation is used by pipeline handlers to inform the IPA module of
- * events that occurred during the on-going capture operation.
- *
- * The event notified by the pipeline handler with this method is handled by the
- * IPA, which interprets the operation parameters according to the separately
- * documented IPA protocol.
- */
-
-/**
- * \var IPAInterface::queueFrameAction
- * \brief Queue an action associated with a frame to the pipeline handler
- * \param[in] frame The frame number for the action
- * \param[in] data IPA operation data
- *
- * This signal is emitted when the IPA wishes to queue a FrameAction on the
- * pipeline. The pipeline is still responsible for the scheduling of the action
- * on its timeline.
*
- * This signal is emitted by the IPA to queue an action to be executed by the
- * pipeline handler on a frame. The type of action is identified by the
- * \a data.operation field, as defined by the IPA protocol, and the rest of the
- * \a data is interpreted accordingly. The pipeline handler shall queue the
- * action and execute it as appropriate.
+ * \todo Figure out how to generate IPAInterface documentation.
*/
} /* namespace libcamera */
diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp
index bcaae356..cfc24d38 100644
--- a/src/libcamera/ipa_manager.cpp
+++ b/src/libcamera/ipa_manager.cpp
@@ -2,22 +2,23 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.cpp - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
-#include "ipa_manager.h"
+#include "libcamera/internal/ipa_manager.h"
#include <algorithm>
#include <dirent.h>
#include <string.h>
#include <sys/types.h>
-#include "ipa_context_wrapper.h"
-#include "ipa_module.h"
-#include "ipa_proxy.h"
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/ipa_module.h"
+#include "libcamera/internal/ipa_proxy.h"
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file ipa_manager.h
@@ -43,8 +44,8 @@ LOG_DEFINE_CATEGORY(IPAManager)
* The isolation mechanism ensures that no code from a closed-source module is
* ever run in the libcamera process.
*
- * To create an IPA context, pipeline handlers call the IPAManager::ipaCreate()
- * method. For a directly loaded module, the manager calls the module's
+ * To create an IPA context, pipeline handlers call the IPAManager::createIPA()
+ * function. For a directly loaded module, the manager calls the module's
* ipaCreate() function directly and wraps the returned context in an
* IPAContextWrapper that exposes an IPAInterface.
*
@@ -88,13 +89,25 @@ LOG_DEFINE_CATEGORY(IPAManager)
* returned to the pipeline handler, and all interactions with the IPA context
* go the same interface regardless of process isolation.
*
- * In all cases the data passed to the IPAInterface methods is serialized to
- * Plain Old Data, either for the purpose of passing it to the IPA context
- * plain C API, or to transmit the data to the isolated process through IPC.
+ * In all cases the data passed to the IPAInterface member functions is
+ * serialized to Plain Old Data, either for the purpose of passing it to the IPA
+ * context plain C API, or to transmit the data to the isolated process through
+ * IPC.
*/
+/**
+ * \brief Construct an IPAManager instance
+ *
+ * The IPAManager class is meant to only be instantiated once, by the
+ * CameraManager.
+ */
IPAManager::IPAManager()
{
+#if HAVE_IPA_PUBKEY
+ if (!pubKey_.isValid())
+ LOG(IPAManager, Warning) << "Public key not valid";
+#endif
+
unsigned int ipaCount = 0;
/* User-specified paths take precedence. */
@@ -114,15 +127,12 @@ IPAManager::IPAManager()
/*
* When libcamera is used before it is installed, load IPAs from the
- * same build directory as the libcamera library itself. This requires
- * identifying the path of the libcamera.so, and referencing a relative
- * path for the IPA from that point. We need to recurse one level of
- * sub-directories to match the build tree.
+ * same build directory as the libcamera library itself.
*/
std::string root = utils::libcameraBuildPath();
if (!root.empty()) {
std::string ipaBuildPath = root + "src/ipa";
- constexpr int maxDepth = 1;
+ constexpr int maxDepth = 2;
LOG(IPAManager, Info)
<< "libcamera is not installed. Adding '"
@@ -146,21 +156,6 @@ IPAManager::~IPAManager()
}
/**
- * \brief Retrieve the IPA manager instance
- *
- * The IPAManager is a singleton and can't be constructed manually. This
- * function shall instead be used to retrieve the single global instance of the
- * manager.
- *
- * \return The IPA manager instance
- */
-IPAManager *IPAManager::instance()
-{
- static IPAManager ipaManager;
- return &ipaManager;
-}
-
-/**
* \brief Identify shared library objects within a directory
* \param[in] libDir The directory to search for shared objects
* \param[in] maxDepth The maximum depth of sub-directories to parse
@@ -212,7 +207,7 @@ void IPAManager::parseDir(const char *libDir, unsigned int maxDepth,
* \param[in] libDir The directory to search for IPA modules
* \param[in] maxDepth The maximum depth of sub-directories to search
*
- * This method tries to create an IPAModule instance for every shared object
+ * This function tries to create an IPAModule instance for every shared object
* found in \a libDir, and skips invalid IPA modules.
*
* Sub-directories are searched up to a depth of \a maxDepth. A \a maxDepth
@@ -247,64 +242,75 @@ unsigned int IPAManager::addDir(const char *libDir, unsigned int maxDepth)
}
/**
- * \brief Create an IPA interface that matches a given pipeline handler
- * \param[in] pipe The pipeline handler that wants a matching IPA interface
+ * \brief Retrieve an IPA module that matches a given pipeline handler
+ * \param[in] pipe The pipeline handler
* \param[in] minVersion Minimum acceptable version of IPA module
* \param[in] maxVersion Maximum acceptable version of IPA module
- *
- * \return A newly created IPA interface, or nullptr if no matching
- * IPA module is found or if the IPA interface fails to initialize
*/
-std::unique_ptr<IPAInterface> IPAManager::createIPA(PipelineHandler *pipe,
- uint32_t maxVersion,
- uint32_t minVersion)
+IPAModule *IPAManager::module(PipelineHandler *pipe, uint32_t minVersion,
+ uint32_t maxVersion)
{
- IPAModule *m = nullptr;
-
for (IPAModule *module : modules_) {
- if (module->match(pipe, minVersion, maxVersion)) {
- m = module;
- break;
- }
+ if (module->match(pipe, minVersion, maxVersion))
+ return module;
}
- if (!m)
- return nullptr;
+ return nullptr;
+}
- if (!m->isOpenSource()) {
- IPAProxyFactory *pf = nullptr;
- std::vector<IPAProxyFactory *> &factories = IPAProxyFactory::factories();
+/**
+ * \fn IPAManager::createIPA()
+ * \brief Create an IPA proxy that matches a given pipeline handler
+ * \param[in] pipe The pipeline handler that wants a matching IPA proxy
+ * \param[in] minVersion Minimum acceptable version of IPA module
+ * \param[in] maxVersion Maximum acceptable version of IPA module
+ *
+ * \return A newly created IPA proxy, or nullptr if no matching IPA module is
+ * found or if the IPA proxy fails to initialize
+ */
- for (IPAProxyFactory *factory : factories) {
- /* TODO: Better matching */
- if (!strcmp(factory->name().c_str(), "IPAProxyLinux")) {
- pf = factory;
- break;
- }
- }
+#if HAVE_IPA_PUBKEY
+/**
+ * \fn IPAManager::pubKey()
+ * \brief Retrieve the IPA module signing public key
+ *
+ * IPA module signature verification is normally handled internally by the
+ * IPAManager class. This function is meant to be used by utilities that need to
+ * verify signatures externally.
+ *
+ * \return The IPA module signing public key
+ */
+#endif
- if (!pf) {
- LOG(IPAManager, Error) << "Failed to get proxy factory";
- return nullptr;
- }
+bool IPAManager::isSignatureValid([[maybe_unused]] IPAModule *ipa) const
+{
+#if HAVE_IPA_PUBKEY
+ char *force = utils::secure_getenv("LIBCAMERA_IPA_FORCE_ISOLATION");
+ if (force && force[0] != '\0') {
+ LOG(IPAManager, Debug)
+ << "Isolation of IPA module " << ipa->path()
+ << " forced through environment variable";
+ return false;
+ }
- std::unique_ptr<IPAProxy> proxy = pf->create(m);
- if (!proxy->isValid()) {
- LOG(IPAManager, Error) << "Failed to load proxy";
- return nullptr;
- }
+ File file{ ipa->path() };
+ if (!file.open(File::OpenModeFlag::ReadOnly))
+ return false;
- return proxy;
- }
+ Span<uint8_t> data = file.map();
+ if (data.empty())
+ return false;
- if (!m->load())
- return nullptr;
+ bool valid = pubKey_.verify(data, ipa->signature());
- struct ipa_context *ctx = m->createContext();
- if (!ctx)
- return nullptr;
+ LOG(IPAManager, Debug)
+ << "IPA module " << ipa->path() << " signature is "
+ << (valid ? "valid" : "not valid");
- return std::make_unique<IPAContextWrapper>(ctx);
+ return valid;
+#else
+ return false;
+#endif
}
} /* namespace libcamera */
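
For illustration, a minimal sketch of how a pipeline handler might obtain an IPA proxy through the reworked manager, assuming the templated static createIPA() declared in ipa_manager.h; the ipa::mypipe::IPAProxyMyPipe type, the surrounding pipeline handler member function, and the version bounds (1, 1) are all hypothetical:

	/* Inside a hypothetical pipeline handler's match() function. */
	std::unique_ptr<ipa::mypipe::IPAProxyMyPipe> ipa =
		IPAManager::createIPA<ipa::mypipe::IPAProxyMyPipe>(this, 1, 1);
	if (!ipa)
		return false; /* No module matched, or the proxy failed to initialize. */
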
diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp
index a01d0757..9ca74be6 100644
--- a/src/libcamera/ipa_module.cpp
+++ b/src/libcamera/ipa_module.cpp
@@ -2,28 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.cpp - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
-#include "ipa_module.h"
+#include "libcamera/internal/ipa_module.h"
#include <algorithm>
-#include <array>
+#include <ctype.h>
#include <dlfcn.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <link.h>
#include <string.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <tuple>
#include <unistd.h>
-#include "log.h"
-#include "pipeline_handler.h"
-#include "utils.h"
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/span.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/pipeline_handler.h"
/**
* \file ipa_module.h
@@ -42,27 +43,27 @@ LOG_DEFINE_CATEGORY(IPAModule)
namespace {
template<typename T>
-typename std::remove_extent_t<T> *elfPointer(void *map, off_t offset,
- size_t fileSize, size_t objSize)
+typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
+ off_t offset, size_t objSize)
{
size_t size = offset + objSize;
- if (size > fileSize || size < objSize)
+ if (size > elf.size() || size < objSize)
return nullptr;
- return reinterpret_cast<typename std::remove_extent_t<T> *>
- (static_cast<char *>(map) + offset);
+ return reinterpret_cast<typename std::remove_extent_t<T> *>(
+ reinterpret_cast<const char *>(elf.data()) + offset);
}
template<typename T>
-typename std::remove_extent_t<T> *elfPointer(void *map, off_t offset,
- size_t fileSize)
+typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf,
+ off_t offset)
{
- return elfPointer<T>(map, offset, fileSize, sizeof(T));
+ return elfPointer<T>(elf, offset, sizeof(T));
}
-int elfVerifyIdent(void *map, size_t soSize)
+int elfVerifyIdent(Span<const uint8_t> elf)
{
- char *e_ident = elfPointer<char[EI_NIDENT]>(map, 0, soSize);
+ const char *e_ident = elfPointer<const char[EI_NIDENT]>(elf, 0);
if (!e_ident)
return -ENOEXEC;
@@ -86,40 +87,47 @@ int elfVerifyIdent(void *map, size_t soSize)
return 0;
}
+const ElfW(Shdr) *elfSection(Span<const uint8_t> elf, const ElfW(Ehdr) *eHdr,
+ ElfW(Half) idx)
+{
+ if (idx >= eHdr->e_shnum)
+ return nullptr;
+
+ off_t offset = eHdr->e_shoff + idx *
+ static_cast<uint32_t>(eHdr->e_shentsize);
+ return elfPointer<const ElfW(Shdr)>(elf, offset);
+}
+
/**
* \brief Retrieve address and size of a symbol from an mmap'ed ELF file
- * \param[in] map Address of mmap'ed ELF file
- * \param[in] soSize Size of mmap'ed ELF file (in bytes)
+ * \param[in] elf Address and size of mmap'ed ELF file
* \param[in] symbol Symbol name
*
- * \return zero or error code, address or nullptr, size of symbol or zero,
- * respectively
+ * \return The memory region storing the symbol on success, or an empty span
+ * otherwise
*/
-std::tuple<void *, size_t>
-elfLoadSymbol(void *map, size_t soSize, const char *symbol)
+Span<const uint8_t> elfLoadSymbol(Span<const uint8_t> elf, const char *symbol)
{
- ElfW(Ehdr) *eHdr = elfPointer<ElfW(Ehdr)>(map, 0, soSize);
+ const ElfW(Ehdr) *eHdr = elfPointer<const ElfW(Ehdr)>(elf, 0);
if (!eHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
- off_t offset = eHdr->e_shoff + eHdr->e_shentsize * eHdr->e_shstrndx;
- ElfW(Shdr) *sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ const ElfW(Shdr) *sHdr = elfSection(elf, eHdr, eHdr->e_shstrndx);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
off_t shnameoff = sHdr->sh_offset;
/* Locate .dynsym section header. */
- ElfW(Shdr) *dynsym = nullptr;
+ const ElfW(Shdr) *dynsym = nullptr;
for (unsigned int i = 0; i < eHdr->e_shnum; i++) {
- offset = eHdr->e_shoff + eHdr->e_shentsize * i;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, i);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
- offset = shnameoff + sHdr->sh_name;
- char *name = elfPointer<char[8]>(map, offset, soSize);
+ off_t offset = shnameoff + sHdr->sh_name;
+ const char *name = elfPointer<const char[8]>(elf, offset);
if (!name)
- return std::make_tuple(nullptr, 0);
+ return {};
if (sHdr->sh_type == SHT_DYNSYM && !strcmp(name, ".dynsym")) {
dynsym = sHdr;
@@ -129,29 +137,28 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
if (dynsym == nullptr) {
LOG(IPAModule, Error) << "ELF has no .dynsym section";
- return std::make_tuple(nullptr, 0);
+ return {};
}
- offset = eHdr->e_shoff + eHdr->e_shentsize * dynsym->sh_link;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, dynsym->sh_link);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
+ return {};
off_t dynsym_nameoff = sHdr->sh_offset;
/* Locate symbol in the .dynsym section. */
- ElfW(Sym) *targetSymbol = nullptr;
+ const ElfW(Sym) *targetSymbol = nullptr;
unsigned int dynsym_num = dynsym->sh_size / dynsym->sh_entsize;
for (unsigned int i = 0; i < dynsym_num; i++) {
- offset = dynsym->sh_offset + dynsym->sh_entsize * i;
- ElfW(Sym) *sym = elfPointer<ElfW(Sym)>(map, offset, soSize);
+ off_t offset = dynsym->sh_offset + dynsym->sh_entsize * i;
+ const ElfW(Sym) *sym = elfPointer<const ElfW(Sym)>(elf, offset);
if (!sym)
- return std::make_tuple(nullptr, 0);
+ return {};
offset = dynsym_nameoff + sym->st_name;
- char *name = elfPointer<char>(map, offset, soSize,
- strlen(symbol) + 1);
+ const char *name = elfPointer<const char>(elf, offset,
+ strlen(symbol) + 1);
if (!name)
- return std::make_tuple(nullptr, 0);
+ return {};
if (!strcmp(name, symbol) &&
sym->st_info & STB_GLOBAL) {
@@ -162,22 +169,20 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
if (targetSymbol == nullptr) {
LOG(IPAModule, Error) << "Symbol " << symbol << " not found";
- return std::make_tuple(nullptr, 0);
+ return {};
}
/* Locate and return data of symbol. */
- if (targetSymbol->st_shndx >= eHdr->e_shnum)
- return std::make_tuple(nullptr, 0);
- offset = eHdr->e_shoff + targetSymbol->st_shndx * eHdr->e_shentsize;
- sHdr = elfPointer<ElfW(Shdr)>(map, offset, soSize);
+ sHdr = elfSection(elf, eHdr, targetSymbol->st_shndx);
if (!sHdr)
- return std::make_tuple(nullptr, 0);
- offset = sHdr->sh_offset + (targetSymbol->st_value - sHdr->sh_addr);
- char *data = elfPointer<char>(map, offset, soSize, targetSymbol->st_size);
+ return {};
+ off_t offset = sHdr->sh_offset + (targetSymbol->st_value - sHdr->sh_addr);
+ const uint8_t *data = elfPointer<const uint8_t>(elf, offset,
+ targetSymbol->st_size);
if (!data)
- return std::make_tuple(nullptr, 0);
+ return {};
- return std::make_tuple(data, targetSymbol->st_size);
+ return { data, targetSymbol->st_size };
}
} /* namespace */
@@ -218,26 +223,10 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
* \var IPAModuleInfo::name
* \brief The name of the IPA module
*
- * \var IPAModuleInfo::license
- * \brief License of the IPA module
- *
- * This license is used to determine whether to force isolation of the IPA in
- * a separate process. If the license is "Proprietary", then the IPA will
- * be isolated. If the license is open-source, then the IPA will be allowed to
- * run without isolation if the user enables it. The license should be an
- * SPDX license string. The following licenses are currently available to
- * allow the IPA to run unisolated:
- *
- * - GPL-2.0-only
- * - GPL-2.0-or-later
- * - GPL-3.0-only
- * - GPL-3.0-or-later
- * - LGPL-2.1-only
- * - LGPL-2.1-or-later
- * - LGPL-3.0-only
- * - LGPL-3.0-or-later
- *
- * Any other license will cause the IPA to be run isolated.
+ * The name may be used to build file system paths to IPA-specific resources.
+ * It shall only contain printable characters, and may not contain '*', '?' or
+ * '\'. For IPA modules included in libcamera, it shall match the directory of
+ * the IPA module in the source tree.
*
* \todo Allow user to choose to isolate open source IPAs
*/
@@ -262,7 +251,7 @@ elfLoadSymbol(void *map, size_t soSize, const char *symbol)
* The IPA module shared object file must be of the same endianness and
* bitness as libcamera.
*
- * The caller shall call the isValid() method after constructing an
+ * The caller shall call the isValid() function after constructing an
* IPAModule instance to verify the validity of the IPAModule.
*/
IPAModule::IPAModule(const std::string &libPath)
@@ -283,55 +272,67 @@ IPAModule::~IPAModule()
int IPAModule::loadIPAModuleInfo()
{
- int fd = open(libPath_.c_str(), O_RDONLY);
- if (fd < 0) {
- int ret = -errno;
+ File file{ libPath_ };
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
LOG(IPAModule, Error) << "Failed to open IPA library: "
- << strerror(-ret);
- return ret;
+ << strerror(-file.error());
+ return file.error();
}
- void *data = nullptr;
- size_t dataSize;
- void *map;
- size_t soSize;
- struct stat st;
- int ret = fstat(fd, &st);
- if (ret < 0)
- goto close;
- soSize = st.st_size;
- map = mmap(NULL, soSize, PROT_READ, MAP_PRIVATE, fd, 0);
- if (map == MAP_FAILED) {
- ret = -errno;
- goto close;
+ Span<const uint8_t> data = file.map();
+ int ret = elfVerifyIdent(data);
+ if (ret) {
+ LOG(IPAModule, Error) << "IPA module is not an ELF file";
+ return ret;
}
- ret = elfVerifyIdent(map, soSize);
- if (ret)
- goto unmap;
-
- std::tie(data, dataSize) = elfLoadSymbol(map, soSize, "ipaModuleInfo");
-
- if (data && dataSize == sizeof(info_))
- memcpy(&info_, data, dataSize);
+ Span<const uint8_t> info = elfLoadSymbol(data, "ipaModuleInfo");
+ if (info.size() < sizeof(info_)) {
+ LOG(IPAModule, Error) << "IPA module has no valid info";
+ return -EINVAL;
+ }
- if (!data)
- goto unmap;
+ memcpy(&info_, info.data(), sizeof(info_));
if (info_.moduleAPIVersion != IPA_MODULE_API_VERSION) {
LOG(IPAModule, Error) << "IPA module API version mismatch";
- ret = -EINVAL;
+ return -EINVAL;
}
-unmap:
- munmap(map, soSize);
-close:
- if (ret || !data)
+ /*
+ * Validate the IPA module name.
+ *
+ * \todo Consider module naming restrictions to avoid escaping from a
+ * base directory. Forbidding ".." may be enough, but this may be best
+ * implemented in a different layer.
+ */
+ std::string ipaName = info_.name;
+ auto iter = std::find_if_not(ipaName.begin(), ipaName.end(),
+ [](unsigned char c) -> bool {
+ return isprint(c) && c != '?' &&
+ c != '*' && c != '\\';
+ });
+ if (iter != ipaName.end()) {
LOG(IPAModule, Error)
- << "Error loading IPA module info for " << libPath_;
+ << "Invalid IPA module name '" << ipaName << "'";
+ return -EINVAL;
+ }
- close(fd);
- return ret;
+ /* Load the signature. Failures are not fatal. */
+ File sign{ libPath_ + ".sign" };
+ if (!sign.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(IPAModule, Debug)
+ << "IPA module " << libPath_ << " is not signed";
+ return 0;
+ }
+
+ data = sign.map(0, -1, File::MapFlag::Private);
+ signature_.resize(data.size());
+ memcpy(signature_.data(), data.data(), data.size());
+
+ LOG(IPAModule, Debug) << "IPA module " << libPath_ << " is signed";
+
+ return 0;
}
/**
@@ -363,6 +364,21 @@ const struct IPAModuleInfo &IPAModule::info() const
}
/**
+ * \brief Retrieve the IPA module signature
+ *
+ * The IPA module signature is stored alongside the IPA module in a file with a
+ * '.sign' suffix, and is loaded when the IPAModule instance is created. This
+ * function returns the signature without verifying it. If the signature is
+ * missing, the returned vector will be empty.
+ *
+ * \return The IPA module signature
+ */
+const std::vector<uint8_t> IPAModule::signature() const
+{
+ return signature_;
+}
+
+/**
* \brief Retrieve the IPA module path
*
* The IPA module path is the file name and path of the IPA module shared
@@ -378,13 +394,13 @@ const std::string &IPAModule::path() const
/**
* \brief Load the IPA implementation factory from the shared object
*
- * The IPA module shared object implements an ipa_context object to be used
- * by pipeline handlers. This method loads the factory function from the
- * shared object. Later, createContext() can be called to instantiate the
- * ipa_context.
+ * The IPA module shared object implements an IPAInterface object to be used
+ * by pipeline handlers. This function loads the factory function from the
+ * shared object. Later, createInterface() can be called to instantiate the
+ * IPAInterface.
*
- * This method only needs to be called successfully once, after which
- * createContext() can be called as many times as ipa_context instances are
+ * This function only needs to be called successfully once, after which
+ * createInterface() can be called as many times as IPAInterface instances are
* needed.
*
* Calling this function on an invalid module (as returned by isValid()) is
@@ -426,20 +442,18 @@ bool IPAModule::load()
}
/**
- * \brief Instantiate an IPA context
+ * \brief Instantiate an IPA interface
*
- * After loading the IPA module with load(), this method creates an instance of
- * the IPA module context. Ownership of the context is passed to the caller, and
- * the context shall be destroyed by calling the \ref ipa_context_ops::destroy
- * "ipa_context::ops::destroy()" function.
+ * After loading the IPA module with load(), this function creates an instance
+ * of the IPA module interface.
*
* Calling this function on a module that has not yet been loaded, or an
* invalid module (as returned by load() and isValid(), respectively) is
* an error.
*
- * \return The IPA context on success, or nullptr on error
+ * \return The IPA interface on success, or nullptr on error
*/
-struct ipa_context *IPAModule::createContext()
+IPAInterface *IPAModule::createInterface()
{
if (!valid_ || !loaded_)
return nullptr;
@@ -448,12 +462,12 @@ struct ipa_context *IPAModule::createContext()
}
/**
- * \brief Verify if the IPA module maches a given pipeline handler
+ * \brief Verify if the IPA module matches a given pipeline handler
* \param[in] pipe Pipeline handler to match with
* \param[in] minVersion Minimum acceptable version of IPA module
* \param[in] maxVersion Maximum acceptable version of IPA module
*
- * This method checks if this IPA module matches the \a pipe pipeline handler,
+ * This function checks if this IPA module matches the \a pipe pipeline handler,
* and the input version range.
*
* \return True if the pipeline handler matches the IPA module, or false otherwise
@@ -466,29 +480,9 @@ bool IPAModule::match(PipelineHandler *pipe,
!strcmp(info_.pipelineName, pipe->name());
}
-/**
- * \brief Verify if the IPA module is open source
- *
- * \sa IPAModuleInfo::license
- */
-bool IPAModule::isOpenSource() const
+std::string IPAModule::logPrefix() const
{
- static const char *osLicenses[] = {
- "GPL-2.0-only",
- "GPL-2.0-or-later",
- "GPL-3.0-only",
- "GPL-3.0-or-later",
- "LGPL-2.1-only",
- "LGPL-2.1-or-later",
- "LGPL-3.0-only",
- "LGPL-3.0-or-later",
- };
-
- for (unsigned int i = 0; i < ARRAY_SIZE(osLicenses); i++)
- if (!strcmp(osLicenses[i], info_.license))
- return true;
-
- return false;
+ return utils::basename(libPath_.c_str());
}
} /* namespace libcamera */
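
For illustration, a minimal sketch of the construction sequence the documentation above prescribes; the module path is hypothetical:

	IPAModule module("/usr/lib/libcamera/ipa_mypipe.so");
	if (!module.isValid())
		return;

	/* info() is only meaningful on a valid module. */
	const struct IPAModuleInfo &info = module.info();

	/*
	 * signature() returns the raw contents of the optional ".sign" file;
	 * an empty vector means the module is unsigned and is treated by
	 * IPAManager::isSignatureValid() as requiring isolation.
	 */
	bool hasSignature = !module.signature().empty();
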
diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp
index 5fd88a4b..85004737 100644
--- a/src/libcamera/ipa_proxy.cpp
+++ b/src/libcamera/ipa_proxy.cpp
@@ -2,18 +2,19 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.cpp - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
-#include "ipa_proxy.h"
+#include "libcamera/internal/ipa_proxy.h"
-#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include <unistd.h>
-#include "log.h"
-#include "utils.h"
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <iostream>
+#include "libcamera/internal/ipa_module.h"
/**
* \file ipa_proxy.h
@@ -29,19 +30,27 @@ LOG_DEFINE_CATEGORY(IPAProxy)
* \brief IPA Proxy
*
* Isolate IPA into separate process.
- *
- * Every subclass of proxy shall be registered with libcamera using
- * the REGISTER_IPA_PROXY() macro.
+ */
+
+/**
+ * \enum IPAProxy::ProxyState
+ * \brief Identifies the available operational states of the proxy
+ *
+ * \var IPAProxy::ProxyStopped
+ * \brief The proxy is not active and only synchronous operations are permitted
+ * \var IPAProxy::ProxyStopping
+ * \brief No new tasks can be submitted to the proxy; however, existing events
+ * can be completed
+ * \var IPAProxy::ProxyRunning
+ * \brief The proxy is active and asynchronous tasks may be queued
*/
/**
* \brief Construct an IPAProxy instance
- *
- * IPAProxy instances shall be constructed through the IPAProxyFactory::create()
- * method implemented by the respective factories.
+ * \param[in] ipam The IPA module
*/
-IPAProxy::IPAProxy()
- : valid_(false)
+IPAProxy::IPAProxy(IPAModule *ipam)
+ : valid_(false), state_(ProxyStopped), ipam_(ipam)
{
}
@@ -60,12 +69,108 @@ IPAProxy::~IPAProxy()
*/
/**
+ * \brief Retrieve the absolute path to an IPA configuration file
+ * \param[in] name The configuration file name
+ * \param[in] fallbackName The name of a fallback configuration file
+ *
+ * This function locates the configuration file for an IPA and returns its
+ * absolute path. It searches the following directories, in order:
+ *
+ * - All directories specified in the colon-separated LIBCAMERA_IPA_CONFIG_PATH
+ *   environment variable; or
+ * - If libcamera is not installed, the src/ipa/ directory within the source
+ *   tree; otherwise
+ * - The system sysconf (etc/libcamera/ipa) and the data (share/libcamera/ipa/)
+ * directories.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * Within each of those directories, the function looks for a subdirectory
+ * named after the IPA module name, as reported in IPAModuleInfo::name, and for
+ * a file named \a name within that directory. The \a name is IPA-specific.
+ *
+ * If the file named \a name is not found and \a fallbackName is non-empty then
+ * the whole search is repeated for \a fallbackName.
+ *
+ * \return The full path to the IPA configuration file, or an empty string if
+ * no configuration file can be found
+ */
+std::string IPAProxy::configurationFile(const std::string &name,
+ const std::string &fallbackName) const
+{
+ struct stat statbuf;
+ int ret;
+
+ /*
+ * The IPA module name can be used as-is to build directory names as it
+ * has been validated when loading the module.
+ */
+ std::string ipaName = ipam_->info().name;
+
+ /* Check the environment variable first. */
+ const char *confPaths = utils::secure_getenv("LIBCAMERA_IPA_CONFIG_PATH");
+ if (confPaths) {
+ for (const auto &dir : utils::split(confPaths, ":")) {
+ if (dir.empty())
+ continue;
+
+ std::string confPath = dir + "/" + ipaName + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+ }
+ }
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding IPA module.
+ */
+ std::string ipaConfDir = root + "src/ipa/" + ipaName + "/data";
+
+ LOG(IPAProxy, Info)
+ << "libcamera is not installed. Loading IPA configuration from '"
+ << ipaConfDir << "'";
+
+ std::string confPath = ipaConfDir + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ } else {
+ /* Else look in the system locations. */
+ for (const auto &dir : utils::split(IPA_CONFIG_DIR, ":")) {
+ std::string confPath = dir + "/" + ipaName + "/" + name;
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+ }
+ }
+
+ if (fallbackName.empty()) {
+ LOG(IPAProxy, Error)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName << "'";
+ return std::string();
+ }
+
+ LOG(IPAProxy, Warning)
+ << "Configuration file '" << name
+ << "' not found for IPA module '" << ipaName
+ << "', falling back to '" << fallbackName << "'";
+ return configurationFile(fallbackName);
+}
+
+/**
* \brief Find a valid full path for a proxy worker for a given executable name
* \param[in] file File name of proxy worker executable
*
* A proxy worker's executable could be found in either the global installation
* directory, or in the paths specified by the environment variable
- * LIBCAMERA_IPA_PROXY_PATH. This method checks the global install directory
+ * LIBCAMERA_IPA_PROXY_PATH. This function checks the global install directory
* first, then LIBCAMERA_IPA_PROXY_PATH in order, and returns the full path to
* the proxy worker executable that is specified by file. The proxy worker
* executable shall have exec permission.
@@ -102,7 +207,7 @@ std::string IPAProxy::resolvePath(const std::string &file) const
std::string ipaProxyDir = root + "src/libcamera/proxy/worker";
LOG(IPAProxy, Info)
- << "libcamera is not installed. Loading proxy workers from'"
+ << "libcamera is not installed. Loading proxy workers from '"
<< ipaProxyDir << "'";
std::string proxyPath = ipaProxyDir + proxyFile;
@@ -134,88 +239,14 @@ std::string IPAProxy::resolvePath(const std::string &file) const
*/
/**
- * \class IPAProxyFactory
- * \brief Registration of IPAProxy classes and creation of instances
- *
- * To facilitate discovery and instantiation of IPAProxy classes, the
- * IPAProxyFactory class maintains a registry of IPAProxy classes. Each
- * IPAProxy subclass shall register itself using the REGISTER_IPA_PROXY()
- * macro, which will create a corresponding instance of a IPAProxyFactory
- * subclass and register it with the static list of factories.
- */
-
-/**
- * \brief Construct a IPAProxy factory
- * \param[in] name Name of the IPAProxy class
- *
- * Creating an instance of the factory registers is with the global list of
- * factories, accessible through the factories() function.
- *
- * The factory \a name is used for debugging and IPAProxy matching purposes
- * and shall be unique.
- */
-IPAProxyFactory::IPAProxyFactory(const char *name)
- : name_(name)
-{
- registerType(this);
-}
-
-/**
- * \fn IPAProxyFactory::create()
- * \brief Create an instance of the IPAProxy corresponding to the factory
- * \param[in] ipam The IPA module
- *
- * This virtual function is implemented by the REGISTER_IPA_PROXY() macro.
- * It creates a IPAProxy instance that isolates an IPA interface designated
- * by the IPA module \a ipam.
- *
- * \return A pointer to a newly constructed instance of the IPAProxy subclass
- * corresponding to the factory
- */
-
-/**
- * \fn IPAProxyFactory::name()
- * \brief Retrieve the factory name
- * \return The factory name
- */
-
-/**
- * \brief Add a IPAProxy class to the registry
- * \param[in] factory Factory to use to construct the IPAProxy
+ * \var IPAProxy::state_
+ * \brief Current state of the IPAProxy
*
- * The caller is responsible to guarantee the uniqueness of the IPAProxy name.
- */
-void IPAProxyFactory::registerType(IPAProxyFactory *factory)
-{
- std::vector<IPAProxyFactory *> &factories = IPAProxyFactory::factories();
-
- factories.push_back(factory);
-
- LOG(IPAProxy, Debug)
- << "Registered proxy \"" << factory->name() << "\"";
-}
-
-/**
- * \brief Retrieve the list of all IPAProxy factories
- *
- * The static factories map is defined inside the function to ensure it gets
- * initialized on first use, without any dependency on link order.
- *
- * \return The list of pipeline handler factories
- */
-std::vector<IPAProxyFactory *> &IPAProxyFactory::factories()
-{
- static std::vector<IPAProxyFactory *> factories;
- return factories;
-}
-
-/**
- * \def REGISTER_IPA_PROXY
- * \brief Register a IPAProxy with the IPAProxy factory
- * \param[in] proxy Class name of IPAProxy derived class to register
+ * The IPAProxy can be Running, Stopped, or Stopping.
*
- * Register a proxy subclass with the factory and make it available to
- * isolate IPA modules.
+ * This state provides a means to ensure that asynchronous functions are only
+ * called while the proxy is running, and prevent new tasks being submitted
+ * while still enabling events to complete when the IPAProxy is stopping.
*/
} /* namespace libcamera */
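
For illustration, a minimal sketch of an IPA proxy subclass resolving its tuning file with the new configurationFile() helper; MyIPAProxy and the file names are hypothetical, and the search order is the one documented above:

	std::string MyIPAProxy::tuningFile(const std::string &sensor)
	{
		/* Fall back to a generic file when no sensor-specific one exists. */
		return configurationFile(sensor + ".yaml", "uncalibrated.yaml");
	}
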
diff --git a/src/libcamera/ipa_pub_key.cpp.in b/src/libcamera/ipa_pub_key.cpp.in
new file mode 100644
index 00000000..5d8c92c2
--- /dev/null
+++ b/src/libcamera/ipa_pub_key.cpp.in
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * IPA module signing public key
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include "libcamera/internal/ipa_manager.h"
+
+namespace libcamera {
+
+#if HAVE_IPA_PUBKEY
+const uint8_t IPAManager::publicKeyData_[] = {
+ ${ipa_key}
+};
+
+const PubKey IPAManager::pubKey_{ { IPAManager::publicKeyData_ } };
+#endif
+
+} /* namespace libcamera */
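
As a sketch of what the generated key enables: IPAManager::isSignatureValid() (above) maps the module file and checks it against this embedded key, and an external utility could do the same through the public accessors, assuming the static IPAManager::pubKey() accessor documented in ipa_manager.cpp and builds with HAVE_IPA_PUBKEY; the function below is hypothetical:

	#if HAVE_IPA_PUBKEY
	bool verifyExternally(IPAModule *module)
	{
		File file{ module->path() };
		if (!file.open(File::OpenModeFlag::ReadOnly))
			return false;

		/* Check the mapped module data against its ".sign" contents. */
		return IPAManager::pubKey().verify(file.map(), module->signature());
	}
	#endif
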
diff --git a/src/libcamera/ipc_pipe.cpp b/src/libcamera/ipc_pipe.cpp
new file mode 100644
index 00000000..548299d0
--- /dev/null
+++ b/src/libcamera/ipc_pipe.cpp
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm IPC module for IPA proxies
+ */
+
+#include "libcamera/internal/ipc_pipe.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file ipc_pipe.h
+ * \brief IPC mechanism for IPA isolation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(IPCPipe)
+
+/**
+ * \struct IPCMessage::Header
+ * \brief Container for an IPCMessage header
+ *
+ * Holds a command code for the IPC message, and a cookie.
+ */
+
+/**
+ * \var IPCMessage::Header::cmd
+ * \brief Type of IPCMessage
+ *
+ * Typically used to carry a command code for an RPC.
+ */
+
+/**
+ * \var IPCMessage::Header::cookie
+ * \brief Cookie to identify the message and a corresponding reply
+ *
+ * Populated and used by IPCPipe implementations for matching calls with
+ * replies.
+ */
+
+/**
+ * \class IPCMessage
+ * \brief IPC message to be passed through IPC message pipe
+ */
+
+/**
+ * \brief Construct an empty IPCMessage instance
+ */
+IPCMessage::IPCMessage()
+ : header_(Header{ 0, 0 })
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance with a given command code
+ * \param[in] cmd The command code
+ */
+IPCMessage::IPCMessage(uint32_t cmd)
+ : header_(Header{ cmd, 0 })
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance with a given header
+ * \param[in] header The header that the constructed IPCMessage will contain
+ */
+IPCMessage::IPCMessage(const Header &header)
+ : header_(header)
+{
+}
+
+/**
+ * \brief Construct an IPCMessage instance from an IPC payload
+ * \param[in] payload The IPCUnixSocket payload to construct from
+ *
+ * This essentially converts an IPCUnixSocket payload into an IPCMessage.
+ * The header is extracted from the payload into the IPCMessage's header field.
+ *
+ * If the IPCUnixSocket payload had any valid file descriptors, then they will
+ * all be invalidated.
+ */
+IPCMessage::IPCMessage(IPCUnixSocket::Payload &payload)
+{
+ memcpy(&header_, payload.data.data(), sizeof(header_));
+ data_ = std::vector<uint8_t>(payload.data.begin() + sizeof(header_),
+ payload.data.end());
+ for (int32_t &fd : payload.fds)
+ fds_.push_back(SharedFD(std::move(fd)));
+}
+
+/**
+ * \brief Create an IPCUnixSocket payload from the IPCMessage
+ *
+ * This essentially converts the IPCMessage into an IPCUnixSocket payload.
+ *
+ * \todo Resolve the layering violation (add other converters later?)
+ */
+IPCUnixSocket::Payload IPCMessage::payload() const
+{
+ IPCUnixSocket::Payload payload;
+
+ payload.data.resize(sizeof(Header) + data_.size());
+ payload.fds.reserve(fds_.size());
+
+ memcpy(payload.data.data(), &header_, sizeof(Header));
+
+ if (data_.size() > 0) {
+ /* \todo Make this work without copy */
+ memcpy(payload.data.data() + sizeof(Header),
+ data_.data(), data_.size());
+ }
+
+ for (const SharedFD &fd : fds_)
+ payload.fds.push_back(fd.get());
+
+ return payload;
+}
+
+/**
+ * \fn IPCMessage::header()
+ * \brief Returns a reference to the header
+ */
+
+/**
+ * \fn IPCMessage::data()
+ * \brief Returns a reference to the byte vector containing data
+ */
+
+/**
+ * \fn IPCMessage::fds()
+ * \brief Returns a reference to the vector containing file descriptors
+ */
+
+/**
+ * \fn IPCMessage::header() const
+ * \brief Returns a const reference to the header
+ */
+
+/**
+ * \fn IPCMessage::data() const
+ * \brief Returns a const reference to the byte vector containing data
+ */
+
+/**
+ * \fn IPCMessage::fds() const
+ * \brief Returns a const reference to the vector containing file descriptors
+ */
+
+/**
+ * \class IPCPipe
+ * \brief IPC message pipe for IPA isolation
+ *
+ * Virtual class to model an IPC message pipe for use by IPA proxies for IPA
+ * isolation. sendSync() and sendAsync() must be implemented, and the recvMessage
+ * signal must be emitted whenever new data is available.
+ */
+
+/**
+ * \brief Construct an IPCPipe instance
+ */
+IPCPipe::IPCPipe()
+ : connected_(false)
+{
+}
+
+IPCPipe::~IPCPipe()
+{
+}
+
+/**
+ * \fn IPCPipe::isConnected()
+ * \brief Check if the IPCPipe instance is connected
+ *
+ * An IPCPipe instance is connected if IPC is successfully set up.
+ *
+ * \return True if the IPCPipe is connected, false otherwise
+ */
+
+/**
+ * \fn IPCPipe::sendSync()
+ * \brief Send a message over IPC synchronously
+ * \param[in] in Data to send
+ * \param[in] out IPCMessage instance in which to receive data, if applicable
+ *
+ * This function will not return until a response is received. The event loop
+ * will still continue to execute, however.
+ *
+ * \return Zero on success, negative error code otherwise
+ *
+ * \todo Determine if the event loop should limit the types of messages it
+ * processes, to avoid reentrancy in the caller, and carefully document what
+ * the caller needs to implement to make this safe.
+ */
+
+/**
+ * \fn IPCPipe::sendAsync()
+ * \brief Send a message over IPC asynchronously
+ * \param[in] data Data to send
+ *
+ * This function will return immediately after sending the message.
+ *
+ * \return Zero on success, negative error code otherwise
+ */
+
+/**
+ * \var IPCPipe::recv
+ * \brief Signal to be emitted when a message is received over IPC
+ *
+ * When a message is received over IPC, this signal shall be emitted. Users
+ * must connect to this signal to receive messages.
+ */
+
+/**
+ * \var IPCPipe::connected_
+ * \brief Flag to indicate if the IPCPipe instance is connected
+ *
+ * An IPCPipe instance is connected if IPC is successfully set up.
+ *
+ * This flag can be read via IPCPipe::isConnected().
+ *
+ * Implementations of the IPCPipe class should set this flag upon successful
+ * connection.
+ */
+
+} /* namespace libcamera */
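
For illustration, a minimal sketch of the IPCMessage / Payload round trip the converters above implement; the command code and data bytes are arbitrary:

	IPCMessage msg(42);
	msg.data() = { 0xde, 0xad, 0xbe, 0xef };

	/* Serialize: the header is prepended to the data bytes. */
	IPCUnixSocket::Payload payload = msg.payload();

	/* Deserialize: the header and data are split apart again. */
	IPCMessage decoded(payload);
	/* decoded.header().cmd == 42 and decoded.data() holds the same bytes. */
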
diff --git a/src/libcamera/ipc_pipe_unixsocket.cpp b/src/libcamera/ipc_pipe_unixsocket.cpp
new file mode 100644
index 00000000..668ec73b
--- /dev/null
+++ b/src/libcamera/ipc_pipe_unixsocket.cpp
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image Processing Algorithm IPC module using unix socket
+ */
+
+#include "libcamera/internal/ipc_pipe_unixsocket.h"
+
+#include <vector>
+
+#include <libcamera/base/event_dispatcher.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/timer.h>
+
+#include "libcamera/internal/ipc_pipe.h"
+#include "libcamera/internal/ipc_unixsocket.h"
+#include "libcamera/internal/process.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPCPipe)
+
+IPCPipeUnixSocket::IPCPipeUnixSocket(const char *ipaModulePath,
+ const char *ipaProxyWorkerPath)
+ : IPCPipe()
+{
+ std::vector<int> fds;
+ std::vector<std::string> args;
+ args.push_back(ipaModulePath);
+
+ socket_ = std::make_unique<IPCUnixSocket>();
+ UniqueFD fd = socket_->create();
+ if (!fd.isValid()) {
+ LOG(IPCPipe, Error) << "Failed to create socket";
+ return;
+ }
+ socket_->readyRead.connect(this, &IPCPipeUnixSocket::readyRead);
+ args.push_back(std::to_string(fd.get()));
+ fds.push_back(fd.get());
+
+ proc_ = std::make_unique<Process>();
+ int ret = proc_->start(ipaProxyWorkerPath, args, fds);
+ if (ret) {
+ LOG(IPCPipe, Error)
+ << "Failed to start proxy worker process";
+ return;
+ }
+
+ connected_ = true;
+}
+
+IPCPipeUnixSocket::~IPCPipeUnixSocket()
+{
+}
+
+int IPCPipeUnixSocket::sendSync(const IPCMessage &in, IPCMessage *out)
+{
+ IPCUnixSocket::Payload response;
+
+ int ret = call(in.payload(), &response, in.header().cookie);
+ if (ret) {
+ LOG(IPCPipe, Error) << "Failed to call sync";
+ return ret;
+ }
+
+ if (out)
+ *out = IPCMessage(response);
+
+ return 0;
+}
+
+int IPCPipeUnixSocket::sendAsync(const IPCMessage &data)
+{
+ int ret = socket_->send(data.payload());
+ if (ret) {
+ LOG(IPCPipe, Error) << "Failed to call async";
+ return ret;
+ }
+
+ return 0;
+}
+
+void IPCPipeUnixSocket::readyRead()
+{
+ IPCUnixSocket::Payload payload;
+ int ret = socket_->receive(&payload);
+ if (ret) {
+		LOG(IPCPipe, Error) << "Receive message failed: " << ret;
+ return;
+ }
+
+ /* \todo Use span to avoid the double copy when callData is found. */
+ if (payload.data.size() < sizeof(IPCMessage::Header)) {
+ LOG(IPCPipe, Error) << "Not enough data received";
+ return;
+ }
+
+ IPCMessage ipcMessage(payload);
+
+ auto callData = callData_.find(ipcMessage.header().cookie);
+ if (callData != callData_.end()) {
+ *callData->second.response = std::move(payload);
+ callData->second.done = true;
+ return;
+ }
+
+ /* Received unexpected data, this means it's a call from the IPA. */
+ recv.emit(ipcMessage);
+}
+
+int IPCPipeUnixSocket::call(const IPCUnixSocket::Payload &message,
+ IPCUnixSocket::Payload *response, uint32_t cookie)
+{
+ Timer timeout;
+ int ret;
+
+ const auto result = callData_.insert({ cookie, { response, false } });
+ const auto &iter = result.first;
+
+ ret = socket_->send(message);
+ if (ret) {
+ callData_.erase(iter);
+ return ret;
+ }
+
+ /* \todo Make this less dangerous, see IPCPipe::sendSync() */
+ timeout.start(2000ms);
+ while (!iter->second.done) {
+ if (!timeout.isRunning()) {
+ LOG(IPCPipe, Error) << "Call timeout!";
+ callData_.erase(iter);
+ return -ETIMEDOUT;
+ }
+
+ Thread::current()->eventDispatcher()->processEvents();
+ }
+
+ callData_.erase(iter);
+
+ return 0;
+}
+
+} /* namespace libcamera */
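
For illustration, a minimal sketch of driving the pipe from a proxy; the module and worker paths, and the Handler type, are hypothetical. Replies to sendSync() are matched by cookie inside call() and consumed there, while unsolicited IPA-initiated messages are delivered through the recv signal:

	IPCPipeUnixSocket pipe("/path/to/ipa_mypipe.so",
			       "/path/to/mypipe_ipa_proxy_worker");
	if (!pipe.isConnected())
		return -ECONNREFUSED;

	/* IPA-initiated calls arrive through the recv signal. */
	pipe.recv.connect(&handler, &Handler::onIPACall);

	IPCMessage cmd(1);
	IPCMessage reply;
	int ret = pipe.sendSync(cmd, &reply);
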
diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp
index 6e5cab89..002053e3 100644
--- a/src/libcamera/ipc_unixsocket.cpp
+++ b/src/libcamera/ipc_unixsocket.cpp
@@ -2,17 +2,20 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.cpp - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
-#include "ipc_unixsocket.h"
+#include "libcamera/internal/ipc_unixsocket.h"
+#include <array>
#include <poll.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
+#include <vector>
-#include "log.h"
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
/**
* \file ipc_unixsocket.h
@@ -57,7 +60,7 @@ LOG_DEFINE_CATEGORY(IPCUnixSocket)
*
* Establishment of an IPC channel is asymmetrical. The side that initiates
* communication first instantiates a local side socket and creates the channel
- * with create(). The method returns a file descriptor for the remote side of
+ * with create(). The function returns a file descriptor for the remote side of
* the channel, which is passed to the remote process through an out-of-band
* communication method. The remote side then instantiates a socket, and binds
* it to the other side by passing the file descriptor to bind(). At that point
@@ -67,7 +70,7 @@ LOG_DEFINE_CATEGORY(IPCUnixSocket)
*/
IPCUnixSocket::IPCUnixSocket()
- : fd_(-1), headerReceived_(false), notifier_(nullptr)
+ : headerReceived_(false), notifier_(nullptr)
{
}
@@ -79,15 +82,15 @@ IPCUnixSocket::~IPCUnixSocket()
/**
* \brief Create an new IPC channel
*
- * This method creates a new IPC channel. The socket instance is bound to the
- * local side of the channel, and the method returns a file descriptor bound to
- * the remote side. The caller is responsible for passing the file descriptor to
- * the remote process, where it can be used with IPCUnixSocket::bind() to bind
- * the remote side socket.
+ * This function creates a new IPC channel. The socket instance is bound to the
+ * local side of the channel, and the function returns a file descriptor bound
+ * to the remote side. The caller is responsible for passing the file descriptor
+ * to the remote process, where it can be used with IPCUnixSocket::bind() to
+ * bind the remote side socket.
*
- * \return A file descriptor on success, negative error code on failure
+ * \return A file descriptor, which is valid on success and invalid otherwise
*/
-int IPCUnixSocket::create()
+UniqueFD IPCUnixSocket::create()
{
int sockets[2];
int ret;
@@ -97,33 +100,37 @@ int IPCUnixSocket::create()
ret = -errno;
LOG(IPCUnixSocket, Error)
<< "Failed to create socket pair: " << strerror(-ret);
- return ret;
+ return {};
}
- ret = bind(sockets[0]);
- if (ret)
- return ret;
+ std::array<UniqueFD, 2> socketFds{
+ UniqueFD(sockets[0]),
+ UniqueFD(sockets[1]),
+ };
- return sockets[1];
+ if (bind(std::move(socketFds[0])) < 0)
+ return {};
+
+ return std::move(socketFds[1]);
}
/**
* \brief Bind to an existing IPC channel
* \param[in] fd File descriptor
*
- * This method binds the socket instance to an existing IPC channel identified
+ * This function binds the socket instance to an existing IPC channel identified
* by the file descriptor \a fd. The file descriptor is obtained from the
- * IPCUnixSocket::create() method.
+ * IPCUnixSocket::create() function.
*
* \return 0 on success or a negative error code otherwise
*/
-int IPCUnixSocket::bind(int fd)
+int IPCUnixSocket::bind(UniqueFD fd)
{
if (isBound())
return -EINVAL;
- fd_ = fd;
- notifier_ = new EventNotifier(fd_, EventNotifier::Read);
+ fd_ = std::move(fd);
+ notifier_ = new EventNotifier(fd_.get(), EventNotifier::Read);
notifier_->activated.connect(this, &IPCUnixSocket::dataNotifier);
return 0;
@@ -142,9 +149,7 @@ void IPCUnixSocket::close()
delete notifier_;
notifier_ = nullptr;
- ::close(fd_);
-
- fd_ = -1;
+ fd_.reset();
headerReceived_ = false;
}
@@ -154,14 +159,14 @@ void IPCUnixSocket::close()
*/
bool IPCUnixSocket::isBound() const
{
- return fd_ != -1;
+ return fd_.isValid();
}
/**
* \brief Send a message payload
* \param[in] payload Message payload to send
*
- * This method queues the message payload for transmission to the other end of
+ * This function queues the message payload for transmission to the other end of
* the IPC channel. It returns immediately, before the message is delivered to
* the remote side.
*
@@ -181,7 +186,7 @@ int IPCUnixSocket::send(const Payload &payload)
if (!hdr.data && !hdr.fds)
return -EINVAL;
- ret = ::send(fd_, &hdr, sizeof(hdr), 0);
+ ret = ::send(fd_.get(), &hdr, sizeof(hdr), 0);
if (ret < 0) {
ret = -errno;
LOG(IPCUnixSocket, Error)
@@ -196,7 +201,7 @@ int IPCUnixSocket::send(const Payload &payload)
* \brief Receive a message payload
* \param[out] payload Payload where to write the received message
*
- * This method receives the message payload from the IPC channel and writes it
+ * This function receives the message payload from the IPC channel and writes it
* to the \a payload. If no message payload is available, it returns
* immediately with -EAGAIN. The \ref readyRead signal shall be used to receive
* notification of message availability.
@@ -243,10 +248,9 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
iov[0].iov_base = const_cast<void *>(buffer);
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -259,9 +263,10 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length,
msg.msg_control = cmsg;
msg.msg_controllen = cmsg->cmsg_len;
msg.msg_flags = 0;
- memcpy(CMSG_DATA(cmsg), fds, num * sizeof(uint32_t));
+ if (fds)
+ memcpy(CMSG_DATA(cmsg), fds, num * sizeof(uint32_t));
- if (sendmsg(fd_, &msg, 0) < 0) {
+ if (sendmsg(fd_.get(), &msg, 0) < 0) {
int ret = -errno;
LOG(IPCUnixSocket, Error)
<< "Failed to sendmsg: " << strerror(-ret);
@@ -278,10 +283,9 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
iov[0].iov_base = buffer;
iov[0].iov_len = length;
- char buf[CMSG_SPACE(num * sizeof(uint32_t))];
- memset(buf, 0, sizeof(buf));
+ std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t)));
- struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data());
cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -295,7 +299,7 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
msg.msg_controllen = cmsg->cmsg_len;
msg.msg_flags = 0;
- if (recvmsg(fd_, &msg, 0) < 0) {
+ if (recvmsg(fd_.get(), &msg, 0) < 0) {
int ret = -errno;
if (ret != -EAGAIN)
LOG(IPCUnixSocket, Error)
@@ -303,18 +307,19 @@ int IPCUnixSocket::recvData(void *buffer, size_t length,
return ret;
}
- memcpy(fds, CMSG_DATA(cmsg), num * sizeof(uint32_t));
+ if (fds)
+ memcpy(fds, CMSG_DATA(cmsg), num * sizeof(uint32_t));
return 0;
}
-void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
+void IPCUnixSocket::dataNotifier()
{
int ret;
if (!headerReceived_) {
/* Receive the header. */
- ret = ::recv(fd_, &header_, sizeof(header_), 0);
+ ret = ::recv(fd_.get(), &header_, sizeof(header_), 0);
if (ret < 0) {
ret = -errno;
LOG(IPCUnixSocket, Error)
@@ -328,9 +333,9 @@ void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
/*
* If the payload has arrived, disable the notifier and emit the
* readyRead signal. The notifier will be reenabled by the receive()
- * method.
+ * function.
*/
- struct pollfd fds = { fd_, POLLIN, 0 };
+ struct pollfd fds = { fd_.get(), POLLIN, 0 };
ret = poll(&fds, 1, 0);
if (ret < 0)
return;
@@ -339,7 +344,7 @@ void IPCUnixSocket::dataNotifier(EventNotifier *notifier)
return;
notifier_->setEnabled(false);
- readyRead.emit(this);
+ readyRead.emit();
}
} /* namespace libcamera */
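
For illustration, a sketch of the asymmetric channel setup described above, updated for the UniqueFD API. In practice the two ends live in separate processes and the descriptor is passed out-of-band (as ipc_pipe_unixsocket.cpp does via the worker's command line); both ends appear in one scope here only for brevity:

	IPCUnixSocket local;
	UniqueFD remoteFd = local.create();
	if (!remoteFd.isValid())
		return;

	IPCUnixSocket remote;
	remote.bind(std::move(remoteFd)); /* bind() takes ownership of the fd */

	IPCUnixSocket::Payload payload;
	payload.data = { 'p', 'i', 'n', 'g' };
	local.send(payload); /* remote's readyRead signal will be emitted */
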
diff --git a/src/libcamera/log.cpp b/src/libcamera/log.cpp
deleted file mode 100644
index fd1b5c39..00000000
--- a/src/libcamera/log.cpp
+++ /dev/null
@@ -1,1051 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2018, Google Inc.
- *
- * log.cpp - Logging infrastructure
- */
-
-#include "log.h"
-
-#if HAVE_BACKTRACE
-#include <execinfo.h>
-#endif
-#include <fstream>
-#include <iostream>
-#include <list>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <syslog.h>
-#include <time.h>
-#include <unordered_set>
-
-#include <libcamera/logging.h>
-
-#include "thread.h"
-#include "utils.h"
-
-/**
- * \file log.h
- * \brief Logging infrastructure
- *
- * libcamera includes a logging infrastructure used through the library that
- * allows inspection of internal operation in a user-configurable way. The log
- * messages are grouped in categories that represent areas of libcamera, and
- * output of messages for each category can be controlled by independent log
- * levels.
- *
- * The levels are configurable through the LIBCAMERA_LOG_LEVELS environment
- * variable that contains a comma-separated list of 'category:level' pairs.
- *
- * The category names are strings and can include a wildcard ('*') character at
- * the end to match multiple categories.
- *
- * The level are either numeric values, or strings containing the log level
- * name. The available log levels are DEBUG, INFO, WARN, ERROR and FATAL. Log
- * message with a level higher than or equal to the configured log level for
- * their category are output to the log, while other messages are silently
- * discarded.
- *
- * By default log messages are output to stderr. They can be redirected to a log
- * file by setting the LIBCAMERA_LOG_FILE environment variable to the name of
- * the file. The file must be writable and is truncated if it exists. If any
- * error occurs when opening the file, the file is ignored and the log is output
- * to stderr.
- */
-
-/**
- * \file logging.h
- * \brief Logging management
- *
- * API to change the logging output destination and log levels programatically.
- */
-
-namespace libcamera {
-
-static int log_severity_to_syslog(LogSeverity severity)
-{
- switch (severity) {
- case LogDebug:
- return LOG_DEBUG;
- case LogInfo:
- return LOG_INFO;
- case LogWarning:
- return LOG_WARNING;
- case LogError:
- return LOG_ERR;
- case LogFatal:
- return LOG_ALERT;
- default:
- return LOG_NOTICE;
- }
-}
-
-static const char *log_severity_name(LogSeverity severity)
-{
- static const char *const names[] = {
- "DEBUG",
- " INFO",
- " WARN",
- "ERROR",
- "FATAL",
- };
-
- if (static_cast<unsigned int>(severity) < ARRAY_SIZE(names))
- return names[severity];
- else
- return "UNKWN";
-}
-
-/**
- * \brief Log output
- *
- * The LogOutput class models a log output destination
- */
-class LogOutput
-{
-public:
- LogOutput(const char *path);
- LogOutput(std::ostream *stream);
- LogOutput();
- ~LogOutput();
-
- bool isValid() const;
- void write(const LogMessage &msg);
- void write(const std::string &msg);
-
-private:
- void writeSyslog(LogSeverity severity, const std::string &msg);
- void writeStream(const std::string &msg);
-
- std::ostream *stream_;
- LoggingTarget target_;
-};
-
-/**
- * \brief Construct a log output based on a file
- * \param[in] path Full path to log file
- */
-LogOutput::LogOutput(const char *path)
- : target_(LoggingTargetFile)
-{
- stream_ = new std::ofstream(path);
-}
-
-/**
- * \brief Construct a log output based on a stream
- * \param[in] stream Stream to send log output to
- */
-LogOutput::LogOutput(std::ostream *stream)
- : stream_(stream), target_(LoggingTargetStream)
-{
-}
-
-/**
- * \brief Construct a log output to syslog
- */
-LogOutput::LogOutput()
- : stream_(nullptr), target_(LoggingTargetSyslog)
-{
- openlog("libcamera", LOG_PID, 0);
-}
-
-LogOutput::~LogOutput()
-{
- switch (target_) {
- case LoggingTargetFile:
- delete stream_;
- break;
- case LoggingTargetSyslog:
- closelog();
- break;
- default:
- break;
- }
-}
-
-/**
- * \brief Check if the log output is valid
- * \return True if the log output is valid
- */
-bool LogOutput::isValid() const
-{
- switch (target_) {
- case LoggingTargetFile:
- return stream_->good();
- case LoggingTargetStream:
- return stream_ != nullptr;
- default:
- return true;
- }
-}
-
-/**
- * \brief Write message to log output
- * \param[in] msg Message to write
- */
-void LogOutput::write(const LogMessage &msg)
-{
- std::string str;
-
- switch (target_) {
- case LoggingTargetSyslog:
- str = std::string(log_severity_name(msg.severity())) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
- writeSyslog(msg.severity(), str);
- break;
- case LoggingTargetStream:
- case LoggingTargetFile:
- str = "[" + utils::time_point_to_string(msg.timestamp()) + "] ["
- + std::to_string(Thread::currentId()) + "] "
- + log_severity_name(msg.severity()) + " "
- + msg.category().name() + " " + msg.fileInfo() + " "
- + msg.msg();
- writeStream(str);
- break;
- default:
- break;
- }
-}
-
-/**
- * \brief Write string to log output
- * \param[in] str String to write
- */
-void LogOutput::write(const std::string &str)
-{
- switch (target_) {
- case LoggingTargetSyslog:
- writeSyslog(LogDebug, str);
- break;
- case LoggingTargetStream:
- case LoggingTargetFile:
- writeStream(str);
- break;
- default:
- break;
- }
-}
-
-void LogOutput::writeSyslog(LogSeverity severity, const std::string &str)
-{
- syslog(log_severity_to_syslog(severity), "%s", str.c_str());
-}
-
-void LogOutput::writeStream(const std::string &str)
-{
- stream_->write(str.c_str(), str.size());
- stream_->flush();
-}
-
-/**
- * \brief Message logger
- *
- * The Logger class handles log configuration.
- */
-class Logger
-{
-public:
- static Logger *instance();
-
- void write(const LogMessage &msg);
- void backtrace();
-
- int logSetFile(const char *path);
- int logSetStream(std::ostream *stream);
- int logSetTarget(LoggingTarget target);
- void logSetLevel(const char *category, const char *level);
-
-private:
- Logger();
-
- void parseLogFile();
- void parseLogLevels();
- static LogSeverity parseLogLevel(const std::string &level);
-
- friend LogCategory;
- void registerCategory(LogCategory *category);
- void unregisterCategory(LogCategory *category);
-
- std::unordered_set<LogCategory *> categories_;
- std::list<std::pair<std::string, LogSeverity>> levels_;
-
- std::shared_ptr<LogOutput> output_;
-};
-
-/**
- * \enum LoggingTarget
- * \brief Log destination type
- * \var LoggingTargetNone
- * \brief No logging destination
- * \sa Logger::logSetTarget
- * \var LoggingTargetSyslog
- * \brief Log to syslog
- * \sa Logger::logSetTarget
- * \var LoggingTargetFile
- * \brief Log to file
- * \sa Logger::logSetFile
- * \var LoggingTargetStream
- * \brief Log to stream
- * \sa Logger::logSetStream
- */
-
-/**
- * \brief Direct logging to a file
- * \param[in] path Full path to the log file
- *
- * This function directs the log output to the file identified by \a path. The
- * previous log target, if any, is closed, and all new log messages will be
- * written to the new log file.
- *
- * If the function returns an error, the log target is not changed.
- *
- * \return Zero on success, or a negative error code otherwise
- */
-int logSetFile(const char *path)
-{
- return Logger::instance()->logSetFile(path);
-}
-
-/**
- * \brief Direct logging to a stream
- * \param[in] stream Stream to send log output to
- *
- * This function directs the log output to \a stream. The previous log target,
- * if any, is closed, and all new log messages will be written to the new log
- * stream.
- *
- * If the function returns an error, the log file is not changed
- *
- * \return Zero on success, or a negative error code otherwise.
- */
-int logSetStream(std::ostream *stream)
-{
- return Logger::instance()->logSetStream(stream);
-}
-
-/**
- * \brief Set the logging target
- * \param[in] target Logging destination
- *
- * This function sets the logging output to the target specified by \a target.
- * The allowed values of \a target are LoggingTargetNone and
- * LoggingTargetSyslog. LoggingTargetNone will send the log output to nowhere,
- * and LoggingTargetSyslog will send the log output to syslog. The previous
- * log target, if any, is closed, and all new log messages will be written to
- * the new log destination.
- *
- * LoggingTargetFile and LoggingTargetStream are not valid values for \a target.
- * Use logSetFile() and logSetStream() instead, respectively.
- *
- * If the function returns an error, the log file is not changed.
- *
- * \return Zero on success, or a negative error code otherwise.
- */
-int logSetTarget(LoggingTarget target)
-{
- return Logger::instance()->logSetTarget(target);
-}
-
-/**
- * \brief Set the log level
- * \param[in] category Logging category
- * \param[in] level Log level
- *
- * This function sets the log level of \a category to \a level.
- * \a level shall be one of the following strings:
- * - "DEBUG"
- * - "INFO"
- * - "WARN"
- * - "ERROR"
- * - "FATAL"
- *
- * "*" is not a valid \a category for this function.
- */
-void logSetLevel(const char *category, const char *level)
-{
- Logger::instance()->logSetLevel(category, level);
-}
-
-/**
- * \brief Retrieve the logger instance
- *
- * The Logger is a singleton and can't be constructed manually. This function
- * shall instead be used to retrieve the single global instance of the logger.
- *
- * \return The logger instance
- */
-Logger *Logger::instance()
-{
- static Logger instance;
- return &instance;
-}
-
-/**
- * \brief Write a message to the configured logger output
- * \param[in] msg The message object
- */
-void Logger::write(const LogMessage &msg)
-{
- std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
- if (!output)
- return;
-
- output->write(msg);
-}
-
-/**
- * \brief Write a backtrace to the log
- */
-void Logger::backtrace()
-{
-#if HAVE_BACKTRACE
- std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
- if (!output)
- return;
-
- void *buffer[32];
- int num_entries = ::backtrace(buffer, ARRAY_SIZE(buffer));
- char **strings = backtrace_symbols(buffer, num_entries);
- if (!strings)
- return;
-
- std::ostringstream msg;
- msg << "Backtrace:" << std::endl;
-
- /*
- * Skip the first two entries that correspond to this method and
- * ~LogMessage().
- */
- for (int i = 2; i < num_entries; ++i)
- msg << strings[i] << std::endl;
-
- output->write(msg.str());
-
- free(strings);
-#endif
-}
-
-/**
- * \brief Set the log file
- * \param[in] path Full path to the log file
- *
- * \sa libcamera::logSetFile()
- *
- * \return Zero on success, or a negative error code otherwise.
- */
-int Logger::logSetFile(const char *path)
-{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(path);
- if (!output->isValid())
- return -EINVAL;
-
- std::atomic_store(&output_, output);
- return 0;
-}
-
-/**
- * \brief Set the log stream
- * \param[in] stream Stream to send log output to
- *
- * \sa libcamera::logSetStream()
- *
- * \return Zero on success, or a negative error code otherwise.
- */
-int Logger::logSetStream(std::ostream *stream)
-{
- std::shared_ptr<LogOutput> output = std::make_shared<LogOutput>(stream);
- std::atomic_store(&output_, output);
- return 0;
-}
-
-/**
- * \brief Set the log target
- * \param[in] target Log destination
- *
- * \sa libcamera::logSetTarget()
- *
- * \return Zero on success, or a negative error code otherwise.
- */
-int Logger::logSetTarget(enum LoggingTarget target)
-{
- std::shared_ptr<LogOutput> output;
-
- switch (target) {
- case LoggingTargetSyslog:
- output = std::make_shared<LogOutput>();
- std::atomic_store(&output_, output);
- break;
- case LoggingTargetNone:
- output = nullptr;
- std::atomic_store(&output_, std::shared_ptr<LogOutput>());
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * \brief Set the log level
- * \param[in] category Logging category
- * \param[in] level Log level
- *
- * \sa libcamera::logSetLevel()
- */
-void Logger::logSetLevel(const char *category, const char *level)
-{
- LogSeverity severity = parseLogLevel(level);
- if (severity == LogInvalid)
- return;
-
- for (LogCategory *c : categories_) {
- if (!strcmp(c->name(), category)) {
- c->setSeverity(severity);
- break;
- }
- }
-}
-
-/**
- * \brief Construct a logger
- */
-Logger::Logger()
-{
- parseLogFile();
- parseLogLevels();
-}
-
-/**
- * \brief Parse the log output file from the environment
- *
- * If the LIBCAMERA_LOG_FILE environment variable is set, open the file it
- * points to and redirect the logger output to it. If the environment variable
- * is set to "syslog", then the logger output will be directed to syslog. Errors
- * are silently ignored and don't affect the logger output, which defaults to
- * stderr.
- */
-void Logger::parseLogFile()
-{
- const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
- if (!file) {
- logSetStream(&std::cerr);
- return;
- }
-
- if (!strcmp(file, "syslog")) {
- logSetTarget(LoggingTargetSyslog);
- return;
- }
-
- logSetFile(file);
-}
-
-/**
- * \brief Parse the log levels from the environment
- *
- * The log levels are stored in the LIBCAMERA_LOG_LEVELS environment variable
- * as a list of "category:level" pairs, separated by commas (','). Parse the
- * variable and store the levels to configure all log categories.
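- *
- * For example, setting LIBCAMERA_LOG_LEVELS to "Camera:DEBUG,*:ERROR" (the
- * category name is illustrative) selects the DEBUG level for the Camera
- * category and the ERROR level for all other categories.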
- */
-void Logger::parseLogLevels()
-{
- const char *debug = utils::secure_getenv("LIBCAMERA_LOG_LEVELS");
- if (!debug)
- return;
-
- for (const char *pair = debug; *debug != '\0'; pair = debug) {
- const char *comma = strchrnul(debug, ',');
- size_t len = comma - pair;
-
- /* Skip over the comma. */
- debug = *comma == ',' ? comma + 1 : comma;
-
- /* Skip to the next pair if the pair is empty. */
- if (!len)
- continue;
-
- std::string category;
- std::string level;
-
- const char *colon = static_cast<const char *>(memchr(pair, ':', len));
- if (!colon) {
- /* 'x' is a shortcut for '*:x'. */
- category = "*";
- level = std::string(pair, len);
- } else {
- category = std::string(pair, colon - pair);
- level = std::string(colon + 1, comma - colon - 1);
- }
-
- /* Both the category and the level must be specified. */
- if (category.empty() || level.empty())
- continue;
-
- LogSeverity severity = parseLogLevel(level);
- if (severity == LogInvalid)
- continue;
-
- levels_.push_back({ category, severity });
- }
-}
-
-/**
- * \brief Parse a log level string into a LogSeverity
- * \param[in] level The log level string
- *
- * Log levels can be specified as an integer value in the range from LogDebug to
- * LogFatal, or as a string corresponding to the severity name in uppercase. Any
- * other value is invalid.
- *
- * \return The log severity, or LogInvalid if the string is invalid
- */
-LogSeverity Logger::parseLogLevel(const std::string &level)
-{
- static const char *const names[] = {
- "DEBUG",
- "INFO",
- "WARN",
- "ERROR",
- "FATAL",
- };
-
- int severity;
-
- if (std::isdigit(level[0])) {
- char *endptr;
- severity = strtoul(level.c_str(), &endptr, 10);
- if (*endptr != '\0' || severity > LogFatal)
- severity = LogInvalid;
- } else {
- severity = LogInvalid;
- for (unsigned int i = 0; i < ARRAY_SIZE(names); ++i) {
- if (names[i] == level) {
- severity = i;
- break;
- }
- }
- }
-
- return static_cast<LogSeverity>(severity);
-}
-
-/**
- * \brief Register a log category with the logger
- * \param[in] category The log category
- *
- * Log categories must have unique names. If a category with the same name
- * already exists, this function performs no operation.
- */
-void Logger::registerCategory(LogCategory *category)
-{
- categories_.insert(category);
-
- const std::string &name = category->name();
- for (const std::pair<std::string, LogSeverity> &level : levels_) {
- bool match = true;
-
- for (unsigned int i = 0; i < level.first.size(); ++i) {
- if (level.first[i] == '*')
- break;
-
- if (i >= name.size() ||
- name[i] != level.first[i]) {
- match = false;
- break;
- }
- }
-
- if (match) {
- category->setSeverity(level.second);
- break;
- }
- }
-}
-
-/**
- * \brief Unregister a log category from the logger
- * \param[in] category The log category
- *
- * If the \a category hasn't been registered with the logger, this function
- * performs no operation.
- */
-void Logger::unregisterCategory(LogCategory *category)
-{
- categories_.erase(category);
-}
-
-/**
- * \enum LogSeverity
- * Log message severity
- * \var LogDebug
- * Debug message
- * \var LogInfo
- * Informational message
- * \var LogWarning
- * Warning message, signals a potential issue
- * \var LogError
- * Error message, signals an unrecoverable issue
- * \var LogFatal
- * Fatal message, signals an unrecoverable issue and aborts execution
- */
-
-/**
- * \class LogCategory
- * \brief A category of log message
- *
- * The LogCategory class represents a category of log messages, related to an
- * area of the library. It groups all messages belonging to the same category,
- * and is used to control the log level per group.
- */
-
-/**
- * \brief Construct a log category
- * \param[in] name The category name
- */
-LogCategory::LogCategory(const char *name)
- : name_(name), severity_(LogSeverity::LogInfo)
-{
- Logger::instance()->registerCategory(this);
-}
-
-LogCategory::~LogCategory()
-{
- Logger::instance()->unregisterCategory(this);
-}
-
-/**
- * \fn LogCategory::name()
- * \brief Retrieve the log category name
- * \return The log category name
- */
-
-/**
- * \fn LogCategory::severity()
- * \brief Retrieve the severity of the log category
- * \sa setSeverity()
- * \return The severity of the log category
- */
-
-/**
- * \brief Set the severity of the log category
- * \param[in] severity The severity level
- *
- * Messages of severity higher than or equal to the severity of the log
- * category are printed; other messages are discarded.
- */
-void LogCategory::setSeverity(LogSeverity severity)
-{
- severity_ = severity;
-}
-
-/**
- * \brief Retrieve the default log category
- *
- * The default log category is named "default" and is used by the LOG() macro
- * when no log category is specified.
- *
- * \return A pointer to the default log category
- */
-const LogCategory &LogCategory::defaultCategory()
-{
- static const LogCategory category("default");
- return category;
-}
-
-/**
- * \class LogMessage
- * \brief Internal log message representation
- *
- * The LogMessage class models a single message in the log. It serves as a
- * helper to provide the std::ostream API for logging, and must never be used
- * directly. Use the LOG() macro instead to access the log infrastructure.
- */
-
-/**
- * \brief Construct a log message for the default category
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] severity The log message severity, controlling how the message
- * will be displayed
- *
- * Create a log message pertaining to line \a line of file \a fileName. The
- * \a severity argument sets the message severity to control whether it will be
- * output or dropped.
- */
-LogMessage::LogMessage(const char *fileName, unsigned int line,
- LogSeverity severity)
- : category_(LogCategory::defaultCategory()), severity_(severity)
-{
- init(fileName, line);
-}
-
-/**
- * \brief Construct a log message for a given category
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] category The log message category, controlling how the message
- * will be displayed
- * \param[in] severity The log message severity, controlling how the message
- * will be displayed
- *
- * Create a log message pertaining to line \a line of file \a fileName. The
- * \a severity argument sets the message severity to control whether it will be
- * output or dropped.
- */
-LogMessage::LogMessage(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity)
- : category_(category), severity_(severity)
-{
- init(fileName, line);
-}
-
-/**
- * \brief Move-construct a log message
- * \param[in] other The other message
- *
- * The move constructor is meant to support the _log() functions. Thanks to copy
- * elision it will likely never be called, but C++11 only permits copy elision
- * while C++17 enforces it. To avoid potential link errors depending on the
- * compiler type, version and optimization level, the move constructor is
- * defined even though it will likely never be called. It ensures
- * that the destructor of the \a other message will not output anything to the
- * log by setting the severity to LogInvalid.
- */
-LogMessage::LogMessage(LogMessage &&other)
- : msgStream_(std::move(other.msgStream_)), category_(other.category_),
- severity_(other.severity_)
-{
- other.severity_ = LogInvalid;
-}
-
-void LogMessage::init(const char *fileName, unsigned int line)
-{
- /* Log the timestamp, severity and file information. */
- timestamp_ = utils::clock::now();
-
- std::ostringstream ossFileInfo;
- ossFileInfo << utils::basename(fileName) << ":" << line;
- fileInfo_ = ossFileInfo.str();
-}
-
-LogMessage::~LogMessage()
-{
- /* Don't print anything if we have been moved to another LogMessage. */
- if (severity_ == LogInvalid)
- return;
-
- msgStream_ << std::endl;
-
- if (severity_ >= category_.severity())
- Logger::instance()->write(*this);
-
- if (severity_ == LogSeverity::LogFatal) {
- Logger::instance()->backtrace();
- std::abort();
- }
-}
-
-/**
- * \fn std::ostream& LogMessage::stream()
- *
- * Data is added to a LogMessage through the stream returned by this function.
- * The stream implements the std::ostream API and can be used for logging in
- * the same way as std::cout.
- *
- * \return A reference to the log message stream
- */
-
-/**
- * \fn LogMessage::timestamp()
- * \brief Retrieve the timestamp of the log message
- * \return The timestamp of the message
- */
-
-/**
- * \fn LogMessage::severity()
- * \brief Retrieve the severity of the log message
- * \return The severity of the message
- */
-
-/**
- * \fn LogMessage::category()
- * \brief Retrieve the category of the log message
- * \return The category of the message
- */
-
-/**
- * \fn LogMessage::fileInfo()
- * \brief Retrieve the file info of the log message
- * \return The file info of the message
- */
-
-/**
- * \fn LogMessage::msg()
- * \brief Retrieve the message text of the log message
- * \return The message text of the message, as a string
- */
-
-/**
- * \class Loggable
- * \brief Base class to support log message extensions
- *
- * The Loggable class allows classes to extend log messages without any change
- * to the way the LOG() macro is invoked. By inheriting from Loggable and
- * implementing the logPrefix() virtual method, a class can specify extra
- * information to be automatically added to messages logged from class member
- * methods.
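- *
- * A minimal sketch (the class name and prefix are illustrative):
- *
- * \code{.cpp}
- * class MyComponent : public Loggable
- * {
- * protected:
- * std::string logPrefix() const override { return "MyComponent"; }
- * };
- * \endcode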
- */
-
-Loggable::~Loggable()
-{
-}
-
-/**
- * \fn Loggable::logPrefix()
- * \brief Retrieve a string to be prefixed to the log message
- *
- * This method allows classes inheriting from the Loggable class to extend the
- * logger with an object-specific prefix output right before the log message
- * contents.
- *
- * \return A string to be prefixed to the log message
- */
-
-/**
- * \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] severity The log message severity
- *
- * This method is used as a backend by the LOG() macro to create a log message
- * for locations inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage Loggable::_log(const char *fileName, unsigned int line,
- LogSeverity severity) const
-{
- LogMessage msg(fileName, line, severity);
-
- msg.stream() << logPrefix() << ": ";
- return msg;
-}
-
-/**
- * \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] category The log message category
- * \param[in] severity The log message severity
- *
- * This method is used as a backend by the LOG() macro to create a log message
- * for locations inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage Loggable::_log(const char *fileName, unsigned int line,
- const LogCategory &category,
- LogSeverity severity) const
-{
- LogMessage msg(fileName, line, category, severity);
-
- msg.stream() << logPrefix() << ": ";
- return msg;
-}
-
-/**
- * \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] severity The log message severity
- *
- * This function is used as a backend by the LOG() macro to create a log
- * message for locations not inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage _log(const char *fileName, unsigned int line, LogSeverity severity)
-{
- return LogMessage(fileName, line, severity);
-}
-
-/**
- * \brief Create a temporary LogMessage object to log a message
- * \param[in] fileName The file name where the message is logged from
- * \param[in] line The line number where the message is logged from
- * \param[in] category The log message category
- * \param[in] severity The log message severity
- *
- * This function is used as a backend by the LOG() macro to create a log
- * message for locations not inheriting from the Loggable class.
- *
- * \return A log message
- */
-LogMessage _log(const char *fileName, unsigned int line,
- const LogCategory &category, LogSeverity severity)
-{
- return LogMessage(fileName, line, category, severity);
-}
-
-/**
- * \def LOG_DECLARE_CATEGORY(name)
- * \hideinitializer
- * \brief Declare a category of log messages
- *
- * This macro is used to declare a log category defined in another compilation
- * unit by the LOG_DEFINE_CATEGORY() macro.
- *
- * The LOG_DECLARE_CATEGORY() macro must be used in the libcamera namespace.
- *
- * \sa LogCategory
- */
-
-/**
- * \def LOG_DEFINE_CATEGORY(name)
- * \hideinitializer
- * \brief Define a category of log messages
- *
- * This macro is used to define a log category that can then be used with the
- * LOG() macro. Category names shall be unique; if a category is shared between
- * compilation units, it shall be defined in one compilation unit only and
- * declared with LOG_DECLARE_CATEGORY() in the other compilation units.
- *
- * The LOG_DEFINE_CATEGORY() macro must be used in the libcamera namespace.
- *
- * \sa LogCategory
- */
-
-/**
- * \def LOG(category, severity)
- * \hideinitializer
- * \brief Log a message
- * \param[in] category Category (optional)
- * \param[in] severity Severity
- *
- * Return an std::ostream reference to which a message can be logged using the
- * iostream API. The \a category, if specified, sets the message category. When
- * absent the default category is used. The \a severity controls whether the
- * message is printed or discarded, depending on the log level for the category.
- *
- * If the severity is set to Fatal, execution is aborted and the program
- * terminates immediately after printing the message.
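- *
- * Example usage, with a category assumed to have been defined by
- * LOG_DEFINE_CATEGORY(MediaDevice):
- *
- * \code{.cpp}
- * LOG(MediaDevice, Error) << "Failed to open device";
- * \endcode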
- */
-
-/**
- * \def ASSERT(condition)
- * \brief Abort program execution if assertion fails
- *
- * If \a condition is false, ASSERT() logs an error message with the Fatal log
- * level and aborts program execution.
- *
- * If the macro NDEBUG is defined before including log.h, ASSERT() generates no
- * code.
- *
- * Using conditions that have side effects with ASSERT() is not recommended, as
- * these effects would depend on whether NDEBUG is defined or not. Similarly,
- * ASSERT() should not be used to check for errors that can occur under normal
- * conditions as those checks would then be removed when compiling with NDEBUG.
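- *
- * A usage sketch:
- *
- * \code{.cpp}
- * ASSERT(buffer != nullptr);
- * \endcode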
- */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp
new file mode 100644
index 00000000..f54bbf21
--- /dev/null
+++ b/src/libcamera/mapped_framebuffer.cpp
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Mapped Framebuffer support
+ */
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <algorithm>
+#include <errno.h>
+#include <map>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file mapped_framebuffer.h
+ * \brief Frame buffer memory mapping support
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Buffer)
+
+/**
+ * \class MappedBuffer
+ * \brief Provide an interface to support managing memory mapped buffers
+ *
+ * The MappedBuffer interface provides access to a set of MappedPlanes which
+ * are available for access by the CPU.
+ *
+ * This class is not meant to be constructed directly, but instead derived
+ * classes should be used to implement the correct mapping of a source buffer.
+ *
+ * This allows treating CPU accessible memory through a generic interface
+ * regardless of whether it originates from a libcamera FrameBuffer or other
+ * source.
+ */
+
+/**
+ * \typedef MappedBuffer::Plane
+ * \brief A mapped region of memory accessible to the CPU
+ *
+ * The MappedBuffer::Plane uses the Span interface to describe the mapped memory
+ * region.
+ */
+
+/**
+ * \brief Construct an empty MappedBuffer
+ */
+MappedBuffer::MappedBuffer()
+ : error_(0)
+{
+}
+
+/**
+ * \brief Move constructor, construct the MappedBuffer with the contents of \a
+ * other using move semantics
+ * \param[in] other The other MappedBuffer
+ *
+ * Moving a MappedBuffer moves the mappings contained in the \a other to the new
+ * MappedBuffer and invalidates the \a other.
+ *
+ * No mappings are unmapped or destroyed in this process.
+ */
+MappedBuffer::MappedBuffer(MappedBuffer &&other)
+{
+ *this = std::move(other);
+}
+
+/**
+ * \brief Move assignment operator, replace the mappings with those of \a other
+ * \param[in] other The other MappedBuffer
+ *
+ * Moving a MappedBuffer moves the mappings contained in the \a other to the new
+ * MappedBuffer and invalidates the \a other.
+ *
+ * No mappings are unmapped or destroyed in this process.
+ */
+MappedBuffer &MappedBuffer::operator=(MappedBuffer &&other)
+{
+ error_ = other.error_;
+ planes_ = std::move(other.planes_);
+ maps_ = std::move(other.maps_);
+ other.error_ = -ENOENT;
+
+ return *this;
+}
+
+MappedBuffer::~MappedBuffer()
+{
+ for (Plane &map : maps_)
+ munmap(map.data(), map.size());
+}
+
+/**
+ * \fn MappedBuffer::isValid()
+ * \brief Check if the MappedBuffer instance is valid
+ * \return True if the MappedBuffer has valid mappings, false otherwise
+ */
+
+/**
+ * \fn MappedBuffer::error()
+ * \brief Retrieve the map error status
+ *
+ * This function retrieves the error status from the MappedBuffer.
+ * The error status is a negative number as defined by errno.h. If
+ * no error occurred, this function returns 0.
+ *
+ * \return The map error code
+ */
+
+/**
+ * \fn MappedBuffer::planes()
+ * \brief Retrieve the mapped planes
+ *
+ * This function retrieves the successfully mapped planes stored as a vector
+ * of Span<uint8_t> to provide access to the mapped memory.
+ *
+ * \return A vector of the mapped planes
+ */
+
+/**
+ * \var MappedBuffer::error_
+ * \brief Stores the error value if present
+ *
+ * MappedBuffer derived classes shall set this to a negative value as defined
+ * by errno.h if an error occurred during the mapping process.
+ */
+
+/**
+ * \var MappedBuffer::planes_
+ * \brief Stores the internal mapped planes
+ *
+ * MappedBuffer derived classes shall store the mapped planes in this vector,
+ * with each entry pointing to the start of the corresponding mapped plane.
+ */
+
+/**
+ * \var MappedBuffer::maps_
+ * \brief Stores the mapped buffer
+ *
+ * MappedBuffer derived classes shall store the mappings they create in this
+ * vector, which is iterated at destruction time to unmap any memory mappings
+ * that completed successfully.
+ */
+
+/**
+ * \class MappedFrameBuffer
+ * \brief Map a FrameBuffer using the MappedBuffer interface
+ */
+
+/**
+ * \enum MappedFrameBuffer::MapFlag
+ * \brief Specify the mapping mode for the FrameBuffer
+ * \var MappedFrameBuffer::Read
+ * \brief Create a read-only mapping
+ * \var MappedFrameBuffer::Write
+ * \brief Create a write-only mapping
+ * \var MappedFrameBuffer::ReadWrite
+ * \brief Create a mapping that can be both read and written
+ */
+
+/**
+ * \typedef MappedFrameBuffer::MapFlags
+ * \brief A bitwise combination of MappedFrameBuffer::MapFlag values
+ */
+
+/**
+ * \brief Map all planes of a FrameBuffer
+ * \param[in] buffer FrameBuffer to be mapped
+ * \param[in] flags Protection flags to apply to map
+ *
+ * Construct an object to map a frame buffer for CPU access. The mapping can be
+ * made as Read only, Write only or support Read and Write operations by setting
+ * the MapFlag flags accordingly.
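+ *
+ * A usage sketch (processFrame() is a placeholder for application code):
+ *
+ * \code{.cpp}
+ * MappedFrameBuffer mapped(buffer, MappedFrameBuffer::MapFlag::Read);
+ * if (mapped.isValid()) {
+ * Span<uint8_t> plane = mapped.planes()[0];
+ * processFrame(plane.data(), plane.size());
+ * }
+ * \endcode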
+ */
+MappedFrameBuffer::MappedFrameBuffer(const FrameBuffer *buffer, MapFlags flags)
+{
+ ASSERT(!buffer->planes().empty());
+ planes_.reserve(buffer->planes().size());
+
+ int mmapFlags = 0;
+
+ if (flags & MapFlag::Read)
+ mmapFlags |= PROT_READ;
+
+ if (flags & MapFlag::Write)
+ mmapFlags |= PROT_WRITE;
+
+ struct MappedBufferInfo {
+ uint8_t *address = nullptr;
+ size_t mapLength = 0;
+ size_t dmabufLength = 0;
+ };
+ std::map<int, MappedBufferInfo> mappedBuffers;
+
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ if (mappedBuffers.find(fd) == mappedBuffers.end()) {
+ const size_t length = lseek(fd, 0, SEEK_END);
+ mappedBuffers[fd] = MappedBufferInfo{ nullptr, 0, length };
+ }
+
+ const size_t length = mappedBuffers[fd].dmabufLength;
+
+ if (plane.offset > length ||
+ plane.offset + plane.length > length) {
+ LOG(Buffer, Fatal) << "plane is out of buffer: "
+ << "buffer length=" << length
+ << ", plane offset=" << plane.offset
+ << ", plane length=" << plane.length;
+ return;
+ }
+ size_t &mapLength = mappedBuffers[fd].mapLength;
+ mapLength = std::max(mapLength,
+ static_cast<size_t>(plane.offset + plane.length));
+ }
+
+ for (const FrameBuffer::Plane &plane : buffer->planes()) {
+ const int fd = plane.fd.get();
+ auto &info = mappedBuffers[fd];
+ if (!info.address) {
+ void *address = mmap(nullptr, info.mapLength, mmapFlags,
+ MAP_SHARED, fd, 0);
+ if (address == MAP_FAILED) {
+ error_ = -errno;
+ LOG(Buffer, Error) << "Failed to mmap plane: "
+ << strerror(-error_);
+ return;
+ }
+
+ info.address = static_cast<uint8_t *>(address);
+ maps_.emplace_back(info.address, info.mapLength);
+ }
+
+ planes_.emplace_back(info.address + plane.offset, plane.length);
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/matrix.cpp b/src/libcamera/matrix.cpp
new file mode 100644
index 00000000..4d95a19b
--- /dev/null
+++ b/src/libcamera/matrix.cpp
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
+ *
+ * Matrix and related operations
+ */
+
+#include "libcamera/internal/matrix.h"
+
+#include <libcamera/base/log.h>
+
+/**
+ * \file matrix.h
+ * \brief Matrix class
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Matrix)
+
+/**
+ * \class Matrix
+ * \brief Matrix class
+ * \tparam T Type of numerical values to be stored in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
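+ *
+ * A usage sketch (the values are illustrative):
+ *
+ * \code{.cpp}
+ * Matrix<float, 2, 2> m({ 1.0f, 2.0f,
+ * 3.0f, 4.0f });
+ * float trace = m[0][0] + m[1][1];
+ * \endcode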
+ */
+
+/**
+ * \fn Matrix::Matrix()
+ * \brief Construct a zero matrix
+ */
+
+/**
+ * \fn Matrix::Matrix(const std::array<T, Rows * Cols> &data)
+ * \brief Construct a matrix from supplied data
+ * \param[in] data Data from which to construct a matrix
+ *
+ * \a data is a one-dimensional vector and will be turned into a matrix in
+ * row-major order. The size of \a data must be equal to the product of the
+ * number of rows and columns of the matrix (Rows x Cols).
+ */
+
+/**
+ * \fn Matrix::identity()
+ * \brief Construct an identity matrix
+ */
+
+/**
+ * \fn Matrix::toString()
+ * \brief Assemble and return a string describing the matrix
+ * \return A string describing the matrix
+ */
+
+/**
+ * \fn Span<const T, Cols> Matrix::operator[](size_t i) const
+ * \brief Index to a row in the matrix
+ * \param[in] i Index of row to retrieve
+ *
+ * This operator[] returns a Span, which can then be indexed into again with
+ * another operator[], allowing a convenient m[i][j] to access elements of the
+ * matrix. Note that the lifetime of the Span returned by this first-level
+ * operator[] is bound to that of the Matrix itself, so it is not recommended
+ * to save the Span that is the result of this operator[].
+ *
+ * \return Row \a i from the matrix, as a Span
+ */
+
+/**
+ * \fn Matrix::operator[](size_t i)
+ * \copydoc Matrix::operator[](size_t i) const
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> &Matrix::operator*=(U d)
+ * \brief Multiply the matrix by a scalar in-place
+ * \tparam U Type of the numerical scalar value
+ * \param d The scalar multiplier
+ * \return Product of this matrix and scalar \a d
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
+ * \brief Multiply the matrix by a scalar
+ * \tparam T Type of the numerical scalar value
+ * \tparam U Type of numerical values in the matrix
+ * \tparam Rows Number of rows in the matrix
+ * \tparam Cols Number of columns in the matrix
+ * \param d The scalar multiplier
+ * \param m The matrix
+ * \return Product of scalar \a d and matrix \a m
+ */
+
+/**
+ * \fn Matrix::Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
+ * \copydoc operator*(T d, const Matrix<U, Rows, Cols> &m)
+ */
+
+/**
+ * \fn Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2)
+ * \brief Matrix multiplication
+ * \tparam T Type of numerical values in the matrices
+ * \tparam R1 Number of rows in the first matrix
+ * \tparam C1 Number of columns in the first matrix
+ * \tparam R2 Number of rows in the second matrix
+ * \tparam C2 Number of columns in the second matrix
+ * \param m1 Multiplicand matrix
+ * \param m2 Multiplier matrix
+ * \return Matrix product of matrices \a m1 and \a m2
+ */
+
+/**
+ * \fn Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
+ * \brief Matrix addition
+ * \tparam T Type of numerical values in the matrices
+ * \tparam Rows Number of rows in the matrices
+ * \tparam Cols Number of columns in the matrices
+ * \param m1 Summand matrix
+ * \param m2 Summand matrix
+ * \return Matrix sum of matrices \a m1 and \a m2
+ */
+
+#ifndef __DOXYGEN__
+/*
+ * The YAML data shall be a list of numerical values. Its size shall be equal
+ * to the product of the number of rows and columns of the matrix (Rows x
+ * Cols). The values shall be stored in row-major order.
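+ *
+ * An illustrative YAML fragment for a 3x3 matrix (the key name is
+ * hypothetical):
+ *
+ * ccm: [ 1.0, 0.0, 0.0,
+ * 0.0, 1.0, 0.0,
+ * 0.0, 0.0, 1.0 ]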
+ */
+bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
+{
+ if (!obj.isList())
+ return false;
+
+ if (obj.size() != size) {
+ LOG(Matrix, Error)
+ << "Wrong number of values in matrix: expected "
+ << size << ", got " << obj.size();
+ return false;
+ }
+
+ return true;
+}
+#endif /* __DOXYGEN__ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp
index 0d6b5efd..d71dad74 100644
--- a/src/libcamera/media_device.cpp
+++ b/src/libcamera/media_device.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.cpp - Media device handler
+ * Media device handler
*/
-#include "media_device.h"
+#include "libcamera/internal/media_device.h"
#include <errno.h>
#include <fcntl.h>
@@ -18,7 +18,7 @@
#include <linux/media.h>
-#include "log.h"
+#include <libcamera/base/log.h>
/**
* \file media_device.h
@@ -44,13 +44,13 @@ LOG_DEFINE_CATEGORY(MediaDevice)
* MediaEntity, MediaPad and MediaLink are created to model the media graph,
* and stored in a map indexed by object id.
*
- * The graph is valid once successfully populated, as reported by the valid()
+ * The graph is valid once successfully populated, as reported by the isValid()
* function. It can be queried to list all entities(), or entities can be
* looked up by name with getEntityByName(). The graph can be traversed from
* entity to entity through pads and links as exposed by the corresponding
* classes.
*
- * Media device can be claimed for exclusive use with acquire(), released with
+ * Media devices can be claimed for exclusive use with acquire(), released with
* release() and tested with busy(). This mechanism is aimed at pipeline
* managers to claim media devices they support during enumeration.
*/
@@ -63,15 +63,13 @@ LOG_DEFINE_CATEGORY(MediaDevice)
* populate() before the media graph can be queried.
*/
MediaDevice::MediaDevice(const std::string &deviceNode)
- : deviceNode_(deviceNode), fd_(-1), valid_(false), acquired_(false),
- lockOwner_(false)
+ : deviceNode_(deviceNode), valid_(false), acquired_(false)
{
}
MediaDevice::~MediaDevice()
{
- if (fd_ != -1)
- ::close(fd_);
+ fd_.reset();
clear();
}
@@ -134,7 +132,7 @@ void MediaDevice::release()
* they provide at all times, while still allowing an instance to lock a
* resource while it prepares to actively use a camera from the resource.
*
- * This method shall not be called from a pipeline handler implementation
+ * This function shall not be called from a pipeline handler implementation
* directly, as the base PipelineHandler implementation handles this on the
* behalf of the specified implementation.
*
@@ -143,25 +141,19 @@ void MediaDevice::release()
*/
bool MediaDevice::lock()
{
- if (fd_ == -1)
+ if (!fd_.isValid())
return false;
- /* Do not allow nested locking in the same libcamera instance. */
- if (lockOwner_)
+ if (lockf(fd_.get(), F_TLOCK, 0))
return false;
- if (lockf(fd_, F_TLOCK, 0))
- return false;
-
- lockOwner_ = true;
-
return true;
}
/**
* \brief Unlock the device and free it for use for libcamera instances
*
- * This method shall not be called from a pipeline handler implementation
+ * This function shall not be called from a pipeline handler implementation
* directly, as the base PipelineHandler implementation handles this on the
* behalf of the specified implementation.
*
@@ -169,15 +161,10 @@ bool MediaDevice::lock()
*/
void MediaDevice::unlock()
{
- if (fd_ == -1)
- return;
-
- if (!lockOwner_)
+ if (!fd_.isValid())
return;
- lockOwner_ = false;
-
- lockf(fd_, F_ULOCK, 0);
+ lockf(fd_.get(), F_ULOCK, 0);
}
/**
@@ -220,7 +207,7 @@ int MediaDevice::populate()
return ret;
struct media_device_info info = {};
- ret = ioctl(fd_, MEDIA_IOC_DEVICE_INFO, &info);
+ ret = ioctl(fd_.get(), MEDIA_IOC_DEVICE_INFO, &info);
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
@@ -231,6 +218,7 @@ int MediaDevice::populate()
driver_ = info.driver;
model_ = info.model;
version_ = info.media_version;
+ hwRevision_ = info.hw_revision;
/*
* Keep calling G_TOPOLOGY until the version number stays stable.
@@ -242,7 +230,7 @@ int MediaDevice::populate()
topology.ptr_links = reinterpret_cast<uintptr_t>(links);
topology.ptr_pads = reinterpret_cast<uintptr_t>(pads);
- ret = ioctl(fd_, MEDIA_IOC_G_TOPOLOGY, &topology);
+ ret = ioctl(fd_.get(), MEDIA_IOC_G_TOPOLOGY, &topology);
if (ret < 0) {
ret = -errno;
LOG(MediaDevice, Error)
@@ -291,7 +279,7 @@ done:
}
/**
- * \fn MediaDevice::valid()
+ * \fn MediaDevice::isValid()
* \brief Query whether the media graph has been populated and is valid
* \return true if the media graph is valid, false otherwise
*/
@@ -315,6 +303,24 @@ done:
*/
/**
+ * \fn MediaDevice::version()
+ * \brief Retrieve the media device API version
+ *
+ * The version is formatted with the KERNEL_VERSION() macro.
+ *
+ * \return The MediaDevice API version
+ */
+
+/**
+ * \fn MediaDevice::hwRevision()
+ * \brief Retrieve the media device hardware revision
+ *
+ * The hardware revision is in a driver-specific format.
+ *
+ * \return The MediaDevice hardware revision
+ */
+
+/**
* \fn MediaDevice::entities()
* \brief Retrieve the list of entities in the media graph
* \return The list of MediaEntities registered in the MediaDevice
@@ -346,8 +352,9 @@ MediaEntity *MediaDevice::getEntityByName(const std::string &name) const
* entity with name \a sourceName, to the pad at index \a sinkIdx of the
* sink entity with name \a sinkName, if any.
*
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -375,8 +382,9 @@ MediaLink *MediaDevice::link(const std::string &sourceName, unsigned int sourceI
* entity \a source, to the pad at index \a sinkIdx of the sink entity \a
* sink, if any.
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaPad *source, const MediaPad *sink) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaPad *source, const MediaPad *sink)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -398,8 +406,10 @@ MediaLink *MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx,
* \param[in] source The source pad
* \param[in] sink The sink pad
*
- * \sa MediaDevice::link(const std::string &sourceName, unsigned int sourceIdx, const std::string &sinkName, unsigned int sinkIdx) const
- * \sa MediaDevice::link(const MediaEntity *source, unsigned int sourceIdx, const MediaEntity *sink, unsigned int sinkIdx) const
+ * \sa link(const std::string &sourceName, unsigned int sourceIdx,
+ * const std::string &sinkName, unsigned int sinkIdx)
+ * \sa link(const MediaEntity *source, unsigned int sourceIdx,
+ * const MediaEntity *sink, unsigned int sinkIdx)
*
* \return The link that connects the two pads, or nullptr if no such a link
* exists
@@ -462,20 +472,19 @@ int MediaDevice::disableLinks()
*/
int MediaDevice::open()
{
- if (fd_ != -1) {
+ if (fd_.isValid()) {
LOG(MediaDevice, Error) << "MediaDevice already open";
return -EBUSY;
}
- int ret = ::open(deviceNode_.c_str(), O_RDWR);
- if (ret < 0) {
- ret = -errno;
+ fd_ = UniqueFD(::open(deviceNode_.c_str(), O_RDWR | O_CLOEXEC));
+ if (!fd_.isValid()) {
+ int ret = -errno;
LOG(MediaDevice, Error)
<< "Failed to open media device at "
<< deviceNode_ << ": " << strerror(-ret);
return ret;
}
- fd_ = ret;
return 0;
}
@@ -495,11 +504,7 @@ int MediaDevice::open()
*/
void MediaDevice::close()
{
- if (fd_ == -1)
- return;
-
- ::close(fd_);
- fd_ = -1;
+ fd_.reset();
}
/**
@@ -552,7 +557,7 @@ bool MediaDevice::addObject(MediaObject *object)
*
* The media device graph state is reset to invalid when the graph is cleared.
*
- * \sa valid()
+ * \sa isValid()
*/
void MediaDevice::clear()
{
@@ -633,14 +638,7 @@ bool MediaDevice::populateEntities(const struct media_v2_topology &topology)
*/
struct media_v2_interface *iface =
findInterface(topology, ent->id);
-
- MediaEntity *entity;
- if (iface)
- entity = new MediaEntity(this, ent,
- iface->devnode.major,
- iface->devnode.minor);
- else
- entity = new MediaEntity(this, ent);
+ MediaEntity *entity = new MediaEntity(this, ent, iface);
if (!addObject(entity)) {
delete entity;
@@ -689,43 +687,72 @@ bool MediaDevice::populateLinks(const struct media_v2_topology &topology)
(topology.ptr_links);
for (unsigned int i = 0; i < topology.num_links; ++i) {
- /*
- * Skip links between entities and interfaces: we only care
- * about pad-2-pad links here.
- */
if ((mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) ==
MEDIA_LNK_FL_INTERFACE_LINK)
continue;
- /* Store references to source and sink pads in the link. */
+ /* Look up the source and sink objects. */
unsigned int source_id = mediaLinks[i].source_id;
- MediaPad *source = dynamic_cast<MediaPad *>
- (object(source_id));
+ MediaObject *source = object(source_id);
if (!source) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< source_id;
return false;
}
unsigned int sink_id = mediaLinks[i].sink_id;
- MediaPad *sink = dynamic_cast<MediaPad *>
- (object(sink_id));
+ MediaObject *sink = object(sink_id);
if (!sink) {
LOG(MediaDevice, Error)
- << "Failed to find pad with id: "
+ << "Failed to find MediaObject with id "
<< sink_id;
return false;
}
- MediaLink *link = new MediaLink(&mediaLinks[i], source, sink);
- if (!addObject(link)) {
- delete link;
- return false;
+ switch (mediaLinks[i].flags & MEDIA_LNK_FL_LINK_TYPE) {
+ case MEDIA_LNK_FL_DATA_LINK: {
+ MediaPad *sourcePad = dynamic_cast<MediaPad *>(source);
+ MediaPad *sinkPad = dynamic_cast<MediaPad *>(sink);
+ if (!sourcePad || !sinkPad) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not a pad";
+ return false;
+ }
+
+ MediaLink *link = new MediaLink(&mediaLinks[i],
+ sourcePad, sinkPad);
+ if (!addObject(link)) {
+ delete link;
+ return false;
+ }
+
+ link->source()->addLink(link);
+ link->sink()->addLink(link);
+
+ break;
}
- source->addLink(link);
- sink->addLink(link);
+ case MEDIA_LNK_FL_ANCILLARY_LINK: {
+ MediaEntity *primary = dynamic_cast<MediaEntity *>(source);
+ MediaEntity *ancillary = dynamic_cast<MediaEntity *>(sink);
+ if (!primary || !ancillary) {
+ LOG(MediaDevice, Error)
+ << "Source or sink is not an entity";
+ return false;
+ }
+
+ primary->addAncillaryEntity(ancillary);
+
+ break;
+ }
+
+ default:
+ LOG(MediaDevice, Warning)
+ << "Unknown media link type";
+
+ break;
+ }
}
return true;
@@ -744,7 +771,7 @@ void MediaDevice::fixupEntityFlags(struct media_v2_entity *entity)
struct media_entity_desc desc = {};
desc.id = entity->id;
- int ret = ioctl(fd_, MEDIA_IOC_ENUM_ENTITIES, &desc);
+ int ret = ioctl(fd_.get(), MEDIA_IOC_ENUM_ENTITIES, &desc);
if (ret < 0) {
ret = -errno;
LOG(MediaDevice, Debug)
@@ -787,20 +814,16 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags)
linkDesc.flags = flags;
- int ret = ioctl(fd_, MEDIA_IOC_SETUP_LINK, &linkDesc);
+ int ret = ioctl(fd_.get(), MEDIA_IOC_SETUP_LINK, &linkDesc);
if (ret) {
ret = -errno;
LOG(MediaDevice, Error)
- << "Failed to setup link: "
+ << "Failed to setup link " << *link << ": "
<< strerror(-ret);
return ret;
}
- LOG(MediaDevice, Debug)
- << source->entity()->name() << "["
- << source->index() << "] -> "
- << sink->entity()->name() << "["
- << sink->index() << "]: " << flags;
+ LOG(MediaDevice, Debug) << *link << ": " << flags;
return 0;
}
diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp
index ef32065c..3e3772a6 100644
--- a/src/libcamera/media_object.cpp
+++ b/src/libcamera/media_object.cpp
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.cpp - Media device objects: entities, pads and links
+ * Media device objects: entities, pads and links
*/
-#include "media_object.h"
+#include "libcamera/internal/media_object.h"
#include <errno.h>
#include <string>
@@ -15,8 +15,9 @@
#include <linux/media.h>
-#include "log.h"
-#include "media_device.h"
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_device.h"
/**
* \file media_object.h
@@ -67,6 +68,11 @@ LOG_DECLARE_CATEGORY(MediaDevice)
/**
* \fn MediaObject::device()
+ * \copydoc MediaObject::device() const
+ */
+
+/**
+ * \fn MediaObject::device() const
* \brief Retrieve the media device the media object belongs to
* \return The MediaDevice
*/
@@ -115,7 +121,8 @@ LOG_DECLARE_CATEGORY(MediaDevice)
*/
int MediaLink::setEnabled(bool enable)
{
- unsigned int flags = enable ? MEDIA_LNK_FL_ENABLED : 0;
+ unsigned int flags = (flags_ & ~MEDIA_LNK_FL_ENABLED)
+ | (enable ? MEDIA_LNK_FL_ENABLED : 0);
int ret = dev_->setupLink(this, flags);
if (ret)
@@ -140,6 +147,31 @@ MediaLink::MediaLink(const struct media_v2_link *link, MediaPad *source,
}
/**
+ * \brief Generate a string representation of the MediaLink
+ * \return A string representing the MediaLink
+ */
+std::string MediaLink::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a Link into an output stream
+ * \param[in] out The output stream
+ * \param[in] link The MediaLink
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaLink &link)
+{
+ out << *link.source() << " -> " << *link.sink();
+
+ return out;
+}
+
+/**
* \fn MediaLink::source()
* \brief Retrieve the link's source pad
* \return The source pad at the origin of the link
@@ -190,15 +222,6 @@ MediaPad::MediaPad(const struct media_v2_pad *pad, MediaEntity *entity)
{
}
-MediaPad::~MediaPad()
-{
- /*
- * Don't delete the links as we only borrow the reference owned by
- * MediaDevice.
- */
- links_.clear();
-}
-
/**
* \fn MediaPad::index()
* \brief Retrieve the pad index
@@ -238,6 +261,31 @@ void MediaPad::addLink(MediaLink *link)
}
/**
+ * \brief Generate a string representation of the MediaPad
+ * \return A string representing the MediaPad
+ */
+std::string MediaPad::toString() const
+{
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a MediaPad into an output stream
+ * \param[in] out The output stream
+ * \param[in] pad The MediaPad
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const MediaPad &pad)
+{
+ out << "'" << pad.entity()->name() << "'[" << pad.index() << "]";
+
+ return out;
+}
+
+/**
* \class MediaEntity
* \brief The MediaEntity represents an entity in the media graph
*
@@ -250,6 +298,23 @@ void MediaPad::addLink(MediaLink *link)
*/
/**
+ * \enum MediaEntity::Type
+ * \brief The type of the interface exposed by the entity to userspace
+ *
+ * \var MediaEntity::Type::Invalid
+ * \brief Invalid or unsupported entity type
+ *
+ * \var MediaEntity::Type::MediaEntity
+ * \brief Plain media entity with no userspace interface
+ *
+ * \var MediaEntity::Type::V4L2VideoDevice
+ * \brief V4L2 video device with a V4L2 video device node
+ *
+ * \var MediaEntity::Type::V4L2Subdevice
+ * \brief V4L2 subdevice with a V4L2 subdev device node
+ */
+
+/**
* \fn MediaEntity::name()
* \brief Retrieve the entity name
* \return The entity name
@@ -276,6 +341,15 @@ void MediaPad::addLink(MediaLink *link)
*/
/**
+ * \fn MediaEntity::type()
+ * \brief Retrieve the entity's type
+ *
+ * The entity type identifies the type of interface exposed to userspace.
+ *
+ * \return The entity's type
+ */
+
+/**
* \fn MediaEntity::deviceNode()
* \brief Retrieve the entity's device node path, if any
* \return The entity's device node path, or an empty string if it is not set
@@ -358,25 +432,32 @@ int MediaEntity::setDeviceNode(const std::string &deviceNode)
* \brief Construct a MediaEntity
* \param[in] dev The media device this entity belongs to
* \param[in] entity The media entity kernel data
- * \param[in] major The major number of the entity associated interface
- * \param[in] minor The minor number of the entity associated interface
+ * \param[in] iface The entity interface data (may be null)
*/
MediaEntity::MediaEntity(MediaDevice *dev,
const struct media_v2_entity *entity,
- unsigned int major, unsigned int minor)
+ const struct media_v2_interface *iface)
: MediaObject(dev, entity->id), name_(entity->name),
function_(entity->function), flags_(entity->flags),
- major_(major), minor_(minor)
+ type_(Type::MediaEntity), major_(0), minor_(0)
{
-}
+ if (!iface)
+ return;
+
+ switch (iface->intf_type) {
+ case MEDIA_INTF_T_V4L_VIDEO:
+ type_ = Type::V4L2VideoDevice;
+ break;
+ case MEDIA_INTF_T_V4L_SUBDEV:
+ type_ = Type::V4L2Subdevice;
+ break;
+ default:
+ type_ = Type::Invalid;
+ return;
+ }
-MediaEntity::~MediaEntity()
-{
- /*
- * Don't delete the pads as we only borrow the reference owned by
- * MediaDevice.
- */
- pads_.clear();
+ major_ = iface->devnode.major;
+ minor_ = iface->devnode.minor;
}
/**
@@ -392,4 +473,19 @@ void MediaEntity::addPad(MediaPad *pad)
pads_.push_back(pad);
}
+/**
+ * \brief Add a MediaEntity to the list of ancillary entities
+ * \param[in] ancillaryEntity The instance of MediaEntity to add
+ */
+void MediaEntity::addAncillaryEntity(MediaEntity *ancillaryEntity)
+{
+ ancillaryEntities_.push_back(ancillaryEntity);
+}
+
+/**
+ * \fn MediaEntity::ancillaryEntities()
+ * \brief Retrieve all ancillary entities of the entity
+ * \return The list of the entity's ancillary entities
+ */
+
} /* namespace libcamera */
diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build
index 87fa09cd..57fde8a8 100644
--- a/src/libcamera/meson.build
+++ b/src/libcamera/meson.build
@@ -1,129 +1,238 @@
-libcamera_sources = files([
- 'bound_method.cpp',
- 'buffer.cpp',
- 'byte_stream_buffer.cpp',
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_public_sources = files([
'camera.cpp',
- 'camera_controls.cpp',
'camera_manager.cpp',
- 'camera_sensor.cpp',
+ 'color_space.cpp',
'controls.cpp',
+ 'fence.cpp',
+ 'framebuffer.cpp',
+ 'framebuffer_allocator.cpp',
+ 'geometry.cpp',
+ 'orientation.cpp',
+ 'pixel_format.cpp',
+ 'request.cpp',
+ 'stream.cpp',
+ 'transform.cpp',
+])
+
+libcamera_internal_sources = files([
+ 'bayer_format.cpp',
+ 'byte_stream_buffer.cpp',
+ 'camera_controls.cpp',
+ 'camera_lens.cpp',
'control_serializer.cpp',
'control_validator.cpp',
+ 'converter.cpp',
+ 'debug_controls.cpp',
+ 'delayed_controls.cpp',
'device_enumerator.cpp',
'device_enumerator_sysfs.cpp',
- 'event_dispatcher.cpp',
- 'event_dispatcher_poll.cpp',
- 'event_notifier.cpp',
- 'file_descriptor.cpp',
+ 'dma_buf_allocator.cpp',
'formats.cpp',
- 'framebuffer_allocator.cpp',
- 'geometry.cpp',
- 'ipa_context_wrapper.cpp',
'ipa_controls.cpp',
+ 'ipa_data_serializer.cpp',
'ipa_interface.cpp',
'ipa_manager.cpp',
'ipa_module.cpp',
'ipa_proxy.cpp',
+ 'ipc_pipe.cpp',
+ 'ipc_pipe_unixsocket.cpp',
'ipc_unixsocket.cpp',
- 'log.cpp',
+ 'mapped_framebuffer.cpp',
+ 'matrix.cpp',
'media_device.cpp',
'media_object.cpp',
- 'message.cpp',
- 'object.cpp',
'pipeline_handler.cpp',
- 'pixelformats.cpp',
'process.cpp',
- 'request.cpp',
- 'semaphore.cpp',
- 'signal.cpp',
- 'stream.cpp',
- 'thread.cpp',
- 'timer.cpp',
- 'utils.cpp',
- 'v4l2_controls.cpp',
+ 'pub_key.cpp',
+ 'shared_mem_object.cpp',
+ 'source_paths.cpp',
+ 'sysfs.cpp',
'v4l2_device.cpp',
+ 'v4l2_pixelformat.cpp',
'v4l2_subdevice.cpp',
'v4l2_videodevice.cpp',
+ 'yaml_parser.cpp',
])
-subdir('include')
-
-libcamera_internal_includes = include_directories('include')
-
includes = [
libcamera_includes,
- libcamera_internal_includes,
]
+libcamera_deps = []
+
+libatomic = cc.find_library('atomic', required : false)
+libthreads = dependency('threads')
+
+subdir('base')
+subdir('converter')
+subdir('ipa')
subdir('pipeline')
subdir('proxy')
+subdir('sensor')
+subdir('software_isp')
-libatomic = cc.find_library('atomic', required : false)
-libdl = cc.find_library('dl')
-libudev = dependency('libudev', required : false)
+null_dep = dependency('', required : false)
+
+# TODO: Use dependency('dl') when updating to meson 0.62.0 or newer.
+libdl = null_dep
+if not cc.has_function('dlopen')
+ libdl = cc.find_library('dl')
+endif
+libudev = dependency('libudev', required : get_option('udev'))
+libyaml = dependency('yaml-0.1', required : false)
+
+# Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first.
+libcrypto = dependency('gnutls', required : false)
+if libcrypto.found()
+ config_h.set('HAVE_GNUTLS', 1)
+else
+ libcrypto = dependency('libcrypto', required : false)
+ if libcrypto.found()
+ config_h.set('HAVE_CRYPTO', 1)
+ endif
+endif
+
+if not libcrypto.found()
+ warning('Neither gnutls nor libcrypto found, all IPA modules will be isolated')
+ summary({'IPA modules signed with': 'None (modules will run isolated)'},
+ section : 'Configuration')
+else
+ summary({'IPA modules signed with' : libcrypto.name()}, section : 'Configuration')
+endif
+
+if liblttng.found()
+ tracing_enabled = true
+ config_h.set('HAVE_TRACING', 1)
+ libcamera_internal_sources += files(['tracepoints.cpp'])
+else
+ tracing_enabled = false
+endif
if libudev.found()
config_h.set('HAVE_LIBUDEV', 1)
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'device_enumerator_udev.cpp',
])
endif
-gen_controls = files('gen-controls.py')
+# Fallback to a subproject if libyaml isn't found, as it's not packaged in AOSP.
+if not libyaml.found()
+ cmake = import('cmake')
+
+ libyaml_vars = cmake.subproject_options()
+ libyaml_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+ libyaml_vars.append_compile_args('c', '-Wno-unused-value')
+ libyaml_wrap = cmake.subproject('libyaml', options : libyaml_vars)
+ libyaml = libyaml_wrap.dependency('yaml')
+endif
control_sources = []
-foreach source : control_source_files
- input_files = files(source +'.yaml', source + '.cpp.in')
- control_sources += custom_target(source + '_cpp',
+controls_mode_files = {
+ 'controls': [
+ controls_files,
+ 'control_ids.cpp',
+ ],
+ 'properties': [
+ properties_files,
+ 'property_ids.cpp',
+ ],
+}
+
+foreach mode, inout_files : controls_mode_files
+ input_files = inout_files[0]
+ output_file = inout_files[1]
+
+ template_file = files('control_ids.cpp.in')
+ ranges_file = files('control_ranges.yaml')
+
+ control_sources += custom_target(mode + '_ids_cpp',
input : input_files,
- output : source + '.cpp',
- depend_files : gen_controls,
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'])
+ output : output_file,
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'],
+ env : py_build_env)
endforeach
-libcamera_sources += control_headers
-libcamera_sources += control_sources
+libcamera_public_sources += control_sources
-gen_version = join_paths(meson.source_root(), 'utils', 'gen-version.sh')
+gen_version = meson.project_source_root() / 'utils' / 'gen-version.sh'
-version_cpp = vcs_tag(command : [gen_version, meson.build_root()],
+# Use vcs_tag() and not configure_file() or run_command(), to ensure that the
+# version gets updated with every ninja build and not just at meson setup time.
+version_cpp = vcs_tag(command : [gen_version, meson.project_build_root(), meson.project_source_root()],
input : 'version.cpp.in',
output : 'version.cpp',
fallback : meson.project_version())
-libcamera_sources += version_cpp
+libcamera_public_sources += version_cpp
-libcamera_deps = [
+if ipa_sign_module
+ ipa_pub_key_cpp = custom_target('ipa_pub_key_cpp',
+ input : [ipa_priv_key, 'ipa_pub_key.cpp.in'],
+ output : 'ipa_pub_key.cpp',
+ command : [gen_ipa_pub_key, '@INPUT@', '@OUTPUT@'])
+
+ libcamera_internal_sources += ipa_pub_key_cpp
+endif
+
+libcamera_deps += [
libatomic,
+ libcamera_base,
+ libcamera_base_private,
+ libcrypto,
libdl,
+ liblttng,
libudev,
- dependency('threads'),
+ libyaml,
]
-libcamera_link_with = []
-
-if get_option('android')
- libcamera_sources += android_hal_sources
- includes += android_includes
- libcamera_link_with += android_camera_metadata
-endif
-
# We add '/' to the build_rpath as a 'safe' path to act as a boolean flag.
# The build_rpath is stripped at install time by meson, so we determine at
# runtime if the library is running from an installed location by checking
# for the presence or absence of the dynamic tag.
-libcamera = shared_library('camera',
- libcamera_sources,
+libcamera = shared_library('libcamera',
+ [
+ libcamera_public_headers,
+ libcamera_public_sources,
+ libcamera_ipa_headers,
+ libcamera_internal_headers,
+ libcamera_internal_sources,
+ ],
+ version : libcamera_version,
+ soversion : libcamera_soversion,
+ name_prefix : '',
install : true,
- link_with : libcamera_link_with,
include_directories : includes,
build_rpath : '/',
dependencies : libcamera_deps)
-libcamera_dep = declare_dependency(sources : [libcamera_api, libcamera_ipa_api],
- include_directories : libcamera_includes,
- link_with : libcamera)
+libcamera_public = declare_dependency(sources : [
+ libcamera_public_headers,
+ ],
+ include_directories : libcamera_includes,
+ dependencies : libcamera_base,
+ link_with : libcamera)
+
+# Internal dependency for components and plugins which can use private APIs
+libcamera_private = declare_dependency(sources : [
+ libcamera_ipa_headers,
+ ],
+ dependencies : [
+ libcamera_public,
+ libcamera_base_private,
+ ])
+
+pkg_mod = import('pkgconfig')
+pkg_mod.generate(libcamera,
+ libraries : libcamera_base_lib,
+ description : 'Complex Camera Support Library',
+ subdirs : 'libcamera')
+
+meson.override_dependency('libcamera', libcamera_public)
subdir('proxy/worker')
diff --git a/src/libcamera/message.cpp b/src/libcamera/message.cpp
deleted file mode 100644
index 77f2bdd5..00000000
--- a/src/libcamera/message.cpp
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * message.cpp - Message queue support
- */
-
-#include "message.h"
-
-#include <libcamera/signal.h>
-
-#include "log.h"
-
-/**
- * \file message.h
- * \brief Message queue support
- *
- * The messaging API enables inter-thread communication through message
- * posting. Messages can be sent from any thread to any recipient deriving from
- * the Object class.
- *
- * To post a message, the sender allocates it dynamically as instance of a class
- * derived from Message. It then posts the message to an Object recipient
- * through Object::postMessage(). Message ownership is passed to the object,
- * thus the message shall not store any temporary data.
- *
- * The message is delivered in the context of the object's thread, through the
- * Object::message() virtual method. After delivery the message is
- * automatically deleted.
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Message)
-
-std::atomic_uint Message::nextUserType_{ Message::UserMessage };
-
-/**
- * \class Message
- * \brief A message that can be posted to a Thread
- */
-
-/**
- * \enum Message::Type
- * \brief The message type
- * \var Message::None
- * \brief Invalid message type
- * \var Message::InvokeMessage
- * \brief Asynchronous method invocation across threads
- * \var Message::ThreadMoveMessage
- * \brief Object is being moved to a different thread
- * \var Message::UserMessage
- * \brief First value available for user-defined messages
- */
-
-/**
- * \brief Construct a message object of type \a type
- * \param[in] type The message type
- */
-Message::Message(Message::Type type)
- : type_(type)
-{
-}
-
-Message::~Message()
-{
-}
-
-/**
- * \fn Message::type()
- * \brief Retrieve the message type
- * \return The message type
- */
-
-/**
- * \fn Message::receiver()
- * \brief Retrieve the message receiver
- * \return The message receiver
- */
-
-/**
- * \brief Reserve and register a custom user-defined message type
- *
- * Custom message types use values starting at Message::UserMessage. Assigning
- * custom types manually may lead to accidental duplicated types. To avoid this
- * problem, this method reserves and returns the next available user-defined
- * message type.
- *
- * The recommended way to use this method is to subclass Message and provide a
- * static accessor for the custom message type.
- *
- * \code{.cpp}
- * class MyCustomMessage : public Message
- * {
- * public:
- * MyCustomMessage() : Message(type()) {}
- *
- * static Message::Type type()
- * {
- * static MessageType type = registerMessageType();
- * return type;
- * }
- * };
- * \endcode
- *
- * \return A new unique message type
- */
-Message::Type Message::registerMessageType()
-{
- return static_cast<Message::Type>(nextUserType_++);
-}
-
-/**
- * \class InvokeMessage
- * \brief A message carrying a method invocation across threads
- */
-
-/**
- * \brief Construct an InvokeMessage for method invocation on an Object
- * \param[in] method The bound method
- * \param[in] pack The packed method arguments
- * \param[in] semaphore The semaphore used to signal message delivery
- * \param[in] deleteMethod True to delete the \a method when the message is
- * destroyed
- */
-InvokeMessage::InvokeMessage(BoundMethodBase *method,
- std::shared_ptr<BoundMethodPackBase> pack,
- Semaphore *semaphore, bool deleteMethod)
- : Message(Message::InvokeMessage), method_(method), pack_(pack),
- semaphore_(semaphore), deleteMethod_(deleteMethod)
-{
-}
-
-InvokeMessage::~InvokeMessage()
-{
- if (deleteMethod_)
- delete method_;
-}
-
-/**
- * \fn InvokeMessage::semaphore()
- * \brief Retrieve the message semaphore passed to the constructor
- * \return The message semaphore
- */
-
-/**
- * \brief Invoke the method bound to InvokeMessage::method_ with arguments
- * InvokeMessage::pack_
- */
-void InvokeMessage::invoke()
-{
- method_->invokePack(pack_.get());
-}
-
-/**
- * \var InvokeMessage::method_
- * \brief The method to be invoked
- */
-
-/**
- * \var InvokeMessage::pack_
- * \brief The packed method invocation arguments
- */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/object.cpp b/src/libcamera/object.cpp
deleted file mode 100644
index 99c3bf9a..00000000
--- a/src/libcamera/object.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * object.cpp - Base object
- */
-
-#include <libcamera/object.h>
-
-#include <algorithm>
-
-#include <libcamera/signal.h>
-
-#include "log.h"
-#include "message.h"
-#include "semaphore.h"
-#include "thread.h"
-#include "utils.h"
-
-/**
- * \file object.h
- * \brief Base object to support automatic signal disconnection
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Object)
-
-/**
- * \class Object
- * \brief Base object to support automatic signal disconnection
- *
- * The Object class simplifies signal/slot handling for classes implementing
- * slots. By inheriting from Object, an object is automatically disconnected
- * from all connected signals when it gets destroyed.
- *
- * Object instances are bound to the thread of their parent, or the thread in
- * which they're created when they have no parent. When a message is posted to
- * an object, its handler will run in the object's thread. This allows
- * implementing easy message passing between threads by inheriting from the
- * Object class.
- *
- * Deleting an object from a thread other than the one the object is bound to is
- * unsafe, unless the caller ensures that the object isn't processing any
- * message concurrently.
- *
- * Object slots connected to signals will also run in the context of the
- * object's thread, regardless of whether the signal is emitted in the same or
- * in another thread.
- *
- * \sa Message, Signal, Thread
- */
-
-/**
- * \brief Construct an Object instance
- * \param[in] parent The object parent
- *
- * The new Object instance is bound to the thread of its \a parent, or to the
- * current thread if the \a parent is nullptr.
- */
-Object::Object(Object *parent)
- : parent_(parent), pendingMessages_(0)
-{
- thread_ = parent ? parent->thread() : Thread::current();
-
- if (parent)
- parent->children_.push_back(this);
-}
-
-/**
- * \brief Destroy an Object instance
- *
- * Deleting an Object automatically disconnects all signals from the Object's
- * slots. All the Object's children are made orphan, but stay bound to their
- * current thread.
- */
-Object::~Object()
-{
- /*
- * Move signals to a private list to avoid concurrent iteration and
- * deletion of items from Signal::disconnect().
- */
- std::list<SignalBase *> signals(std::move(signals_));
- for (SignalBase *signal : signals)
- signal->disconnect(this);
-
- if (pendingMessages_)
- thread()->removeMessages(this);
-
- if (parent_) {
- auto it = std::find(parent_->children_.begin(),
- parent_->children_.end(), this);
- ASSERT(it != parent_->children_.end());
- parent_->children_.erase(it);
- }
-
- for (auto child : children_)
- child->parent_ = nullptr;
-}
-
-/**
- * \brief Post a message to the object's thread
- * \param[in] msg The message
- *
- * This method posts the message \a msg to the message queue of the object's
- * thread, to be delivered to the object through the message() method in the
- * context of its thread. Message ownership is passed to the thread, and the
- * message will be deleted after being delivered.
- *
- * Messages are delivered through the thread's event loop. If the thread is not
- * running its event loop the message will not be delivered until the event
- * loop gets started.
- *
- * \context This function is \threadsafe.
- */
-void Object::postMessage(std::unique_ptr<Message> msg)
-{
- thread()->postMessage(std::move(msg), this);
-}
-
-/**
- * \brief Message handler for the object
- * \param[in] msg The message
- *
- * This virtual method receives messages for the object. It is called in the
- * context of the object's thread, and can be overridden to process custom
- * messages. The parent Object::message() method shall be called for any
- * message not handled by the override method.
- *
- * The message \a msg is valid only for the duration of the call, no reference
- * to it shall be kept after this method returns.
- */
-void Object::message(Message *msg)
-{
- switch (msg->type()) {
- case Message::InvokeMessage: {
- InvokeMessage *iMsg = static_cast<InvokeMessage *>(msg);
- Semaphore *semaphore = iMsg->semaphore();
- iMsg->invoke();
-
- if (semaphore)
- semaphore->release();
-
- break;
- }
-
- default:
- break;
- }
-}
-
-/**
- * \fn R Object::invokeMethod()
- * \brief Invoke a method asynchronously on an Object instance
- * \param[in] func The object method to invoke
- * \param[in] type Connection type for method invocation
- * \param[in] args The method arguments
- *
- * This method invokes the member method \a func with arguments \a args, based
- * on the connection \a type. Depending on the type, the method will be called
- * synchronously in the same thread or asynchronously in the object's thread.
- *
- * Arguments \a args passed by value or reference are copied, while pointers
- * are passed untouched. The caller shall ensure that any pointer argument
- * remains valid until the method is invoked.
- *
- * \context This function is \threadsafe.
- *
- * \return For connection types ConnectionTypeDirect and
- * ConnectionTypeBlocking, return the return value of the invoked method. For
- * connection type ConnectionTypeQueued, return a default-constructed R value.
- */
-
-/**
- * \fn Object::thread()
- * \brief Retrieve the thread the object is bound to
- * \context This function is \threadsafe.
- * \return The thread the object is bound to
- */
-
-/**
- * \brief Move the object and all its children to a different thread
- * \param[in] thread The target thread
- *
- * This method moves the object and all its children from the current thread to
- * the new \a thread.
- *
- * Before the object is moved, a Message::ThreadMoveMessage message is sent to
- * it. The message() method can be reimplement in derived classes to be notified
- * of the upcoming thread move and perform any required processing.
- *
- * Moving an object that has a parent is not allowed, and causes undefined
- * behaviour.
- *
- * \context This function is thread-bound.
- */
-void Object::moveToThread(Thread *thread)
-{
- ASSERT(Thread::current() == thread_);
-
- if (thread_ == thread)
- return;
-
- if (parent_) {
- LOG(Object, Error)
- << "Moving object to thread with a parent is not permitted";
- return;
- }
-
- notifyThreadMove();
-
- thread->moveObject(this);
-}
-
-void Object::notifyThreadMove()
-{
- Message msg(Message::ThreadMoveMessage);
- message(&msg);
-
- for (auto child : children_)
- child->notifyThreadMove();
-}
-
-/**
- * \fn Object::parent()
- * \brief Retrieve the object's parent
- * \return The object's parent
- */
-
-void Object::connect(SignalBase *signal)
-{
- signals_.push_back(signal);
-}
-
-void Object::disconnect(SignalBase *signal)
-{
- for (auto iter = signals_.begin(); iter != signals_.end(); ) {
- if (*iter == signal)
- iter = signals_.erase(iter);
- else
- iter++;
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp
new file mode 100644
index 00000000..7d7d21ae
--- /dev/null
+++ b/src/libcamera/orientation.cpp
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#include <libcamera/orientation.h>
+
+#include <array>
+
+/**
+ * \file orientation.h
+ * \brief Image orientation definition
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Orientation
+ * \brief The image orientation in a memory buffer
+ *
+ * The Orientation enumeration describes the orientation of the images
+ * produced by the camera pipeline as they get received by the application
+ * inside memory buffers.
+ *
+ * The image orientation expressed using the Orientation enumeration can then
+ * be inferred by applying to a naturally oriented image a multiple of 90
+ * degrees rotation in the clockwise direction, followed by an optional
+ * horizontal mirroring.
+ *
+ * The enumeration numerical values follow the ones defined by the EXIF
+ * Specification version 2.32, Tag 274 "Orientation", while the names of the
+ * enumerated values report the rotation and mirroring operations performed.
+ *
+ * For example, Orientation::Rotate90Mirror describes the orientation obtained
+ * by rotating the image 90 degrees clockwise first and then applying a
+ * horizontal mirroring.
+ *
+ * \var Orientation::Rotate0
+ * \image html rotation/rotate0.svg
+ * \var Orientation::Rotate0Mirror
+ * \image html rotation/rotate0Mirror.svg
+ * \var Orientation::Rotate180
+ * \image html rotation/rotate180.svg
+ * \var Orientation::Rotate180Mirror
+ * \image html rotation/rotate180Mirror.svg
+ * \var Orientation::Rotate90Mirror
+ * \image html rotation/rotate90Mirror.svg
+ * \var Orientation::Rotate270
+ * \image html rotation/rotate270.svg
+ * \var Orientation::Rotate270Mirror
+ * \image html rotation/rotate270Mirror.svg
+ * \var Orientation::Rotate90
+ * \image html rotation/rotate90.svg
+ */
+
+/**
+ * \brief Return the orientation representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The orientation corresponding to the rotation if \a success was set
+ * to `true`, otherwise the `Rotate0` orientation
+ */
+Orientation orientationFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Orientation::Rotate0;
+ case 90:
+ return Orientation::Rotate90;
+ case 180:
+ return Orientation::Rotate180;
+ case 270:
+ return Orientation::Rotate270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Orientation::Rotate0;
+}
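A short usage sketch of the normalization above (hypothetical caller code, not part of this patch):

bool ok;
Orientation o = orientationFromRotation(-90, &ok);
/* -90 is normalized to 270 degrees clockwise: ok == true and
 * o == Orientation::Rotate270. An angle that is not a multiple of 90,
 * such as 45, sets ok to false and returns Orientation::Rotate0. */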
+
+/**
+ * \brief Prints human-friendly names for Orientation items
+ * \param[in] out The output stream
+ * \param[in] orientation The Orientation item
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
+{
+ constexpr std::array<const char *, 9> orientationNames = {
+ "", /* Orientation starts counting from 1. */
+ "Rotate0",
+ "Rotate0Mirror",
+ "Rotate180",
+ "Rotate180Mirror",
+ "Rotate90Mirror",
+ "Rotate270",
+ "Rotate270Mirror",
+ "Rotate90",
+ };
+
+ out << orientationNames[static_cast<unsigned int>(orientation)];
+ return out;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
new file mode 100644
index 00000000..4e66b336
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp
@@ -0,0 +1,1116 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022 - Jacopo Mondi <jacopo@jmondi.org>
+ *
+ * Pipeline handler for ISI interface found on NXP i.MX8 SoC
+ */
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera_manager.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "linux/media-bus-format.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(ISI)
+
+class PipelineHandlerISI;
+
+class ISICameraData : public Camera::Private
+{
+public:
+ ISICameraData(PipelineHandler *ph)
+ : Camera::Private(ph)
+ {
+ /*
+ * \todo Assume 2 channels only for now, as that's the number of
+ * available channels on i.MX8MP.
+ */
+ streams_.resize(2);
+ }
+
+ PipelineHandlerISI *pipe();
+
+ int init();
+
+ unsigned int pipeIndex(const Stream *stream)
+ {
+ return stream - &*streams_.begin();
+ }
+
+ unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
+ unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
+ unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csis_;
+
+ std::vector<Stream> streams_;
+
+ std::vector<Stream *> enabledStreams_;
+
+ unsigned int xbarSink_;
+};
+
+class ISICameraConfiguration : public CameraConfiguration
+{
+public:
+ ISICameraConfiguration(ISICameraData *data)
+ : data_(data)
+ {
+ }
+
+ Status validate() override;
+
+ static const std::map<PixelFormat, unsigned int> formatsMap_;
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ CameraConfiguration::Status
+ validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
+ CameraConfiguration::Status
+ validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
+
+ const ISICameraData *data_;
+};
+
+class PipelineHandlerISI : public PipelineHandler
+{
+public:
+ PipelineHandlerISI(CameraManager *manager);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+
+protected:
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr Size kPreviewSize = { 1920, 1080 };
+ static constexpr Size kMinISISize = { 1, 1 };
+
+ struct Pipe {
+ std::unique_ptr<V4L2Subdevice> isi;
+ std::unique_ptr<V4L2VideoDevice> capture;
+ };
+
+ ISICameraData *cameraData(Camera *camera)
+ {
+ return static_cast<ISICameraData *>(camera->_d());
+ }
+
+ Pipe *pipeFromStream(Camera *camera, const Stream *stream);
+
+ StreamConfiguration generateYUVConfiguration(Camera *camera,
+ const Size &size);
+ StreamConfiguration generateRawConfiguration(Camera *camera);
+
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *isiDev_;
+
+ std::unique_ptr<V4L2Subdevice> crossbar_;
+ std::vector<Pipe> pipes_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+PipelineHandlerISI *ISICameraData::pipe()
+{
+ return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
+}
+
+/* Open and initialize pipe components. */
+int ISICameraData::init()
+{
+ if (!sensor_)
+ return -ENODEV;
+
+ int ret = csis_->open();
+ if (ret)
+ return ret;
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Get a RAW Bayer media bus format compatible with the requested pixelFormat.
+ *
+ * If the requested pixelFormat cannot be produced by the sensor, adjust it to
+ * the one corresponding to the media bus format with the largest bit-depth.
+ */
+unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ static const std::map<PixelFormat, unsigned int> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+ { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
+ { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
+ { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
+ { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
+ };
+
+ /*
+ * Make sure the requested PixelFormat is supported in the above
+ * map and the sensor can produce the compatible mbus code.
+ */
+ auto it = rawFormats.find(*pixelFormat);
+ if (it != rawFormats.end() &&
+ std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
+ return it->second;
+
+ if (it == rawFormats.end())
+ LOG(ISI, Warning) << pixelFormat
+ << " not supported in ISI formats map.";
+
+ /*
+ * The desired pixel format cannot be produced. Adjust it to the one
+ * corresponding to the raw media bus format with the largest bit-depth
+ * the sensor provides.
+ */
+ unsigned int sensorCode = 0;
+ unsigned int maxDepth = 0;
+ *pixelFormat = {};
+
+ for (unsigned int code : mbusCodes) {
+ /* Make sure the media bus format is RAW Bayer. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ /* Make sure the media format is supported. */
+ it = std::find_if(rawFormats.begin(), rawFormats.end(),
+ [code](auto &rawFormat) {
+ return rawFormat.second == code;
+ });
+
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ /* Pick the one with the largest bit depth. */
+ if (bayerFormat.bitDepth > maxDepth) {
+ maxDepth = bayerFormat.bitDepth;
+ *pixelFormat = it->first;
+ sensorCode = code;
+ }
+ }
+
+ if (!pixelFormat->isValid())
+ LOG(ISI, Error) << "Cannot find a supported RAW format";
+
+ return sensorCode;
+}
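For illustration, assume a hypothetical sensor that only exposes 10-bit Bayer media bus codes; an 8-bit request is then adjusted by the fallback path (sketch, `data` is a hypothetical ISICameraData pointer):

PixelFormat pixelFormat = formats::SRGGB8;
unsigned int code = data->getRawMediaBusFormat(&pixelFormat);
/* MEDIA_BUS_FMT_SRGGB8_1X8 is not among the sensor's mbusCodes(), so
 * the fallback picks the largest available bit-depth: code becomes
 * MEDIA_BUS_FMT_SRGGB10_1X10 and pixelFormat is adjusted to
 * formats::SRGGB10. */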
+
+/*
+ * Get a YUV/RGB media bus format from which the ISI can produce a processed
+ * stream, preferring codes with the same colour encoding as the requested
+ * pixelformat.
+ *
+ * If the sensor does not provide any YUV/RGB media bus format, the ISI cannot
+ * generate any processed pixel format as it cannot debayer.
+ */
+unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
+{
+ std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
+
+ /*
+ * The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
+ * media bus formats.
+ *
+ * Keep the list in sync with the mxc_isi_bus_formats[] array in
+ * the ISI driver.
+ */
+ std::vector<unsigned int> yuvCodes = {
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ };
+
+ std::sort(mbusCodes.begin(), mbusCodes.end());
+ std::sort(yuvCodes.begin(), yuvCodes.end());
+
+ std::vector<unsigned int> supportedCodes;
+ std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
+ yuvCodes.begin(), yuvCodes.end(),
+ std::back_inserter(supportedCodes));
+
+ if (supportedCodes.empty()) {
+ LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
+
+ return 0;
+ }
+
+ /* Prefer codes with the same encoding as the requested pixel format. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ for (unsigned int code : supportedCodes) {
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
+ (code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUV8_1X24))
+ return code;
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
+ (code == MEDIA_BUS_FMT_RGB565_1X16 ||
+ code == MEDIA_BUS_FMT_RGB888_1X24))
+ return code;
+ }
+
+ /* Otherwise return the first found code. */
+ return supportedCodes[0];
+}
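As an example of the final fallback, assume a hypothetical sensor whose only processed output is UYVY:

unsigned int code = data->getYuvMediaBusFormat(formats::RGB565);
/* The sensor/ISI intersection contains only MEDIA_BUS_FMT_UYVY8_1X16.
 * No code matches the requested RGB colour encoding, so the first
 * intersected code is returned and the ISI converts it to RGB565. */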
+
+unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
+{
+ if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW)
+ return getRawMediaBusFormat(pixelFormat);
+
+ return getYuvMediaBusFormat(*pixelFormat);
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+/*
+ * ISICameraConfiguration::formatsMap_ records the association between an output
+ * pixel format and the ISI source pixel format to be applied to the pipeline.
+ */
+const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
+ { formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
+ { formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+/*
+ * Adjust stream configuration when the first requested stream is RAW: all the
+ * streams will have the same RAW pixelformat and size.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ /*
+ * Make sure the requested RAW format is supported by the
+ * pipeline, otherwise adjust it.
+ */
+ std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
+ StreamConfiguration &rawConfig = config_[0];
+ PixelFormat rawFormat = rawConfig.pixelFormat;
+
+ unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
+ if (!sensorCode) {
+ LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
+ << rawConfig.pixelFormat;
+ return Invalid;
+ }
+
+ if (rawFormat != rawConfig.pixelFormat) {
+ LOG(ISI, Debug) << "RAW pixelformat adjusted to "
+ << rawFormat;
+ rawConfig.pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ /* Cap the RAW stream size to the maximum resolution. */
+ const Size configSize = rawConfig.size;
+ rawConfig.size.boundTo(maxResolution);
+ if (rawConfig.size != configSize) {
+ LOG(ISI, Debug) << "RAW size adjusted to "
+ << rawConfig.size;
+ status = Adjusted;
+ }
+
+ /* Adjust all other streams to RAW. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+ const PixelFormat pixFmt = cfg.pixelFormat;
+ const Size size = cfg.size;
+
+ cfg.pixelFormat = rawConfig.pixelFormat;
+ cfg.size = rawConfig.size;
+
+ if (cfg.pixelFormat != pixFmt || cfg.size != size) {
+ LOG(ISI, Debug) << "Stream " << i << " adjusted to "
+ << cfg.toString();
+ status = Adjusted;
+ }
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+/*
+ * Adjust stream configuration when the first requested stream is not RAW: all
+ * the streams will be either YUV or RGB processed formats.
+ */
+CameraConfiguration::Status
+ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
+ const Size &maxResolution)
+{
+ CameraConfiguration::Status status = Valid;
+
+ StreamConfiguration &yuvConfig = config_[0];
+ PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
+
+ /*
+ * Make sure the sensor can produce a compatible YUV/RGB media bus
+ * format. If the sensor can only produce RAW Bayer we can only fail
+ * here as we can't adjust to anything but RAW.
+ */
+ unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
+ if (!yuvMediaBusCode) {
+ LOG(ISI, Error) << "Cannot adjust pixelformat "
+ << yuvConfig.pixelFormat;
+ return Invalid;
+ }
+
+ /* Adjust all the other streams. */
+ for (const auto &[i, cfg] : utils::enumerate(config_)) {
+ LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
+
+ /* If the stream is RAW or not supported default it to YUYV. */
+ const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
+ if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
+ !formatsMap_.count(cfg.pixelFormat)) {
+
+ LOG(ISI, Debug) << "Stream " << i << " format: "
+ << cfg.pixelFormat << " adjusted to YUYV";
+
+ cfg.pixelFormat = formats::YUYV;
+ status = Adjusted;
+ }
+
+ /* Cap the streams size to the maximum accepted resolution. */
+ Size configSize = cfg.size;
+ cfg.size.boundTo(maxResolution);
+ if (cfg.size != configSize) {
+ LOG(ISI, Debug)
+ << "Stream " << i << " adjusted to " << cfg.size;
+ status = Adjusted;
+ }
+
+ /* Re-fetch the pixel format info in case it has been adjusted. */
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ /* \todo Multiplane ? */
+ cfg.stride = info.stride(cfg.size.width, 0);
+ cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
+
+ /* Assign streams in the order they are presented. */
+ auto stream = availableStreams.extract(availableStreams.begin());
+ cfg.setStream(stream.value());
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status ISICameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ std::set<Stream *> availableStreams;
+ std::transform(data_->streams_.begin(), data_->streams_.end(),
+ std::inserter(availableStreams, availableStreams.end()),
+ [](const Stream &s) { return const_cast<Stream *>(&s); });
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of streams to the number of available ISI pipes. */
+ if (config_.size() > availableStreams.size()) {
+ config_.resize(availableStreams.size());
+ status = Adjusted;
+ }
+
+ /*
+ * If more than a single stream is requested, the maximum allowed input
+ * image width is 2048. Cap the maximum image size accordingly.
+ *
+ * \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
+ * channels. SoCs with more channels than the i.MX8MP are capable of
+ * supporting more streams with input width > 2048 by chaining
+ * successive channels together. Define a policy for channels allocation
+ * to fully support other SoCs.
+ */
+ CameraSensor *sensor = data_->sensor_.get();
+ Size maxResolution = sensor->resolution();
+ if (config_.size() > 1)
+ maxResolution.width = std::min(2048U, maxResolution.width);
+
+ /* Validate streams according to the format of the first one. */
+ const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
+
+ Status validationStatus;
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ validationStatus = validateRaw(availableStreams, maxResolution);
+ else
+ validationStatus = validateYuv(availableStreams, maxResolution);
+
+ if (validationStatus == Invalid)
+ return Invalid;
+
+ if (validationStatus == Adjusted)
+ status = Adjusted;
+
+ /*
+ * Sensor format selection policy: the first stream selects the media
+ * bus code to use, the largest stream selects the size.
+ *
+ * \todo The sensor format selection policy could be changed to
+ * prefer operating the sensor at full resolution to prioritize
+ * image quality in exchange for a usually slower frame rate.
+ * Usage of the STILL_CAPTURE role could be considered for this.
+ */
+ Size maxSize;
+ for (const auto &cfg : config_) {
+ if (cfg.size > maxSize)
+ maxSize = cfg.size;
+ }
+
+ PixelFormat pixelFormat = config_[0].pixelFormat;
+
+ V4L2SubdeviceFormat sensorFormat{};
+ sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
+ sensorFormat.size = maxSize;
+
+ LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
+
+ /*
+ * We can't use CameraSensor::getFormat() as it might return a
+ * format larger than our strict width limit, because that function
+ * prioritizes formats with the same aspect ratio over formats with
+ * less difference in size.
+ *
+ * Manually walk all the sensor supported sizes searching for
+ * the smallest larger format without considering the aspect ratio
+ * as the ISI can freely scale.
+ */
+ auto sizes = sensor->sizes(sensorFormat.code);
+ Size bestSize;
+
+ for (const Size &s : sizes) {
+ /* Ignore smaller sizes. */
+ if (s.width < sensorFormat.size.width ||
+ s.height < sensorFormat.size.height)
+ continue;
+
+ /* Make sure the width stays in the limits. */
+ if (s.width > maxResolution.width)
+ continue;
+
+ bestSize = s;
+ break;
+ }
+
+ /*
+ * This should happen only if the sensor can only produce formats that
+ * exceed the maximum allowed input width.
+ */
+ if (bestSize.isNull()) {
+ LOG(ISI, Error) << "Unable to find a suitable sensor format";
+ return Invalid;
+ }
+
+ sensorFormat_.code = sensorFormat.code;
+ sensorFormat_.size = bestSize;
+
+ LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
+
+ return status;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+/*
+ * Generate a StreamConfiguration for YUV/RGB use case.
+ *
+ * Verify if the sensor can produce a YUV/RGB media bus format and collect
+ * all the processed pixel formats the ISI can generate as supported stream
+ * configurations.
+ */
+StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
+ const Size &size)
+{
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::YUYV;
+ unsigned int mbusCode;
+
+ mbusCode = data->getYuvMediaBusFormat(pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /* Adjust the requested size to the sensor's capabilities. */
+ V4L2SubdeviceFormat sensorFmt;
+ sensorFmt.code = mbusCode;
+ sensorFmt.size = size;
+
+ int ret = data->sensor_->tryFormat(&sensorFmt);
+ if (ret) {
+ LOG(ISI, Error) << "Failed to try sensor format.";
+ return {};
+ }
+
+ Size sensorSize = sensorFmt.size;
+
+ /*
+ * Populate the StreamConfiguration.
+ *
+ * As the sensor supports at least one YUV/RGB media bus format all the
+ * processed ones in formatsMap_ can be generated from it.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+
+ for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = sensorSize;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+/*
+ * Generate a StreamConfiguration for Raw Bayer use case. Verify if the sensor
+ * can produce the requested RAW Bayer format and, if necessary, adjust it to
+ * the one with the largest bit-depth the sensor can produce.
+ */
+StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
+{
+ static const std::map<unsigned int, PixelFormat> rawFormats = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
+ };
+
+ ISICameraData *data = cameraData(camera);
+ PixelFormat pixelFormat = formats::SBGGR10;
+ unsigned int mbusCode;
+
+ /* pixelFormat will be adjusted, if the sensor can produce RAW. */
+ mbusCode = data->getRawMediaBusFormat(&pixelFormat);
+ if (!mbusCode)
+ return {};
+
+ /*
+ * Populate the StreamConfiguration with all the supported Bayer
+ * formats the sensor can produce.
+ */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ const CameraSensor *sensor = data->sensor_.get();
+
+ for (unsigned int code : sensor->mbusCodes()) {
+ /* Find a Bayer media bus code from the sensor. */
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
+ if (!bayerFormat.isValid())
+ continue;
+
+ auto it = rawFormats.find(code);
+ if (it == rawFormats.end()) {
+ LOG(ISI, Warning) << bayerFormat
+ << " not supported in ISI formats map.";
+ continue;
+ }
+
+ streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
+ }
+
+ StreamFormats formats(streamFormats);
+
+ StreamConfiguration cfg(formats);
+ cfg.size = sensor->resolution();
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+
+ return cfg;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerISI::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ ISICameraData *data = cameraData(camera);
+ std::unique_ptr<ISICameraConfiguration> config =
+ std::make_unique<ISICameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ if (roles.size() > data->streams_.size()) {
+ LOG(ISI, Error) << "Only up to " << data->streams_.size()
+ << " streams are supported";
+ return nullptr;
+ }
+
+ for (const auto &role : roles) {
+ /*
+ * Prefer the following formats:
+ * - Still Capture: Full resolution YUYV
+ * - ViewFinder/VideoRecording: 1080p YUYV
+ * - RAW: Full resolution Bayer
+ */
+ StreamConfiguration cfg;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
+ Size size = role == StreamRole::StillCapture
+ ? data->sensor_->resolution()
+ : PipelineHandlerISI::kPreviewSize;
+ cfg = generateYUVConfiguration(camera, size);
+ if (cfg.pixelFormat.isValid())
+ break;
+
+ /*
+ * Fallback to use a Bayer format if that's what the
+ * sensor supports.
+ */
+ [[fallthrough]];
+
+ }
+
+ case StreamRole::Raw: {
+ cfg = generateRawConfiguration(camera);
+ break;
+ }
+
+ default:
+ LOG(ISI, Error) << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ if (!cfg.pixelFormat.isValid()) {
+ LOG(ISI, Error)
+ << "Cannot generate configuration for role: " << role;
+ return nullptr;
+ }
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
+{
+ ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
+ ISICameraData *data = cameraData(camera);
+
+ /* All links are immutable except the sensor -> csis link. */
+ const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
+ sensorSrc->links()[0]->setEnabled(true);
+
+ /*
+ * Reset the crossbar switch routing and enable one route for each
+ * requested stream configuration.
+ *
+ * \todo Handle concurrent usage of multiple cameras by adjusting the
+ * routing table instead of resetting it.
+ */
+ V4L2Subdevice::Routing routing = {};
+ unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
+
+ for (const auto &[idx, config] : utils::enumerate(*c)) {
+ uint32_t sourcePad = xbarFirstSource + idx;
+ routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
+ V4L2Subdevice::Stream{ sourcePad, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ }
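To make the pad arithmetic concrete, assuming the i.MX8MP crossbar layout of two CSIS sink pads plus one memory-input sink pad followed by two source pads:

/* Pads: [0] csis0 sink, [1] csis1 sink, [2] memory-input sink,
 * [3] and [4] ISI pipeline sources. */
unsigned int numPads = 5;
unsigned int xbarFirstSource = numPads / 2 + 1; /* 5 / 2 + 1 == 3 */
/* Stream 0 is routed from xbarSink_ to source pad 3, stream 1 to pad 4. */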
+
+ int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /* Apply format to the sensor and CSIS receiver. */
+ V4L2SubdeviceFormat format = camConfig->sensorFormat_;
+ ret = data->sensor_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ ret = data->csis_->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ ret = crossbar_->setFormat(data->xbarSink_, &format);
+ if (ret)
+ return ret;
+
+ /* Now configure the ISI and video node instances, one per stream. */
+ data->enabledStreams_.clear();
+ for (const auto &config : *c) {
+ Pipe *pipe = pipeFromStream(camera, config.stream());
+
+ /*
+ * Set the format on the ISI sink pad: it must match what is
+ * received by the CSIS.
+ */
+ ret = pipe->isi->setFormat(0, &format);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the ISI sink compose rectangle to downscale the
+ * image.
+ *
+ * \todo Additional cropping could be applied on the ISI source
+ * pad to further reduce the output image size.
+ */
+ Rectangle isiScale(config.size);
+ ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the format on ISI source pad: only the media bus code
+ * is relevant as it configures format conversion, while the
+ * size is taken from the sink's COMPOSE (or source's CROP,
+ * if any) rectangles.
+ */
+ unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
+
+ V4L2SubdeviceFormat isiFormat{};
+ isiFormat.code = isiCode;
+ isiFormat.size = config.size;
+
+ ret = pipe->isi->setFormat(1, &isiFormat);
+ if (ret)
+ return ret;
+
+ V4L2DeviceFormat captureFmt{};
+ captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
+ captureFmt.size = config.size;
+
+ /* \todo Set stride and format. */
+ ret = pipe->capture->setFormat(&captureFmt);
+ if (ret)
+ return ret;
+
+ /* Store the list of enabled streams for later use. */
+ data->enabledStreams_.push_back(config.stream());
+ }
+
+ return 0;
+}
+
+int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ unsigned int count = stream->configuration().bufferCount;
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ return pipe->capture->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerISI::start(Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+ const StreamConfiguration &config = stream->configuration();
+
+ int ret = pipe->capture->importBuffers(config.bufferCount);
+ if (ret)
+ return ret;
+
+ ret = pipe->capture->streamOn();
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerISI::stopDevice(Camera *camera)
+{
+ ISICameraData *data = cameraData(camera);
+
+ for (const auto &stream : data->enabledStreams_) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ pipe->capture->streamOff();
+ pipe->capture->releaseBuffers();
+ }
+}
+
+int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
+{
+ for (auto &[stream, buffer] : request->buffers()) {
+ Pipe *pipe = pipeFromStream(camera, stream);
+
+ int ret = pipe->capture->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("mxc-isi");
+ dm.add("crossbar");
+ dm.add("mxc_isi.0");
+ dm.add("mxc_isi.0.capture");
+
+ isiDev_ = acquireMediaDevice(enumerator, dm);
+ if (!isiDev_)
+ return false;
+
+ /*
+ * Acquire the subdevs and video nodes for the crossbar switch and the
+ * processing pipelines.
+ */
+ crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
+ if (!crossbar_)
+ return false;
+
+ int ret = crossbar_->open();
+ if (ret)
+ return false;
+
+ for (unsigned int i = 0; ; ++i) {
+ std::string entityName = "mxc_isi." + std::to_string(i);
+ std::unique_ptr<V4L2Subdevice> isi =
+ V4L2Subdevice::fromEntityName(isiDev_, entityName);
+ if (!isi)
+ break;
+
+ ret = isi->open();
+ if (ret)
+ return false;
+
+ entityName += ".capture";
+ std::unique_ptr<V4L2VideoDevice> capture =
+ V4L2VideoDevice::fromEntityName(isiDev_, entityName);
+ if (!capture)
+ return false;
+
+ capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
+
+ ret = capture->open();
+ if (ret)
+ return false;
+
+ pipes_.push_back({ std::move(isi), std::move(capture) });
+ }
+
+ if (pipes_.empty()) {
+ LOG(ISI, Error) << "Unable to enumerate pipes";
+ return false;
+ }
+
+ /*
+ * Loop over all the crossbar switch sink pads to find connected CSI-2
+ * receivers and camera sensors.
+ */
+ unsigned int numCameras = 0;
+ unsigned int numSinks = 0;
+ for (MediaPad *pad : crossbar_->entity()->pads()) {
+ unsigned int sink = numSinks;
+
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ /*
+ * Count each crossbar sink pad to correctly configure
+ * routing and format for this camera.
+ */
+ numSinks++;
+
+ MediaEntity *csi = pad->links()[0]->source()->entity();
+ if (csi->pads().size() != 2) {
+ LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
+ << csi->name();
+ continue;
+ }
+
+ pad = csi->pads()[0];
+ if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
+ continue;
+
+ MediaEntity *sensor = pad->links()[0]->source()->entity();
+ if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ LOG(ISI, Debug) << "Skip unsupported subdevice "
+ << sensor->name();
+ continue;
+ }
+
+ /* Create the camera data. */
+ std::unique_ptr<ISICameraData> data =
+ std::make_unique<ISICameraData>(this);
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ data->csis_ = std::make_unique<V4L2Subdevice>(csi);
+ data->xbarSink_ = sink;
+
+ ret = data->init();
+ if (ret) {
+ LOG(ISI, Error) << "Failed to initialize camera data";
+ return false;
+ }
+
+ /* Register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &s) { return &s; });
+
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+
+ registerCamera(std::move(camera));
+ numCameras++;
+ }
+
+ return numCameras > 0;
+}
+
+PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
+ const Stream *stream)
+{
+ ISICameraData *data = cameraData(camera);
+ unsigned int pipeIndex = data->pipeIndex(stream);
+
+ ASSERT(pipeIndex < pipes_.size());
+
+ return &pipes_[pipeIndex];
+}
+
+void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ ControlList &metadata = request->metadata();
+ if (!metadata.contains(controls::SensorTimestamp.id()))
+ metadata.set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ completeBuffer(request, buffer);
+ if (request->hasPendingBuffers())
+ return;
+
+ completeRequest(request);
+}
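On the application side, the timestamp recorded here can be read back once the request completes. A minimal sketch, assuming a completed Request delivered to a hypothetical requestCompleted handler:

#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/request.h>

void onRequestCompleted(libcamera::Request *request)
{
	const libcamera::ControlList &metadata = request->metadata();
	const auto timestamp = metadata.get(libcamera::controls::SensorTimestamp);
	if (timestamp)
		std::cout << "Exposure started at " << *timestamp << " ns\n";
}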
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build
new file mode 100644
index 00000000..b369b031
--- /dev/null
+++ b/src/libcamera/pipeline/imx8-isi/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'imx8-isi.cpp'
+])
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
new file mode 100644
index 00000000..aa544d7b
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 CIO2
+ */
+
+#include "cio2.h"
+
+#include <cmath>
+#include <limits>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+#include <libcamera/transform.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+namespace {
+
+const std::map<uint32_t, PixelFormat> mbusCodesToPixelFormat = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10_IPU3 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10_IPU3 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10_IPU3 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10_IPU3 },
+};
+
+} /* namespace */
+
+CIO2Device::CIO2Device()
+{
+}
+
+/**
+ * \brief Retrieve the list of supported PixelFormats
+ *
+ * Retrieve the list of supported pixel formats by matching the sensor produced
+ * media bus codes with the formats supported by the CIO2 unit.
+ *
+ * \return The list of supported PixelFormat
+ */
+std::vector<PixelFormat> CIO2Device::formats() const
+{
+ if (!sensor_)
+ return {};
+
+ std::vector<PixelFormat> formats;
+ for (unsigned int code : sensor_->mbusCodes()) {
+ auto it = mbusCodesToPixelFormat.find(code);
+ if (it != mbusCodesToPixelFormat.end())
+ formats.push_back(it->second);
+ }
+
+ return formats;
+}
+
+/**
+ * \brief Retrieve the list of supported size ranges
+ * \param[in] format The pixel format
+ *
+ * Retrieve the list of supported sizes for a particular \a format by matching
+ * the sensor produced media bus codes with the formats supported by the CIO2
+ * unit.
+ *
+ * \return A list of supported sizes for the \a format or an empty list
+ * otherwise
+ */
+std::vector<SizeRange> CIO2Device::sizes(const PixelFormat &format) const
+{
+ int mbusCode = -1;
+
+ if (!sensor_)
+ return {};
+
+ std::vector<SizeRange> sizes;
+ for (const auto &iter : mbusCodesToPixelFormat) {
+ if (iter.second != format)
+ continue;
+
+ mbusCode = iter.first;
+ break;
+ }
+
+ if (mbusCode == -1)
+ return {};
+
+ for (const Size &sz : sensor_->sizes(mbusCode))
+ sizes.emplace_back(sz);
+
+ return sizes;
+}
+
+/**
+ * \brief Initialize components of the CIO2 device with \a index
+ * \param[in] media The CIO2 media device
+ * \param[in] index The CIO2 device index
+ *
+ * Create and open the video device and subdevices in the CIO2 instance at \a
+ * index, if a supported image sensor is connected to the CSI-2 receiver of
+ * this CIO2 instance. Enable the media links connecting the CIO2 components
+ * to prepare for capture operations and cache the sensor maximum size.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -ENODEV No supported image sensor is connected to this CIO2 instance
+ */
+int CIO2Device::init(const MediaDevice *media, unsigned int index)
+{
+ int ret;
+
+ /*
+ * Verify that a sensor subdevice is connected to this CIO2 instance
+ * and enable the media link between the two.
+ */
+ std::string csi2Name = "ipu3-csi2 " + std::to_string(index);
+ MediaEntity *csi2Entity = media->getEntityByName(csi2Name);
+ const std::vector<MediaPad *> &pads = csi2Entity->pads();
+ if (pads.empty())
+ return -ENODEV;
+
+ /* IPU3 CSI-2 receivers have a single sink pad at index 0. */
+ MediaPad *sink = pads[0];
+ const std::vector<MediaLink *> &links = sink->links();
+ if (links.empty())
+ return -ENODEV;
+
+ MediaLink *link = links[0];
+ MediaEntity *sensorEntity = link->source()->entity();
+ sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!sensor_)
+ return -ENODEV;
+
+ ret = link->setEnabled(true);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure the sensor produces at least one format compatible with
+ * the CIO2 requirements.
+ *
+ * utils::set_overlap requires the ranges to be sorted, keep the
+ * cio2Codes vector sorted in ascending order.
+ */
+ std::vector<unsigned int> cio2Codes = utils::map_keys(mbusCodesToPixelFormat);
+ const std::vector<unsigned int> &sensorCodes = sensor_->mbusCodes();
+ if (!utils::set_overlap(sensorCodes.begin(), sensorCodes.end(),
+ cio2Codes.begin(), cio2Codes.end())) {
+ LOG(IPU3, Error)
+ << "Sensor " << sensor_->entity()->name()
+ << " has not format compatible with the IPU3";
+ return -EINVAL;
+ }
+
+ /*
+ * \todo Define when to open and close video device nodes, as they
+ * might impact on power consumption.
+ */
+
+ csi2_ = std::make_unique<V4L2Subdevice>(csi2Entity);
+ ret = csi2_->open();
+ if (ret)
+ return ret;
+
+ std::string cio2Name = "ipu3-cio2 " + std::to_string(index);
+ output_ = V4L2VideoDevice::fromEntityName(media, cio2Name);
+ return output_->open();
+}
+
+/**
+ * \brief Configure the CIO2 unit
+ * \param[in] size The requested CIO2 output frame size
+ * \param[in] transform The transformation to be applied on the image sensor
+ * \param[out] outputFormat The CIO2 unit output image format
+ * \return 0 on success or a negative error code otherwise
+ */
+int CIO2Device::configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat)
+{
+ V4L2SubdeviceFormat sensorFormat;
+ int ret;
+
+ /*
+ * Apply the selected format to the sensor, the CSI-2 receiver and
+ * the CIO2 output device.
+ */
+ std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
+ sensorFormat = getSensorFormat(mbusCodes, size);
+ ret = sensor_->setFormat(&sensorFormat, transform);
+ if (ret)
+ return ret;
+
+ ret = csi2_->setFormat(0, &sensorFormat);
+ if (ret)
+ return ret;
+
+ const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
+ if (itInfo == mbusCodesToPixelFormat.end())
+ return -EINVAL;
+
+ outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
+ outputFormat->size = sensorFormat.size;
+ outputFormat->planesCount = 1;
+
+ ret = output_->setFormat(outputFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "CIO2 output format " << *outputFormat;
+
+ return 0;
+}
+
+StreamConfiguration CIO2Device::generateConfiguration(Size size) const
+{
+ StreamConfiguration cfg;
+
+ /* If no desired size is requested, use the sensor resolution. */
+ if (size.isNull())
+ size = sensor_->resolution();
+
+ /* Query the sensor static information for closest match. */
+ std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
+ V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
+ if (!sensorFormat.code) {
+ LOG(IPU3, Error) << "Sensor does not support mbus code";
+ return {};
+ }
+
+ cfg.size = sensorFormat.size;
+ cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
+ cfg.bufferCount = kBufferCount;
+
+ return cfg;
+}
+
+/**
+ * \brief Retrieve the best sensor format for a desired output
+ * \param[in] mbusCodes The list of acceptable media bus codes
+ * \param[in] size The desired size
+ *
+ * Media bus codes are selected from \a mbusCodes, which lists all acceptable
+ * codes in decreasing order of preference. Media bus codes supported by the
+ * sensor but not listed in \a mbusCodes are ignored. If none of the desired
+ * codes is supported, an empty format is returned.
+ *
+ * \a size indicates the desired size at the output of the sensor. This method
+ * selects the best media bus code and size supported by the sensor according
+ * to the following criteria.
+ *
+ * - The desired \a size shall fit in the sensor output size to avoid the need
+ * to up-scale.
+ * - The aspect ratio of sensor output size shall be as close as possible to
+ * the sensor's native resolution field of view.
+ * - The sensor output size shall be as small as possible to lower the required
+ * bandwidth.
+ * - The desired \a size shall be supported by one of the media bus code listed
+ * in \a mbusCodes.
+ *
+ * When multiple media bus codes can produce the same size, the code at the
+ * lowest position in \a mbusCodes is selected.
+ *
+ * The returned sensor output format is guaranteed to be acceptable by the
+ * setFormat() method without any modification.
+ *
+ * \return The best sensor output format matching the desired media bus codes
+ * and size on success, or an empty format otherwise.
+ */
+V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = std::numeric_limits<unsigned int>::max();
+ const Size &resolution = sensor_->resolution();
+ float desiredRatio = static_cast<float>(resolution.width) /
+ resolution.height;
+ float bestRatio = std::numeric_limits<float>::max();
+ Size bestSize;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto sizes = sensor_->sizes(code);
+ if (!sizes.size())
+ continue;
+
+ for (const Size &sz : sizes) {
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ /*
+ * Ratios can differ by a small mantissa difference which
+ * can wildly affect the selection of the sensor output
+ * size. We are interested in selecting the size closest
+ * to the desired output size, hence comparing ratios
+ * truncated to a single decimal digit is enough.
+ */
+ ratio = static_cast<unsigned int>(ratio * 10) / 10.0;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (bestSize.isNull()) {
+ LOG(IPU3, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{};
+ format.code = bestCode;
+ format.size = bestSize;
+
+ return format;
+}
+
+int CIO2Device::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ return output_->exportBuffers(count, buffers);
+}
+
+int CIO2Device::start()
+{
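+ /*
+ * Export the CIO2 buffers to obtain dmabuf-backed frame buffers, then
+ * switch the output device to import mode so that both the internal
+ * buffers and buffers provided in requests can be queued to it.
+ */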
+ int ret = output_->exportBuffers(kBufferCount, &buffers_);
+ if (ret < 0)
+ return ret;
+
+ ret = output_->importBuffers(kBufferCount);
+ if (ret)
+ LOG(IPU3, Error) << "Failed to import CIO2 buffers";
+
+ for (std::unique_ptr<FrameBuffer> &buffer : buffers_)
+ availableBuffers_.push(buffer.get());
+
+ ret = output_->streamOn();
+ if (ret) {
+ freeBuffers();
+ return ret;
+ }
+
+ ret = csi2_->setFrameStartEnabled(true);
+ if (ret) {
+ stop();
+ return ret;
+ }
+
+ return 0;
+}
+
+int CIO2Device::stop()
+{
+ int ret;
+
+ csi2_->setFrameStartEnabled(false);
+
+ ret = output_->streamOff();
+
+ freeBuffers();
+
+ return ret;
+}
+
+FrameBuffer *CIO2Device::queueBuffer(Request *request, FrameBuffer *rawBuffer)
+{
+ FrameBuffer *buffer = rawBuffer;
+
+ /* If no buffer is provided in the request, use an internal one. */
+ if (!buffer) {
+ if (availableBuffers_.empty()) {
+ LOG(IPU3, Debug) << "CIO2 buffer underrun";
+ return nullptr;
+ }
+
+ buffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ buffer->_d()->setRequest(request);
+ }
+
+ int ret = output_->queueBuffer(buffer);
+ if (ret)
+ return nullptr;
+
+ return buffer;
+}
+
+void CIO2Device::tryReturnBuffer(FrameBuffer *buffer)
+{
+ /*
+ * \todo Once more pipelines deal with buffers that may be allocated
+ * internally or externally, this pattern might become a common need. At
+ * that point this check should be moved to something clever in
+ * FrameBuffer.
+ */
+ for (const std::unique_ptr<FrameBuffer> &buf : buffers_) {
+ if (buf.get() == buffer) {
+ availableBuffers_.push(buffer);
+ break;
+ }
+ }
+
+ bufferAvailable.emit();
+}
+
+void CIO2Device::freeBuffers()
+{
+ availableBuffers_ = {};
+ buffers_.clear();
+
+ if (output_->releaseBuffers())
+ LOG(IPU3, Error) << "Failed to release CIO2 buffers";
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/cio2.h b/src/libcamera/pipeline/ipu3/cio2.h
new file mode 100644
index 00000000..963c2f6b
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/cio2.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 CIO2
+ */
+
+#pragma once
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class CameraSensor;
+class FrameBuffer;
+class MediaDevice;
+class PixelFormat;
+class Request;
+class Size;
+class SizeRange;
+struct StreamConfiguration;
+enum class Transform;
+
+class CIO2Device
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ CIO2Device();
+
+ std::vector<PixelFormat> formats() const;
+ std::vector<SizeRange> sizes(const PixelFormat &format) const;
+
+ int init(const MediaDevice *media, unsigned int index);
+ int configure(const Size &size, const Transform &transform,
+ V4L2DeviceFormat *outputFormat);
+
+ StreamConfiguration generateConfiguration(Size size) const;
+
+ int exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ V4L2SubdeviceFormat getSensorFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size) const;
+
+ int start();
+ int stop();
+
+ CameraSensor *sensor() { return sensor_.get(); }
+ const CameraSensor *sensor() const { return sensor_.get(); }
+
+ FrameBuffer *queueBuffer(Request *request, FrameBuffer *rawBuffer);
+ void tryReturnBuffer(FrameBuffer *buffer);
+ Signal<FrameBuffer *> &bufferReady() { return output_->bufferReady; }
+ Signal<uint32_t> &frameStart() { return csi2_->frameStart; }
+
+ Signal<> bufferAvailable;
+
+private:
+ void freeBuffers();
+
+ void cio2BufferReady(FrameBuffer *buffer);
+
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> csi2_;
+ std::unique_ptr<V4L2VideoDevice> output_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> buffers_;
+ std::queue<FrameBuffer *> availableBuffers_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp
new file mode 100644
index 00000000..bc0526a7
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/frames.cpp
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Intel IPU3 Frames helper
+ */
+
+#include "frames.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+IPU3Frames::IPU3Frames()
+{
+}
+
+void IPU3Frames::init(const std::vector<std::unique_ptr<FrameBuffer>> &paramBuffers,
+ const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers)
+{
+ for (const std::unique_ptr<FrameBuffer> &buffer : paramBuffers)
+ availableParamBuffers_.push(buffer.get());
+
+ for (const std::unique_ptr<FrameBuffer> &buffer : statBuffers)
+ availableStatBuffers_.push(buffer.get());
+
+ frameInfo_.clear();
+}
+
+void IPU3Frames::clear()
+{
+ availableParamBuffers_ = {};
+ availableStatBuffers_ = {};
+}
+
+IPU3Frames::Info *IPU3Frames::create(Request *request)
+{
+ unsigned int id = request->sequence();
+
+ if (availableParamBuffers_.empty()) {
+ LOG(IPU3, Debug) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (availableStatBuffers_.empty()) {
+ LOG(IPU3, Debug) << "Statistics buffer underrun";
+ return nullptr;
+ }
+
+ FrameBuffer *paramBuffer = availableParamBuffers_.front();
+ FrameBuffer *statBuffer = availableStatBuffers_.front();
+
+ paramBuffer->_d()->setRequest(request);
+ statBuffer->_d()->setRequest(request);
+
+ availableParamBuffers_.pop();
+ availableStatBuffers_.pop();
+
+ /* \todo Remove the dynamic allocation of Info */
+ std::unique_ptr<Info> info = std::make_unique<Info>();
+
+ info->id = id;
+ info->request = request;
+ info->rawBuffer = nullptr;
+ info->paramBuffer = paramBuffer;
+ info->statBuffer = statBuffer;
+ info->paramDequeued = false;
+ info->metadataProcessed = false;
+
+ frameInfo_[id] = std::move(info);
+
+ return frameInfo_[id].get();
+}
+
+void IPU3Frames::remove(IPU3Frames::Info *info)
+{
+ /* Return params and stat buffer for reuse. */
+ availableParamBuffers_.push(info->paramBuffer);
+ availableStatBuffers_.push(info->statBuffer);
+
+ /* Delete the extended frame information. */
+ frameInfo_.erase(info->id);
+}
+
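+/*
+ * A frame can complete only once all the buffers of its request have been
+ * returned, the IPA has processed the metadata and the parameter buffer
+ * has been dequeued. When all conditions are met, the frame information
+ * is removed, its param and stat buffers are recycled, and the
+ * bufferAvailable signal is emitted.
+ */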
+bool IPU3Frames::tryComplete(IPU3Frames::Info *info)
+{
+ Request *request = info->request;
+
+ if (request->hasPendingBuffers())
+ return false;
+
+ if (!info->metadataProcessed)
+ return false;
+
+ if (!info->paramDequeued)
+ return false;
+
+ remove(info);
+
+ bufferAvailable.emit();
+
+ return true;
+}
+
+IPU3Frames::Info *IPU3Frames::find(unsigned int id)
+{
+ const auto &itInfo = frameInfo_.find(id);
+
+ if (itInfo != frameInfo_.end())
+ return itInfo->second.get();
+
+ LOG(IPU3, Fatal) << "Can't find tracking information for frame " << id;
+
+ return nullptr;
+}
+
+IPU3Frames::Info *IPU3Frames::find(FrameBuffer *buffer)
+{
+ for (auto const &itInfo : frameInfo_) {
+ Info *info = itInfo.second.get();
+
+ for (auto const &itBuffers : info->request->buffers())
+ if (itBuffers.second == buffer)
+ return info;
+
+ if (info->rawBuffer == buffer || info->paramBuffer == buffer ||
+ info->statBuffer == buffer)
+ return info;
+ }
+
+ LOG(IPU3, Fatal) << "Can't find tracking information from buffer";
+
+ return nullptr;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/frames.h b/src/libcamera/pipeline/ipu3/frames.h
new file mode 100644
index 00000000..a347b66f
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/frames.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Intel IPU3 Frames helper
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class FrameBuffer;
+class IPAProxy;
+class PipelineHandler;
+class Request;
+class V4L2VideoDevice;
+struct IPABuffer;
+
+class IPU3Frames
+{
+public:
+ struct Info {
+ unsigned int id;
+ Request *request;
+
+ FrameBuffer *rawBuffer;
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ ControlList effectiveSensorControls;
+
+ bool paramDequeued;
+ bool metadataProcessed;
+ };
+
+ IPU3Frames();
+
+ void init(const std::vector<std::unique_ptr<FrameBuffer>> &paramBuffers,
+ const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers);
+ void clear();
+
+ Info *create(Request *request);
+ void remove(Info *info);
+ bool tryComplete(Info *info);
+
+ Info *find(unsigned int id);
+ Info *find(FrameBuffer *buffer);
+
+ Signal<> bufferAvailable;
+
+private:
+ std::queue<FrameBuffer *> availableParamBuffers_;
+ std::queue<FrameBuffer *> availableStatBuffers_;
+
+ std::map<unsigned int, std::unique_ptr<Info>> frameInfo_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
new file mode 100644
index 00000000..7be78091
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -0,0 +1,767 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 ImgU
+ */
+
+#include "imgu.h"
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/media_device.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(IPU3)
+
+namespace {
+
+/*
+ * The procedure to calculate the ImgU pipe configuration has been ported
+ * from the pipe_config.py python script, available at:
+ * https://github.com/intel/intel-ipu3-pipecfg
+ * at revision: 243d13446e44 ("Fix some bug for some resolutions")
+ */
+
+/* BDS scaling factors: min=1, max=2.5, step=1/32 */
+const std::vector<float> bdsScalingFactors = {
+ 1, 1.03125, 1.0625, 1.09375, 1.125, 1.15625, 1.1875, 1.21875, 1.25,
+ 1.28125, 1.3125, 1.34375, 1.375, 1.40625, 1.4375, 1.46875, 1.5, 1.53125,
+ 1.5625, 1.59375, 1.625, 1.65625, 1.6875, 1.71875, 1.75, 1.78125, 1.8125,
+ 1.84375, 1.875, 1.90625, 1.9375, 1.96875, 2, 2.03125, 2.0625, 2.09375,
+ 2.125, 2.15625, 2.1875, 2.21875, 2.25, 2.28125, 2.3125, 2.34375, 2.375,
+ 2.40625, 2.4375, 2.46875, 2.5
+};
+
+/* GDC scaling factors: min=1, max=16, step=1/4 */
+const std::vector<float> gdcScalingFactors = {
+ 1, 1.25, 1.5, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25,
+ 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6, 6.25, 6.5, 6.75, 7, 7.25, 7.5, 7.75,
+ 8, 8.25, 8.5, 8.75, 9, 9.25, 9.5, 9.75, 10, 10.25, 10.5, 10.75, 11,
+ 11.25, 11.5, 11.75, 12, 12.25, 12.5, 12.75, 13, 13.25, 13.5, 13.75, 14,
+ 14.25, 14.5, 14.75, 15, 15.25, 15.5, 15.75, 16,
+};
+
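+/*
+ * Candidate pipe configurations, populated by calculateBDSHeight() and
+ * consumed by ImgUDevice::calculatePipeConfig().
+ */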
+std::vector<ImgUDevice::PipeConfig> pipeConfigs;
+
+struct FOV {
+ float w;
+ float h;
+
+ bool isLarger(const FOV &other)
+ {
+ if (w > other.w)
+ return true;
+ if (w == other.w && h > other.h)
+ return true;
+ return false;
+ }
+};
+
+/* Approximate a scaling factor sf to the closest one available in a range. */
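+/*
+ * For example, with the BDS factors above (1/32 steps),
+ * findScaleFactor(1.4, bdsScalingFactors) returns 1.40625, the nearest
+ * entry, while passing roundDown = true returns 1.375, the closest entry
+ * not larger than 1.4.
+ */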
+float findScaleFactor(float sf, const std::vector<float> &range,
+ bool roundDown = false)
+{
+ if (sf <= range[0])
+ return range[0];
+ if (sf >= range[range.size() - 1])
+ return range[range.size() - 1];
+
+ float bestDiff = std::numeric_limits<float>::max();
+ unsigned int index = 0;
+ for (unsigned int i = 0; i < range.size(); ++i) {
+ float diff = utils::abs_diff(sf, range[i]);
+ if (diff < bestDiff) {
+ bestDiff = diff;
+ index = i;
+ }
+ }
+
+ if (roundDown && index > 0 && sf < range[index])
+ index--;
+
+ return range[index];
+}
+
+bool isSameRatio(const Size &in, const Size &out)
+{
+ float inRatio = static_cast<float>(in.width) / in.height;
+ float outRatio = static_cast<float>(out.width) / out.height;
+
+ if (utils::abs_diff(inRatio, outRatio) > 0.1)
+ return false;
+
+ return true;
+}
+
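+/*
+ * Search for an IF height that, scaled by bdsSF, produces an integer and
+ * correctly aligned BDS height. When the input and GDC aspect ratios
+ * differ, the estimated IF height is probed downwards first and then
+ * upwards in kIFAlignHeight steps, and only the first match is recorded.
+ * Otherwise every aligned IF height satisfying the constraints is added
+ * to the candidate pipe configurations.
+ */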
+void calculateBDSHeight(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc,
+ unsigned int bdsWidth, float bdsSF)
+{
+ unsigned int minIFHeight = iif.height - ImgUDevice::kIFMaxCropHeight;
+ unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
+ unsigned int ifHeight;
+ float bdsHeight;
+
+ if (!isSameRatio(pipe->input, gdc)) {
+ unsigned int foundIfHeight = 0;
+ float estIFHeight = (iif.width * gdc.height) /
+ static_cast<float>(gdc.width);
+ estIFHeight = std::clamp<float>(estIFHeight, minIFHeight, iif.height);
+
+ ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
+ ifHeight / bdsSF >= minBDSHeight) {
+
+ float height = ifHeight / bdsSF;
+ if (std::fmod(height, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(height);
+
+ if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ foundIfHeight = ifHeight;
+ bdsHeight = height;
+ break;
+ }
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
+ ifHeight / bdsSF >= minBDSHeight) {
+
+ float height = ifHeight / bdsSF;
+ if (std::fmod(height, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(height);
+
+ if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ foundIfHeight = ifHeight;
+ bdsHeight = height;
+ break;
+ }
+ }
+
+ ifHeight += ImgUDevice::kIFAlignHeight;
+ }
+
+ if (foundIfHeight) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+
+ pipeConfigs.push_back({ bdsSF, { iif.width, foundIfHeight },
+ { bdsWidth, bdsIntHeight }, gdc });
+ return;
+ }
+ } else {
+ ifHeight = utils::alignUp(iif.height, ImgUDevice::kIFAlignHeight);
+ while (ifHeight >= minIFHeight && ifHeight / bdsSF >= minBDSHeight) {
+
+ bdsHeight = ifHeight / bdsSF;
+ if (std::fmod(ifHeight, 1.0) == 0 && std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+
+ if (!(ifHeight % ImgUDevice::kIFAlignHeight) &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
+ pipeConfigs.push_back({ bdsSF, { iif.width, ifHeight },
+ { bdsWidth, bdsIntHeight }, gdc });
+ }
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+ }
+}
+
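+/*
+ * Scan the scaling factors from the initial estimate, first upwards and
+ * then downwards in kBDSSfStep increments. For every factor that yields
+ * integer, correctly aligned BDS dimensions large enough for the GDC
+ * output plus the filter margins, delegate to calculateBDSHeight() to
+ * record the candidate configurations.
+ */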
+void calculateBDS(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc, float bdsSF)
+{
+ unsigned int minBDSWidth = gdc.width + ImgUDevice::kFilterWidth * 2;
+ unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
+
+ float sf = bdsSF;
+ while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
+ float bdsWidth = static_cast<float>(iif.width) / sf;
+ float bdsHeight = static_cast<float>(iif.height) / sf;
+
+ if (std::fmod(bdsWidth, 1.0) == 0 &&
+ std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+ if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
+ calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
+ }
+
+ sf += ImgUDevice::kBDSSfStep;
+ }
+
+ sf = bdsSF;
+ while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
+ float bdsWidth = static_cast<float>(iif.width) / sf;
+ float bdsHeight = static_cast<float>(iif.height) / sf;
+
+ if (std::fmod(bdsWidth, 1.0) == 0 &&
+ std::fmod(bdsHeight, 1.0) == 0) {
+ unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
+ unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
+ if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
+ !(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
+ calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
+ }
+
+ sf -= ImgUDevice::kBDSSfStep;
+ }
+}
+
+Size calculateGDC(ImgUDevice::Pipe *pipe)
+{
+ const Size &in = pipe->input;
+ const Size &main = pipe->main;
+ const Size &vf = pipe->viewfinder;
+ Size gdc;
+
+ if (!vf.isNull()) {
+ gdc.width = main.width;
+
+ float ratio = (main.width * vf.height) / static_cast<float>(vf.width);
+ gdc.height = std::max(static_cast<float>(main.height), ratio);
+
+ return gdc;
+ }
+
+ if (!isSameRatio(in, main)) {
+ gdc = main;
+ return gdc;
+ }
+
+ float totalSF = static_cast<float>(in.width) / main.width;
+ float bdsSF = totalSF > 2 ? 2 : 1;
+ float yuvSF = totalSF / bdsSF;
+ float sf = findScaleFactor(yuvSF, gdcScalingFactors);
+
+ gdc.width = main.width * sf;
+ gdc.height = main.height * sf;
+
+ return gdc;
+}
+
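+/*
+ * Compute the fraction of the input field of view preserved by a pipe
+ * configuration, accounting for the IF crop and for the GDC crop scaled
+ * back to sensor pixels by the BDS factor. As an illustrative example, a
+ * 2592x1944 input with a 2560x1920 IF rectangle and no GDC crop preserves
+ * (2592 - 32) / 2592 = ~0.988 of the width and (1944 - 24) / 1944 = ~0.988
+ * of the height.
+ */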
+FOV calcFOV(const Size &in, const ImgUDevice::PipeConfig &pipe)
+{
+ FOV fov{};
+
+ float inW = static_cast<float>(in.width);
+ float inH = static_cast<float>(in.height);
+ float ifCropW = static_cast<float>(in.width - pipe.iif.width);
+ float ifCropH = static_cast<float>(in.height - pipe.iif.height);
+ float gdcCropW = static_cast<float>(pipe.bds.width - pipe.gdc.width) * pipe.bds_sf;
+ float gdcCropH = static_cast<float>(pipe.bds.height - pipe.gdc.height) * pipe.bds_sf;
+
+ fov.w = (inW - (ifCropW + gdcCropW)) / inW;
+ fov.h = (inH - (ifCropH + gdcCropH)) / inH;
+
+ return fov;
+}
+
+} /* namespace */
+
+/**
+ * \struct PipeConfig
+ * \brief The ImgU pipe configuration parameters
+ *
+ * The ImgU image pipeline is composed of several hardware blocks that crop
+ * and scale the input image to obtain the desired output sizes. The
+ * scaling/cropping operations of those components are configured through the
+ * V4L2 selection API and the V4L2 subdev API applied to the ImgU media entity.
+ *
+ * The configurable components in the pipeline are:
+ * - IF: image feeder
+ * - BDS: bayer downscaler
+ * - GDC: geometric distortion correction
+ *
+ * The IF crop rectangle is controlled by the V4L2_SEL_TGT_CROP selection target
+ * applied to the ImgU media entity sink pad number 0. The BDS scaler is
+ * controlled by the V4L2_SEL_TGT_COMPOSE target on the same pad, while the GDC
+ * output size is configured with the VIDIOC_SUBDEV_S_FMT IOCTL, again on pad
+ * number 0.
+ *
+ * The PipeConfig structure collects the sizes of each of those components
+ * plus the BDS scaling factor used to calculate the field of view
+ * of the final images.
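+ *
+ * As an illustrative sketch (the \a imgu handle, \a pipeConfig variable
+ * and pad number 0 are assumptions; see ImgUDevice::configure() for the
+ * actual code path), the configuration maps to the V4L2 APIs as follows:
+ *
+ * \code
+ * Rectangle iif{ 0, 0, pipeConfig.iif };
+ * imgu->setSelection(0, V4L2_SEL_TGT_CROP, &iif);     // IF crop
+ *
+ * Rectangle bds{ 0, 0, pipeConfig.bds };
+ * imgu->setSelection(0, V4L2_SEL_TGT_COMPOSE, &bds);  // BDS scaling
+ *
+ * V4L2SubdeviceFormat gdcFormat{};
+ * gdcFormat.code = MEDIA_BUS_FMT_FIXED;
+ * gdcFormat.size = pipeConfig.gdc;
+ * imgu->setFormat(0, &gdcFormat);                     // GDC output size
+ * \endcode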
+ */
+
+/**
+ * \struct Pipe
+ * \brief Describe the requested ImgU configuration
+ *
+ * The ImgU unit processes images through several components, which have
+ * to be properly configured by inspecting the input image size and the desired
+ * output sizes. This structure collects the ImgU input configuration and the
+ * requested main output and viewfinder configurations.
+ *
+ * \var Pipe::input
+ * \brief The input image size
+ *
+ * \var Pipe::main
+ * \brief The requested main output size
+ *
+ * \var Pipe::viewfinder
+ * \brief The requested viewfinder output size
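+ *
+ * A minimal usage sketch (assuming \a imgu points to an initialized
+ * ImgUDevice; the sizes are illustrative only):
+ *
+ * \code
+ * ImgUDevice::Pipe pipe{};
+ * pipe.input = { 2592, 1944 };
+ * pipe.main = { 1920, 1080 };
+ * pipe.viewfinder = { 1280, 720 };
+ *
+ * ImgUDevice::PipeConfig config = imgu->calculatePipeConfig(&pipe);
+ * if (config.isNull())
+ *     return; // no valid pipe configuration found
+ * \endcode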
+ */
+
+/**
+ * \brief Initialize components of the ImgU instance
+ * \param[in] mediaDevice The ImgU instance media device
+ * \param[in] index The ImgU instance index
+ *
+ * Create and open the V4L2 devices and subdevices of the ImgU instance
+ * with \a index.
+ *
+ * In case of errors the created V4L2VideoDevice and V4L2Subdevice instances
+ * are destroyed at pipeline handler delete time.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::init(MediaDevice *media, unsigned int index)
+{
+ int ret;
+
+ name_ = "ipu3-imgu " + std::to_string(index);
+ media_ = media;
+
+ /*
+ * The presence of the media entities in the media device has been
+ * verified by the match() function: there is no need to check the
+ * validity of the newly created video devices and subdevices here.
+ */
+ imgu_ = V4L2Subdevice::fromEntityName(media, name_);
+ ret = imgu_->open();
+ if (ret)
+ return ret;
+
+ input_ = V4L2VideoDevice::fromEntityName(media, name_ + " input");
+ ret = input_->open();
+ if (ret)
+ return ret;
+
+ output_ = V4L2VideoDevice::fromEntityName(media, name_ + " output");
+ ret = output_->open();
+ if (ret)
+ return ret;
+
+ viewfinder_ = V4L2VideoDevice::fromEntityName(media, name_ + " viewfinder");
+ ret = viewfinder_->open();
+ if (ret)
+ return ret;
+
+ param_ = V4L2VideoDevice::fromEntityName(media, name_ + " parameters");
+ ret = param_->open();
+ if (ret)
+ return ret;
+
+ stat_ = V4L2VideoDevice::fromEntityName(media, name_ + " 3a stat");
+ ret = stat_->open();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * \brief Calculate the ImgU pipe configuration parameters
+ * \param[in] pipe The requested ImgU configuration
+ * \return An ImgUDevice::PipeConfig instance on success, an empty configuration
+ * otherwise
+ */
+ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
+{
+ pipeConfigs.clear();
+
+ LOG(IPU3, Debug) << "Calculating pipe configuration for: ";
+ LOG(IPU3, Debug) << "input: " << pipe->input;
+ LOG(IPU3, Debug) << "main: " << pipe->main;
+ LOG(IPU3, Debug) << "vf: " << pipe->viewfinder;
+
+ const Size &in = pipe->input;
+
+ /*
+ * \todo Filter out all resolutions < IF_CROP_MAX.
+ * See https://bugs.libcamera.org/show_bug.cgi?id=32
+ */
+ if (in.width < ImgUDevice::kIFMaxCropWidth || in.height < ImgUDevice::kIFMaxCropHeight) {
+ LOG(IPU3, Error) << "Input resolution " << in << " not supported";
+ return {};
+ }
+
+ Size gdc = calculateGDC(pipe);
+
+ float bdsSF = static_cast<float>(in.width) / gdc.width;
+ float sf = findScaleFactor(bdsSF, bdsScalingFactors, true);
+
+ /* Populate the configurations vector by scaling width and height. */
+ unsigned int ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
+ unsigned int ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
+ unsigned int minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
+ unsigned int minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
+ while (ifWidth >= minIfWidth) {
+ while (ifHeight >= minIfHeight) {
+ Size iif{ ifWidth, ifHeight };
+ calculateBDS(pipe, iif, gdc, sf);
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ ifWidth -= ImgUDevice::kIFAlignWidth;
+ }
+
+ /* Repeat search by scaling width first. */
+ ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
+ ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
+ minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
+ minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
+ while (ifHeight >= minIfHeight) {
+ /*
+ * \todo This procedure is probably broken:
+ * https://github.com/intel/intel-ipu3-pipecfg/issues/2
+ */
+ while (ifWidth >= minIfWidth) {
+ Size iif{ ifWidth, ifHeight };
+ calculateBDS(pipe, iif, gdc, sf);
+ ifWidth -= ImgUDevice::kIFAlignWidth;
+ }
+
+ ifHeight -= ImgUDevice::kIFAlignHeight;
+ }
+
+ if (pipeConfigs.empty()) {
+ LOG(IPU3, Error) << "Failed to calculate pipe configuration";
+ return {};
+ }
+
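+ /* Select the pipe configuration that preserves the largest field of view. */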
+ FOV bestFov = calcFOV(pipe->input, pipeConfigs[0]);
+ unsigned int bestIndex = 0;
+ unsigned int p = 0;
+ for (const auto &pipeConfig : pipeConfigs) {
+ FOV fov = calcFOV(pipe->input, pipeConfig);
+ if (fov.isLarger(bestFov)) {
+ bestFov = fov;
+ bestIndex = p;
+ }
+
+ ++p;
+ }
+
+ LOG(IPU3, Debug) << "Computed pipe configuration: ";
+ LOG(IPU3, Debug) << "IF: " << pipeConfigs[bestIndex].iif;
+ LOG(IPU3, Debug) << "BDS: " << pipeConfigs[bestIndex].bds;
+ LOG(IPU3, Debug) << "GDC: " << pipeConfigs[bestIndex].gdc;
+
+ return pipeConfigs[bestIndex];
+}
+
+/**
+ * \brief Configure the ImgU pipeline
+ * \param[in] config The ImgU pipe configuration parameters
+ * \param[in] inputFormat The format to be applied to ImgU input
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat)
+{
+ /* Configure the ImgU input video device with the requested sizes. */
+ int ret = input_->setFormat(inputFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "ImgU input format = " << *inputFormat;
+
+ /*
+ * \todo The IPU3 driver implementation shall be changed to use the
+ * input sizes as 'ImgU Input' subdevice sizes, and use the desired
+ * GDC output sizes to configure the crop/compose rectangles.
+ *
+ * The current IPU3 driver implementation uses GDC sizes as the
+ * 'ImgU Input' subdevice sizes, and the input video device sizes
+ * to configure the crop/compose rectangles, contradicting the
+ * V4L2 specification.
+ */
+ Rectangle iif{ 0, 0, pipeConfig.iif };
+ ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_CROP, &iif);
+ if (ret)
+ return ret;
+ LOG(IPU3, Debug) << "ImgU IF rectangle = " << iif;
+
+ Rectangle bds{ 0, 0, pipeConfig.bds };
+ ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_COMPOSE, &bds);
+ if (ret)
+ return ret;
+ LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
+
+ V4L2SubdeviceFormat gdcFormat = {};
+ gdcFormat.code = MEDIA_BUS_FMT_FIXED;
+ gdcFormat.size = pipeConfig.gdc;
+
+ ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
+ if (ret)
+ return ret;
+
+ LOG(IPU3, Debug) << "ImgU GDC format = " << gdcFormat;
+
+ StreamConfiguration paramCfg = {};
+ paramCfg.size = inputFormat->size;
+ V4L2DeviceFormat paramFormat;
+ ret = configureVideoDevice(param_.get(), PAD_PARAM, paramCfg, &paramFormat);
+ if (ret)
+ return ret;
+
+ StreamConfiguration statCfg = {};
+ statCfg.size = inputFormat->size;
+ V4L2DeviceFormat statFormat;
+ ret = configureVideoDevice(stat_.get(), PAD_STAT, statCfg, &statFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * \brief Configure a video device on the ImgU
+ * \param[in] dev The video device to configure
+ * \param[in] pad The pad of the ImgU subdevice
+ * \param[in] cfg The requested configuration
+ * \param[out] outputFormat The format set on the video device
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
+ const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+{
+ V4L2SubdeviceFormat imguFormat = {};
+ imguFormat.code = MEDIA_BUS_FMT_FIXED;
+ imguFormat.size = cfg.size;
+
+ int ret = imgu_->setFormat(pad, &imguFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * No need to apply format to the param or stat video devices as the
+ * driver ignores the operation.
+ */
+ if (dev == param_.get() || dev == stat_.get())
+ return 0;
+
+ *outputFormat = {};
+ outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
+ outputFormat->size = cfg.size;
+ outputFormat->planesCount = 2;
+
+ ret = dev->setFormat(outputFormat);
+ if (ret)
+ return ret;
+
+ const char *name = dev == output_.get() ? "output" : "viewfinder";
+ LOG(IPU3, Debug) << "ImgU " << name << " format = "
+ << *outputFormat;
+
+ return 0;
+}
+
+/**
+ * \brief Allocate buffers for all the ImgU video devices
+ */
+int ImgUDevice::allocateBuffers(unsigned int bufferCount)
+{
+ /* Share buffers between CIO2 output and ImgU input. */
+ int ret = input_->importBuffers(bufferCount);
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to import ImgU input buffers";
+ return ret;
+ }
+
+ ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
+ goto error;
+ }
+
+ ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
+ goto error;
+ }
+
+ /*
+ * Import buffers for all outputs, regardless of whether the
+ * corresponding stream is active or inactive, as the driver needs
+ * buffers to be requested on the V4L2 devices in order to operate.
+ */
+ ret = output_->importBuffers(bufferCount);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to import ImgU output buffers";
+ goto error;
+ }
+
+ ret = viewfinder_->importBuffers(bufferCount);
+ if (ret < 0) {
+ LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
+ goto error;
+ }
+
+ return 0;
+
+error:
+ freeBuffers();
+
+ return ret;
+}
+
+/**
+ * \brief Release buffers for all the ImgU video devices
+ */
+void ImgUDevice::freeBuffers()
+{
+ int ret;
+
+ paramBuffers_.clear();
+ statBuffers_.clear();
+
+ ret = output_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU output buffers";
+
+ ret = param_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU param buffers";
+
+ ret = stat_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU stat buffers";
+
+ ret = viewfinder_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU viewfinder buffers";
+
+ ret = input_->releaseBuffers();
+ if (ret)
+ LOG(IPU3, Error) << "Failed to release ImgU input buffers";
+}
+
+int ImgUDevice::start()
+{
+ int ret;
+
+ /* Start the ImgU video devices. */
+ ret = output_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU output";
+ return ret;
+ }
+
+ ret = viewfinder_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU viewfinder";
+ return ret;
+ }
+
+ ret = param_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU param";
+ return ret;
+ }
+
+ ret = stat_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU stat";
+ return ret;
+ }
+
+ ret = input_->streamOn();
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to start ImgU input";
+ return ret;
+ }
+
+ return 0;
+}
+
+int ImgUDevice::stop()
+{
+ int ret;
+
+ ret = output_->streamOff();
+ ret |= viewfinder_->streamOff();
+ ret |= param_->streamOff();
+ ret |= stat_->streamOff();
+ ret |= input_->streamOff();
+
+ return ret;
+}
+
+/**
+ * \brief Enable or disable a single link on the ImgU instance
+ *
+ * This function assumes the media device associated with the ImgU instance
+ * is open.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::linkSetup(const std::string &source, unsigned int sourcePad,
+ const std::string &sink, unsigned int sinkPad,
+ bool enable)
+{
+ MediaLink *link = media_->link(source, sourcePad, sink, sinkPad);
+ if (!link) {
+ LOG(IPU3, Error)
+ << "Failed to get link: '" << source << "':"
+ << sourcePad << " -> '" << sink << "':" << sinkPad;
+ return -ENODEV;
+ }
+
+ return link->setEnabled(enable);
+}
+
+/**
+ * \brief Enable or disable all media links in the ImgU instance to prepare
+ * for capture operations
+ *
+ * \todo This function will probably be removed or changed once links are
+ * enabled or disabled selectively.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int ImgUDevice::enableLinks(bool enable)
+{
+ std::string viewfinderName = name_ + " viewfinder";
+ std::string paramName = name_ + " parameters";
+ std::string outputName = name_ + " output";
+ std::string statName = name_ + " 3a stat";
+ std::string inputName = name_ + " input";
+ int ret;
+
+ ret = linkSetup(inputName, 0, name_, PAD_INPUT, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(name_, PAD_OUTPUT, outputName, 0, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(name_, PAD_VF, viewfinderName, 0, enable);
+ if (ret)
+ return ret;
+
+ ret = linkSetup(paramName, 0, name_, PAD_PARAM, enable);
+ if (ret)
+ return ret;
+
+ return linkSetup(name_, PAD_STAT, statName, 0, enable);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
new file mode 100644
index 00000000..fa508316
--- /dev/null
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * Intel IPU3 ImgU
+ */
+
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+class MediaDevice;
+class Size;
+struct StreamConfiguration;
+
+class ImgUDevice
+{
+public:
+ static constexpr unsigned int kFilterWidth = 4;
+ static constexpr unsigned int kFilterHeight = 4;
+
+ static constexpr unsigned int kIFAlignWidth = 2;
+ static constexpr unsigned int kIFAlignHeight = 4;
+
+ static constexpr unsigned int kIFMaxCropWidth = 40;
+ static constexpr unsigned int kIFMaxCropHeight = 540;
+
+ static constexpr unsigned int kBDSAlignWidth = 2;
+ static constexpr unsigned int kBDSAlignHeight = 4;
+
+ static constexpr float kBDSSfMax = 2.5;
+ static constexpr float kBDSSfMin = 1.0;
+ static constexpr float kBDSSfStep = 0.03125;
+
+ static constexpr Size kOutputMinSize = { 2, 2 };
+ static constexpr Size kOutputMaxSize = { 4480, 34004 };
+ static constexpr unsigned int kOutputAlignWidth = 64;
+ static constexpr unsigned int kOutputAlignHeight = 4;
+ static constexpr unsigned int kOutputMarginWidth = 64;
+ static constexpr unsigned int kOutputMarginHeight = 32;
+
+ struct PipeConfig {
+ float bds_sf;
+ Size iif;
+ Size bds;
+ Size gdc;
+
+ bool isNull() const
+ {
+ return iif.isNull() || bds.isNull() || gdc.isNull();
+ }
+ };
+
+ struct Pipe {
+ Size input;
+ Size main;
+ Size viewfinder;
+ };
+
+ int init(MediaDevice *media, unsigned int index);
+
+ PipeConfig calculatePipeConfig(Pipe *pipe);
+
+ int configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat);
+
+ int configureOutput(const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+ {
+ return configureVideoDevice(output_.get(), PAD_OUTPUT, cfg,
+ outputFormat);
+ }
+
+ int configureViewfinder(const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat)
+ {
+ return configureVideoDevice(viewfinder_.get(), PAD_VF, cfg,
+ outputFormat);
+ }
+
+ int allocateBuffers(unsigned int bufferCount);
+ void freeBuffers();
+
+ int start();
+ int stop();
+
+ int enableLinks(bool enable);
+
+ std::unique_ptr<V4L2Subdevice> imgu_;
+ std::unique_ptr<V4L2VideoDevice> input_;
+ std::unique_ptr<V4L2VideoDevice> param_;
+ std::unique_ptr<V4L2VideoDevice> output_;
+ std::unique_ptr<V4L2VideoDevice> viewfinder_;
+ std::unique_ptr<V4L2VideoDevice> stat_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
+ std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
+
+private:
+ static constexpr unsigned int PAD_INPUT = 0;
+ static constexpr unsigned int PAD_PARAM = 1;
+ static constexpr unsigned int PAD_OUTPUT = 2;
+ static constexpr unsigned int PAD_VF = 3;
+ static constexpr unsigned int PAD_STAT = 4;
+
+ int linkSetup(const std::string &source, unsigned int sourcePad,
+ const std::string &sink, unsigned int sinkPad,
+ bool enable);
+
+ int configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
+ const StreamConfiguration &cfg,
+ V4L2DeviceFormat *outputFormat);
+
+ std::string name_;
+ MediaDevice *media_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index 1b44460e..e31e3879 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -2,198 +2,131 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipu3.cpp - Pipeline handler for Intel IPU3
+ * Pipeline handler for Intel IPU3
*/
#include <algorithm>
-#include <iomanip>
#include <memory>
+#include <queue>
#include <vector>
-#include <linux/media-bus-format.h>
+#include <linux/intel-ipu3.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/ipa/ipu3_ipa_interface.h>
+#include <libcamera/ipa/ipu3_ipa_proxy.h>
-namespace libcamera {
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
-LOG_DEFINE_CATEGORY(IPU3)
+#include "cio2.h"
+#include "frames.h"
+#include "imgu.h"
-class IPU3CameraData;
-
-class ImgUDevice
-{
-public:
- static constexpr unsigned int PAD_INPUT = 0;
- static constexpr unsigned int PAD_OUTPUT = 2;
- static constexpr unsigned int PAD_VF = 3;
- static constexpr unsigned int PAD_STAT = 4;
-
- /* ImgU output descriptor: group data specific to an ImgU output. */
- struct ImgUOutput {
- V4L2VideoDevice *dev;
- unsigned int pad;
- std::string name;
- std::vector<std::unique_ptr<FrameBuffer>> buffers;
- };
-
- ImgUDevice()
- : imgu_(nullptr), input_(nullptr)
- {
- output_.dev = nullptr;
- viewfinder_.dev = nullptr;
- stat_.dev = nullptr;
- }
+namespace libcamera {
- ~ImgUDevice()
- {
- delete imgu_;
- delete input_;
- delete output_.dev;
- delete viewfinder_.dev;
- delete stat_.dev;
- }
+LOG_DEFINE_CATEGORY(IPU3)
- int init(MediaDevice *media, unsigned int index);
- int configureInput(const Size &size,
- V4L2DeviceFormat *inputFormat);
- int configureOutput(ImgUOutput *output,
- const StreamConfiguration &cfg);
-
- int allocateBuffers(IPU3CameraData *data, unsigned int bufferCount);
- void freeBuffers(IPU3CameraData *data);
-
- int start();
- int stop();
-
- int linkSetup(const std::string &source, unsigned int sourcePad,
- const std::string &sink, unsigned int sinkPad,
- bool enable);
- int enableLinks(bool enable);
-
- unsigned int index_;
- std::string name_;
- MediaDevice *media_;
-
- V4L2Subdevice *imgu_;
- V4L2VideoDevice *input_;
- ImgUOutput output_;
- ImgUOutput viewfinder_;
- ImgUOutput stat_;
- /* \todo Add param video device for 3A tuning */
+static const ControlInfoMap::Map IPU3Controls = {
+ { &controls::draft::PipelineDepth, ControlInfo(2, 3) },
};
-class CIO2Device
+class IPU3CameraData : public Camera::Private
{
public:
- static constexpr unsigned int CIO2_BUFFER_COUNT = 4;
-
- CIO2Device()
- : output_(nullptr), csi2_(nullptr), sensor_(nullptr)
+ IPU3CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe)
{
}
- ~CIO2Device()
- {
- delete output_;
- delete csi2_;
- delete sensor_;
- }
+ int loadIPA();
- int init(const MediaDevice *media, unsigned int index);
- int configure(const Size &size,
- V4L2DeviceFormat *outputFormat);
+ void imguOutputBufferReady(FrameBuffer *buffer);
+ void cio2BufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void queuePendingRequests();
+ void cancelPendingRequests();
+ void frameStart(uint32_t sequence);
- int allocateBuffers();
- void freeBuffers();
+ CIO2Device cio2_;
+ ImgUDevice *imgu_;
- int start();
- int stop();
+ Stream outStream_;
+ Stream vfStream_;
+ Stream rawStream_;
- static V4L2PixelFormat mediaBusToFormat(unsigned int code);
+ Rectangle cropRegion_;
- V4L2VideoDevice *output_;
- V4L2Subdevice *csi2_;
- CameraSensor *sensor_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ IPU3Frames frameInfos_;
-private:
- std::vector<std::unique_ptr<FrameBuffer>> buffers_;
-};
+ std::unique_ptr<ipa::ipu3::IPAProxyIPU3> ipa_;
-class IPU3Stream : public Stream
-{
-public:
- IPU3Stream()
- : active_(false), device_(nullptr)
- {
- }
+ /* Requests for which no buffer has been queued to the CIO2 device yet. */
+ std::queue<Request *> pendingRequests_;
+ /* Requests queued to the CIO2 device but not yet processed by the ImgU. */
+ std::queue<Request *> processingRequests_;
- bool active_;
- std::string name_;
- ImgUDevice::ImgUOutput *device_;
-};
+ ControlInfoMap ipaControls_;
-class IPU3CameraData : public CameraData
-{
-public:
- IPU3CameraData(PipelineHandler *pipe)
- : CameraData(pipe)
- {
- }
-
- void imguOutputBufferReady(FrameBuffer *buffer);
- void imguInputBufferReady(FrameBuffer *buffer);
- void cio2BufferReady(FrameBuffer *buffer);
-
- CIO2Device cio2_;
- ImgUDevice *imgu_;
-
- IPU3Stream outStream_;
- IPU3Stream vfStream_;
+private:
+ void metadataReady(unsigned int id, const ControlList &metadata);
+ void paramsComputed(unsigned int id);
+ void setSensorControls(unsigned int id, const ControlList &sensorControls,
+ const ControlList &lensControls);
};
class IPU3CameraConfiguration : public CameraConfiguration
{
public:
- IPU3CameraConfiguration(Camera *camera, IPU3CameraData *data);
+ static constexpr unsigned int kBufferCount = 4;
+ static constexpr unsigned int kMaxStreams = 3;
- Status validate() override;
+ IPU3CameraConfiguration(IPU3CameraData *data);
- const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
- const std::vector<const IPU3Stream *> &streams() { return streams_; }
+ Status validate() override;
-private:
- static constexpr unsigned int IPU3_BUFFER_COUNT = 4;
+ const StreamConfiguration &cio2Format() const { return cio2Configuration_; }
+ const ImgUDevice::PipeConfig imguConfig() const { return pipeConfig_; }
- void adjustStream(StreamConfiguration &cfg, bool scale);
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+private:
/*
* The IPU3CameraData instance is guaranteed to be valid as long as the
* corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
- std::shared_ptr<Camera> camera_;
const IPU3CameraData *data_;
- V4L2SubdeviceFormat sensorFormat_;
- std::vector<const IPU3Stream *> streams_;
+ StreamConfiguration cio2Configuration_;
+ ImgUDevice::PipeConfig pipeConfig_;
};
class PipelineHandlerIPU3 : public PipelineHandler
{
public:
static constexpr unsigned int V4L2_CID_IPU3_PIPE_MODE = 0x009819c1;
+ static constexpr Size kViewfinderSize{ 1280, 720 };
enum IPU3PipeModes {
IPU3PipeModeVideo = 0,
@@ -202,27 +135,28 @@ public:
PipelineHandlerIPU3(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
- IPU3CameraData *cameraData(const Camera *camera)
+ IPU3CameraData *cameraData(Camera *camera)
{
- return static_cast<IPU3CameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<IPU3CameraData *>(camera->_d());
}
+ int initControls(IPU3CameraData *data);
+ int updateControls(IPU3CameraData *data);
int registerCameras();
int allocateBuffers(Camera *camera);
@@ -232,148 +166,220 @@ private:
ImgUDevice imgu1_;
MediaDevice *cio2MediaDev_;
MediaDevice *imguMediaDev_;
+
+ std::vector<IPABuffer> ipaBuffers_;
};
-IPU3CameraConfiguration::IPU3CameraConfiguration(Camera *camera,
- IPU3CameraData *data)
+IPU3CameraConfiguration::IPU3CameraConfiguration(IPU3CameraData *data)
: CameraConfiguration()
{
- camera_ = camera->shared_from_this();
data_ = data;
}
-void IPU3CameraConfiguration::adjustStream(StreamConfiguration &cfg, bool scale)
-{
- /* The only pixel format the driver supports is NV12. */
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
-
- if (scale) {
- /*
- * Provide a suitable default that matches the sensor aspect
- * ratio.
- */
- if (!cfg.size.width || !cfg.size.height) {
- cfg.size.width = 1280;
- cfg.size.height = 1280 * sensorFormat_.size.height
- / sensorFormat_.size.width;
- }
-
- /*
- * \todo: Clamp the size to the hardware bounds when we will
- * figure them out.
- *
- * \todo: Handle the scaler (BDS) restrictions. The BDS can
- * only scale with the same factor in both directions, and the
- * scaling factor is limited to a multiple of 1/32. At the
- * moment the ImgU driver hides these constraints by applying
- * additional cropping, this should be fixed on the driver
- * side, and cropping should be exposed to us.
- */
- } else {
- /*
- * \todo: Properly support cropping when the ImgU driver
- * interface will be cleaned up.
- */
- cfg.size = sensorFormat_.size;
- }
-
- /*
- * Clamp the size to match the ImgU alignment constraints. The width
- * shall be a multiple of 8 pixels and the height a multiple of 4
- * pixels.
- */
- if (cfg.size.width % 8 || cfg.size.height % 4) {
- cfg.size.width &= ~7;
- cfg.size.height &= ~3;
- }
-
- cfg.bufferCount = IPU3_BUFFER_COUNT;
-}
-
CameraConfiguration::Status IPU3CameraConfiguration::validate()
{
- const CameraSensor *sensor = data_->cio2_.sensor_;
Status status = Valid;
if (config_.empty())
return Invalid;
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation, and store the final combined transform that configure() will
+ * need to apply to the sensor, to save working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
/* Cap the number of entries to the available streams. */
- if (config_.size() > 2) {
- config_.resize(2);
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
status = Adjusted;
}
/*
- * Select the sensor format by collecting the maximum width and height
- * and picking the closest larger match, as the IPU3 can downscale
- * only. If no resolution is requested for any stream, or if no sensor
- * resolution is large enough, pick the largest one.
+ * Validate the requested stream configuration and select the sensor
+ * format by collecting the maximum RAW stream width and height and
+ * picking the closest larger match.
+ *
+ * If no RAW stream is requested, use the size of the largest YUV stream,
+ * plus margin pixels for the IF and BDS rectangles to downscale.
+ *
+ * \todo Clarify the IF and BDS margin requirements.
*/
- Size size = {};
+ unsigned int rawCount = 0;
+ unsigned int yuvCount = 0;
+ Size rawRequirement;
+ Size maxYuvSize;
+ Size rawSize;
for (const StreamConfiguration &cfg : config_) {
- if (cfg.size.width > size.width)
- size.width = cfg.size.width;
- if (cfg.size.height > size.height)
- size.height = cfg.size.height;
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ rawCount++;
+ rawSize = std::max(rawSize, cfg.size);
+ } else {
+ yuvCount++;
+ maxYuvSize = std::max(maxYuvSize, cfg.size);
+ rawRequirement.expandTo(cfg.size);
+ }
}
- if (!size.width || !size.height)
- size = sensor->resolution();
-
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 },
- size);
- if (!sensorFormat_.size.width || !sensorFormat_.size.height)
- sensorFormat_.size = sensor->resolution();
+ if (rawCount > 1 || yuvCount > 2) {
+ LOG(IPU3, Debug) << "Camera configuration not supported";
+ return Invalid;
+ } else if (rawCount && !yuvCount) {
+ /*
+ * Disallow raw-only camera configurations. Currently, the ImgU does
+ * not get configured for raw-only streams and configure() returns
+ * early. To support raw-only streams, the IPA needs to be
+ * configured, since it sets up the sensor controls for the capture.
+ *
+ * \todo Configure the ImgU with internal buffers which will enable
+ * the IPA to get configured for the raw-only camera configuration.
+ */
+ LOG(IPU3, Debug)
+ << "Camera configuration cannot support raw-only streams";
+ return Invalid;
+ }
/*
- * Verify and update all configuration entries, and assign a stream to
- * each of them. The viewfinder stream can scale, while the output
- * stream can crop only, so select the output stream when the requested
- * resolution is equal to the sensor resolution, and the viewfinder
- * stream otherwise.
+ * Generate raw configuration from CIO2.
+ *
+ * The output YUV streams will be limited in size to the maximum frame
+ * size requested for the RAW stream, if present.
+ *
+ * If no raw stream is requested, generate a size from the largest YUV
+ * stream, aligned to the ImgU constraints and bound
+ * by the sensor's maximum resolution. See
+ * https://bugs.libcamera.org/show_bug.cgi?id=32
*/
- std::set<const IPU3Stream *> availableStreams = {
- &data_->outStream_,
- &data_->vfStream_,
- };
+ if (rawSize.isNull())
+ rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
+ ImgUDevice::kIFMaxCropHeight })
+ .grownBy({ ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight })
+ .boundedTo(data_->cio2_.sensor()->resolution());
+
+ cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
+ if (!cio2Configuration_.pixelFormat.isValid())
+ return Invalid;
+
+ LOG(IPU3, Debug) << "CIO2 configuration: " << cio2Configuration_.toString();
- streams_.clear();
- streams_.reserve(config_.size());
+ ImgUDevice::Pipe pipe{};
+ pipe.input = cio2Configuration_.size;
+ /*
+ * Adjust the configurations if needed and assign streams while
+ * iterating them.
+ */
+ bool mainOutputAvailable = true;
for (unsigned int i = 0; i < config_.size(); ++i) {
- StreamConfiguration &cfg = config_[i];
- const PixelFormat pixelFormat = cfg.pixelFormat;
- const Size size = cfg.size;
- const IPU3Stream *stream;
+ const PixelFormatInfo &info = PixelFormatInfo::info(config_[i].pixelFormat);
+ const StreamConfiguration originalCfg = config_[i];
+ StreamConfiguration *cfg = &config_[i];
+
+ LOG(IPU3, Debug) << "Validating stream: " << config_[i].toString();
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Initialize the RAW stream with the CIO2 configuration. */
+ cfg->size = cio2Configuration_.size;
+ cfg->pixelFormat = cio2Configuration_.pixelFormat;
+ cfg->bufferCount = cio2Configuration_.bufferCount;
+ cfg->stride = info.stride(cfg->size.width, 0, 64);
+ cfg->frameSize = info.frameSize(cfg->size, 64);
+ cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
+
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the raw stream";
+ } else {
+ /* Assign and configure the main and viewfinder outputs. */
+
+ /*
+ * Clamp the size to match the ImgU size limits and the
+ * margins from the CIO2 output frame size.
+ *
+ * The ImgU outputs need to be strictly smaller than
+ * the CIO2 output frame and rounded down to 64 pixels
+ * in width and 32 pixels in height. This assumption
+ * comes from inspecting the pipe configuration script
+ * results and the suggested configurations available in
+ * the ChromeOS BSP .xml camera tuning files, and shall
+ * be validated.
+ *
+ * \todo Clarify what hardware constraints, if any,
+ * require these alignments. They might depend on the
+ * BDS scaling factor of 1/32, as the main output has
+ * no YUV scaler while the viewfinder output has one.
+ */
+ unsigned int limit;
+ limit = utils::alignDown(cio2Configuration_.size.width - 1,
+ ImgUDevice::kOutputMarginWidth);
+ cfg->size.width = std::clamp(cfg->size.width,
+ ImgUDevice::kOutputMinSize.width,
+ limit);
+
+ limit = utils::alignDown(cio2Configuration_.size.height - 1,
+ ImgUDevice::kOutputMarginHeight);
+ cfg->size.height = std::clamp(cfg->size.height,
+ ImgUDevice::kOutputMinSize.height,
+ limit);
+
+ cfg->size.alignDownTo(ImgUDevice::kOutputAlignWidth,
+ ImgUDevice::kOutputAlignHeight);
+
+ cfg->pixelFormat = formats::NV12;
+ cfg->bufferCount = kBufferCount;
+ cfg->stride = info.stride(cfg->size.width, 0, 1);
+ cfg->frameSize = info.frameSize(cfg->size, 1);
- if (cfg.size == sensorFormat_.size)
- stream = &data_->outStream_;
- else
- stream = &data_->vfStream_;
+ /*
+ * Use the main output stream in case only one stream is
+ * requested or if the current configuration is the one
+ * with the maximum YUV output size.
+ */
+ if (mainOutputAvailable &&
+ (originalCfg.size == maxYuvSize || yuvCount == 1)) {
+ cfg->setStream(const_cast<Stream *>(&data_->outStream_));
+ mainOutputAvailable = false;
- if (availableStreams.find(stream) == availableStreams.end())
- stream = *availableStreams.begin();
+ pipe.main = cfg->size;
+ if (yuvCount == 1)
+ pipe.viewfinder = pipe.main;
- LOG(IPU3, Debug)
- << "Assigned '" << stream->name_ << "' to stream " << i;
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the main output";
+ } else {
+ cfg->setStream(const_cast<Stream *>(&data_->vfStream_));
+ pipe.viewfinder = cfg->size;
- bool scale = stream == &data_->vfStream_;
- adjustStream(config_[i], scale);
+ LOG(IPU3, Debug) << "Assigned " << cfg->toString()
+ << " to the viewfinder output";
+ }
+ }
- if (cfg.pixelFormat != pixelFormat || cfg.size != size) {
+ if (cfg->pixelFormat != originalCfg.pixelFormat ||
+ cfg->size != originalCfg.size) {
LOG(IPU3, Debug)
<< "Stream " << i << " configuration adjusted to "
- << cfg.toString();
+ << cfg->toString();
status = Adjusted;
}
+ }
- streams_.push_back(stream);
- availableStreams.erase(stream);
+ /* Only compute the ImgU configuration if a YUV stream has been requested. */
+ if (yuvCount) {
+ pipeConfig_ = data_->imgu_->calculatePipeConfig(&pipe);
+ if (pipeConfig_.isNull()) {
+ LOG(IPU3, Error) << "Failed to calculate pipe configuration: "
+ << "unsupported resolutions.";
+ return Invalid;
+ }
}
return status;
@@ -384,82 +390,70 @@ PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
- IPU3CameraConfiguration *config;
- std::set<IPU3Stream *> streams = {
- &data->outStream_,
- &data->vfStream_,
- };
+ std::unique_ptr<IPU3CameraConfiguration> config =
+ std::make_unique<IPU3CameraConfiguration>(data);
- config = new IPU3CameraConfiguration(camera, data);
+ if (roles.empty())
+ return config;
+ Size sensorResolution = data->cio2_.sensor()->resolution();
for (const StreamRole role : roles) {
- StreamConfiguration cfg = {};
- IPU3Stream *stream = nullptr;
-
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ Size size;
switch (role) {
case StreamRole::StillCapture:
/*
- * Pick the output stream by default as the Viewfinder
- * and VideoRecording roles are not allowed on
- * the output stream.
- */
- if (streams.find(&data->outStream_) != streams.end()) {
- stream = &data->outStream_;
- } else if (streams.find(&data->vfStream_) != streams.end()) {
- stream = &data->vfStream_;
- } else {
- LOG(IPU3, Error)
- << "No stream available for requested role "
- << role;
- break;
- }
-
- /*
- * FIXME: Soraka: the maximum resolution reported by
- * both sensors (2592x1944 for ov5670 and 4224x3136 for
- * ov13858) are returned as default configurations but
- * they're not correctly processed by the ImgU.
- * Resolutions up tp 2560x1920 have been validated.
+ * Use as the default full-frame configuration a size
+ * strictly smaller than the sensor resolution (limited
+ * to the ImgU maximum output size) and aligned down to
+ * the required frame margin.
*
- * \todo Clarify ImgU alignment requirements.
+ * \todo Clarify the alignment constraints as explained
+ * in validate()
*/
- cfg.size = { 2560, 1920 };
+ size = sensorResolution.boundedTo(ImgUDevice::kOutputMaxSize)
+ .shrunkBy({ 1, 1 })
+ .alignedDownTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight);
+ pixelFormat = formats::NV12;
+ bufferCount = IPU3CameraConfiguration::kBufferCount;
+ streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
- case StreamRole::Viewfinder:
- case StreamRole::VideoRecording: {
- /*
- * We can't use the 'output' stream for viewfinder or
- * video capture roles.
- *
- * \todo This is an artificial limitation until we
- * figure out the exact capabilities of the hardware.
- */
- if (streams.find(&data->vfStream_) == streams.end()) {
- LOG(IPU3, Error)
- << "No stream available for requested role "
- << role;
- break;
- }
+ case StreamRole::Raw: {
+ StreamConfiguration cio2Config =
+ data->cio2_.generateConfiguration(sensorResolution);
+ pixelFormat = cio2Config.pixelFormat;
+ size = cio2Config.size;
+ bufferCount = cio2Config.bufferCount;
+
+ for (const PixelFormat &format : data->cio2_.formats())
+ streamFormats[format] = data->cio2_.sizes(format);
- stream = &data->vfStream_;
+ break;
+ }
+ case StreamRole::Viewfinder:
+ case StreamRole::VideoRecording: {
/*
- * Align the default viewfinder size to the maximum
- * available sensor resolution and to the IPU3
- * alignment constraints.
+ * Default the viewfinder and video recording streams to
+ * 1280x720, capped to the maximum sensor resolution and
+ * aligned to the ImgU output constraints.
*/
- const Size &res = data->cio2_.sensor_->resolution();
- unsigned int width = std::min(1280U, res.width);
- unsigned int height = std::min(720U, res.height);
- cfg.size = { width & ~7, height & ~3 };
+ size = sensorResolution.boundedTo(kViewfinderSize)
+ .alignedDownTo(ImgUDevice::kOutputAlignWidth,
+ ImgUDevice::kOutputAlignHeight);
+ pixelFormat = formats::NV12;
+ bufferCount = IPU3CameraConfiguration::kBufferCount;
+ streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
}
@@ -467,20 +461,19 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
- break;
- }
-
- if (!stream) {
- delete config;
return nullptr;
}
- streams.erase(stream);
-
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = bufferCount;
config->addConfiguration(cfg);
}
- config->validate();
+ if (config->validate() == CameraConfiguration::Invalid)
+ return {};
return config;
}
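For reference, a minimal application-side sketch of how the reworked generateConfiguration() API is consumed; it assumes the camera has already been acquired:

	std::unique_ptr<CameraConfiguration> config =
		camera->generateConfiguration({ StreamRole::Viewfinder,
						StreamRole::Raw });
	if (!config)
		return;		/* A requested role is not supported. */

	config->at(0).size = { 640, 480 };
	if (config->validate() != CameraConfiguration::Invalid)
		camera->configure(config.get());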
@@ -490,10 +483,11 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
IPU3CameraConfiguration *config =
static_cast<IPU3CameraConfiguration *>(c);
IPU3CameraData *data = cameraData(camera);
- IPU3Stream *outStream = &data->outStream_;
- IPU3Stream *vfStream = &data->vfStream_;
+ Stream *outStream = &data->outStream_;
+ Stream *vfStream = &data->vfStream_;
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
+ V4L2DeviceFormat outputFormat;
int ret;
/*
@@ -528,8 +522,11 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
return ret;
/*
- * \todo: Enable links selectively based on the requested streams.
+ * \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
+ * \todo Don't configure the ImgU at all if we only have a single
+ * stream which is for raw capture, in which case no buffers will
+ * ever be queued to the ImgU.
*/
ret = data->imgu_->enableLinks(true);
if (ret)
@@ -539,36 +536,53 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
* Pass the requested stream size to the CIO2 unit and get back the
* adjusted format to be propagated to the ImgU output devices.
*/
- const Size &sensorSize = config->sensorFormat().size;
- V4L2DeviceFormat cio2Format = {};
- ret = cio2->configure(sensorSize, &cio2Format);
+ const Size &sensorSize = config->cio2Format().size;
+ V4L2DeviceFormat cio2Format;
+ ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
- ret = imgu->configureInput(sensorSize, &cio2Format);
+ IPACameraSensorInfo sensorInfo;
+ cio2->sensor()->sensorInfo(&sensorInfo);
+ data->cropRegion_ = sensorInfo.analogCrop;
+
+ /*
+ * If the ImgU gets configured, its driver seems to expect that
+ * buffers will be queued to its outputs, as otherwise the next
+ * capture session that uses the ImgU fails when queueing
+ * buffers to its input.
+ *
+ * If no ImgU configuration has been computed, it means only a RAW
+ * stream has been requested: return here to skip the ImgU configuration
+ * part.
+ */
+ ImgUDevice::PipeConfig imguConfig = config->imguConfig();
+ if (imguConfig.isNull())
+ return 0;
+
+ ret = imgu->configure(imguConfig, &cio2Format);
if (ret)
return ret;
/* Apply the format to the configured streams output devices. */
- outStream->active_ = false;
- vfStream->active_ = false;
+ StreamConfiguration *mainCfg = nullptr;
+ StreamConfiguration *vfCfg = nullptr;
for (unsigned int i = 0; i < config->size(); ++i) {
- /*
- * Use a const_cast<> here instead of storing a mutable stream
- * pointer in the configuration to let the compiler catch
- * unwanted modifications of camera data in the configuration
- * validate() implementation.
- */
- IPU3Stream *stream = const_cast<IPU3Stream *>(config->streams()[i]);
StreamConfiguration &cfg = (*config)[i];
-
- stream->active_ = true;
- cfg.setStream(stream);
-
- ret = imgu->configureOutput(stream->device_, cfg);
- if (ret)
- return ret;
+ Stream *stream = cfg.stream();
+
+ if (stream == outStream) {
+ mainCfg = &cfg;
+ ret = imgu->configureOutput(cfg, &outputFormat);
+ if (ret)
+ return ret;
+ } else if (stream == vfStream) {
+ vfCfg = &cfg;
+ ret = imgu->configureViewfinder(cfg, &outputFormat);
+ if (ret)
+ return ret;
+ }
}
/*
@@ -576,51 +590,64 @@ int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
* the configuration of the active one for that purpose (there should
* be at least one active stream in the configuration request).
*/
- if (!outStream->active_) {
- ret = imgu->configureOutput(outStream->device_, config->at(0));
+ if (!vfCfg) {
+ ret = imgu->configureViewfinder(*mainCfg, &outputFormat);
if (ret)
return ret;
}
- if (!vfStream->active_) {
- ret = imgu->configureOutput(vfStream->device_, config->at(0));
- if (ret)
- return ret;
- }
-
- /*
- * Apply the largest available format to the stat node.
- * \todo Revise this when we'll actually use the stat node.
- */
- StreamConfiguration statCfg = {};
- statCfg.size = cio2Format.size;
-
- ret = imgu->configureOutput(&imgu->stat_, statCfg);
- if (ret)
- return ret;
-
/* Apply the "pipe_mode" control to the ImgU subdevice. */
ControlList ctrls(imgu->imgu_->controls());
+ /*
+ * Set the ImgU pipe mode to 'Video' unconditionally to have statistics
+ * generated.
+ *
+ * \todo Figure out what the 'Still Capture' mode is meant for, and use
+ * it accordingly.
+ */
ctrls.set(V4L2_CID_IPU3_PIPE_MODE,
- static_cast<int32_t>(vfStream->active_ ? IPU3PipeModeVideo :
- IPU3PipeModeStillCapture));
+ static_cast<int32_t>(IPU3PipeModeVideo));
ret = imgu->imgu_->setControls(&ctrls);
if (ret) {
LOG(IPU3, Error) << "Unable to set pipe_mode control";
return ret;
}
- return 0;
+ ipa::ipu3::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->cio2_.sensor()->controls();
+
+ CameraLens *lens = data->cio2_.sensor()->focusLens();
+ if (lens)
+ configInfo.lensControls = lens->controls();
+
+ configInfo.sensorInfo = sensorInfo;
+ configInfo.bdsOutputSize = config->imguConfig().bds;
+ configInfo.iif = config->imguConfig().iif;
+
+ ret = data->ipa_->configure(configInfo, &data->ipaControls_);
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to configure IPA: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ return updateControls(data);
}
int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- IPU3Stream *ipu3stream = static_cast<IPU3Stream *>(stream);
- V4L2VideoDevice *video = ipu3stream->device_->dev;
+ IPU3CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- return video->exportBuffers(count, buffers);
+ if (stream == &data->outStream_)
+ return data->imgu_->output_->exportBuffers(count, buffers);
+ else if (stream == &data->vfStream_)
+ return data->imgu_->viewfinder_->exportBuffers(count, buffers);
+ else if (stream == &data->rawStream_)
+ return data->cio2_.exportBuffers(count, buffers);
+
+ return -EINVAL;
}
/**
@@ -634,23 +661,39 @@ int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
unsigned int bufferCount;
int ret;
- ret = cio2->allocateBuffers();
+ bufferCount = std::max({
+ data->outStream_.configuration().bufferCount,
+ data->vfStream_.configuration().bufferCount,
+ data->rawStream_.configuration().bufferCount,
+ });
+
+ ret = imgu->allocateBuffers(bufferCount);
if (ret < 0)
return ret;
- bufferCount = ret;
+ /* Map buffers to the IPA. */
+ unsigned int ipaBufferId = 1;
- ret = imgu->allocateBuffers(data, bufferCount);
- if (ret < 0) {
- cio2->freeBuffers();
- return ret;
+ for (const std::unique_ptr<FrameBuffer> &buffer : imgu->paramBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
}
+ for (const std::unique_ptr<FrameBuffer> &buffer : imgu->statBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
+ }
+
+ data->ipa_->mapBuffers(ipaBuffers_);
+
+ data->frameInfos_.init(imgu->paramBuffers_, imgu->statBuffers_);
+ data->frameInfos_.bufferAvailable.connect(
+ data, &IPU3CameraData::queuePendingRequests);
+
return 0;
}
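The integer cookies assigned above are how the pipeline handler and the (possibly out-of-process) IPA refer to the same param or stat buffer. A minimal sketch of the reverse lookup, assuming nothing beyond the FrameBuffer API:

	FrameBuffer *findByCookie(const std::vector<std::unique_ptr<FrameBuffer>> &buffers,
				  unsigned int id)
	{
		for (const std::unique_ptr<FrameBuffer> &buffer : buffers) {
			if (buffer->cookie() == id)
				return buffer.get();
		}

		return nullptr;
	}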
@@ -658,24 +701,44 @@ int PipelineHandlerIPU3::freeBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- data->cio2_.freeBuffers();
- data->imgu_->freeBuffers(data);
+ data->frameInfos_.clear();
+
+ std::vector<unsigned int> ids;
+ for (IPABuffer &ipabuf : ipaBuffers_)
+ ids.push_back(ipabuf.id);
+
+ data->ipa_->unmapBuffers(ids);
+ ipaBuffers_.clear();
+
+ data->imgu_->freeBuffers();
return 0;
}
-int PipelineHandlerIPU3::start(Camera *camera)
+int PipelineHandlerIPU3::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
IPU3CameraData *data = cameraData(camera);
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
int ret;
+ /* Disable test pattern mode on the sensor, if any. */
+ ret = cio2->sensor()->setTestPatternMode(
+ controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return ret;
+
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ ret = data->ipa_->start();
+ if (ret)
+ goto error;
+
+ data->delayedCtrls_->reset();
+
/*
* Start the ImgU video devices, buffers will be queued to the
* ImgU output and viewfinder when requests will be queued.
@@ -685,49 +748,99 @@ int PipelineHandlerIPU3::start(Camera *camera)
goto error;
ret = imgu->start();
- if (ret) {
- imgu->stop();
- cio2->stop();
+ if (ret)
goto error;
- }
return 0;
error:
+ imgu->stop();
+ cio2->stop();
+ data->ipa_->stop();
freeBuffers(camera);
- LOG(IPU3, Error) << "Failed to start camera " << camera->name();
+ LOG(IPU3, Error) << "Failed to start camera " << camera->id();
return ret;
}
-void PipelineHandlerIPU3::stop(Camera *camera)
+void PipelineHandlerIPU3::stopDevice(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
- int ret;
+ int ret = 0;
+
+ data->cancelPendingRequests();
+
+ data->ipa_->stop();
- ret = data->cio2_.stop();
ret |= data->imgu_->stop();
+ ret |= data->cio2_.stop();
if (ret)
- LOG(IPU3, Warning) << "Failed to stop camera "
- << camera->name();
+ LOG(IPU3, Warning) << "Failed to stop camera " << camera->id();
freeBuffers(camera);
}
-int PipelineHandlerIPU3::queueRequestDevice(Camera *camera, Request *request)
+void IPU3CameraData::cancelPendingRequests()
{
- int error = 0;
+ processingRequests_ = {};
- for (auto it : request->buffers()) {
- IPU3Stream *stream = static_cast<IPU3Stream *>(it.first);
- FrameBuffer *buffer = it.second;
+ while (!pendingRequests_.empty()) {
+ Request *request = pendingRequests_.front();
- int ret = stream->device_->dev->queueBuffer(buffer);
- if (ret < 0)
- error = ret;
+ for (auto it : request->buffers()) {
+ FrameBuffer *buffer = it.second;
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+
+ pipe()->completeRequest(request);
+ pendingRequests_.pop();
}
+}
+
+void IPU3CameraData::queuePendingRequests()
+{
+ while (!pendingRequests_.empty()) {
+ Request *request = pendingRequests_.front();
+
+ IPU3Frames::Info *info = frameInfos_.create(request);
+ if (!info)
+ break;
+
+ /*
+ * Queue a buffer on the CIO2, using the raw stream buffer
+ * provided in the request, if any, or a CIO2 internal buffer
+ * otherwise.
+ */
+ FrameBuffer *reqRawBuffer = request->findBuffer(&rawStream_);
+ FrameBuffer *rawBuffer = cio2_.queueBuffer(request, reqRawBuffer);
+ /*
+ * \todo If queueBuffer() fails to queue a buffer to the device,
+ * report the request as failed by cancelling it and
+ * calling PipelineHandler::completeRequest().
+ */
+ if (!rawBuffer) {
+ frameInfos_.remove(info);
+ break;
+ }
+
+ info->rawBuffer = rawBuffer;
+
+ ipa_->queueRequest(info->id, request->controls());
- return error;
+ pendingRequests_.pop();
+ processingRequests_.push(request);
+ }
+}
+
+int PipelineHandlerIPU3::queueRequestDevice(Camera *camera, Request *request)
+{
+ IPU3CameraData *data = cameraData(camera);
+
+ data->pendingRequests_.push(request);
+ data->queuePendingRequests();
+
+ return 0;
}
bool PipelineHandlerIPU3::match(DeviceEnumerator *enumerator)
@@ -783,6 +896,136 @@ bool PipelineHandlerIPU3::match(DeviceEnumerator *enumerator)
}
/**
+ * \brief Initialize the camera controls
+ * \param[in] data The camera data
+ *
+ * Initialize the camera controls by calculating the controls the pipeline
+ * is responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be initialized by the IPA init()
+ * function at camera creation time. Always call this function after IPA init().
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerIPU3::initControls(IPU3CameraData *data)
+{
+ /*
+ * \todo The controls initialized here depend on sensor configuration
+ * and their limits should be updated once the configuration gets
+ * changed.
+ *
+ * Initialize the sensor using its resolution and compute the control
+ * limits.
+ */
+ CameraSensor *sensor = data->cio2_.sensor();
+ V4L2SubdeviceFormat sensorFormat = {};
+ sensorFormat.size = sensor->resolution();
+ int ret = sensor->setFormat(&sensorFormat);
+ if (ret)
+ return ret;
+
+ return updateControls(data);
+}
+
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls the pipeline
+ * is responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to make sure to have a
+ * properly refreshed IPA controls list.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerIPU3::updateControls(IPU3CameraData *data)
+{
+ CameraSensor *sensor = data->cio2_.sensor();
+ IPACameraSensorInfo sensorInfo{};
+
+ int ret = sensor->sensorInfo(&sensorInfo);
+ if (ret)
+ return ret;
+
+ ControlInfoMap::Map controls = IPU3Controls;
+ const std::vector<controls::draft::TestPatternModeEnum>
+ &testPatternModes = sensor->testPatternModes();
+ if (!testPatternModes.empty()) {
+ std::vector<ControlValue> values;
+ values.reserve(testPatternModes.size());
+
+ for (auto pattern : testPatternModes)
+ values.emplace_back(pattern);
+
+ controls[&controls::draft::TestPatternMode] = ControlInfo(values);
+ }
+
+ /*
+ * Compute the scaler crop limits.
+ *
+ * Initialize the control using the 'Viewfinder' configuration (1280x720)
+ * as the pipeline output resolution and the full sensor size as input
+ * frame (see the todo note in the validate() function about the usage
+ * of the sensor's full frame as ImgU input).
+ */
+
+ /*
+ * The maximum scaler crop rectangle is the analogue crop used to
+ * produce the maximum frame size.
+ */
+ const Rectangle &analogueCrop = sensorInfo.analogCrop;
+ Rectangle maxCrop = analogueCrop;
+
+ /*
+ * As the ImgU cannot up-scale, the minimum selection rectangle has to
+ * be as large as the pipeline output size. Use the default viewfinder
+ * configuration as the desired output size and calculate the minimum
+ * rectangle required to satisfy the ImgU processing margins, unless the
+ * sensor resolution is smaller.
+ *
+ * \todo This implementation is based on the same assumptions about the
+ * ImgU pipeline configuration described in the viewfinder and main
+ * output sizes calculation in the validate() function.
+ */
+
+ /* The largest size strictly smaller than the sensor resolution, aligned to margins. */
+ Size minSize = sensor->resolution().shrunkBy({ 1, 1 })
+ .alignedDownTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight);
+
+ /*
+ * Either the smallest margin-aligned size larger than the viewfinder
+ * size or the adjusted sensor resolution.
+ */
+ minSize = kViewfinderSize.grownBy({ 1, 1 })
+ .alignedUpTo(ImgUDevice::kOutputMarginWidth,
+ ImgUDevice::kOutputMarginHeight)
+ .boundedTo(minSize);
+
+ /*
+ * Re-scale in the sensor's native coordinates. Report (0,0) as
+ * top-left corner as we allow applications to freely pan the crop area.
+ */
+ Rectangle minCrop = Rectangle(minSize).scaledBy(analogueCrop.size(),
+ sensorInfo.outputSize);
+
+ controls[&controls::ScalerCrop] = ControlInfo(minCrop, maxCrop, maxCrop);
+
+ /* Add the IPA registered controls to list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
+ controls::controls);
+
+ return 0;
+}
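A worked example of the ScalerCrop limits with illustrative numbers (not taken from a real sensor):

	/*
	 * Assume a 2592x1944 sensor, a 2560x1920 analogue crop and ImgU
	 * margins of 64x32:
	 *
	 *   sensor bound:     2591x1943 alignedDownTo(64, 32) -> 2560x1920
	 *   viewfinder bound: 1281x721  alignedUpTo(64, 32)   -> 1344x736
	 *
	 * The smaller 1344x736, re-scaled by 2560x1920 / 2592x1944 into
	 * sensor coordinates, yields a minimum crop of roughly 1327x726 at
	 * (0,0), with the full analogue crop as both maximum and default.
	 */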
+
+/**
* \brief Initialise ImgU and CIO2 devices associated with cameras
*
* Initialise the two ImgU instances and create cameras with an associated
@@ -816,6 +1059,7 @@ int PipelineHandlerIPU3::registerCameras()
std::set<Stream *> streams = {
&data->outStream_,
&data->vfStream_,
+ &data->rawStream_,
};
CIO2Device *cio2 = &data->cio2_;
@@ -823,8 +1067,35 @@ int PipelineHandlerIPU3::registerCameras()
if (ret)
continue;
+ ret = data->loadIPA();
+ if (ret)
+ continue;
+
/* Initialize the camera properties. */
- data->properties_ = cio2->sensor_->properties();
+ data->properties_ = cio2->sensor()->properties();
+
+ ret = initControls(data.get());
+ if (ret)
+ continue;
+
+ const CameraSensorProperties::SensorDelays &delays = cio2->sensor()->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(cio2->sensor()->device(),
+ params);
+ data->cio2_.frameStart().connect(data.get(),
+ &IPU3CameraData::frameStart);
+
+ /* Convert the sensor rotation to a transformation */
+ const auto &rotation = data->properties_.get(properties::Rotation);
+ if (!rotation)
+ LOG(IPU3, Warning) << "Rotation control not exposed by "
+ << cio2->sensor()->id()
+ << ". Assume rotation 0";
/**
* \todo Dynamically assign ImgU and output devices to each
@@ -833,10 +1104,6 @@ int PipelineHandlerIPU3::registerCameras()
* second.
*/
data->imgu_ = numCameras ? &imgu1_ : &imgu0_;
- data->outStream_.device_ = &data->imgu_->output_;
- data->outStream_.name_ = "output";
- data->vfStream_.device_ = &data->imgu_->viewfinder_;
- data->vfStream_.name_ = "viewfinder";
/*
* Connect video devices' 'bufferReady' signals to their
@@ -846,27 +1113,31 @@ int PipelineHandlerIPU3::registerCameras()
* associated ImgU input where they get processed and
* returned through the ImgU main and secondary outputs.
*/
- data->cio2_.output_->bufferReady.connect(data.get(),
- &IPU3CameraData::cio2BufferReady);
- data->imgu_->input_->bufferReady.connect(data.get(),
- &IPU3CameraData::imguInputBufferReady);
- data->imgu_->output_.dev->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
- data->imgu_->viewfinder_.dev->bufferReady.connect(data.get(),
- &IPU3CameraData::imguOutputBufferReady);
+ data->cio2_.bufferReady().connect(data.get(),
+ &IPU3CameraData::cio2BufferReady);
+ data->cio2_.bufferAvailable.connect(
+ data.get(), &IPU3CameraData::queuePendingRequests);
+ data->imgu_->input_->bufferReady.connect(&data->cio2_,
+ &CIO2Device::tryReturnBuffer);
+ data->imgu_->output_->bufferReady.connect(data.get(),
+ &IPU3CameraData::imguOutputBufferReady);
+ data->imgu_->viewfinder_->bufferReady.connect(data.get(),
+ &IPU3CameraData::imguOutputBufferReady);
+ data->imgu_->param_->bufferReady.connect(data.get(),
+ &IPU3CameraData::paramBufferReady);
+ data->imgu_->stat_->bufferReady.connect(data.get(),
+ &IPU3CameraData::statBufferReady);
/* Create and register the Camera instance. */
- std::string cameraName = cio2->sensor_->entity()->name() + " "
- + std::to_string(id);
- std::shared_ptr<Camera> camera = Camera::create(this,
- cameraName,
- streams);
+ const std::string &cameraId = cio2->sensor()->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), cameraId, streams);
- registerCamera(std::move(camera), std::move(data));
+ registerCamera(std::move(camera));
LOG(IPU3, Info)
<< "Registered Camera[" << numCameras << "] \""
- << cameraName << "\""
+ << cameraId << "\""
<< " connected to CSI-2 receiver " << id;
numCameras++;
@@ -875,594 +1146,276 @@ int PipelineHandlerIPU3::registerCameras()
return numCameras ? 0 : -ENODEV;
}
-/* -----------------------------------------------------------------------------
- * Buffer Ready slots
- */
-
-/**
- * \brief Handle buffers completion at the ImgU input
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the ImgU input are immediately queued back to the
- * CIO2 unit to continue frame capture.
- */
-void IPU3CameraData::imguInputBufferReady(FrameBuffer *buffer)
-{
- /* \todo Handle buffer failures when state is set to BufferError. */
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- return;
-
- cio2_.output_->queueBuffer(buffer);
-}
-
-/**
- * \brief Handle buffers completion at the ImgU output
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the ImgU output are directed to the application.
- */
-void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
+int IPU3CameraData::loadIPA()
{
- Request *request = buffer->request();
-
- if (!pipe_->completeBuffer(camera_, request, buffer))
- /* Request not completed yet, return here. */
- return;
+ ipa_ = IPAManager::createIPA<ipa::ipu3::IPAProxyIPU3>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
- /* Mark the request as complete. */
- pipe_->completeRequest(camera_, request);
-}
-
-/**
- * \brief Handle buffers completion at the CIO2 output
- * \param[in] buffer The completed buffer
- *
- * Buffers completed from the CIO2 are immediately queued to the ImgU unit
- * for further processing.
- */
-void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
-{
- /* \todo Handle buffer failures when state is set to BufferError. */
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- return;
-
- imgu_->input_->queueBuffer(buffer);
-}
-
-/* -----------------------------------------------------------------------------
- * ImgU Device
- */
-
-/**
- * \brief Initialize components of the ImgU instance
- * \param[in] mediaDevice The ImgU instance media device
- * \param[in] index The ImgU instance index
- *
- * Create and open the V4L2 devices and subdevices of the ImgU instance
- * with \a index.
- *
- * In case of errors the created V4L2VideoDevice and V4L2Subdevice instances
- * are destroyed at pipeline handler delete time.
- *
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::init(MediaDevice *media, unsigned int index)
-{
- int ret;
-
- index_ = index;
- name_ = "ipu3-imgu " + std::to_string(index_);
- media_ = media;
+ ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &IPU3CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady);
/*
- * The media entities presence in the media device has been verified
- * by the match() function: no need to check for newly created
- * video devices and subdevice validity here.
+ * Pass the sensor info to the IPA to initialize controls.
+ *
+ * \todo Find a way to initialize IPA controls without basing their
+ * limits on a particular sensor mode. We currently pass sensor
+ * information corresponding to the largest sensor resolution, and the
+ * IPA uses this to compute limits for supported controls. There's a
+ * discrepancy between the need to compute IPA control limits at init
+ * time, and the fact that those limits may depend on the sensor mode.
+ * Research is required to find out how to handle this issue.
*/
- imgu_ = V4L2Subdevice::fromEntityName(media, name_);
- ret = imgu_->open();
- if (ret)
- return ret;
-
- input_ = V4L2VideoDevice::fromEntityName(media, name_ + " input");
- ret = input_->open();
- if (ret)
- return ret;
-
- output_.dev = V4L2VideoDevice::fromEntityName(media, name_ + " output");
- ret = output_.dev->open();
+ CameraSensor *sensor = cio2_.sensor();
+ V4L2SubdeviceFormat sensorFormat = {};
+ sensorFormat.size = sensor->resolution();
+ int ret = sensor->setFormat(&sensorFormat);
if (ret)
return ret;
- output_.pad = PAD_OUTPUT;
- output_.name = "output";
-
- viewfinder_.dev = V4L2VideoDevice::fromEntityName(media,
- name_ + " viewfinder");
- ret = viewfinder_.dev->open();
+ IPACameraSensorInfo sensorInfo{};
+ ret = sensor->sensorInfo(&sensorInfo);
if (ret)
return ret;
- viewfinder_.pad = PAD_VF;
- viewfinder_.name = "viewfinder";
-
- stat_.dev = V4L2VideoDevice::fromEntityName(media, name_ + " 3a stat");
- ret = stat_.dev->open();
- if (ret)
- return ret;
-
- stat_.pad = PAD_STAT;
- stat_.name = "stat";
-
- return 0;
-}
-
-/**
- * \brief Configure the ImgU unit input
- * \param[in] size The ImgU input frame size
- * \param[in] inputFormat The format to be applied to ImgU input
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::configureInput(const Size &size,
- V4L2DeviceFormat *inputFormat)
-{
- /* Configure the ImgU input video device with the requested sizes. */
- int ret = input_->setFormat(inputFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU input format = " << inputFormat->toString();
-
/*
- * \todo The IPU3 driver implementation shall be changed to use the
- * input sizes as 'ImgU Input' subdevice sizes, and use the desired
- * GDC output sizes to configure the crop/compose rectangles.
- *
- * The current IPU3 driver implementation uses GDC sizes as the
- * 'ImgU Input' subdevice sizes, and the input video device sizes
- * to configure the crop/compose rectangles, contradicting the
- * V4L2 specification.
+ * The IPA tuning file is named after the sensor model. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
*/
- Rectangle rect = {
- .x = 0,
- .y = 0,
- .w = inputFormat->size.width,
- .h = inputFormat->size.height,
- };
- ret = imgu_->setCrop(PAD_INPUT, &rect);
- if (ret)
- return ret;
-
- ret = imgu_->setCompose(PAD_INPUT, &rect);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU input feeder and BDS rectangle = "
- << rect.toString();
-
- V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
- imguFormat.size = size;
-
- ret = imgu_->setFormat(PAD_INPUT, &imguFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU GDC format = " << imguFormat.toString();
-
- return 0;
-}
-
-/**
- * \brief Configure the ImgU unit \a id video output
- * \param[in] output The ImgU output device to configure
- * \param[in] cfg The requested configuration
- * \return 0 on success or a negative error code otherwise
- */
-int ImgUDevice::configureOutput(ImgUOutput *output,
- const StreamConfiguration &cfg)
-{
- V4L2VideoDevice *dev = output->dev;
- unsigned int pad = output->pad;
-
- V4L2SubdeviceFormat imguFormat = {};
- imguFormat.mbus_code = MEDIA_BUS_FMT_FIXED;
- imguFormat.size = cfg.size;
-
- int ret = imgu_->setFormat(pad, &imguFormat);
- if (ret)
- return ret;
-
- /* No need to apply format to the stat node. */
- if (output == &stat_)
- return 0;
-
- V4L2DeviceFormat outputFormat = {};
- outputFormat.fourcc = dev->toV4L2PixelFormat(PixelFormat(DRM_FORMAT_NV12));
- outputFormat.size = cfg.size;
- outputFormat.planesCount = 2;
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
- ret = dev->setFormat(&outputFormat);
- if (ret)
- return ret;
-
- LOG(IPU3, Debug) << "ImgU " << output->name << " format = "
- << outputFormat.toString();
-
- return 0;
-}
-
-/**
- * \brief Allocate buffers for all the ImgU video devices
- */
-int ImgUDevice::allocateBuffers(IPU3CameraData *data, unsigned int bufferCount)
-{
- IPU3Stream *outStream = &data->outStream_;
- IPU3Stream *vfStream = &data->vfStream_;
-
- /* Share buffers between CIO2 output and ImgU input. */
- int ret = input_->importBuffers(bufferCount);
+ ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
- LOG(IPU3, Error) << "Failed to import ImgU input buffers";
+ LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
}
- /*
- * Use for the stat's internal pool the same number of buffers as for
- * the input pool.
- * \todo To be revised when we'll actually use the stat node.
- */
- ret = stat_.dev->allocateBuffers(bufferCount, &stat_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
- goto error;
- }
-
- /*
- * Allocate buffers for both outputs. If an output is active, prepare
- * for buffer import, otherwise allocate internal buffers. Use the same
- * number of buffers in either case.
- */
- if (outStream->active_)
- ret = output_.dev->importBuffers(bufferCount);
- else
- ret = output_.dev->allocateBuffers(bufferCount,
- &output_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU output buffers";
- goto error;
- }
-
- if (vfStream->active_)
- ret = viewfinder_.dev->importBuffers(bufferCount);
- else
- ret = viewfinder_.dev->allocateBuffers(bufferCount,
- &viewfinder_.buffers);
- if (ret < 0) {
- LOG(IPU3, Error) << "Failed to allocate ImgU viewfinder buffers";
- goto error;
- }
-
return 0;
-
-error:
- freeBuffers(data);
-
- return ret;
}
-/**
- * \brief Release buffers for all the ImgU video devices
- */
-void ImgUDevice::freeBuffers(IPU3CameraData *data)
+void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id,
+ const ControlList &sensorControls,
+ const ControlList &lensControls)
{
- int ret;
+ delayedCtrls_->push(sensorControls);
- ret = output_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU output buffers";
+ CameraLens *focusLens = cio2_.sensor()->focusLens();
+ if (!focusLens)
+ return;
- ret = stat_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU stat buffers";
+ if (!lensControls.contains(V4L2_CID_FOCUS_ABSOLUTE))
+ return;
- ret = viewfinder_.dev->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU viewfinder buffers";
+ const ControlValue &focusValue = lensControls.get(V4L2_CID_FOCUS_ABSOLUTE);
- ret = input_->releaseBuffers();
- if (ret)
- LOG(IPU3, Error) << "Failed to release ImgU input buffers";
+ focusLens->setFocusPosition(focusValue.get<int32_t>());
}
-int ImgUDevice::start()
+void IPU3CameraData::paramsComputed(unsigned int id)
{
- int ret;
-
- /* Start the ImgU video devices. */
- ret = output_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU output";
- return ret;
- }
-
- ret = viewfinder_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU viewfinder";
- return ret;
- }
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- ret = stat_.dev->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU stat";
- return ret;
- }
+ /* Queue all buffers from the request aimed for the ImgU. */
+ for (auto it : info->request->buffers()) {
+ const Stream *stream = it.first;
+ FrameBuffer *outbuffer = it.second;
- ret = input_->streamOn();
- if (ret) {
- LOG(IPU3, Error) << "Failed to start ImgU input";
- return ret;
+ if (stream == &outStream_)
+ imgu_->output_->queueBuffer(outbuffer);
+ else if (stream == &vfStream_)
+ imgu_->viewfinder_->queueBuffer(outbuffer);
}
- return 0;
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct ipu3_uapi_params);
+ imgu_->param_->queueBuffer(info->paramBuffer);
+ imgu_->stat_->queueBuffer(info->statBuffer);
+ imgu_->input_->queueBuffer(info->rawBuffer);
}
-int ImgUDevice::stop()
+void IPU3CameraData::metadataReady(unsigned int id, const ControlList &metadata)
{
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(id);
+ if (!info)
+ return;
- ret = output_.dev->streamOff();
- ret |= viewfinder_.dev->streamOff();
- ret |= stat_.dev->streamOff();
- ret |= input_->streamOff();
+ Request *request = info->request;
+ request->metadata().merge(metadata);
- return ret;
+ info->metadataProcessed = true;
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
}
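frames.cpp is added by this series but not shown in this hunk; based on how the flags are set in the slots above and below, the completion condition that tryComplete() evaluates can be sketched as follows (an assumption, not the actual implementation):

	bool tryCompleteSketch(IPU3Frames::Info *info)
	{
		/* Every stream buffer must have completed... */
		if (info->request->hasPendingBuffers())
			return false;

		/* ...along with the IPA metadata and the param buffer. */
		return info->metadataProcessed && info->paramDequeued;
	}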
-/**
- * \brief Enable or disable a single link on the ImgU instance
- *
- * This method assumes the media device associated with the ImgU instance
- * is open.
- *
- * \return 0 on success or a negative error code otherwise
+/* -----------------------------------------------------------------------------
+ * Buffer Ready slots
*/
-int ImgUDevice::linkSetup(const std::string &source, unsigned int sourcePad,
- const std::string &sink, unsigned int sinkPad,
- bool enable)
-{
- MediaLink *link = media_->link(source, sourcePad, sink, sinkPad);
- if (!link) {
- LOG(IPU3, Error)
- << "Failed to get link: '" << source << "':"
- << sourcePad << " -> '" << sink << "':" << sinkPad;
- return -ENODEV;
- }
-
- return link->setEnabled(enable);
-}
/**
- * \brief Enable or disable all media links in the ImgU instance to prepare
- * for capture operations
- *
- * \todo This method will probably be removed or changed once links will be
- * enabled or disabled selectively.
+ * \brief Handle buffers completion at the ImgU output
+ * \param[in] buffer The completed buffer
*
- * \return 0 on success or a negative error code otherwise
+ * Buffers completed from the ImgU output are directed to the application.
*/
-int ImgUDevice::enableLinks(bool enable)
+void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
{
- std::string viewfinderName = name_ + " viewfinder";
- std::string outputName = name_ + " output";
- std::string statName = name_ + " 3a stat";
- std::string inputName = name_ + " input";
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- ret = linkSetup(inputName, 0, name_, PAD_INPUT, enable);
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = linkSetup(name_, PAD_OUTPUT, outputName, 0, enable);
- if (ret)
- return ret;
+ pipe()->completeBuffer(request, buffer);
- ret = linkSetup(name_, PAD_VF, viewfinderName, 0, enable);
- if (ret)
- return ret;
+ request->metadata().set(controls::draft::PipelineDepth, 3);
+ /* \todo Actually apply the scaler crop region to the ImgU. */
+ const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
+ if (scalerCrop)
+ cropRegion_ = *scalerCrop;
+ request->metadata().set(controls::ScalerCrop, cropRegion_);
- return linkSetup(name_, PAD_STAT, statName, 0, enable);
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
}
-/*------------------------------------------------------------------------------
- * CIO2 Device
- */
-
/**
- * \brief Initialize components of the CIO2 device with \a index
- * \param[in] media The CIO2 media device
- * \param[in] index The CIO2 device index
- *
- * Create and open the video device and subdevices in the CIO2 instance at \a
- * index, if a supported image sensor is connected to the CSI-2 receiver of
- * this CIO2 instance. Enable the media links connecting the CIO2 components
- * to prepare for capture operations and cached the sensor maximum size.
+ * \brief Handle buffers completion at the CIO2 output
+ * \param[in] buffer The completed buffer
*
- * \return 0 on success or a negative error code otherwise
- * \retval -ENODEV No supported image sensor is connected to this CIO2 instance
+ * Buffers completed from the CIO2 are immediately queued to the ImgU unit
+ * for further processing.
*/
-int CIO2Device::init(const MediaDevice *media, unsigned int index)
+void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
{
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- /*
- * Verify that a sensor subdevice is connected to this CIO2 instance
- * and enable the media link between the two.
- */
- std::string csi2Name = "ipu3-csi2 " + std::to_string(index);
- MediaEntity *csi2Entity = media->getEntityByName(csi2Name);
- const std::vector<MediaPad *> &pads = csi2Entity->pads();
- if (pads.empty())
- return -ENODEV;
-
- /* IPU3 CSI-2 receivers have a single sink pad at index 0. */
- MediaPad *sink = pads[0];
- const std::vector<MediaLink *> &links = sink->links();
- if (links.empty())
- return -ENODEV;
-
- MediaLink *link = links[0];
- MediaEntity *sensorEntity = link->source()->entity();
- sensor_ = new CameraSensor(sensorEntity);
- ret = sensor_->init();
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = link->setEnabled(true);
- if (ret)
- return ret;
+ /* If the buffer is cancelled force a complete of the whole request. */
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ for (auto it : request->buffers()) {
+ FrameBuffer *b = it.second;
+ b->_d()->cancel();
+ pipe()->completeBuffer(request, b);
+ }
- /*
- * Make sure the sensor produces at least one format compatible with
- * the CIO2 requirements.
- *
- * utils::set_overlap requires the ranges to be sorted, keep the
- * cio2Codes vector sorted in ascending order.
- */
- const std::vector<unsigned int> cio2Codes{ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 };
- const std::vector<unsigned int> &sensorCodes = sensor_->mbusCodes();
- if (!utils::set_overlap(sensorCodes.begin(), sensorCodes.end(),
- cio2Codes.begin(), cio2Codes.end())) {
- LOG(IPU3, Error)
- << "Sensor " << sensor_->entity()->name()
- << " has not format compatible with the IPU3";
- return -EINVAL;
+ frameInfos_.remove(info);
+ pipe()->completeRequest(request);
+ return;
}
/*
- * \todo Define when to open and close video device nodes, as they
- * might impact on power consumption.
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
*/
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
- csi2_ = new V4L2Subdevice(csi2Entity);
- ret = csi2_->open();
- if (ret)
- return ret;
+ info->effectiveSensorControls = delayedCtrls_->get(buffer->metadata().sequence);
- std::string cio2Name = "ipu3-cio2 " + std::to_string(index);
- output_ = V4L2VideoDevice::fromEntityName(media, cio2Name);
- ret = output_->open();
- if (ret)
- return ret;
+ if (request->findBuffer(&rawStream_))
+ pipe()->completeBuffer(request, buffer);
- return 0;
+ ipa_->computeParams(info->id, info->paramBuffer->cookie());
}
-/**
- * \brief Configure the CIO2 unit
- * \param[in] size The requested CIO2 output frame size
- * \param[out] outputFormat The CIO2 unit output image format
- * \return 0 on success or a negative error code otherwise
- */
-int CIO2Device::configure(const Size &size,
- V4L2DeviceFormat *outputFormat)
+void IPU3CameraData::paramBufferReady(FrameBuffer *buffer)
{
- V4L2SubdeviceFormat sensorFormat;
- int ret;
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
+
+ info->paramDequeued = true;
/*
- * Apply the selected format to the sensor, the CSI-2 receiver and
- * the CIO2 output device.
+ * tryComplete() will delete info if it completes the IPU3Frame.
+ * In that event, we must have obtained the Request beforehand.
+ *
+ * \todo Improve the FrameInfo API to avoid this type of issue
*/
- sensorFormat = sensor_->getFormat({ MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10 },
- size);
- ret = sensor_->setFormat(&sensorFormat);
- if (ret)
- return ret;
+ Request *request = info->request;
- ret = csi2_->setFormat(0, &sensorFormat);
- if (ret)
- return ret;
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
+}
- outputFormat->fourcc = mediaBusToFormat(sensorFormat.mbus_code);
- outputFormat->size = sensorFormat.size;
- outputFormat->planesCount = 1;
+void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
+{
+ IPU3Frames::Info *info = frameInfos_.find(buffer);
+ if (!info)
+ return;
- ret = output_->setFormat(outputFormat);
- if (ret)
- return ret;
+ Request *request = info->request;
- LOG(IPU3, Debug) << "CIO2 output format " << outputFormat->toString();
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ info->metadataProcessed = true;
- return 0;
+ /*
+ * tryComplete() will delete info if it completes the IPU3Frame.
+ * In that event, we must have obtained the Request beforehand.
+ */
+ if (frameInfos_.tryComplete(info))
+ pipe()->completeRequest(request);
+
+ return;
+ }
+
+ ipa_->processStats(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
+ info->statBuffer->cookie(), info->effectiveSensorControls);
}
-/**
- * \brief Allocate frame buffers for the CIO2 output
+/*
+ * \brief Handle the start of frame exposure signal
+ * \param[in] sequence The sequence number of frame
*
- * Allocate frame buffers in the CIO2 video device to be used to capture frames
- * from the CIO2 output. The buffers are stored in the CIO2Device::buffers_
- * vector.
+ * Inspect the list of pending requests waiting for a RAW frame to be
+ * produced and apply controls for the 'next' one.
*
- * \return Number of buffers allocated or negative error code
+ * Some controls need to be applied immediately, such as the
+ * TestPatternMode one. Other controls are handled through the delayed
+ * controls class.
*/
-int CIO2Device::allocateBuffers()
+void IPU3CameraData::frameStart(uint32_t sequence)
{
- int ret = output_->allocateBuffers(CIO2_BUFFER_COUNT, &buffers_);
- if (ret < 0)
- LOG(IPU3, Error) << "Failed to allocate CIO2 buffers";
+ delayedCtrls_->applyControls(sequence);
- return ret;
-}
+ if (processingRequests_.empty())
+ return;
-void CIO2Device::freeBuffers()
-{
- buffers_.clear();
+ /*
+ * Handle controls to be set immediately on the next frame.
+ * This currently only handles the TestPatternMode control.
+ *
+ * \todo Synchronize with the sequence number
+ */
+ Request *request = processingRequests_.front();
+ processingRequests_.pop();
- if (output_->releaseBuffers())
- LOG(IPU3, Error) << "Failed to release CIO2 buffers";
-}
+ const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
+ if (!testPatternMode)
+ return;
-int CIO2Device::start()
-{
- for (const std::unique_ptr<FrameBuffer> &buffer : buffers_) {
- int ret = output_->queueBuffer(buffer.get());
- if (ret) {
- LOG(IPU3, Error) << "Failed to queue CIO2 buffer";
- return ret;
- }
+ int ret = cio2_.sensor()->setTestPatternMode(
+ static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
+ if (ret) {
+ LOG(IPU3, Error) << "Failed to set test pattern mode: "
+ << ret;
+ return;
}
- return output_->streamOn();
-}
-
-int CIO2Device::stop()
-{
- return output_->streamOff();
-}
-
-V4L2PixelFormat CIO2Device::mediaBusToFormat(unsigned int code)
-{
- switch (code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SBGGR10);
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGBRG10);
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SGRBG10);
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- return V4L2PixelFormat(V4L2_PIX_FMT_IPU3_SRGGB10);
- default:
- return {};
- }
+ request->metadata().set(controls::draft::TestPatternMode,
+ *testPatternMode);
}
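From the application side, exercising this path takes a single control on the capture request; TestPatternModeColorBars is one of the draft enum values:

	request->controls().set(controls::draft::TestPatternMode,
				controls::draft::TestPatternModeColorBars);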
-REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/ipu3/meson.build b/src/libcamera/pipeline/ipu3/meson.build
index 0ab766a2..f2904b4a 100644
--- a/src/libcamera/pipeline/ipu3/meson.build
+++ b/src/libcamera/pipeline/ipu3/meson.build
@@ -1,3 +1,8 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'cio2.cpp',
+ 'frames.cpp',
+ 'imgu.cpp',
'ipu3.cpp',
])
diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
new file mode 100644
index 00000000..5abd6b20
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp
@@ -0,0 +1,1755 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy
+ *
+ * Pipeline Handler for ARM's Mali-C55 ISP
+ */
+
+#include <algorithm>
+#include <array>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <linux/mali-c55-config.h>
+#include <linux/media-bus-format.h>
+#include <linux/media.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/stream.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_interface.h>
+#include <libcamera/ipa/mali-c55_ipa_proxy.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/request.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace {
+
+bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
+{
+ return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
+ libcamera::PixelFormatInfo::ColourEncodingRAW;
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(MaliC55)
+
+const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
+ /* \todo Support all formats supported by the driver in libcamera. */
+
+ { formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
+ { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
+
+ /* RAW formats, FR pipe only. */
+ { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
+ { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
+ { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
+ { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
+};
+
+constexpr Size kMaliC55MinInputSize = { 640, 480 };
+constexpr Size kMaliC55MinSize = { 128, 128 };
+constexpr Size kMaliC55MaxSize = { 8192, 8192 };
+constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
+
+struct MaliC55FrameInfo {
+ Request *request;
+
+ FrameBuffer *paramBuffer;
+ FrameBuffer *statBuffer;
+
+ bool paramsDone;
+ bool statsDone;
+};
+
+class MaliC55CameraData : public Camera::Private
+{
+public:
+ MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
+ : Camera::Private(pipe), entity_(entity)
+ {
+ }
+
+ int init();
+ int loadIPA();
+
+ /* Delegate these operations to either the TPG or the CameraSensor. */
+ const std::vector<Size> sizes(unsigned int mbusCode) const;
+ const Size resolution() const;
+
+ int pixfmtToMbusCode(const PixelFormat &pixFmt) const;
+ const PixelFormat &bestRawFormat() const;
+
+ void updateControls(const ControlInfoMap &ipaControls);
+
+ PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
+ Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
+
+ std::unique_ptr<CameraSensor> sensor_;
+
+ MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+ std::unique_ptr<V4L2Subdevice> sd_;
+ Stream frStream_;
+ Stream dsStream_;
+
+ std::unique_ptr<ipa::mali_c55::IPAProxyMaliC55> ipa_;
+ std::vector<IPABuffer> ipaStatBuffers_;
+ std::vector<IPABuffer> ipaParamBuffers_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+private:
+ void initTPGData();
+ void setSensorControls(const ControlList &sensorControls);
+
+ std::string id_;
+ std::vector<unsigned int> tpgCodes_;
+ std::vector<Size> tpgSizes_;
+ Size tpgResolution_;
+};
+
+int MaliC55CameraData::init()
+{
+ int ret;
+
+ sd_ = std::make_unique<V4L2Subdevice>(entity_);
+ ret = sd_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open sensor subdevice";
+ return ret;
+ }
+
+ /* If this camera is created from TPG, we return here. */
+ if (entity_->name() == "mali-c55 tpg") {
+ initTPGData();
+ return 0;
+ }
+
+ /*
+ * Register a CameraSensor if we connect to a sensor and create
+ * an entity for the connected CSI-2 receiver.
+ */
+ sensor_ = CameraSensorFactoryBase::create(entity_);
+ if (!sensor_)
+ return -ENODEV;
+
+ const MediaPad *sourcePad = entity_->getPadByIndex(0);
+ MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
+
+ csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
+ ret = csi_->open();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
+ return ret;
+ }
+
+ return 0;
+}
+
+void MaliC55CameraData::initTPGData()
+{
+ /* Replicate the CameraSensor implementation for TPG. */
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return;
+
+ tpgCodes_ = utils::map_keys(formats);
+ std::sort(tpgCodes_.begin(), tpgCodes_.end());
+
+ for (const auto &format : formats) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ tpgResolution_ = tpgSizes_.back();
+}
+
+void MaliC55CameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+}
+
+const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
+{
+ if (sensor_)
+ return sensor_->sizes(mbusCode);
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ if (formats.empty())
+ return {};
+
+ std::vector<Size> sizes;
+ const auto &format = formats.find(mbusCode);
+ if (format == formats.end())
+ return {};
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+const Size MaliC55CameraData::resolution() const
+{
+ if (sensor_)
+ return sensor_->resolution();
+
+ return tpgResolution_;
+}
+
+/*
+ * The Mali C55 ISP can only produce 16-bit RAW output in bypass modes, but the
+ * sensors connected to it might produce 8/10/12/16 bits. We simply search the
+ * sensor's supported formats for the one with a matching bayer order and the
+ * greatest bitdepth.
+ */
+int MaliC55CameraData::pixfmtToMbusCode(const PixelFormat &pixFmt) const
+{
+ auto it = maliC55FmtToCode.find(pixFmt);
+ if (it == maliC55FmtToCode.end())
+ return -EINVAL;
+
+ BayerFormat bayerFormat = BayerFormat::fromMbusCode(it->second);
+ if (!bayerFormat.isValid())
+ return -EINVAL;
+
+ V4L2Subdevice::Formats formats = sd_->formats(0);
+ unsigned int sensorMbusCode = 0;
+ unsigned int bitDepth = 0;
+
+ for (const auto &[code, sizes] : formats) {
+ BayerFormat sdBayerFormat = BayerFormat::fromMbusCode(code);
+ if (!sdBayerFormat.isValid())
+ continue;
+
+ if (sdBayerFormat.order != bayerFormat.order)
+ continue;
+
+ if (sdBayerFormat.bitDepth > bitDepth) {
+ bitDepth = sdBayerFormat.bitDepth;
+ sensorMbusCode = code;
+ }
+ }
+
+ if (!sensorMbusCode)
+ return -EINVAL;
+
+ return sensorMbusCode;
+}
+
+/*
+ * Find a RAW PixelFormat supported by both the ISP and the sensor.
+ *
+ * The situation is mildly complicated by the fact that we expect the sensor to
+ * output something like RAW8/10/12/16, but the ISP can only accept as input
+ * RAW20 and can only produce as output RAW16. The one constant in that is the
+ * bayer order of the data, so we'll simply check that the sensor produces a
+ * format with a bayer order that matches that of one of the formats we support,
+ * and select that.
+ */
+const PixelFormat &MaliC55CameraData::bestRawFormat() const
+{
+ static const PixelFormat invalidPixFmt = {};
+
+ for (const auto &fmt : sd_->formats(0)) {
+ BayerFormat sensorBayer = BayerFormat::fromMbusCode(fmt.first);
+
+ if (!sensorBayer.isValid())
+ continue;
+
+ for (const auto &[pixFmt, rawCode] : maliC55FmtToCode) {
+ if (!isFormatRaw(pixFmt))
+ continue;
+
+ BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
+ if (bayer.order == sensorBayer.order)
+ return pixFmt;
+ }
+ }
+
+ LOG(MaliC55, Error) << "Sensor doesn't provide a compatible format";
+ return invalidPixFmt;
+}
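A concrete instance of the matching rule, with a hypothetical sensor:

	/*
	 * A sensor exposing only MEDIA_BUS_FMT_SRGGB10_1X10 shares the RGGB
	 * Bayer order with MEDIA_BUS_FMT_SRGGB16_1X16, so bestRawFormat()
	 * returns formats::SRGGB16 despite the differing bit depths.
	 */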
+
+void MaliC55CameraData::updateControls(const ControlInfoMap &ipaControls)
+{
+ if (!sensor_)
+ return;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ ControlInfoMap::Map controls;
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ controls[&controls::ScalerCrop] =
+ ControlInfo(ispMinCrop, sensorInfo.analogCrop,
+ sensorInfo.analogCrop);
+
+ for (auto const &c : ipaControls)
+ controls.emplace(c.first, c.second);
+
+ controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+}
+
+/*
+ * Make sure the provided raw pixel format is supported and adjust it to
+ * one of the supported ones if it's not.
+ */
+PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
+{
+ /* Make sure the RAW mbus code is supported by the image source. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return bestRawFormat();
+
+ const auto rawSizes = sizes(rawCode);
+ if (rawSizes.empty())
+ return bestRawFormat();
+
+ return rawFmt;
+}
+
+Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &size) const
+{
+ /* Expand the RAW size to the minimum ISP input size. */
+ Size rawSize = size.expandedTo(kMaliC55MinInputSize);
+
+ /* Check if the size is natively supported. */
+ int rawCode = pixfmtToMbusCode(rawFmt);
+ if (rawCode < 0)
+ return {};
+
+ const auto rawSizes = sizes(rawCode);
+ auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
+ if (sizeIt != rawSizes.end())
+ return rawSize;
+
+ /* Or adjust it to the closest supported size. */
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ Size bestSize;
+ for (const Size &sz : rawSizes) {
+ uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
+ static_cast<int>(sz.width)) +
+ std::abs(static_cast<int>(rawSize.height) -
+ static_cast<int>(sz.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = sz;
+ }
+ }
+
+ return bestSize;
+}
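A worked example of the nearest-size fallback, with a hypothetical size list:

	/*
	 * With supported RAW sizes { 1920x1080, 2592x1944 } and a request for
	 * 2000x1500: there is no exact match, the Manhattan distances are
	 * |2000-1920| + |1500-1080| = 500 and |2000-2592| + |1500-1944| = 1036,
	 * and 1920x1080 is therefore selected.
	 */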
+
+int MaliC55CameraData::loadIPA()
+{
+ int ret;
+
+ /* Do not initialize IPA for TPG. */
+ if (!sensor_)
+ return 0;
+
+ ipa_ = IPAManager::createIPA<ipa::mali_c55::IPAProxyMaliC55>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
+
+ ipa_->setSensorControls.connect(this, &MaliC55CameraData::setSensorControls);
+
+ std::string ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml",
+ "uncalibrated.yaml");
+
+ /* We need to inform the IPA of the sensor configuration */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = sensor_->controls();
+
+ ControlInfoMap ipaControls;
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, ipaConfig,
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to initialise the Mali-C55 IPA";
+ return ret;
+ }
+
+ updateControls(ipaControls);
+
+ return 0;
+}
+
+class MaliC55CameraConfiguration : public CameraConfiguration
+{
+public:
+ MaliC55CameraConfiguration(MaliC55CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ Status validate() override;
+ const Transform &combinedTransform() { return combinedTransform_; }
+
+ V4L2SubdeviceFormat sensorFormat_;
+
+private:
+ static constexpr unsigned int kMaxStreams = 2;
+
+ const MaliC55CameraData *data_;
+ Transform combinedTransform_;
+};
+
+CameraConfiguration::Status MaliC55CameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+ * The TPG doesn't support flips, so we only need to calculate a
+ * transform if we have a sensor.
+ */
+ if (data_->sensor_) {
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+ } else {
+ combinedTransform_ = Transform::Rot0;
+ }
+
+ /* Only 2 streams available. */
+ if (config_.size() > kMaxStreams) {
+ config_.resize(kMaxStreams);
+ status = Adjusted;
+ }
+
+ bool frPipeAvailable = true;
+ StreamConfiguration *rawConfig = nullptr;
+ for (StreamConfiguration &config : config_) {
+ if (!isFormatRaw(config.pixelFormat))
+ continue;
+
+ if (rawConfig) {
+ LOG(MaliC55, Error)
+ << "Only a single RAW stream is supported";
+ return Invalid;
+ }
+
+ rawConfig = &config;
+ }
+
+ /*
+ * The C55 cannot upscale. Limit the configuration to the ISP
+ * capabilities and the sensor resolution.
+ */
+ Size maxSize = kMaliC55MaxSize.boundedTo(data_->resolution());
+ if (rawConfig) {
+ /*
+ * \todo Take into account the Bayer components ordering once
+ * we support rotations.
+ */
+ PixelFormat rawFormat =
+ data_->adjustRawFormat(rawConfig->pixelFormat);
+
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ if (rawFormat != rawConfig->pixelFormat) {
+ LOG(MaliC55, Debug)
+ << "RAW format adjusted to " << rawFormat;
+ rawConfig->pixelFormat = rawFormat;
+ status = Adjusted;
+ }
+
+ Size rawSize =
+ data_->adjustRawSizes(rawFormat, rawConfig->size);
+ if (rawSize != rawConfig->size) {
+ LOG(MaliC55, Debug)
+ << "RAW sizes adjusted to " << rawSize;
+ rawConfig->size = rawSize;
+ status = Adjusted;
+ }
+
+ maxSize = rawSize;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(rawConfig->pixelFormat);
+ rawConfig->stride = info.stride(rawConfig->size.width, 0, 4);
+ rawConfig->frameSize = info.frameSize(rawConfig->size, 4);
+
+ rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ }
+
+ /*
+ * Adjust processed streams.
+ *
+ * Compute the minimum sensor size to be later used to select the
+ * sensor configuration.
+ */
+ Size minSensorSize = kMaliC55MinInputSize;
+ for (StreamConfiguration &config : config_) {
+ if (isFormatRaw(config.pixelFormat))
+ continue;
+
+ /* Adjust format and size for processed streams. */
+ const auto it = maliC55FmtToCode.find(config.pixelFormat);
+ if (it == maliC55FmtToCode.end()) {
+ LOG(MaliC55, Debug)
+ << "Format adjusted to " << formats::RGB565;
+ config.pixelFormat = formats::RGB565;
+ status = Adjusted;
+ }
+
+ Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
+ if (size != config.size) {
+ LOG(MaliC55, Debug)
+ << "Size adjusted to " << size;
+ config.size = size;
+ status = Adjusted;
+ }
+
+ if (minSensorSize < size)
+ minSensorSize = size;
+
+ if (frPipeAvailable) {
+ config.setStream(const_cast<Stream *>(&data_->frStream_));
+ frPipeAvailable = false;
+ } else {
+ config.setStream(const_cast<Stream *>(&data_->dsStream_));
+ }
+ }
+
+ /* Compute the sensor format. */
+
+ /* If there's a RAW config, sensor configuration follows it. */
+ if (rawConfig) {
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawConfig->pixelFormat);
+ sensorFormat_.size = rawConfig->size.expandedTo(minSensorSize);
+
+ return status;
+ }
+
+ /* If there's no RAW config, compute the sensor configuration here. */
+ PixelFormat rawFormat = data_->bestRawFormat();
+ if (!rawFormat.isValid())
+ return Invalid;
+
+ sensorFormat_.code = data_->pixfmtToMbusCode(rawFormat);
+
+ uint16_t distance = std::numeric_limits<uint16_t>::max();
+ const auto sizes = data_->sizes(sensorFormat_.code);
+ Size bestSize;
+ for (const auto &size : sizes) {
+ if (minSensorSize.width > size.width ||
+ minSensorSize.height > size.height)
+ continue;
+
+ uint16_t dist = std::abs(static_cast<int>(minSensorSize.width) -
+ static_cast<int>(size.width)) +
+ std::abs(static_cast<int>(minSensorSize.height) -
+ static_cast<int>(size.height));
+ if (dist < distance) {
+ distance = dist;
+ bestSize = size;
+ }
+ }
+ sensorFormat_.size = bestSize;
+
+ LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
+
+ return status;
+}
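+
+/*
+ * A minimal sketch of how an application exercises the validation above,
+ * assuming a Camera already acquired from the CameraManager (the stream
+ * roles and sizes are illustrative only):
+ *
+ *   std::unique_ptr<CameraConfiguration> config =
+ *           camera->generateConfiguration({ StreamRole::Viewfinder,
+ *                                           StreamRole::Raw });
+ *   config->at(0).size = { 1280, 720 };
+ *   if (config->validate() == CameraConfiguration::Invalid)
+ *           return;
+ *   camera->configure(config.get());
+ */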
+
+class PipelineHandlerMaliC55 : public PipelineHandler
+{
+public:
+ PipelineHandlerMaliC55(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+ int allocateBuffers(Camera *camera);
+ void freeBuffers(Camera *camera);
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsBufferReady(FrameBuffer *buffer);
+ void statsBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int requestId);
+ void statsProcessed(unsigned int requestId, const ControlList &metadata);
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ struct MaliC55Pipe {
+ std::unique_ptr<V4L2Subdevice> resizer;
+ std::unique_ptr<V4L2VideoDevice> cap;
+ MediaLink *link;
+ Stream *stream;
+ };
+
+ enum {
+ MaliC55FR,
+ MaliC55DS,
+ MaliC55NumPipes,
+ };
+
+ MaliC55CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<MaliC55CameraData *>(camera->_d());
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
+ {
+ if (stream == &data->frStream_)
+ return &pipes_[MaliC55FR];
+ else if (stream == &data->dsStream_)
+ return &pipes_[MaliC55DS];
+ else
+ LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
+ return nullptr;
+ }
+
+ MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
+ {
+ return pipeFromStream(data, const_cast<Stream *>(stream));
+ }
+
+ void resetPipes()
+ {
+ for (MaliC55Pipe &pipe : pipes_)
+ pipe.stream = nullptr;
+ }
+
+ MaliC55FrameInfo *findFrameInfo(FrameBuffer *buffer);
+ MaliC55FrameInfo *findFrameInfo(Request *request);
+ void tryComplete(MaliC55FrameInfo *info);
+
+ int configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+ int configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat);
+
+ void applyScalerCrop(Camera *camera, const ControlList &controls);
+
+ bool registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name);
+ bool registerTPGCamera(MediaLink *link);
+ bool registerSensorCamera(MediaLink *link);
+
+ MediaDevice *media_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> stats_;
+ std::unique_ptr<V4L2VideoDevice> params_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> statsBuffers_;
+ std::queue<FrameBuffer *> availableStatsBuffers_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> paramsBuffers_;
+ std::queue<FrameBuffer *> availableParamsBuffers_;
+
+ std::map<unsigned int, MaliC55FrameInfo> frameInfoMap_;
+
+ std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
+
+ bool dsFitted_;
+};
+
+PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
+ : PipelineHandler(manager), dsFitted_(true)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<MaliC55CameraConfiguration>(data);
+ bool frPipeAvailable = true;
+
+ if (roles.empty())
+ return config;
+
+ /* Check if one stream is RAW to reserve the FR pipe for it. */
+ if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
+ frPipeAvailable = false;
+
+ for (const StreamRole &role : roles) {
+ struct MaliC55Pipe *pipe;
+
+ /* Assign pipe for this role. */
+ if (role == StreamRole::Raw) {
+ pipe = &pipes_[MaliC55FR];
+ } else {
+ if (frPipeAvailable) {
+ pipe = &pipes_[MaliC55FR];
+ frPipeAvailable = false;
+ } else {
+ pipe = &pipes_[MaliC55DS];
+ }
+ }
+
+ Size size = std::min(Size{ 1920, 1080 }, data->resolution());
+ PixelFormat pixelFormat;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ size = data->resolution();
+ [[fallthrough]];
+ case StreamRole::VideoRecording:
+ pixelFormat = formats::NV12;
+ break;
+
+ case StreamRole::Viewfinder:
+ pixelFormat = formats::RGB565;
+ break;
+
+ case StreamRole::Raw:
+ pixelFormat = data->bestRawFormat();
+ if (!pixelFormat.isValid()) {
+ LOG(MaliC55, Error)
+ << "Camera does not support RAW formats";
+ return nullptr;
+ }
+
+ size = data->resolution();
+ break;
+
+ default:
+ LOG(MaliC55, Error)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+ for (const auto &maliFormat : maliC55FmtToCode) {
+ PixelFormat pixFmt = maliFormat.first;
+ bool isRaw = isFormatRaw(pixFmt);
+
+ /* RAW formats are only supported on the FR pipe. */
+ if (pipe != &pipes_[MaliC55FR] && isRaw)
+ continue;
+
+ if (isRaw) {
+ /* Make sure the mbus code is supported. */
+ int rawCode = data->pixfmtToMbusCode(pixFmt);
+ if (rawCode < 0)
+ continue;
+
+ const auto sizes = data->sizes(rawCode);
+ if (sizes.empty())
+ continue;
+
+ /* And list all sizes the sensor can produce. */
+ std::vector<SizeRange> sizeRanges;
+ std::transform(sizes.begin(), sizes.end(),
+ std::back_inserter(sizeRanges),
+ [](const Size &s) {
+ return SizeRange(s);
+ });
+
+ formats[pixFmt] = sizeRanges;
+ } else {
+ /* Processed formats are always available. */
+ Size maxSize = std::min(kMaliC55MaxSize,
+ data->resolution());
+ formats[pixFmt] = { kMaliC55MinSize, maxSize };
+ }
+ }
+
+ StreamFormats streamFormats(formats);
+ StreamConfiguration cfg(streamFormats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 4;
+ cfg.size = size;
+
+ config->addConfiguration(cfg);
+ }
+
+ if (config->validate() == CameraConfiguration::Invalid)
+ return nullptr;
+
+ return config;
+}
+
+int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ if (pipe != &pipes_[MaliC55FR]) {
+ LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
+ return -EINVAL;
+ }
+
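+ /*
+ * The resizer subdevice has three pads: sink pad #0 takes the debayered
+ * internal stream, source pad #1 feeds the capture video device and
+ * sink pad #2 carries the RAW bypass stream.
+ */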
+ /* Enable the debayer route to set the fixed internal format on pad #0. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ unsigned int rawCode = subdevFormat.code;
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /* Enable the bypass route and apply RAW formats there. */
+ routing.clear();
+ routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+ ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = rawCode;
+ ret = pipe->resizer->setFormat(2, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = pipe->resizer->setFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
+ const StreamConfiguration &config,
+ V4L2SubdeviceFormat &subdevFormat)
+{
+ Stream *stream = config.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /* Enable the debayer route on the resizer pipe. */
+ V4L2Subdevice::Routing routing = {};
+ routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
+ V4L2Subdevice::Stream{ 1, 0 },
+ V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+
+ int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ subdevFormat.code = kMaliC55ISPInternalFormat;
+ ret = pipe->resizer->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * Compute the scaler-in to scaler-out ratio: first center-crop to align
+ * the FOV to the desired resolution, then scale to the desired size.
+ */
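+ /*
+ * For example (hypothetical numbers), a 1920x1080 ISP output feeding a
+ * 1280x960 stream is first center-cropped to 1440x1080 (240 pixels off
+ * each side) and then scaled down to 1280x960.
+ */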
+ Size scalerIn = subdevFormat.size.boundedToAspectRatio(config.size);
+ int xCrop = (subdevFormat.size.width - scalerIn.width) / 2;
+ int yCrop = (subdevFormat.size.height - scalerIn.height) / 2;
+ Rectangle ispCrop = { xCrop, yCrop, scalerIn };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ Rectangle ispCompose = { 0, 0, config.size };
+ ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCompose);
+ if (ret)
+ return ret;
+
+ /*
+ * The source pad format size comes directly from the sink
+ * compose rectangle.
+ */
+ subdevFormat.size = ispCompose.size();
+ subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
+ return pipe->resizer->setFormat(1, &subdevFormat);
+}
+
+int PipelineHandlerMaliC55::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ resetPipes();
+
+ int ret = media_->disableLinks();
+ if (ret)
+ return ret;
+
+ /* Link the graph depending if we are operating the TPG or a sensor. */
+ MaliC55CameraData *data = cameraData(camera);
+ if (data->csi_) {
+ const MediaEntity *csiEntity = data->csi_->entity();
+ ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
+ } else {
+ ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
+ }
+ if (ret)
+ return ret;
+
+ MaliC55CameraConfiguration *maliConfig =
+ static_cast<MaliC55CameraConfiguration *>(config);
+ V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
+ ret = data->sd_->getFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ if (data->sensor_) {
+ ret = data->sensor_->setFormat(&subdevFormat,
+ maliConfig->combinedTransform());
+ if (ret)
+ return ret;
+ }
+
+ if (data->csi_) {
+ ret = data->csi_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ ret = data->csi_->getFormat(1, &subdevFormat);
+ if (ret)
+ return ret;
+ }
+
+ V4L2DeviceFormat statsFormat;
+ ret = stats_->getFormat(&statsFormat);
+ if (ret)
+ return ret;
+
+ if (statsFormat.planes[0].size != sizeof(struct mali_c55_stats_buffer)) {
+ LOG(MaliC55, Error) << "3a stats buffer size invalid";
+ return -EINVAL;
+ }
+
+ /*
+ * Propagate the format to the ISP sink pad and configure the input
+ * crop rectangle (no crop at the moment).
+ *
+ * \todo Configure the CSI-2 receiver.
+ */
+ ret = isp_->setFormat(0, &subdevFormat);
+ if (ret)
+ return ret;
+
+ Rectangle ispCrop(0, 0, subdevFormat.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the resizer: set the fixed internal format on the sink
+ * pad, and use the media bus code associated with the desired capture
+ * format on the source pad.
+ *
+ * Configure the crop and compose rectangles to match the desired
+ * stream output size.
+ *
+ * \todo Make the crop/scaler configurable
+ */
+ for (const StreamConfiguration &streamConfig : *config) {
+ Stream *stream = streamConfig.stream();
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ /*
+ * Enable the media link between the pipe's resizer and the
+ * capture video device
+ */
+
+ ret = pipe->link->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable resizer's link";
+ return ret;
+ }
+
+ if (isFormatRaw(streamConfig.pixelFormat))
+ ret = configureRawStream(data, streamConfig, subdevFormat);
+ else
+ ret = configureProcessedStream(data, streamConfig, subdevFormat);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure pipeline";
+ return ret;
+ }
+
+ /* Now apply the pixel format and size to the capture device. */
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
+ captureFormat.size = streamConfig.size;
+
+ ret = pipe->cap->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ pipe->stream = stream;
+ }
+
+ if (!data->ipa_)
+ return 0;
+
+ /*
+ * Enable the media link between the ISP subdevice and the statistics
+ * video device.
+ */
+ const MediaEntity *ispEntity = isp_->entity();
+ ret = ispEntity->getPadByIndex(3)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable statistics link";
+ return ret;
+ }
+
+ /*
+ * Enable the media link between the ISP subdevice and the parameters
+ * video device.
+ */
+ ret = ispEntity->getPadByIndex(4)->links()[0]->setEnabled(true);
+ if (ret) {
+ LOG(MaliC55, Error) << "Couldn't enable parameters link";
+ return ret;
+ }
+
+ /* We need to inform the IPA of the sensor configuration. */
+ ipa::mali_c55::IPAConfigInfo ipaConfig{};
+
+ ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
+ if (ret)
+ return ret;
+
+ ipaConfig.sensorControls = data->sensor_->controls();
+
+ /*
+ * We also need to tell the IPA the Bayer order of the data, as
+ * affected by any flips that we've configured.
+ */
+ const Transform &combinedTransform = maliConfig->combinedTransform();
+ BayerFormat::Order bayerOrder = data->sensor_->bayerOrder(combinedTransform);
+
+ ControlInfoMap ipaControls;
+ ret = data->ipa_->configure(ipaConfig, utils::to_underlying(bayerOrder),
+ &ipaControls);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to configure IPA";
+ return ret;
+ }
+
+ data->updateControls(ipaControls);
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return pipe->cap->exportBuffers(count, buffers);
+}
+
+void PipelineHandlerMaliC55::freeBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ while (!availableStatsBuffers_.empty())
+ availableStatsBuffers_.pop();
+ while (!availableParamsBuffers_.empty())
+ availableParamsBuffers_.pop();
+
+ statsBuffers_.clear();
+ paramsBuffers_.clear();
+
+ if (data->ipa_) {
+ data->ipa_->unmapBuffers(data->ipaStatBuffers_);
+ data->ipa_->unmapBuffers(data->ipaParamBuffers_);
+ }
+ data->ipaStatBuffers_.clear();
+ data->ipaParamBuffers_.clear();
+
+ if (stats_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release stats buffers";
+
+ if (params_->releaseBuffers())
+ LOG(MaliC55, Error) << "Failed to release params buffers";
+}
+
+int PipelineHandlerMaliC55::allocateBuffers(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ unsigned int ipaBufferId = 1;
+ unsigned int bufferCount;
+ int ret;
+
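+ /*
+ * Size the stats and params queues to the deeper of the two capture
+ * streams, so that a buffer of each kind is available for every
+ * in-flight request.
+ */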
+ bufferCount = std::max({
+ data->frStream_.configuration().bufferCount,
+ data->dsStream_.configuration().bufferCount,
+ });
+
+ ret = stats_->allocateBuffers(bufferCount, &statsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : statsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaStatBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableStatsBuffers_.push(buffer.get());
+ }
+
+ ret = params_->allocateBuffers(bufferCount, &paramsBuffers_);
+ if (ret < 0)
+ return ret;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : paramsBuffers_) {
+ buffer->setCookie(ipaBufferId++);
+ data->ipaParamBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
+ availableParamsBuffers_.push(buffer.get());
+ }
+
+ if (data->ipa_) {
+ data->ipa_->mapBuffers(data->ipaStatBuffers_, true);
+ data->ipa_->mapBuffers(data->ipaParamBuffers_, false);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerMaliC55::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+ int ret;
+
+ ret = allocateBuffers(camera);
+ if (ret)
+ return ret;
+
+ if (data->ipa_) {
+ ret = data->ipa_->start();
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to start IPA " << camera->id();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ Stream *stream = pipe.stream;
+
+ ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to import buffers";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = pipe.cap->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stream";
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+ return ret;
+ }
+ }
+
+ ret = stats_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start stats stream";
+
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = params_->streamOn();
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to start params stream";
+
+ stats_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (pipe.stream)
+ pipe.cap->streamOff();
+ }
+
+ freeBuffers(camera);
+ return ret;
+ }
+
+ ret = isp_->setFrameStartEnabled(true);
+ if (ret)
+ LOG(MaliC55, Error) << "Failed to enable frame start events";
+
+ return 0;
+}
+
+void PipelineHandlerMaliC55::stopDevice(Camera *camera)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ isp_->setFrameStartEnabled(false);
+
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ pipe.cap->streamOff();
+ pipe.cap->releaseBuffers();
+ }
+
+ stats_->streamOff();
+ params_->streamOff();
+ if (data->ipa_)
+ data->ipa_->stop();
+ freeBuffers(camera);
+}
+
+void PipelineHandlerMaliC55::applyScalerCrop(Camera *camera,
+ const ControlList &controls)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+ if (!scalerCrop)
+ return;
+
+ if (!data->sensor_) {
+ LOG(MaliC55, Error) << "ScalerCrop not supported for TPG";
+ return;
+ }
+
+ Rectangle nativeCrop = *scalerCrop;
+
+ IPACameraSensorInfo sensorInfo;
+ int ret = data->sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(MaliC55, Error) << "Failed to retrieve sensor info";
+ return;
+ }
+
+ /*
+ * The re-scaling of the ScalerCrop rectangle into the ISP crop
+ * rectangle comes straight from the RPi pipeline handler.
+ *
+ * Create a version of the crop rectangle aligned to the analogue crop
+ * rectangle top-left coordinates and scaled in the [analogue crop to
+ * output frame] ratio to take into account binning/skipping on the
+ * sensor.
+ */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo.analogCrop
+ .topLeft());
+ ispCrop.scaleBy(sensorInfo.outputSize, sensorInfo.analogCrop.size());
+
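+ /*
+ * For example (hypothetical numbers), with a 4056x3040 analogue crop
+ * and a 2028x1520 sensor output (2x2 binning), the requested rectangle
+ * is shifted by the analogue crop offset and halved in both dimensions.
+ */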
+ /*
+ * The crop rectangle should be:
+ * 1. At least as big as the minimum ISP crop size, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
+ Rectangle ispMinCrop{ 0, 0, 640, 480 };
+ Size minSize = ispMinCrop.size().expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center())
+ .enclosedIn(Rectangle(sensorInfo.outputSize));
+
+ /*
+ * As the resizer can't upscale, the crop rectangle has to be larger
+ * than the larger stream output size.
+ */
+ Size maxYuvSize;
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ const StreamConfiguration &config = pipe.stream->configuration();
+ if (isFormatRaw(config.pixelFormat)) {
+ LOG(MaliC55, Debug) << "Cannot crop with a RAW stream";
+ return;
+ }
+
+ Size streamSize = config.size;
+ if (streamSize.width > maxYuvSize.width)
+ maxYuvSize.width = streamSize.width;
+ if (streamSize.height > maxYuvSize.height)
+ maxYuvSize.height = streamSize.height;
+ }
+
+ Size expandedSize = ispCrop.size().expandedTo(maxYuvSize);
+ ispCrop.width = expandedSize.width;
+ ispCrop.height = expandedSize.height;
+
+ /*
+ * Now apply the scaler crop to each enabled output. This overrides the
+ * crop configuration performed at configure() time and can cause
+ * non-square pixels if the crop rectangle and the scaler output have
+ * different aspect ratios.
+ */
+ for (MaliC55Pipe &pipe : pipes_) {
+ if (!pipe.stream)
+ continue;
+
+ /* Create a copy to prevent setSelection() from modifying ispCrop. */
+ Rectangle pipeCrop = ispCrop;
+ ret = pipe.resizer->setSelection(0, V4L2_SEL_TGT_CROP, &pipeCrop);
+ if (ret) {
+ LOG(MaliC55, Error)
+ << "Failed to apply crop to "
+ << (pipe.stream == &data->frStream_ ?
+ "FR" : "DS") << " pipe";
+ return;
+ }
+ }
+}
+
+int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
+{
+ MaliC55CameraData *data = cameraData(camera);
+
+ /* Do not run the IPA if the TPG is in use. */
+ if (!data->ipa_) {
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+ frameInfo.statBuffer = nullptr;
+ frameInfo.paramBuffer = nullptr;
+ frameInfo.paramsDone = true;
+ frameInfo.statsDone = true;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+
+ return 0;
+ }
+
+ if (availableStatsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Stats buffer underrun";
+ return -ENOENT;
+ }
+
+ if (availableParamsBuffers_.empty()) {
+ LOG(MaliC55, Error) << "Params buffer underrun";
+ return -ENOENT;
+ }
+
+ MaliC55FrameInfo frameInfo;
+ frameInfo.request = request;
+
+ frameInfo.statBuffer = availableStatsBuffers_.front();
+ availableStatsBuffers_.pop();
+ frameInfo.paramBuffer = availableParamsBuffers_.front();
+ availableParamsBuffers_.pop();
+
+ frameInfo.paramsDone = false;
+ frameInfo.statsDone = false;
+
+ frameInfoMap_[request->sequence()] = frameInfo;
+
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+ data->ipa_->fillParams(request->sequence(),
+ frameInfo.paramBuffer->cookie());
+
+ return 0;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(Request *request)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.request == request)
+ return &info;
+ }
+
+ return nullptr;
+}
+
+MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(FrameBuffer *buffer)
+{
+ for (auto &[sequence, info] : frameInfoMap_) {
+ if (info.paramBuffer == buffer ||
+ info.statBuffer == buffer)
+ return &info;
+ }
+
+ return nullptr;
+}
+
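+/*
+ * Complete a request once its ISP parameters have been consumed, its
+ * statistics processed and all of its image buffers returned.
+ */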
+void PipelineHandlerMaliC55::tryComplete(MaliC55FrameInfo *info)
+{
+ if (!info->paramsDone)
+ return;
+ if (!info->statsDone)
+ return;
+
+ Request *request = info->request;
+ if (request->hasPendingBuffers())
+ return;
+
+ if (info->statBuffer)
+ availableStatsBuffers_.push(info->statBuffer);
+ if (info->paramBuffer)
+ availableParamsBuffers_.push(info->paramBuffer);
+
+ frameInfoMap_.erase(request->sequence());
+
+ completeRequest(request);
+}
+
+void PipelineHandlerMaliC55::imageBufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+ MaliC55FrameInfo *info = findFrameInfo(request);
+ ASSERT(info);
+
+ if (completeBuffer(request, buffer))
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::paramsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ info->paramsDone = true;
+
+ tryComplete(info);
+}
+
+void PipelineHandlerMaliC55::statsBufferReady(FrameBuffer *buffer)
+{
+ MaliC55FrameInfo *info = findFrameInfo(buffer);
+ ASSERT(info);
+
+ Request *request = info->request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
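+ /*
+ * Retrieve the sensor controls that were actually active for the frame
+ * this stats buffer refers to, so the IPA can evaluate the statistics
+ * against them.
+ */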
+ ControlList sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence);
+
+ data->ipa_->processStats(request->sequence(), buffer->cookie(),
+ sensorControls);
+}
+
+void PipelineHandlerMaliC55::paramsComputed(unsigned int requestId)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+ Request *request = frameInfo.request;
+ MaliC55CameraData *data = cameraData(request->_d()->camera());
+
+ /*
+ * Queue buffers for stats and params, then queue buffers to the capture
+ * video devices.
+ */
+
+ frameInfo.paramBuffer->_d()->metadata().planes()[0].bytesused =
+ sizeof(struct mali_c55_params_buffer);
+ params_->queueBuffer(frameInfo.paramBuffer);
+ stats_->queueBuffer(frameInfo.statBuffer);
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ MaliC55Pipe *pipe = pipeFromStream(data, stream);
+
+ pipe->cap->queueBuffer(buffer);
+ }
+}
+
+void PipelineHandlerMaliC55::statsProcessed(unsigned int requestId,
+ const ControlList &metadata)
+{
+ MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId];
+
+ frameInfo.statsDone = true;
+ frameInfo.request->metadata().merge(metadata);
+
+ tryComplete(&frameInfo);
+}
+
+bool PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
+ const std::string &name)
+{
+ if (data->loadIPA())
+ return false;
+
+ if (data->ipa_) {
+ data->ipa_->statsProcessed.connect(this, &PipelineHandlerMaliC55::statsProcessed);
+ data->ipa_->paramsComputed.connect(this, &PipelineHandlerMaliC55::paramsComputed);
+ }
+
+ std::set<Stream *> streams{ &data->frStream_ };
+ if (dsFitted_)
+ streams.insert(&data->dsStream_);
+
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data),
+ name, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+/*
+ * The only camera we support through direct connection to the ISP is the
+ * Mali-C55 TPG. Check we have that and warn if not.
+ */
+bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
+{
+ const std::string &name = link->source()->entity()->name();
+ if (name != "mali-c55 tpg") {
+ LOG(MaliC55, Warning) << "Unsupported direct connection to "
+ << link->source()->entity()->name();
+ /*
+ * Return true and just skip registering a camera for this
+ * entity.
+ */
+ return true;
+ }
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, link->source()->entity());
+
+ if (data->init())
+ return false;
+
+ return registerMaliCamera(std::move(data), name);
+}
+
+/*
+ * Register a Camera for each sensor connected to the ISP through a CSI-2
+ * receiver.
+ *
+ * \todo Support more complex topologies, such as video muxes.
+ */
+bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
+{
+ MediaEntity *csi2 = ispLink->source()->entity();
+ const MediaPad *csi2Sink = csi2->getPadByIndex(0);
+
+ for (MediaLink *link : csi2Sink->links()) {
+ MediaEntity *sensor = link->source()->entity();
+ unsigned int function = sensor->function();
+
+ if (function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<MaliC55CameraData> data =
+ std::make_unique<MaliC55CameraData>(this, sensor);
+ if (data->init())
+ return false;
+
+ data->properties_ = data->sensor_->properties();
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
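+ /*
+ * DelayedControls applies sensor controls with the per-control
+ * frame delays listed above, driven by the ISP frame start event.
+ */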
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ /* \todo: Init properties. */
+
+ if (!registerMaliCamera(std::move(data), sensor->name()))
+ return false;
+ }
+
+ return true;
+}
+
+bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
+{
+ const MediaPad *ispSink;
+
+ /*
+ * We search for just the always-available elements of the media graph.
+ * The TPG and the downscale pipe are both optional blocks and may not
+ * be fitted.
+ */
+ DeviceMatch dm("mali-c55");
+ dm.add("mali-c55 isp");
+ dm.add("mali-c55 resizer fr");
+ dm.add("mali-c55 fr");
+ dm.add("mali-c55 3a stats");
+ dm.add("mali-c55 3a params");
+
+ media_ = acquireMediaDevice(enumerator, dm);
+ if (!media_)
+ return false;
+
+ isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
+ if (isp_->open() < 0)
+ return false;
+
+ stats_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a stats");
+ if (stats_->open() < 0)
+ return false;
+
+ params_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a params");
+ if (params_->open() < 0)
+ return false;
+
+ MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
+ frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
+ if (frPipe->resizer->open() < 0)
+ return false;
+
+ frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
+ if (frPipe->cap->open() < 0)
+ return false;
+
+ frPipe->link = media_->link("mali-c55 resizer fr", 1, "mali-c55 fr", 0);
+ if (!frPipe->link) {
+ LOG(MaliC55, Error) << "No link between fr resizer and video node";
+ return false;
+ }
+
+ frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+
+ dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
+ if (dsFitted_) {
+ LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
+
+ MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
+
+ dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
+ if (dsPipe->resizer->open() < 0)
+ return false;
+
+ dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
+ if (dsPipe->cap->open() < 0)
+ return false;
+
+ dsPipe->link = media_->link("mali-c55 resizer ds", 1,
+ "mali-c55 ds", 0);
+ if (!dsPipe->link) {
+ LOG(MaliC55, Error) << "No link between ds resizer and video node";
+ return false;
+ }
+
+ dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady);
+ }
+
+ stats_->bufferReady.connect(this, &PipelineHandlerMaliC55::statsBufferReady);
+ params_->bufferReady.connect(this, &PipelineHandlerMaliC55::paramsBufferReady);
+
+ ispSink = isp_->entity()->getPadByIndex(0);
+ if (!ispSink || ispSink->links().empty()) {
+ LOG(MaliC55, Error) << "ISP sink pad error";
+ return false;
+ }
+
+ /*
+ * We could have several links pointing to the ISP's sink pad, which
+ * will be from entities with one of the following functions:
+ *
+ * MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
+ * MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
+ * MEDIA_ENT_F_IO_V4L - An input device
+ *
+ * The last one is unsupported for now. The TPG is relatively easy, as
+ * we just register a Camera for it. If we have a CSI-2 receiver we
+ * need to check its sink pad and register Cameras for anything
+ * connected to it (probably... there are some complex situations in
+ * which that might not be true, but let's pretend they don't exist
+ * until we come across them).
+ */
+ bool registered;
+ for (MediaLink *link : ispSink->links()) {
+ unsigned int function = link->source()->entity()->function();
+
+ switch (function) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ registered = registerTPGCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ registered = registerSensorCamera(link);
+ if (!registered)
+ return registered;
+
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ LOG(MaliC55, Warning) << "Memory input not yet supported";
+ break;
+ default:
+ LOG(MaliC55, Error) << "Unsupported entity function";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build
new file mode 100644
index 00000000..eba8e5a3
--- /dev/null
+++ b/src/libcamera/pipeline/mali-c55/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'mali-c55.cpp'
+])
diff --git a/src/libcamera/pipeline/meson.build b/src/libcamera/pipeline/meson.build
index 190ca5a8..8a61991c 100644
--- a/src/libcamera/pipeline/meson.build
+++ b/src/libcamera/pipeline/meson.build
@@ -1,3 +1,20 @@
-foreach pipeline : get_option('pipelines')
+# SPDX-License-Identifier: CC0-1.0
+
+# Location of pipeline specific configuration files
+pipeline_data_dir = libcamera_datadir / 'pipeline'
+
+# Allow multi-level directory structuring for the pipeline handlers if needed.
+subdirs = []
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')[0]
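+ # e.g. both 'rpi/vc4' and 'rpi/pisp' select the common 'rpi' subdirectory.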
+ if pipeline in subdirs
+ continue
+ endif
+
+ subdirs += pipeline
subdir(pipeline)
+
+ # Don't reuse the pipeline variable below; the subdirectory may have
+ # overwritten it.
endforeach
diff --git a/src/libcamera/pipeline/rkisp1/meson.build b/src/libcamera/pipeline/rkisp1/meson.build
index d04fb452..d21a6ef9 100644
--- a/src/libcamera/pipeline/rkisp1/meson.build
+++ b/src/libcamera/pipeline/rkisp1/meson.build
@@ -1,4 +1,6 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'rkisp1.cpp',
- 'timeline.cpp',
+ 'rkisp1_path.cpp',
])
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 2f909cef..35c793da 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -2,47 +2,58 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * rkisp1.cpp - Pipeline handler for Rockchip ISP1
+ * Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
-#include <array>
-#include <iomanip>
+#include <map>
#include <memory>
+#include <numeric>
+#include <optional>
#include <queue>
+#include <vector>
#include <linux/media-bus-format.h>
+#include <linux/rkisp1-config.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <ipa/rkisp1.h>
-#include <libcamera/buffer.h>
#include <libcamera/camera.h>
+#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "ipa_manager.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "timeline.h"
-#include "utils.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+#include <libcamera/ipa/rkisp1_ipa_interface.h>
+#include <libcamera/ipa/rkisp1_ipa_proxy.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter/converter_v4l2_m2m.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+#include "rkisp1_path.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(RkISP1)
class PipelineHandlerRkISP1;
-class RkISP1ActionQueueBuffers;
-
-enum RkISP1ActionType {
- SetSensor,
- SOE,
- QueueBuffers,
-};
+class RkISP1CameraData;
struct RkISP1FrameInfo {
unsigned int frame;
@@ -50,9 +61,9 @@ struct RkISP1FrameInfo {
FrameBuffer *paramBuffer;
FrameBuffer *statBuffer;
- FrameBuffer *videoBuffer;
+ FrameBuffer *mainPathBuffer;
+ FrameBuffer *selfPathBuffer;
- bool paramFilled;
bool paramDequeued;
bool metadataProcessed;
};
@@ -62,8 +73,10 @@ class RkISP1Frames
public:
RkISP1Frames(PipelineHandler *pipe);
- RkISP1FrameInfo *create(unsigned int frame, Request *request, Stream *stream);
+ RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
+ bool isRaw);
int destroy(unsigned int frame);
+ void clear();
RkISP1FrameInfo *find(unsigned int frame);
RkISP1FrameInfo *find(FrameBuffer *buffer);
@@ -74,67 +87,39 @@ private:
std::map<unsigned int, RkISP1FrameInfo *> frameInfo_;
};
-class RkISP1Timeline : public Timeline
-{
-public:
- RkISP1Timeline()
- : Timeline()
- {
- setDelay(SetSensor, -1, 5);
- setDelay(SOE, 0, -1);
- setDelay(QueueBuffers, -1, 10);
- }
-
- void bufferReady(FrameBuffer *buffer)
- {
- /*
- * Calculate SOE by taking the end of DMA set by the kernel and applying
- * the time offsets provideprovided by the IPA to find the best estimate
- * of SOE.
- */
-
- ASSERT(frameOffset(SOE) == 0);
-
- utils::time_point soe = std::chrono::time_point<utils::clock>()
- + std::chrono::nanoseconds(buffer->metadata().timestamp)
- + timeOffset(SOE);
-
- notifyStartOfExposure(buffer->metadata().sequence, soe);
- }
-
- void setDelay(unsigned int type, int frame, int msdelay)
- {
- utils::duration delay = std::chrono::milliseconds(msdelay);
- setRawDelay(type, frame, delay);
- }
-};
-
-class RkISP1CameraData : public CameraData
+class RkISP1CameraData : public Camera::Private
{
public:
- RkISP1CameraData(PipelineHandler *pipe)
- : CameraData(pipe), sensor_(nullptr), frame_(0),
- frameInfo_(pipe)
- {
- }
-
- ~RkISP1CameraData()
+ RkISP1CameraData(PipelineHandler *pipe, RkISP1MainPath *mainPath,
+ RkISP1SelfPath *selfPath)
+ : Camera::Private(pipe), frame_(0), frameInfo_(pipe),
+ mainPath_(mainPath), selfPath_(selfPath)
{
- delete sensor_;
}
- int loadIPA();
+ PipelineHandlerRkISP1 *pipe();
+ const PipelineHandlerRkISP1 *pipe() const;
+ int loadIPA(unsigned int hwRevision);
- Stream stream_;
- CameraSensor *sensor_;
+ Stream mainPathStream_;
+ Stream selfPathStream_;
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<DelayedControls> delayedCtrls_;
unsigned int frame_;
std::vector<IPABuffer> ipaBuffers_;
RkISP1Frames frameInfo_;
- RkISP1Timeline timeline_;
+
+ RkISP1MainPath *mainPath_;
+ RkISP1SelfPath *selfPath_;
+
+ std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_;
+
+ ControlInfoMap ipaControls_;
private:
- void queueFrameAction(unsigned int frame,
- const IPAOperationData &action);
+ void paramsComputed(unsigned int frame, unsigned int bytesused);
+ void setSensorControls(unsigned int frame,
+ const ControlList &sensorControls);
void metadataReady(unsigned int frame, const ControlList &metadata);
};
@@ -147,9 +132,10 @@ public:
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
+ const Transform &combinedTransform() { return combinedTransform_; }
private:
- static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
+ bool fitsAllPaths(const StreamConfiguration &cfg);
/*
* The RkISP1CameraData instance is guaranteed to be valid as long as the
@@ -160,55 +146,76 @@ private:
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
+ Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
{
public:
PipelineHandlerRkISP1(CameraManager *manager);
- ~PipelineHandlerRkISP1();
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
- RkISP1CameraData *cameraData(const Camera *camera)
+ static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
+
+ RkISP1CameraData *cameraData(Camera *camera)
{
- return static_cast<RkISP1CameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<RkISP1CameraData *>(camera->_d());
}
- friend RkISP1ActionQueueBuffers;
friend RkISP1CameraData;
+ friend RkISP1CameraConfiguration;
friend RkISP1Frames;
- int initLinks();
+ int initLinks(Camera *camera, const CameraSensor *sensor,
+ const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
- void tryCompleteRequest(Request *request);
- void bufferReady(FrameBuffer *buffer);
- void paramReady(FrameBuffer *buffer);
- void statReady(FrameBuffer *buffer);
+ void tryCompleteRequest(RkISP1FrameInfo *info);
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramBufferReady(FrameBuffer *buffer);
+ void statBufferReady(FrameBuffer *buffer);
+ void dewarpBufferReady(FrameBuffer *buffer);
+ void frameStart(uint32_t sequence);
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
+ int updateControls(RkISP1CameraData *data);
+
MediaDevice *media_;
- V4L2Subdevice *isp_;
- V4L2Subdevice *resizer_;
- V4L2VideoDevice *video_;
- V4L2VideoDevice *param_;
- V4L2VideoDevice *stat_;
+ std::unique_ptr<V4L2Subdevice> isp_;
+ std::unique_ptr<V4L2VideoDevice> param_;
+ std::unique_ptr<V4L2VideoDevice> stat_;
+ std::unique_ptr<V4L2Subdevice> csi_;
+
+ bool hasSelfPath_;
+ bool isRaw_;
+
+ RkISP1MainPath mainPath_;
+ RkISP1SelfPath selfPath_;
+
+ std::unique_ptr<V4L2M2MConverter> dewarper_;
+ Rectangle scalerMaxCrop_;
+ bool useDewarper_;
+
+ std::optional<Rectangle> activeCrop_;
+
+ /* Internal buffers used when dewarper is being used */
+ std::vector<std::unique_ptr<FrameBuffer>> mainPathBuffers_;
+ std::queue<FrameBuffer *> availableMainPathBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
@@ -216,45 +223,60 @@ private:
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
+
+ const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
- : pipe_(dynamic_cast<PipelineHandlerRkISP1 *>(pipe))
+ : pipe_(static_cast<PipelineHandlerRkISP1 *>(pipe))
{
}
-RkISP1FrameInfo *RkISP1Frames::create(unsigned int frame, Request *request, Stream *stream)
+RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
+ bool isRaw)
{
- if (pipe_->availableParamBuffers_.empty()) {
- LOG(RkISP1, Error) << "Parameters buffer underrun";
- return nullptr;
+ unsigned int frame = data->frame_;
+
+ FrameBuffer *paramBuffer = nullptr;
+ FrameBuffer *statBuffer = nullptr;
+ FrameBuffer *mainPathBuffer = nullptr;
+ FrameBuffer *selfPathBuffer = nullptr;
+
+ if (!isRaw) {
+ if (pipe_->availableParamBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Parameters buffer underrun";
+ return nullptr;
+ }
+
+ if (pipe_->availableStatBuffers_.empty()) {
+ LOG(RkISP1, Error) << "Statistic buffer underrun";
+ return nullptr;
+ }
+
+ paramBuffer = pipe_->availableParamBuffers_.front();
+ pipe_->availableParamBuffers_.pop();
+
+ statBuffer = pipe_->availableStatBuffers_.front();
+ pipe_->availableStatBuffers_.pop();
+
+ if (pipe_->useDewarper_) {
+ mainPathBuffer = pipe_->availableMainPathBuffers_.front();
+ pipe_->availableMainPathBuffers_.pop();
+ }
}
- FrameBuffer *paramBuffer = pipe_->availableParamBuffers_.front();
- if (pipe_->availableStatBuffers_.empty()) {
- LOG(RkISP1, Error) << "Statisitc buffer underrun";
- return nullptr;
- }
- FrameBuffer *statBuffer = pipe_->availableStatBuffers_.front();
-
- FrameBuffer *videoBuffer = request->findBuffer(stream);
- if (!videoBuffer) {
- LOG(RkISP1, Error)
- << "Attempt to queue request with invalid stream";
- return nullptr;
- }
-
- pipe_->availableParamBuffers_.pop();
- pipe_->availableStatBuffers_.pop();
+ if (!mainPathBuffer)
+ mainPathBuffer = request->findBuffer(&data->mainPathStream_);
+ selfPathBuffer = request->findBuffer(&data->selfPathStream_);
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
info->request = request;
info->paramBuffer = paramBuffer;
- info->videoBuffer = videoBuffer;
+ info->mainPathBuffer = mainPathBuffer;
+ info->selfPathBuffer = selfPathBuffer;
info->statBuffer = statBuffer;
- info->paramFilled = false;
info->paramDequeued = false;
info->metadataProcessed = false;
@@ -271,6 +293,7 @@ int RkISP1Frames::destroy(unsigned int frame)
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
frameInfo_.erase(info->frame);
@@ -279,6 +302,21 @@ int RkISP1Frames::destroy(unsigned int frame)
return 0;
}
+void RkISP1Frames::clear()
+{
+ for (const auto &entry : frameInfo_) {
+ RkISP1FrameInfo *info = entry.second;
+
+ pipe_->availableParamBuffers_.push(info->paramBuffer);
+ pipe_->availableStatBuffers_.push(info->statBuffer);
+ pipe_->availableMainPathBuffers_.push(info->mainPathBuffer);
+
+ delete info;
+ }
+
+ frameInfo_.clear();
+}
+
RkISP1FrameInfo *RkISP1Frames::find(unsigned int frame)
{
auto itInfo = frameInfo_.find(frame);
@@ -286,7 +324,8 @@ RkISP1FrameInfo *RkISP1Frames::find(unsigned int frame)
if (itInfo != frameInfo_.end())
return itInfo->second;
- LOG(RkISP1, Error) << "Can't locate info from frame";
+ LOG(RkISP1, Fatal) << "Can't locate info from frame";
+
return nullptr;
}
@@ -297,11 +336,13 @@ RkISP1FrameInfo *RkISP1Frames::find(FrameBuffer *buffer)
if (info->paramBuffer == buffer ||
info->statBuffer == buffer ||
- info->videoBuffer == buffer)
+ info->mainPathBuffer == buffer ||
+ info->selfPathBuffer == buffer)
return info;
}
- LOG(RkISP1, Error) << "Can't locate info from buffer";
+ LOG(RkISP1, Fatal) << "Can't locate info from buffer";
+
return nullptr;
}
@@ -314,112 +355,121 @@ RkISP1FrameInfo *RkISP1Frames::find(Request *request)
return info;
}
- LOG(RkISP1, Error) << "Can't locate info from request";
+ LOG(RkISP1, Fatal) << "Can't locate info from request";
+
return nullptr;
}
-class RkISP1ActionSetSensor : public FrameAction
+PipelineHandlerRkISP1 *RkISP1CameraData::pipe()
{
-public:
- RkISP1ActionSetSensor(unsigned int frame, CameraSensor *sensor, const ControlList &controls)
- : FrameAction(frame, SetSensor), sensor_(sensor), controls_(controls) {}
-
-protected:
- void run() override
- {
- sensor_->setControls(&controls_);
- }
+ return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
-private:
- CameraSensor *sensor_;
- ControlList controls_;
-};
+const PipelineHandlerRkISP1 *RkISP1CameraData::pipe() const
+{
+ return static_cast<const PipelineHandlerRkISP1 *>(Camera::Private::pipe());
+}
-class RkISP1ActionQueueBuffers : public FrameAction
+int RkISP1CameraData::loadIPA(unsigned int hwRevision)
{
-public:
- RkISP1ActionQueueBuffers(unsigned int frame, RkISP1CameraData *data,
- PipelineHandlerRkISP1 *pipe)
- : FrameAction(frame, QueueBuffers), data_(data), pipe_(pipe)
- {
- }
+ ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1);
+ if (!ipa_)
+ return -ENOENT;
-protected:
- void run() override
- {
- RkISP1FrameInfo *info = data_->frameInfo_.find(frame());
- if (!info)
- LOG(RkISP1, Fatal) << "Frame not known";
+ ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls);
+ ipa_->paramsComputed.connect(this, &RkISP1CameraData::paramsComputed);
+ ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady);
- if (info->paramFilled)
- pipe_->param_->queueBuffer(info->paramBuffer);
- else
- LOG(RkISP1, Error)
- << "Parameters not ready on time for frame "
- << frame() << ", ignore parameters.";
+ /*
+ * The API tuning file is made from the sensor name unless the
+ * environment variable overrides it.
+ */
+ std::string ipaTuningFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ ipaTuningFile =
+ ipa_->configurationFile(sensor_->model() + ".yaml", "uncalibrated.yaml");
+ } else {
+ ipaTuningFile = std::string(configFromEnv);
+ }
- pipe_->stat_->queueBuffer(info->statBuffer);
- pipe_->video_->queueBuffer(info->videoBuffer);
+ IPACameraSensorInfo sensorInfo{};
+ int ret = sensor_->sensorInfo(&sensorInfo);
+ if (ret) {
+ LOG(RkISP1, Error) << "Camera sensor information not available";
+ return ret;
}
-private:
- RkISP1CameraData *data_;
- PipelineHandlerRkISP1 *pipe_;
-};
+ ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
+ sensorInfo, sensor_->controls(), &ipaControls_);
+ if (ret < 0) {
+ LOG(RkISP1, Error) << "IPA initialization failure";
+ return ret;
+ }
+
+ return 0;
+}
-int RkISP1CameraData::loadIPA()
+void RkISP1CameraData::paramsComputed(unsigned int frame, unsigned int bytesused)
{
- ipa_ = IPAManager::instance()->createIPA(pipe_, 1, 1);
- if (!ipa_)
- return -ENOENT;
+ PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
+ RkISP1FrameInfo *info = frameInfo_.find(frame);
+ if (!info)
+ return;
- ipa_->queueFrameAction.connect(this,
- &RkISP1CameraData::queueFrameAction);
+ info->paramBuffer->_d()->metadata().planes()[0].bytesused = bytesused;
+ pipe->param_->queueBuffer(info->paramBuffer);
+ pipe->stat_->queueBuffer(info->statBuffer);
- return 0;
+ if (info->mainPathBuffer)
+ mainPath_->queueBuffer(info->mainPathBuffer);
+
+ if (selfPath_ && info->selfPathBuffer)
+ selfPath_->queueBuffer(info->selfPathBuffer);
}
-void RkISP1CameraData::queueFrameAction(unsigned int frame,
- const IPAOperationData &action)
+void RkISP1CameraData::setSensorControls([[maybe_unused]] unsigned int frame,
+ const ControlList &sensorControls)
{
- switch (action.operation) {
- case RKISP1_IPA_ACTION_V4L2_SET: {
- const ControlList &controls = action.controls[0];
- timeline_.scheduleAction(std::make_unique<RkISP1ActionSetSensor>(frame,
- sensor_,
- controls));
- break;
- }
- case RKISP1_IPA_ACTION_PARAM_FILLED: {
- RkISP1FrameInfo *info = frameInfo_.find(frame);
- if (info)
- info->paramFilled = true;
- break;
- }
- case RKISP1_IPA_ACTION_METADATA:
- metadataReady(frame, action.controls[0]);
- break;
- default:
- LOG(RkISP1, Error) << "Unkown action " << action.operation;
- break;
- }
+ delayedCtrls_->push(sensorControls);
}
void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &metadata)
{
- PipelineHandlerRkISP1 *pipe =
- static_cast<PipelineHandlerRkISP1 *>(pipe_);
-
RkISP1FrameInfo *info = frameInfo_.find(frame);
if (!info)
return;
- info->request->metadata() = metadata;
+ info->request->metadata().merge(metadata);
info->metadataProcessed = true;
- pipe->tryCompleteRequest(info->request);
+ pipe()->tryCompleteRequest(info);
}
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+namespace {
+
+/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
+const std::map<PixelFormat, uint32_t> rawFormats = {
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
@@ -428,120 +478,321 @@ RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
data_ = data;
}
-CameraConfiguration::Status RkISP1CameraConfiguration::validate()
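+/*
+ * Check whether a stream configuration can be satisfied by both the main
+ * path and, when fitted, the self path.
+ */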
+bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
- static const std::array<PixelFormat, 8> formats{
- PixelFormat(DRM_FORMAT_YUYV),
- PixelFormat(DRM_FORMAT_YVYU),
- PixelFormat(DRM_FORMAT_VYUY),
- PixelFormat(DRM_FORMAT_NV16),
- PixelFormat(DRM_FORMAT_NV61),
- PixelFormat(DRM_FORMAT_NV21),
- PixelFormat(DRM_FORMAT_NV12),
- /* \todo Add support for 8-bit greyscale to DRM formats */
- };
+ const CameraSensor *sensor = data_->sensor_.get();
+ StreamConfiguration config;
+
+ config = cfg;
+ if (data_->mainPath_->validate(sensor, sensorConfig, &config) != Valid)
+ return false;
+
+ config = cfg;
+ if (data_->selfPath_ &&
+ data_->selfPath_->validate(sensor, sensorConfig, &config) != Valid)
+ return false;
+
+ return true;
+}
- const CameraSensor *sensor = data_->sensor_;
- Status status = Valid;
+CameraConfiguration::Status RkISP1CameraConfiguration::validate()
+{
+ const PipelineHandlerRkISP1 *pipe = data_->pipe();
+ const CameraSensor *sensor = data_->sensor_.get();
+ unsigned int pathCount = data_->selfPath_ ? 2 : 1;
+ Status status;
if (config_.empty())
return Invalid;
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Make sure that if a sensor configuration has been requested it
+ * is valid.
+ */
+ if (sensorConfig) {
+ if (!sensorConfig->isValid()) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration request";
+
+ return Invalid;
+ }
+
+ unsigned int bitDepth = sensorConfig->bitDepth;
+ if (bitDepth != 8 && bitDepth != 10 && bitDepth != 12) {
+ LOG(RkISP1, Error)
+ << "Invalid sensor configuration bit depth";
+
+ return Invalid;
+ }
+ }
+
/* Cap the number of entries to the available streams. */
- if (config_.size() > 1) {
- config_.resize(1);
+ if (config_.size() > pathCount) {
+ config_.resize(pathCount);
status = Adjusted;
}
- StreamConfiguration &cfg = config_[0];
-
- /* Adjust the pixel format. */
- if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) ==
- formats.end()) {
- LOG(RkISP1, Debug) << "Adjusting format to NV12";
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12),
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
status = Adjusted;
+
+ /*
+ * Simultaneous capture of raw and processed streams isn't possible. If
+ * there is any raw stream, cap the number of streams to one.
+ */
+ if (config_.size() > 1) {
+ for (const auto &cfg : config_) {
+ if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW) {
+ config_.resize(1);
+ status = Adjusted;
+ break;
+ }
+ }
}
- /* Select the sensor format. */
- sensorFormat_ = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR12_1X12,
- MEDIA_BUS_FMT_SGBRG12_1X12,
- MEDIA_BUS_FMT_SGRBG12_1X12,
- MEDIA_BUS_FMT_SRGGB12_1X12,
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_SGBRG8_1X8,
- MEDIA_BUS_FMT_SGRBG8_1X8,
- MEDIA_BUS_FMT_SRGGB8_1X8 },
- cfg.size);
- if (!sensorFormat_.size.width || !sensorFormat_.size.height)
- sensorFormat_.size = sensor->resolution();
+ bool useDewarper = false;
+ if (pipe->dewarper_) {
+ /*
+ * Platforms with dewarper support, such as i.MX8MP, support
+		 * only a single stream, so it is sufficient to inspect
+		 * config_[0] here.
+ */
+ bool isRaw = PixelFormatInfo::info(config_[0].pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+ if (!isRaw)
+ useDewarper = true;
+ }
+
+ /*
+	 * If there is more than one stream in the configuration, figure out
+	 * the order in which to evaluate the streams. The first stream has
+	 * the highest priority, but if both the main path and the self path
+	 * can satisfy it, evaluate the second stream first, as the first
+	 * stream is guaranteed to work with whichever path is not used by
+	 * the second one.
+ */
+ std::vector<unsigned int> order(config_.size());
+ std::iota(order.begin(), order.end(), 0);
+ if (config_.size() == 2 && fitsAllPaths(config_[0]))
+ std::reverse(order.begin(), order.end());
/*
- * Provide a suitable default that matches the sensor aspect
- * ratio and clamp the size to the hardware bounds.
- *
- * \todo: Check the hardware alignment constraints.
+ * Validate the configuration against the desired path and, if the
+ * platform supports it, the dewarper.
*/
- const Size size = cfg.size;
+ auto validateConfig = [&](StreamConfiguration &cfg, RkISP1Path *path,
+ Stream *stream, Status expectedStatus) {
+ StreamConfiguration tryCfg = cfg;
+
+ Status ret = path->validate(sensor, sensorConfig, &tryCfg);
+ if (ret == Invalid)
+ return false;
+
+ if (!useDewarper &&
+ (expectedStatus == Valid && ret == Adjusted))
+ return false;
+
+ if (useDewarper) {
+ bool adjusted;
+
+ pipe->dewarper_->validateOutput(&tryCfg, &adjusted,
+ Converter::Alignment::Down);
+ if (expectedStatus == Valid && adjusted)
+ return false;
+ }
+
+ cfg = tryCfg;
+ cfg.setStream(stream);
+ return true;
+ };
- if (!cfg.size.width || !cfg.size.height) {
- cfg.size.width = 1280;
- cfg.size.height = 1280 * sensorFormat_.size.height
- / sensorFormat_.size.width;
+ bool mainPathAvailable = true;
+ bool selfPathAvailable = data_->selfPath_;
+ RkISP1Path *mainPath = data_->mainPath_;
+ RkISP1Path *selfPath = data_->selfPath_;
+ Stream *mainPathStream = const_cast<Stream *>(&data_->mainPathStream_);
+ Stream *selfPathStream = const_cast<Stream *>(&data_->selfPathStream_);
+ for (unsigned int index : order) {
+ StreamConfiguration &cfg = config_[index];
+
+ /* Try to match stream without adjusting configuration. */
+ if (mainPathAvailable) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Valid)) {
+ mainPathAvailable = false;
+ continue;
+ }
+ }
+
+ if (selfPathAvailable) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Valid)) {
+ selfPathAvailable = false;
+ continue;
+ }
+ }
+
+ /* Try to match stream allowing adjusting configuration. */
+ if (mainPathAvailable) {
+ if (validateConfig(cfg, mainPath, mainPathStream, Adjusted)) {
+ mainPathAvailable = false;
+ status = Adjusted;
+ continue;
+ }
+ }
+
+ if (selfPathAvailable) {
+ if (validateConfig(cfg, selfPath, selfPathStream, Adjusted)) {
+ selfPathAvailable = false;
+ status = Adjusted;
+ continue;
+ }
+ }
+
+ /* All paths rejected configuration. */
+ LOG(RkISP1, Debug) << "Camera configuration not supported "
+ << cfg.toString();
+ return Invalid;
}
- cfg.size.width = std::max(32U, std::min(4416U, cfg.size.width));
- cfg.size.height = std::max(16U, std::min(3312U, cfg.size.height));
+ /* Select the sensor format. */
+ PixelFormat rawFormat;
+ Size maxSize;
- if (cfg.size != size) {
- LOG(RkISP1, Debug)
- << "Adjusting size from " << size.toString()
- << " to " << cfg.size.toString();
- status = Adjusted;
+ for (const StreamConfiguration &cfg : config_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
+ rawFormat = cfg.pixelFormat;
+
+ maxSize = std::max(maxSize, cfg.size);
}
- cfg.bufferCount = RKISP1_BUFFER_COUNT;
+ std::vector<unsigned int> mbusCodes;
- return status;
-}
+ if (rawFormat.isValid()) {
+ mbusCodes = { rawFormats.at(rawFormat) };
+ } else {
+ std::transform(rawFormats.begin(), rawFormats.end(),
+ std::back_inserter(mbusCodes),
+ [](const auto &value) { return value.second; });
+ }
-PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
- : PipelineHandler(manager), isp_(nullptr), resizer_(nullptr),
- video_(nullptr), param_(nullptr), stat_(nullptr)
-{
-}
+ sensorFormat_ = sensor->getFormat(mbusCodes, maxSize,
+ mainPath->maxResolution());
-PipelineHandlerRkISP1::~PipelineHandlerRkISP1()
-{
- delete param_;
- delete stat_;
- delete video_;
- delete resizer_;
- delete isp_;
+ if (sensorFormat_.size.isNull())
+ sensorFormat_.size = sensor->resolution();
+
+ return status;
}
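The sensor configuration checks at the top of validate() can be exercised from application code; a minimal sketch, assuming the std::optional sensorConfig member used above and purely illustrative values (1920x1080 at 10 bits):

	SensorConfiguration sensorCfg;
	sensorCfg.bitDepth = 10;			/* must be 8, 10 or 12 */
	sensorCfg.outputSize = Size(1920, 1080);

	config->sensorConfig = sensorCfg;
	if (config->validate() == CameraConfiguration::Invalid)
		return;		/* the sensor cannot produce this combination */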
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
-CameraConfiguration *PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
+ : PipelineHandler(manager), hasSelfPath_(true), useDewarper_(false)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
- CameraConfiguration *config = new RkISP1CameraConfiguration(camera, data);
+ unsigned int pathCount = data->selfPath_ ? 2 : 1;
+ if (roles.size() > pathCount) {
+ LOG(RkISP1, Error) << "Too many stream roles requested";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
- StreamConfiguration cfg{};
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
- cfg.size = data->sensor_->resolution();
+ /*
+ * As the ISP can't output different color spaces for the main and self
+ * path, pick a sensible default color space based on the role of the
+ * first stream and use it for all streams.
+ */
+ std::optional<ColorSpace> colorSpace;
+ bool mainPathAvailable = true;
+
+ for (const StreamRole role : roles) {
+ Size size;
+
+ switch (role) {
+ case StreamRole::StillCapture:
+ /* JPEG encoders typically expect sYCC. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = data->sensor_->resolution();
+ break;
+
+ case StreamRole::Viewfinder:
+ /*
+ * sYCC is the YCbCr encoding of sRGB, which is commonly
+ * used by displays.
+ */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Sycc;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::VideoRecording:
+ /* Rec. 709 is a good default for HD video recording. */
+ if (!colorSpace)
+ colorSpace = ColorSpace::Rec709;
+
+ size = kRkISP1PreviewSize;
+ break;
+
+ case StreamRole::Raw:
+ if (roles.size() > 1) {
+ LOG(RkISP1, Error)
+ << "Can't capture both raw and processed streams";
+ return nullptr;
+ }
+
+ colorSpace = ColorSpace::Raw;
+ size = data->sensor_->resolution();
+ break;
+
+ default:
+ LOG(RkISP1, Warning)
+ << "Requested stream role not supported: " << role;
+ return nullptr;
+ }
- config->addConfiguration(cfg);
+ /*
+ * Prefer the main path if available, as it supports higher
+ * resolutions.
+ *
+ * \todo Using the main path unconditionally hides support for
+ * RGB (only available on the self path) in the streams formats
+ * exposed to applications. This likely calls for a better API
+ * to expose streams capabilities.
+ */
+ RkISP1Path *path;
+ if (mainPathAvailable) {
+ path = data->mainPath_;
+ mainPathAvailable = false;
+ } else {
+ path = data->selfPath_;
+ }
+
+ StreamConfiguration cfg =
+ path->generateConfiguration(data->sensor_.get(), size, role);
+ if (!cfg.pixelFormat.isValid())
+ return nullptr;
+
+ cfg.colorSpace = colorSpace;
+ config->addConfiguration(cfg);
+ }
config->validate();
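A sketch of the application-side call that reaches generateConfiguration(), assuming a shared_ptr<Camera> named `camera` has already been acquired (names are illustrative, not part of this patch); the two-role request exercises both the main and self paths:

	std::unique_ptr<CameraConfiguration> config =
		camera->generateConfiguration({ StreamRole::Viewfinder,
						StreamRole::VideoRecording });
	if (!config)
		return;		/* e.g. more roles than available paths */

	if (config->validate() == CameraConfiguration::Invalid)
		return;

	camera->configure(config.get());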
@@ -553,148 +804,228 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
RkISP1CameraConfiguration *config =
static_cast<RkISP1CameraConfiguration *>(c);
RkISP1CameraData *data = cameraData(camera);
- StreamConfiguration &cfg = config->at(0);
- CameraSensor *sensor = data->sensor_;
+ CameraSensor *sensor = data->sensor_.get();
int ret;
- /*
- * Configure the sensor links: enable the link corresponding to this
- * camera and disable all the other sensor links.
- */
- const MediaPad *pad = isp_->entity()->getPadByIndex(0);
-
- for (MediaLink *link : pad->links()) {
- bool enable = link->source()->entity() == sensor->entity();
-
- if (!!(link->flags() & MEDIA_LNK_FL_ENABLED) == enable)
- continue;
-
- LOG(RkISP1, Debug)
- << (enable ? "Enabling" : "Disabling")
- << " link from sensor '"
- << link->source()->entity()->name()
- << "' to ISP";
-
- ret = link->setEnabled(enable);
- if (ret < 0)
- return ret;
- }
+ ret = initLinks(camera, sensor, *config);
+ if (ret)
+ return ret;
/*
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
V4L2SubdeviceFormat format = config->sensorFormat();
- LOG(RkISP1, Debug) << "Configuring sensor with " << format.toString();
+ LOG(RkISP1, Debug) << "Configuring sensor with " << format;
+
+ if (config->sensorConfig)
+ ret = sensor->applyConfiguration(*config->sensorConfig,
+ config->combinedTransform(),
+ &format);
+ else
+ ret = sensor->setFormat(&format, config->combinedTransform());
- ret = sensor->setFormat(&format);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "Sensor configured with " << format.toString();
+ LOG(RkISP1, Debug) << "Sensor configured with " << format;
+
+ if (csi_) {
+ ret = csi_->setFormat(0, &format);
+ if (ret < 0)
+ return ret;
+ }
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "ISP input pad configured with " << format.toString();
-
- /* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
- format.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
- LOG(RkISP1, Debug) << "Configuring ISP output pad with " << format.toString();
-
- ret = isp_->setFormat(2, &format);
+ Rectangle inputCrop(0, 0, format.size);
+ ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &inputCrop);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "ISP output pad configured with " << format.toString();
+ LOG(RkISP1, Debug)
+ << "ISP input pad configured with " << format
+ << " crop " << inputCrop;
- ret = resizer_->setFormat(0, &format);
- if (ret < 0)
- return ret;
+ Rectangle outputCrop = inputCrop;
+ const PixelFormat &streamFormat = config->at(0).pixelFormat;
+ const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
+ isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
+ useDewarper_ = dewarper_ && !isRaw_;
+
+ /* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
+ if (!isRaw_)
+ format.code = MEDIA_BUS_FMT_YUYV8_2X8;
- LOG(RkISP1, Debug) << "Resizer input pad configured with " << format.toString();
+ /*
+	 * On devices without DUAL_CROP (like the i.MX8MP), cropping needs to
+	 * be done on the ISP/IS output.
+ */
+ if (media_->hwRevision() == RKISP1_V_IMX8MP) {
+		/* The i.MX8MP has only a single path. */
+ const auto &cfg = config->at(0);
+ Size ispCrop = format.size.boundedToAspectRatio(cfg.size);
+ if (useDewarper_)
+ ispCrop = dewarper_->adjustInputSize(cfg.pixelFormat,
+ ispCrop);
+ else
+ ispCrop.alignUpTo(2, 2);
- format.size = cfg.size;
+ outputCrop = ispCrop.centeredTo(Rectangle(format.size).center());
+ format.size = ispCrop;
+ }
- LOG(RkISP1, Debug) << "Configuring resizer output pad with " << format.toString();
+ LOG(RkISP1, Debug)
+ << "Configuring ISP output pad with " << format
+ << " crop " << outputCrop;
- ret = resizer_->setFormat(1, &format);
+ ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &outputCrop);
if (ret < 0)
return ret;
- LOG(RkISP1, Debug) << "Resizer output pad configured with " << format.toString();
+ format.colorSpace = config->at(0).colorSpace;
+ ret = isp_->setFormat(2, &format);
+ if (ret < 0)
+ return ret;
- V4L2DeviceFormat outputFormat = {};
- outputFormat.fourcc = video_->toV4L2PixelFormat(cfg.pixelFormat);
- outputFormat.size = cfg.size;
- outputFormat.planesCount = 2;
+ LOG(RkISP1, Debug)
+ << "ISP output pad configured with " << format
+ << " crop " << outputCrop;
- ret = video_->setFormat(&outputFormat);
+ IPACameraSensorInfo sensorInfo;
+ ret = data->sensor_->sensorInfo(&sensorInfo);
if (ret)
return ret;
- if (outputFormat.size != cfg.size ||
- outputFormat.fourcc != video_->toV4L2PixelFormat(cfg.pixelFormat)) {
- LOG(RkISP1, Error)
- << "Unable to configure capture in " << cfg.toString();
- return -EINVAL;
+ std::map<unsigned int, IPAStream> streamConfig;
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
+
+ for (const StreamConfiguration &cfg : *config) {
+ if (cfg.stream() == &data->mainPathStream_) {
+ ret = mainPath_.configure(cfg, format);
+ streamConfig[0] = IPAStream(cfg.pixelFormat,
+ cfg.size);
+ /* Configure dewarp */
+ if (dewarper_ && !isRaw_) {
+ outputCfgs.push_back(const_cast<StreamConfiguration &>(cfg));
+ ret = dewarper_->configure(cfg, outputCfgs);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate the crop rectangle of the data
+ * flowing into the dewarper in sensor
+ * coordinates.
+ */
+ scalerMaxCrop_ =
+ outputCrop.transformedBetween(inputCrop,
+ sensorInfo.analogCrop);
+ }
+ } else if (hasSelfPath_) {
+ ret = selfPath_.configure(cfg, format);
+ streamConfig[1] = IPAStream(cfg.pixelFormat,
+ cfg.size);
+ } else {
+ return -ENODEV;
+ }
+
+ if (ret)
+ return ret;
}
- V4L2DeviceFormat paramFormat = {};
- paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS);
+ V4L2DeviceFormat paramFormat;
+ paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_EXT_PARAMS);
ret = param_->setFormat(&paramFormat);
if (ret)
return ret;
- V4L2DeviceFormat statFormat = {};
+ V4L2DeviceFormat statFormat;
statFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_STAT_3A);
ret = stat_->setFormat(&statFormat);
if (ret)
return ret;
- cfg.setStream(&data->stream_);
+ /* Inform IPA of stream configuration and sensor controls. */
+ ipa::rkisp1::IPAConfigInfo ipaConfig{ sensorInfo,
+ data->sensor_->controls(),
+ paramFormat.fourcc };
- return 0;
+ ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_);
+ if (ret) {
+		LOG(RkISP1, Error) << "Failed to configure IPA (" << ret << ")";
+ return ret;
+ }
+
+ return updateControls(data);
}
-int PipelineHandlerRkISP1::exportFrameBuffers(Camera *camera, Stream *stream,
+int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
+ RkISP1CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
- return video_->exportBuffers(count, buffers);
+
+ if (stream == &data->mainPathStream_) {
+ /*
+		 * Currently the i.MX8MP is the only platform with a DW100
+		 * dewarper. It has a main path and no self path, so for now
+		 * export buffers from the dewarper only for the main path
+		 * stream.
+ */
+ if (useDewarper_)
+ return dewarper_->exportBuffers(&data->mainPathStream_, count, buffers);
+ else
+ return mainPath_.exportBuffers(count, buffers);
+ } else if (hasSelfPath_ && stream == &data->selfPathStream_) {
+ return selfPath_.exportBuffers(count, buffers);
+ }
+
+ return -EINVAL;
}
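For context, applications reach exportFrameBuffers() through the FrameBufferAllocator helper; a minimal sketch, assuming a configured camera and a Stream pointer taken from the validated configuration:

	FrameBufferAllocator allocator(camera);

	int ret = allocator.allocate(stream);
	if (ret < 0)
		return ret;	/* e.g. -EINVAL for an unknown stream */

	const auto &buffers = allocator.buffers(stream);
	/* Attach each buffer to a Request before queueing it to the camera. */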
int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
- unsigned int count = data->stream_.configuration().bufferCount;
unsigned int ipaBufferId = 1;
int ret;
- ret = video_->importBuffers(count);
- if (ret < 0)
- goto error;
+ unsigned int maxCount = std::max({
+ data->mainPathStream_.configuration().bufferCount,
+ data->selfPathStream_.configuration().bufferCount,
+ });
- ret = param_->allocateBuffers(count, &paramBuffers_);
- if (ret < 0)
- goto error;
+ if (!isRaw_) {
+ ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+ if (ret < 0)
+ goto error;
- ret = stat_->allocateBuffers(count, &statBuffers_);
- if (ret < 0)
- goto error;
+ ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* If the dewarper is being used, allocate internal buffers for ISP. */
+ if (useDewarper_) {
+ ret = mainPath_.exportBuffers(maxCount, &mainPathBuffers_);
+ if (ret < 0)
+ goto error;
+
+ for (std::unique_ptr<FrameBuffer> &buffer : mainPathBuffers_)
+ availableMainPathBuffers_.push(buffer.get());
+ }
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
- data->ipaBuffers_.push_back({ .id = buffer->cookie(),
- .planes = buffer->planes() });
+ data->ipaBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
availableParamBuffers_.push(buffer.get());
}
for (std::unique_ptr<FrameBuffer> &buffer : statBuffers_) {
buffer->setCookie(ipaBufferId++);
- data->ipaBuffers_.push_back({ .id = buffer->cookie(),
- .planes = buffer->planes() });
+ data->ipaBuffers_.emplace_back(buffer->cookie(),
+ buffer->planes());
availableStatBuffers_.push(buffer.get());
}
@@ -705,7 +1036,7 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
error:
paramBuffers_.clear();
statBuffers_.clear();
- video_->releaseBuffers();
+ mainPathBuffers_.clear();
return ret;
}
@@ -720,8 +1051,12 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
while (!availableParamBuffers_.empty())
availableParamBuffers_.pop();
+ while (!availableMainPathBuffers_.empty())
+ availableMainPathBuffers_.pop();
+
paramBuffers_.clear();
statBuffers_.clear();
+ mainPathBuffers_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : data->ipaBuffers_)
@@ -736,115 +1071,134 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
if (stat_->releaseBuffers())
LOG(RkISP1, Error) << "Failed to release stat buffers";
- if (video_->releaseBuffers())
- LOG(RkISP1, Error) << "Failed to release video buffers";
-
return 0;
}
-int PipelineHandlerRkISP1::start(Camera *camera)
+int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
RkISP1CameraData *data = cameraData(camera);
+ utils::ScopeExitActions actions;
int ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
+ actions += [&]() { freeBuffers(camera); };
- data->frame_ = 0;
-
- ret = param_->streamOn();
+ ret = data->ipa_->start();
if (ret) {
- freeBuffers(camera);
LOG(RkISP1, Error)
- << "Failed to start parameters " << camera->name();
+ << "Failed to start IPA " << camera->id();
return ret;
}
+ actions += [&]() { data->ipa_->stop(); };
- ret = stat_->streamOn();
- if (ret) {
- param_->streamOff();
- freeBuffers(camera);
- LOG(RkISP1, Error)
- << "Failed to start statistics " << camera->name();
- return ret;
- }
+ data->frame_ = 0;
- ret = video_->streamOn();
- if (ret) {
- param_->streamOff();
- stat_->streamOff();
- freeBuffers(camera);
+ if (!isRaw_) {
+ ret = param_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start parameters " << camera->id();
+ return ret;
+ }
+ actions += [&]() { param_->streamOff(); };
- LOG(RkISP1, Error)
- << "Failed to start camera " << camera->name();
+ ret = stat_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start statistics " << camera->id();
+ return ret;
+ }
+ actions += [&]() { stat_->streamOff(); };
+
+ if (useDewarper_) {
+ ret = dewarper_->start();
+ if (ret) {
+ LOG(RkISP1, Error) << "Failed to start dewarper";
+ return ret;
+ }
+ actions += [&]() { dewarper_->stop(); };
+ }
}
- activeCamera_ = camera;
+ if (data->mainPath_->isEnabled()) {
+ ret = mainPath_.start();
+ if (ret)
+ return ret;
+ actions += [&]() { mainPath_.stop(); };
+ }
- /* Inform IPA of stream configuration and sensor controls. */
- std::map<unsigned int, IPAStream> streamConfig;
- streamConfig[0] = {
- .pixelFormat = data->stream_.configuration().pixelFormat,
- .size = data->stream_.configuration().size,
- };
+ if (hasSelfPath_ && data->selfPath_->isEnabled()) {
+ ret = selfPath_.start();
+ if (ret)
+ return ret;
+ }
- std::map<unsigned int, const ControlInfoMap &> entityControls;
- entityControls.emplace(0, data->sensor_->controls());
+ isp_->setFrameStartEnabled(true);
- data->ipa_->configure(streamConfig, entityControls);
+ activeCamera_ = camera;
- return ret;
+ actions.release();
+ return 0;
}
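The utils::ScopeExitActions usage above replaces the hand-rolled error unwinding of the old code; a reduced sketch of the idiom, with hypothetical resourceA/resourceB helpers standing in for the stream-on calls:

	int acquireBoth()
	{
		utils::ScopeExitActions actions;

		int ret = resourceA.acquire();
		if (ret)
			return ret;	/* nothing queued, nothing runs */
		actions += [&]() { resourceA.release(); };

		ret = resourceB.acquire();
		if (ret)
			return ret;	/* resourceA.release() runs here */

		actions.release();	/* success: disarm the cleanup */
		return 0;
	}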
-void PipelineHandlerRkISP1::stop(Camera *camera)
+void PipelineHandlerRkISP1::stopDevice(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
int ret;
- ret = video_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop camera " << camera->name();
+ isp_->setFrameStartEnabled(false);
- ret = stat_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop statistics " << camera->name();
+ data->ipa_->stop();
- ret = param_->streamOff();
- if (ret)
- LOG(RkISP1, Warning)
- << "Failed to stop parameters " << camera->name();
+ if (hasSelfPath_)
+ selfPath_.stop();
+ mainPath_.stop();
- data->timeline_.reset();
+ if (!isRaw_) {
+ ret = stat_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop statistics for " << camera->id();
+
+ ret = param_->streamOff();
+ if (ret)
+ LOG(RkISP1, Warning)
+ << "Failed to stop parameters for " << camera->id();
+
+ if (useDewarper_)
+ dewarper_->stop();
+ }
+
+ ASSERT(data->queuedRequests_.empty());
+ data->frameInfo_.clear();
freeBuffers(camera);
activeCamera_ = nullptr;
}
-int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera,
- Request *request)
+int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
- Stream *stream = &data->stream_;
- RkISP1FrameInfo *info = data->frameInfo_.create(data->frame_, request,
- stream);
+ RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
- IPAOperationData op;
- op.operation = RKISP1_IPA_EVENT_QUEUE_REQUEST;
- op.data = { data->frame_, info->paramBuffer->cookie() };
- op.controls = { request->controls() };
- data->ipa_->processEvent(op);
+ data->ipa_->queueRequest(data->frame_, request->controls());
+ if (isRaw_) {
+ if (info->mainPathBuffer)
+ data->mainPath_->queueBuffer(info->mainPathBuffer);
- data->timeline_.scheduleAction(std::make_unique<RkISP1ActionQueueBuffers>(data->frame_,
- data,
- this));
+ if (data->selfPath_ && info->selfPathBuffer)
+ data->selfPath_->queueBuffer(info->selfPathBuffer);
+ } else {
+ data->ipa_->computeParams(data->frame_,
+ info->paramBuffer->cookie());
+ }
data->frame_++;
@@ -855,22 +1209,106 @@ int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera,
* Match and Setup
*/
-int PipelineHandlerRkISP1::initLinks()
+int PipelineHandlerRkISP1::initLinks(Camera *camera,
+ const CameraSensor *sensor,
+ const RkISP1CameraConfiguration &config)
{
- MediaLink *link;
+ RkISP1CameraData *data = cameraData(camera);
int ret;
ret = media_->disableLinks();
if (ret < 0)
return ret;
- link = media_->link("rkisp1_isp", 2, "rkisp1_resizer_mainpath", 0);
- if (!link)
- return -ENODEV;
+ /*
+ * Configure the sensor links: enable the link corresponding to this
+ * camera.
+ */
+ for (MediaLink *link : ispSink_->links()) {
+ if (link->source()->entity() != sensor->entity())
+ continue;
- ret = link->setEnabled(true);
- if (ret < 0)
- return ret;
+ LOG(RkISP1, Debug)
+ << "Enabling link from sensor '"
+ << link->source()->entity()->name()
+ << "' to ISP";
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (csi_) {
+ MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
+
+ ret = link->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (const StreamConfiguration &cfg : config) {
+ if (cfg.stream() == &data->mainPathStream_)
+ ret = data->mainPath_->setEnabled(true);
+ else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
+ ret = data->selfPath_->setEnabled(true);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Update the camera controls
+ * \param[in] data The camera data
+ *
+ * Compute the camera controls by calculating the controls the pipeline is
+ * responsible for and merging them with the controls computed by the IPA.
+ *
+ * This function needs data->ipaControls_ to be refreshed when a new
+ * configuration is applied to the camera by the IPA configure() function.
+ *
+ * Always call this function after IPA configure() to ensure the camera
+ * exposes a properly refreshed IPA controls list.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int PipelineHandlerRkISP1::updateControls(RkISP1CameraData *data)
+{
+ ControlInfoMap::Map controls;
+
+ if (dewarper_) {
+ std::pair<Rectangle, Rectangle> cropLimits;
+ if (dewarper_->isConfigured(&data->mainPathStream_))
+ cropLimits = dewarper_->inputCropBounds(&data->mainPathStream_);
+ else
+ cropLimits = dewarper_->inputCropBounds();
+
+ /*
+		 * ScalerCrop is specified in sensor coordinates, so the
+		 * limits need to be transformed accordingly. We can safely
+		 * assume that the maximum crop limit contains the full FOV
+		 * of the dewarper.
+ */
+ Rectangle min = cropLimits.first.transformedBetween(cropLimits.second,
+ scalerMaxCrop_);
+
+ controls[&controls::ScalerCrop] = ControlInfo(min,
+ scalerMaxCrop_,
+ scalerMaxCrop_);
+ data->properties_.set(properties::ScalerCropMaximum, scalerMaxCrop_);
+ activeCrop_ = scalerMaxCrop_;
+ }
+
+ /* Add the IPA registered controls to list of camera controls. */
+ for (const auto &ipaControl : data->ipaControls_)
+ controls[ipaControl.first] = ipaControl.second;
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls),
+ controls::controls);
return 0;
}
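Both updateControls() and the scaler crop handling further down rely on Rectangle::transformedBetween() to move rectangles between coordinate systems; a small sketch with made-up sizes (a 2x difference between the dewarper and sensor frames), assuming the method scales and translates between the two reference rectangles:

	/* Crop expressed in dewarper input coordinates. */
	Rectangle dewarperCrop(160, 120, 640, 480);
	Rectangle dewarperFrame(0, 0, 1280, 960);
	Rectangle sensorFrame(0, 0, 2560, 1920);

	Rectangle sensorCrop =
		dewarperCrop.transformedBetween(dewarperFrame, sensorFrame);
	/* sensorCrop is now (320, 240)/1280x960. */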
@@ -880,31 +1318,44 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
int ret;
std::unique_ptr<RkISP1CameraData> data =
- std::make_unique<RkISP1CameraData>(this);
-
- ControlInfoMap::Map ctrls;
- ctrls.emplace(std::piecewise_construct,
- std::forward_as_tuple(&controls::AeEnable),
- std::forward_as_tuple(false, true));
+ std::make_unique<RkISP1CameraData>(this, &mainPath_,
+ hasSelfPath_ ? &selfPath_ : nullptr);
- data->controlInfo_ = std::move(ctrls);
-
- data->sensor_ = new CameraSensor(sensor);
- ret = data->sensor_->init();
- if (ret)
- return ret;
+ data->sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!data->sensor_)
+ return -ENODEV;
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
- ret = data->loadIPA();
+ scalerMaxCrop_ = Rectangle(data->sensor_->resolution());
+
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ isp_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ ret = data->loadIPA(media_->hwRevision());
if (ret)
return ret;
- std::set<Stream *> streams{ &data->stream_ };
+ updateControls(data.get());
+
+ std::set<Stream *> streams{
+ &data->mainPathStream_,
+ &data->selfPathStream_,
+ };
+ const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
- Camera::create(this, sensor->name(), streams);
- registerCamera(std::move(camera), std::move(data));
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
return 0;
}
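A rough sketch of the per-frame flow through the DelayedControls instance created above; the control value and frame number are made up, and the push()/get() calls mirror their uses elsewhere in this patch:

	/* IPA-computed sensor controls for an upcoming frame. */
	ControlList sensorControls(sensor->controls());
	sensorControls.set(V4L2_CID_EXPOSURE, 1000);
	delayedCtrls->push(sensorControls);

	/* isp_->frameStart drives applyControls(sequence) at frame start. */

	/* When statistics for frame N arrive, read back what was applied. */
	ControlList applied = delayedCtrls->get(N);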
@@ -915,9 +1366,7 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
- dm.add("rkisp1_resizer_selfpath");
dm.add("rkisp1_resizer_mainpath");
- dm.add("rkisp1_selfpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
@@ -926,20 +1375,36 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (!media_)
return false;
+ if (!media_->hwRevision()) {
+ LOG(RkISP1, Error)
+ << "The rkisp1 driver is too old, v5.11 or newer is required";
+ return false;
+ }
+
+ hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
+
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
- resizer_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_resizer_mainpath");
- if (resizer_->open() < 0)
+ /* Locate and open the optional CSI-2 receiver. */
+ ispSink_ = isp_->entity()->getPadByIndex(0);
+ if (!ispSink_ || ispSink_->links().empty())
return false;
- /* Locate and open the capture video node. */
- video_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_mainpath");
- if (video_->open() < 0)
- return false;
+ pad = ispSink_->links().at(0)->source();
+ if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
+ csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
+ if (csi_->open() < 0)
+ return false;
+
+ ispSink_ = csi_->entity()->getPadByIndex(0);
+ if (!ispSink_)
+ return false;
+ }
+ /* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
return false;
@@ -948,40 +1413,63 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
if (param_->open() < 0)
return false;
- video_->bufferReady.connect(this, &PipelineHandlerRkISP1::bufferReady);
- stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
- param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
+ /* Locate and open the ISP main and self paths. */
+ if (!mainPath_.init(media_))
+ return false;
- /* Configure default links. */
- if (initLinks() < 0) {
- LOG(RkISP1, Error) << "Failed to setup links";
+ if (hasSelfPath_ && !selfPath_.init(media_))
return false;
+
+ mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ if (hasSelfPath_)
+ selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady);
+ stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statBufferReady);
+ param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramBufferReady);
+
+ /* If dewarper is present, create its instance. */
+ DeviceMatch dwp("dw100");
+ dwp.add("dw100-source");
+ dwp.add("dw100-sink");
+
+ std::shared_ptr<MediaDevice> dwpMediaDevice = enumerator->search(dwp);
+ if (dwpMediaDevice) {
+ dewarper_ = std::make_unique<V4L2M2MConverter>(dwpMediaDevice.get());
+ if (dewarper_->isValid()) {
+ dewarper_->outputBufferReady.connect(
+ this, &PipelineHandlerRkISP1::dewarpBufferReady);
+
+ LOG(RkISP1, Info)
+ << "Using DW100 dewarper " << dewarper_->deviceNode();
+ } else {
+ LOG(RkISP1, Warning)
+ << "Found DW100 dewarper " << dewarper_->deviceNode()
+ << " but invalid";
+
+ dewarper_.reset();
+ }
}
/*
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
- pad = isp_->entity()->getPadByIndex(0);
- if (!pad)
- return false;
-
- for (MediaLink *link : pad->links())
- createCamera(link->source()->entity());
+ bool registered = false;
+ for (MediaLink *link : ispSink_->links()) {
+ if (!createCamera(link->source()->entity()))
+ registered = true;
+ }
- return true;
+ return registered;
}
/* -----------------------------------------------------------------------------
* Buffer Handling
*/
-void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
+void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
- RkISP1FrameInfo *info = data->frameInfo_.find(request);
- if (!info)
- return;
+ Request *request = info->request;
if (request->hasPendingBuffers())
return;
@@ -989,41 +1477,148 @@ void PipelineHandlerRkISP1::tryCompleteRequest(Request *request)
if (!info->metadataProcessed)
return;
- if (!info->paramDequeued)
+ if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
- completeRequest(activeCamera_, request);
+ completeRequest(request);
}
-void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::imageBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
- Request *request = buffer->request();
- data->timeline_.bufferReady(buffer);
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
- if (data->frame_ <= buffer->metadata().sequence)
- data->frame_ = buffer->metadata().sequence + 1;
+ const FrameMetadata &metadata = buffer->metadata();
+ Request *request = info->request;
+
+ if (metadata.status != FrameMetadata::FrameCancelled) {
+ /*
+ * Record the sensor's timestamp in the request metadata.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal.
+ */
+ request->metadata().set(controls::SensorTimestamp,
+ metadata.timestamp);
+
+ if (isRaw_) {
+ const ControlList &ctrls =
+ data->delayedCtrls_->get(metadata.sequence);
+ data->ipa_->processStats(info->frame, 0, ctrls);
+ }
+ } else {
+ if (isRaw_)
+ info->metadataProcessed = true;
+ }
+
+ if (!useDewarper_) {
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+
+ return;
+ }
+
+ /* Do not queue cancelled frames to dewarper. */
+ if (metadata.status == FrameMetadata::FrameCancelled) {
+ /*
+		 * The i.MX8MP is the only known platform with a dewarper. It
+		 * has no self path, hence only main path buffer completion is
+		 * required.
+		 *
+		 * Also, we cannot completeBuffer(request, buffer) here, as
+		 * the buffer is an internal one (between ISP and dewarper)
+		 * and is not associated with any specific request. The
+		 * request buffer associated with the main path stream is the
+		 * one that needs to be completed (not the internal buffer).
+ */
+ for (auto it : request->buffers()) {
+ if (it.first == &data->mainPathStream_)
+ completeBuffer(request, it.second);
+ }
+
+ tryCompleteRequest(info);
+ return;
+ }
+
+ /* Handle scaler crop control. */
+ const auto &crop = request->controls().get(controls::ScalerCrop);
+ if (crop) {
+ Rectangle rect = crop.value();
- completeBuffer(activeCamera_, request, buffer);
- tryCompleteRequest(request);
+ /*
+	 * ScalerCrop is specified in sensor coordinates, so it needs to
+	 * be transformed into dewarper coordinates. We can safely assume
+	 * that the maximum crop limit contains the full FOV of the
+	 * dewarper.
+ */
+ std::pair<Rectangle, Rectangle> cropLimits =
+ dewarper_->inputCropBounds(&data->mainPathStream_);
+
+ rect = rect.transformedBetween(scalerMaxCrop_, cropLimits.second);
+ int ret = dewarper_->setInputCrop(&data->mainPathStream_,
+ &rect);
+ rect = rect.transformedBetween(cropLimits.second, scalerMaxCrop_);
+ if (!ret && rect != crop.value()) {
+ /*
+ * If the rectangle is changed by setInputCrop on the
+ * dewarper, log a debug message and cache the actual
+ * applied rectangle for metadata reporting.
+ */
+ LOG(RkISP1, Debug)
+ << "Applied rectangle " << rect.toString()
+ << " differs from requested " << crop.value().toString();
+ }
+
+ activeCrop_ = rect;
+ }
+
+ /*
+ * Queue input and output buffers to the dewarper. The output
+ * buffers for the dewarper are the buffers of the request, supplied
+ * by the application.
+ */
+ int ret = dewarper_->queueBuffers(buffer, request->buffers());
+ if (ret < 0)
+ LOG(RkISP1, Error) << "Cannot queue buffers to dewarper: "
+ << strerror(-ret);
+
+ request->metadata().set(controls::ScalerCrop, activeCrop_.value());
}
-void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::dewarpBufferReady(FrameBuffer *buffer)
+{
+ ASSERT(activeCamera_);
+ RkISP1CameraData *data = cameraData(activeCamera_);
+ Request *request = buffer->request();
+
+ RkISP1FrameInfo *info = data->frameInfo_.find(buffer->request());
+ if (!info)
+ return;
+
+ completeBuffer(request, buffer);
+ tryCompleteRequest(info);
+}
+
+void PipelineHandlerRkISP1::paramBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
+ if (!info)
+ return;
info->paramDequeued = true;
- tryCompleteRequest(info->request);
+ tryCompleteRequest(info);
}
-void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
+void PipelineHandlerRkISP1::statBufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
@@ -1032,12 +1627,19 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
if (!info)
return;
- IPAOperationData op;
- op.operation = RKISP1_IPA_EVENT_SIGNAL_STAT_BUFFER;
- op.data = { info->frame, info->statBuffer->cookie() };
- data->ipa_->processEvent(op);
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ info->metadataProcessed = true;
+ tryCompleteRequest(info);
+ return;
+ }
+
+ if (data->frame_ <= buffer->metadata().sequence)
+ data->frame_ = buffer->metadata().sequence + 1;
+
+ data->ipa_->processStats(info->frame, info->statBuffer->cookie(),
+ data->delayedCtrls_->get(buffer->metadata().sequence));
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
new file mode 100644
index 00000000..eee5b09e
--- /dev/null
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Rockchip ISP1 path helper
+ */
+
+#include "rkisp1_path.h"
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RkISP1)
+
+namespace {
+
+/* The raw formats in this table must be kept in sync with rkisp1.cpp. */
+const std::map<PixelFormat, uint32_t> formatToMediaBus = {
+ { formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
+ { formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
+ { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
+ { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
+ { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
+ { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
+ { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
+ { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
+ { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
+ { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
+ { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
+ { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
+ { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
+};
+
+} /* namespace */
+
+RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats)
+ : name_(name), running_(false), formats_(formats), link_(nullptr)
+{
+}
+
+bool RkISP1Path::init(MediaDevice *media)
+{
+ std::string resizer = std::string("rkisp1_resizer_") + name_ + "path";
+ std::string video = std::string("rkisp1_") + name_ + "path";
+
+ resizer_ = V4L2Subdevice::fromEntityName(media, resizer);
+ if (resizer_->open() < 0)
+ return false;
+
+ video_ = V4L2VideoDevice::fromEntityName(media, video);
+ if (video_->open() < 0)
+ return false;
+
+ populateFormats();
+
+ link_ = media->link("rkisp1_isp", 2, resizer, 0);
+ if (!link_)
+ return false;
+
+ return true;
+}
+
+void RkISP1Path::populateFormats()
+{
+ V4L2VideoDevice::Formats v4l2Formats = video_->formats();
+ if (v4l2Formats.empty()) {
+ LOG(RkISP1, Info)
+ << "Failed to enumerate supported formats and sizes, using defaults";
+
+ for (const PixelFormat &format : formats_)
+ streamFormats_.insert(format);
+ return;
+ }
+
+ minResolution_ = { 65535, 65535 };
+ maxResolution_ = { 0, 0 };
+
+ std::vector<PixelFormat> formats;
+ for (const auto &[format, sizes] : v4l2Formats) {
+ const PixelFormat pixelFormat = format.toPixelFormat();
+
+ /*
+ * As a defensive measure, skip any pixel format exposed by the
+ * driver that we don't know about. This ensures that looking up
+ * formats in formatToMediaBus using a key from streamFormats_
+ * will never fail in any of the other functions.
+ */
+ if (!formatToMediaBus.count(pixelFormat)) {
+ LOG(RkISP1, Warning)
+ << "Unsupported pixel format " << pixelFormat;
+ continue;
+ }
+
+ streamFormats_.insert(pixelFormat);
+
+ for (const auto &size : sizes) {
+ if (minResolution_ > size.min)
+ minResolution_ = size.min;
+ if (maxResolution_ < size.max)
+ maxResolution_ = size.max;
+ }
+ }
+}
+
+/**
+ * \brief Filter the sensor resolutions that can be supported
+ * \param[in] sensor The camera sensor
+ *
+ * This function retrieves all the sizes supported by the sensor and
+ * This function retrieves all the sizes supported by the sensor and keeps
+ * only the resolutions that the pipeline can support. It is possible that
+ * the sensor's maximum output resolution is higher than the ISP maximum
+ * input. In that case, this function filters out the resolutions that
+ * cannot be supported and returns the largest sensor resolution that the
+ * pipeline can handle.
+ * \return Maximum sensor size supported on the pipeline
+ */
+Size RkISP1Path::filterSensorResolution(const CameraSensor *sensor)
+{
+ auto iter = sensorSizesMap_.find(sensor);
+ if (iter != sensorSizesMap_.end())
+ return iter->second.back();
+
+ std::vector<Size> &sizes = sensorSizesMap_[sensor];
+ for (unsigned int code : sensor->mbusCodes()) {
+ for (const Size &size : sensor->sizes(code)) {
+ if (size.width > maxResolution_.width ||
+ size.height > maxResolution_.height)
+ continue;
+
+ sizes.push_back(size);
+ }
+ }
+
+ /* Sort in increasing order and remove duplicates. */
+ std::sort(sizes.begin(), sizes.end());
+ auto last = std::unique(sizes.begin(), sizes.end());
+ sizes.erase(last, sizes.end());
+
+ return sizes.back();
+}
+
+StreamConfiguration
+RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
+ StreamRole role)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
+ /* Min and max resolutions to populate the available stream formats. */
+ Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ Size minResolution = minResolution_.expandedToAspectRatio(resolution);
+
+ /* The desired stream size, bound to the max resolution. */
+ Size streamSize = size.boundedTo(maxResolution);
+
+ /* Create the list of supported stream formats. */
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ /* Populate stream formats for non-RAW configurations. */
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
+ if (role == StreamRole::Raw)
+ continue;
+
+ streamFormats[format] = { { minResolution, maxResolution } };
+ continue;
+ }
+
+ /* Skip RAW formats for non-raw roles. */
+ if (role != StreamRole::Raw)
+ continue;
+
+ /* Populate stream formats for RAW configurations. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ /* Skip formats not supported by sensor. */
+ continue;
+
+ /* Add all the RAW sizes the sensor can produce for this code. */
+ for (const auto &rawSize : sensor->sizes(mbusCode)) {
+ if (rawSize.width > maxResolution_.width ||
+ rawSize.height > maxResolution_.height)
+ continue;
+
+ streamFormats[format].push_back({ rawSize, rawSize });
+ }
+
+ /*
+ * Store the raw format with the highest bits per pixel for
+ * later usage.
+ */
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ /*
+ * Pick a suitable pixel format for the role. Raw streams need to use a
+ * raw format, processed streams use NV12 by default.
+ */
+ PixelFormat format;
+
+ if (role == StreamRole::Raw) {
+ if (!rawFormat.isValid()) {
+ LOG(RkISP1, Error)
+ << "Sensor " << sensor->model()
+ << " doesn't support raw capture";
+ return {};
+ }
+
+ format = rawFormat;
+ } else {
+ format = formats::NV12;
+ }
+
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = format;
+ cfg.size = streamSize;
+ cfg.bufferCount = RKISP1_BUFFER_COUNT;
+
+ return cfg;
+}
+
+CameraConfiguration::Status
+RkISP1Path::validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg)
+{
+ const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
+ Size resolution = filterSensorResolution(sensor);
+
+ const StreamConfiguration reqCfg = *cfg;
+ CameraConfiguration::Status status = CameraConfiguration::Valid;
+
+ /*
+ * Validate the pixel format. If the requested format isn't supported,
+ * default to either NV12 (all versions of the ISP are guaranteed to
+ * support NV12 on both the main and self paths) if the requested format
+ * is not a raw format, or to the supported raw format with the highest
+ * bits per pixel otherwise.
+ */
+ unsigned int rawBitsPerPixel = 0;
+ PixelFormat rawFormat;
+ bool found = false;
+
+ for (const auto &format : streamFormats_) {
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
+ /* Skip raw formats not supported by the sensor. */
+ uint32_t mbusCode = formatToMediaBus.at(format);
+ if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
+ mbusCodes.end())
+ continue;
+
+ /*
+ * If the bits per pixel is supplied from the sensor
+ * configuration, choose a raw format that complies with
+ * it. Otherwise, store the raw format with the highest
+ * bits per pixel for later usage.
+ */
+ if (sensorConfig && info.bitsPerPixel != sensorConfig->bitDepth)
+ continue;
+
+ if (info.bitsPerPixel > rawBitsPerPixel) {
+ rawBitsPerPixel = info.bitsPerPixel;
+ rawFormat = format;
+ }
+ }
+
+ if (cfg->pixelFormat == format) {
+ found = true;
+ break;
+ }
+ }
+
+ if (sensorConfig && !rawFormat.isValid())
+ return CameraConfiguration::Invalid;
+
+ bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
+ PixelFormatInfo::ColourEncodingRAW;
+
+ /*
+ * If no raw format supported by the sensor has been found, use a
+ * processed format.
+ */
+ if (!rawFormat.isValid())
+ isRaw = false;
+
+ if (!found)
+ cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
+
+ Size minResolution;
+ Size maxResolution;
+
+ if (isRaw) {
+ /*
+ * Use the sensor output size closest to the requested stream
+ * size while ensuring the output size doesn't exceed ISP limits.
+ *
+ * As 'resolution' is the largest sensor resolution
+ * supported by the ISP, CameraSensor::getFormat() will never
+ * return a V4L2SubdeviceFormat with a larger size.
+ */
+ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
+ cfg->size.boundTo(resolution);
+
+ Size rawSize = sensorConfig ? sensorConfig->outputSize
+ : cfg->size;
+
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, rawSize);
+
+ if (sensorConfig &&
+ sensorConfig->outputSize != sensorFormat.size)
+ return CameraConfiguration::Invalid;
+
+ minResolution = sensorFormat.size;
+ maxResolution = sensorFormat.size;
+ } else if (sensorConfig) {
+ /*
+ * We have already ensured 'rawFormat' has the matching bit
+ * depth with sensorConfig.bitDepth hence, only validate the
+ * sensorConfig's output size here.
+ */
+ Size sensorSize = sensorConfig->outputSize;
+
+ if (sensorSize > resolution)
+ return CameraConfiguration::Invalid;
+
+ uint32_t mbusCode = formatToMediaBus.at(rawFormat);
+ V4L2SubdeviceFormat sensorFormat =
+ sensor->getFormat({ mbusCode }, sensorSize);
+
+ if (sensorFormat.size != sensorSize)
+ return CameraConfiguration::Invalid;
+
+ minResolution = minResolution_.expandedToAspectRatio(sensorSize);
+ maxResolution = maxResolution_.boundedTo(sensorSize)
+ .boundedToAspectRatio(sensorSize);
+ } else {
+ /*
+ * Adjust the size based on the sensor resolution and absolute
+ * limits of the ISP.
+ */
+ minResolution = minResolution_.expandedToAspectRatio(resolution);
+ maxResolution = maxResolution_.boundedToAspectRatio(resolution)
+ .boundedTo(resolution);
+ }
+
+ cfg->size.boundTo(maxResolution);
+ cfg->size.expandTo(minResolution);
+ cfg->bufferCount = RKISP1_BUFFER_COUNT;
+
+ V4L2DeviceFormat format;
+ format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
+ format.size = cfg->size;
+
+ int ret = video_->tryFormat(&format);
+ if (ret)
+ return CameraConfiguration::Invalid;
+
+ cfg->stride = format.planes[0].bpl;
+ cfg->frameSize = format.planes[0].size;
+
+ if (cfg->pixelFormat != reqCfg.pixelFormat || cfg->size != reqCfg.size) {
+ LOG(RkISP1, Debug)
+ << "Adjusting format from " << reqCfg.toString()
+ << " to " << cfg->toString();
+ status = CameraConfiguration::Adjusted;
+ }
+
+ return status;
+}
+
+int RkISP1Path::configure(const StreamConfiguration &config,
+ const V4L2SubdeviceFormat &inputFormat)
+{
+ int ret;
+
+ V4L2SubdeviceFormat ispFormat = inputFormat;
+
+ ret = resizer_->setFormat(0, &ispFormat);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Crop on the resizer input to maintain FOV before downscaling.
+ *
+ * Note that this does not apply to devices without DUAL_CROP support
+	 * (like the i.MX8MP), where the cropping needs to be done on the
+	 * ImageStabilizer block on the ISP source pad and is therefore
+	 * configured before this stage. For simplicity we still set the crop
+	 * here; the kernel driver ignores it because the hardware lacks the
+	 * capability.
+ *
+ * Alignment to a multiple of 2 pixels is required by the resizer.
+ */
+ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
+ .alignedUpTo(2, 2);
+ Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
+ ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
+ if (ret < 0)
+ return ret;
+
+ LOG(RkISP1, Debug)
+ << "Configured " << name_ << " resizer input pad with "
+ << ispFormat << " crop " << rect;
+
+ ispFormat.size = config.size;
+
+ LOG(RkISP1, Debug)
+ << "Configuring " << name_ << " resizer output pad with "
+ << ispFormat;
+
+ /*
+ * The configuration has been validated, the pixel format is guaranteed
+ * to be supported and thus found in formatToMediaBus.
+ */
+ ispFormat.code = formatToMediaBus.at(config.pixelFormat);
+
+ ret = resizer_->setFormat(1, &ispFormat);
+ if (ret < 0)
+ return ret;
+
+ LOG(RkISP1, Debug)
+ << "Configured " << name_ << " resizer output pad with "
+ << ispFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+ V4L2DeviceFormat outputFormat;
+ outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
+ outputFormat.size = config.size;
+ outputFormat.planesCount = info.numPlanes();
+
+ ret = video_->setFormat(&outputFormat);
+ if (ret)
+ return ret;
+
+ if (outputFormat.size != config.size ||
+ outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
+ LOG(RkISP1, Error)
+ << "Unable to configure capture in " << config.toString();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int RkISP1Path::start()
+{
+ int ret;
+
+ if (running_)
+ return -EBUSY;
+
+ /* \todo Make buffer count user configurable. */
+ ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
+ if (ret)
+ return ret;
+
+ ret = video_->streamOn();
+ if (ret) {
+ LOG(RkISP1, Error)
+ << "Failed to start " << name_ << " path";
+
+ video_->releaseBuffers();
+ return ret;
+ }
+
+ running_ = true;
+
+ return 0;
+}
+
+void RkISP1Path::stop()
+{
+ if (!running_)
+ return;
+
+ if (video_->streamOff())
+ LOG(RkISP1, Warning) << "Failed to stop " << name_ << " path";
+
+ video_->releaseBuffers();
+
+ running_ = false;
+}
+
+/*
+ * \todo Remove the hardcoded formats once all users will have migrated to a
+ * recent enough kernel.
+ */
+namespace {
+constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
+ formats::YUYV,
+ formats::NV16,
+ formats::NV61,
+ formats::NV21,
+ formats::NV12,
+ formats::R8,
+ formats::SBGGR8,
+ formats::SGBRG8,
+ formats::SGRBG8,
+ formats::SRGGB8,
+ formats::SBGGR10,
+ formats::SGBRG10,
+ formats::SGRBG10,
+ formats::SRGGB10,
+ formats::SBGGR12,
+ formats::SGBRG12,
+ formats::SGRBG12,
+ formats::SRGGB12,
+};
+
+constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
+ formats::YUYV,
+ formats::NV16,
+ formats::NV61,
+ formats::NV21,
+ formats::NV12,
+ formats::R8,
+ formats::RGB565,
+ formats::XRGB8888,
+};
+} /* namespace */
+
+RkISP1MainPath::RkISP1MainPath()
+ : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS)
+{
+}
+
+RkISP1SelfPath::RkISP1SelfPath()
+ : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS)
+{
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
new file mode 100644
index 00000000..2a1ef0ab
--- /dev/null
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Rockchip ISP1 path helper
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <libcamera/base/signal.h>
+#include <libcamera/base/span.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+class CameraSensor;
+class MediaDevice;
+class SensorConfiguration;
+class V4L2Subdevice;
+struct StreamConfiguration;
+struct V4L2SubdeviceFormat;
+
+class RkISP1Path
+{
+public:
+ RkISP1Path(const char *name, const Span<const PixelFormat> &formats);
+
+ bool init(MediaDevice *media);
+
+ int setEnabled(bool enable) { return link_->setEnabled(enable); }
+ bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
+
+ StreamConfiguration generateConfiguration(const CameraSensor *sensor,
+ const Size &resolution,
+ StreamRole role);
+ CameraConfiguration::Status validate(const CameraSensor *sensor,
+ const std::optional<SensorConfiguration> &sensorConfig,
+ StreamConfiguration *cfg);
+
+ int configure(const StreamConfiguration &config,
+ const V4L2SubdeviceFormat &inputFormat);
+
+ int exportBuffers(unsigned int bufferCount,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+ {
+ return video_->exportBuffers(bufferCount, buffers);
+ }
+
+ int start();
+ void stop();
+
+ int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); }
+ Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
+ const Size &maxResolution() const { return maxResolution_; }
+
+private:
+ void populateFormats();
+ Size filterSensorResolution(const CameraSensor *sensor);
+
+ static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
+
+ const char *name_;
+ bool running_;
+
+ const Span<const PixelFormat> formats_;
+ std::set<PixelFormat> streamFormats_;
+ Size minResolution_;
+ Size maxResolution_;
+
+ std::unique_ptr<V4L2Subdevice> resizer_;
+ std::unique_ptr<V4L2VideoDevice> video_;
+ MediaLink *link_;
+
+ /*
+	 * Map from camera sensors to the sizes (in increasing order) that
+	 * are guaranteed to be supported by the pipeline.
+ */
+ std::map<const CameraSensor *, std::vector<Size>> sensorSizesMap_;
+};
+
+class RkISP1MainPath : public RkISP1Path
+{
+public:
+ RkISP1MainPath();
+};
+
+class RkISP1SelfPath : public RkISP1Path
+{
+public:
+ RkISP1SelfPath();
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/timeline.cpp b/src/libcamera/pipeline/rkisp1/timeline.cpp
deleted file mode 100644
index f6c6434d..00000000
--- a/src/libcamera/pipeline/rkisp1/timeline.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * timeline.cpp - Timeline for per-frame control
- */
-
-#include "timeline.h"
-
-#include "log.h"
-
-/**
- * \file timeline.h
- * \brief Timeline for per-frame control
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Timeline)
-
-/**
- * \class FrameAction
- * \brief Action that can be schedule on a Timeline
- *
- * A frame action is an event schedule to be executed on a Timeline. A frame
- * action has two primal attributes a frame number and a type.
- *
- * The frame number describes the frame to which the action is associated. The
- * type is a numerical ID which identifies the action within the pipeline and
- * IPA protocol.
- */
-
-/**
- * \class Timeline
- * \brief Executor of FrameAction
- *
- * The timeline has three primary functions:
- *
- * 1. Keep track of the Start of Exposure (SOE) for every frame processed by
- * the hardware. Using this information it shall keep an up-to-date estimate
- * of the frame interval (time between two consecutive SOE events).
- *
- * The estimated frame interval together with recorded SOE events are the
- * foundation for how the timeline schedule FrameAction at specific points
- * in time.
- * \todo Improve the frame interval estimation algorithm.
- *
- * 2. Keep track of current delays for different types of actions. The delays
- * for different actions might differ during a capture session. Exposure time
- * effects the over all FPS and different ISP parameters might impacts its
- * processing time.
- *
- * The action type delays shall be updated by the IPA in conjunction with
- * how it changes the capture parameters.
- *
- * 3. Schedule actions on the timeline. This is the process of taking a
- * FrameAction which contains an abstract description of what frame and
- * what type of action it contains and turning that into an time point
- * and make sure the action is executed at that time.
- */
-
-Timeline::Timeline()
- : frameInterval_(0)
-{
- timer_.timeout.connect(this, &Timeline::timeout);
-}
-
-/**
- * \brief Reset and stop the timeline
- *
- * The timeline needs to be reset when the timeline should no longer execute
- * actions. A timeline should be reset between two capture sessions to prevent
- * the old capture session to effect the second one.
- */
-void Timeline::reset()
-{
- timer_.stop();
-
- actions_.clear();
- history_.clear();
-}
-
-/**
- * \brief Schedule an action on the timeline
- * \param[in] action FrameAction to schedule
- *
- * The act of scheduling an action to the timeline is the process of taking
- * the properties of the action (type, frame and time offsets) and translating
- * that to a time point using the current values for the action type timings
- * value recorded in the timeline. If an action is scheduled too late, execute
- * it immediately.
- */
-void Timeline::scheduleAction(std::unique_ptr<FrameAction> action)
-{
- unsigned int lastFrame;
- utils::time_point lastTime;
-
- if (history_.empty()) {
- lastFrame = 0;
- lastTime = std::chrono::steady_clock::now();
- } else {
- lastFrame = history_.back().first;
- lastTime = history_.back().second;
- }
-
- /*
- * Calculate when the action shall be schedule by first finding out how
- * many frames in the future the action acts on and then add the actions
- * frame offset. After the spatial frame offset is found out translate
- * that to a time point by using the last estimated start of exposure
- * (SOE) as the fixed offset. Lastly add the action time offset to the
- * time point.
- */
- int frame = action->frame() - lastFrame + frameOffset(action->type());
- utils::time_point deadline = lastTime + frame * frameInterval_
- + timeOffset(action->type());
-
- utils::time_point now = std::chrono::steady_clock::now();
- if (deadline < now) {
- LOG(Timeline, Warning)
- << "Action scheduled too late "
- << utils::time_point_to_string(deadline)
- << ", run now " << utils::time_point_to_string(now);
- action->run();
- } else {
- actions_.emplace(deadline, std::move(action));
- updateDeadline();
- }
-}
-
-void Timeline::notifyStartOfExposure(unsigned int frame, utils::time_point time)
-{
- history_.push_back(std::make_pair(frame, time));
-
- if (history_.size() <= HISTORY_DEPTH / 2)
- return;
-
- while (history_.size() > HISTORY_DEPTH)
- history_.pop_front();
-
- /* Update esitmated time between two start of exposures. */
- utils::duration sumExposures(0);
- unsigned int numExposures = 0;
-
- utils::time_point lastTime;
- for (auto it = history_.begin(); it != history_.end(); it++) {
- if (it != history_.begin()) {
- sumExposures += it->second - lastTime;
- numExposures++;
- }
-
- lastTime = it->second;
- }
-
- frameInterval_ = sumExposures;
- if (numExposures)
- frameInterval_ /= numExposures;
-}
-
-int Timeline::frameOffset(unsigned int type) const
-{
- const auto it = delays_.find(type);
- if (it == delays_.end()) {
- LOG(Timeline, Error)
- << "No frame offset set for action type " << type;
- return 0;
- }
-
- return it->second.first;
-}
-
-utils::duration Timeline::timeOffset(unsigned int type) const
-{
- const auto it = delays_.find(type);
- if (it == delays_.end()) {
- LOG(Timeline, Error)
- << "No time offset set for action type " << type;
- return utils::duration::zero();
- }
-
- return it->second.second;
-}
-
-void Timeline::setRawDelay(unsigned int type, int frame, utils::duration time)
-{
- delays_[type] = std::make_pair(frame, time);
-}
-
-void Timeline::updateDeadline()
-{
- if (actions_.empty())
- return;
-
- const utils::time_point &deadline = actions_.begin()->first;
-
- if (timer_.isRunning() && deadline >= timer_.deadline())
- return;
-
- if (deadline <= std::chrono::steady_clock::now()) {
- timeout(&timer_);
- return;
- }
-
- timer_.start(deadline);
-}
-
-void Timeline::timeout(Timer *timer)
-{
- utils::time_point now = std::chrono::steady_clock::now();
-
- for (auto it = actions_.begin(); it != actions_.end();) {
- const utils::time_point &sched = it->first;
-
- if (sched > now)
- break;
-
- FrameAction *action = it->second.get();
-
- action->run();
-
- it = actions_.erase(it);
- }
-
- updateDeadline();
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rkisp1/timeline.h b/src/libcamera/pipeline/rkisp1/timeline.h
deleted file mode 100644
index 9d30e4ea..00000000
--- a/src/libcamera/pipeline/rkisp1/timeline.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * timeline.h - Timeline for per-frame controls
- */
-#ifndef __LIBCAMERA_TIMELINE_H__
-#define __LIBCAMERA_TIMELINE_H__
-
-#include <list>
-#include <map>
-
-#include <libcamera/timer.h>
-
-#include "utils.h"
-
-namespace libcamera {
-
-class FrameAction
-{
-public:
- FrameAction(unsigned int frame, unsigned int type)
- : frame_(frame), type_(type) {}
-
- virtual ~FrameAction() {}
-
- unsigned int frame() const { return frame_; }
- unsigned int type() const { return type_; }
-
- virtual void run() = 0;
-
-private:
- unsigned int frame_;
- unsigned int type_;
-};
-
-class Timeline
-{
-public:
- Timeline();
- virtual ~Timeline() {}
-
- virtual void reset();
- virtual void scheduleAction(std::unique_ptr<FrameAction> action);
- virtual void notifyStartOfExposure(unsigned int frame, utils::time_point time);
-
- utils::duration frameInterval() const { return frameInterval_; }
-
-protected:
- int frameOffset(unsigned int type) const;
- utils::duration timeOffset(unsigned int type) const;
-
- void setRawDelay(unsigned int type, int frame, utils::duration time);
-
- std::map<unsigned int, std::pair<int, utils::duration>> delays_;
-
-private:
- static constexpr unsigned int HISTORY_DEPTH = 10;
-
- void timeout(Timer *timer);
- void updateDeadline();
-
- std::list<std::pair<unsigned int, utils::time_point>> history_;
- std::multimap<utils::time_point, std::unique_ptr<FrameAction>> actions_;
- utils::duration frameInterval_;
-
- Timer timer_;
-};
-
-} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_TIMELINE_H__ */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.cpp b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
new file mode 100644
index 00000000..ad50a7c8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.cpp
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#include "delayed_controls.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/controls.h>
+
+#include "libcamera/internal/v4l2_device.h"
+
+/**
+ * \file delayed_controls.h
+ * \brief Helper to deal with controls that take effect with a delay
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPiDelayedControls)
+
+namespace RPi {
+
+/**
+ * \class DelayedControls
+ * \brief Helper to deal with controls that take effect with a delay
+ *
+ * Some sensor controls take effect with a delay as the sensor needs time to
+ * adjust, for example exposure and analog gain. This is a helper class to deal
+ * with such controls and the intended users are pipeline handlers.
+ *
+ * The idea is to extend the concept of pipeline buffer depth, which the
+ * application needs to maintain, to also cover controls. Just as with buffer
+ * depth, if the application keeps the number of queued requests above the
+ * control depth, the controls are guaranteed to take effect for the correct
+ * request. The control depth is determined by the control with the greatest
+ * delay.
+ */
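+
+/*
+ * A minimal usage sketch (illustrative only: the delay values, the device
+ * pointer and the surrounding request handling are assumptions, not taken
+ * from a real sensor):
+ *
+ *	std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ *		{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
+ *		{ V4L2_CID_EXPOSURE, { 2, false } },
+ *		{ V4L2_CID_VBLANK, { 2, true } },
+ *	};
+ *	RPi::DelayedControls delayed(device, params);
+ *
+ *	delayed.push(request->controls(), cookie);	// per queued request
+ *	delayed.applyControls(sequence);		// on each frame start
+ */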
+
+/**
+ * \struct DelayedControls::ControlParams
+ * \brief Parameters associated with controls handled by the \a DelayedControls
+ * helper class
+ *
+ * \var ControlParams::delay
+ * \brief Frame delay from setting the control on a sensor device to when it is
+ * consumed during framing.
+ *
+ * \var ControlParams::priorityWrite
+ * \brief Flag to indicate that this control must be applied ahead of, and
+ * separately from, the other controls.
+ *
+ * Typically set for the \a V4L2_CID_VBLANK control so that the device driver
+ * does not reject \a V4L2_CID_EXPOSURE control values that may fall outside
+ * the bounds set by the existing vertical blanking, but within the new
+ * blanking bounds.
+ */
+
+/**
+ * \brief Construct a DelayedControls instance
+ * \param[in] device The V4L2 device the controls have to be applied to
+ * \param[in] controlParams Map of the numerical V4L2 control ids to their
+ * associated control parameters.
+ *
+ * The control parameters comprise delays (in frames) and a priority write
+ * flag. If this flag is set, the relevant control is written separately from,
+ * and ahead of, the rest of the batched controls.
+ *
+ * Only controls specified in \a controlParams are handled. To mix delayed
+ * controls with controls that take effect immediately, list the immediate
+ * controls in the \a controlParams map with a delay value of 0.
+ */
+DelayedControls::DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams)
+ : device_(device), maxDelay_(0)
+{
+ const ControlInfoMap &controls = device_->controls();
+
+ /*
+ * Create a map of control ids to delays for controls exposed by the
+ * device.
+ */
+ for (auto const &param : controlParams) {
+ auto it = controls.find(param.first);
+ if (it == controls.end()) {
+ LOG(RPiDelayedControls, Error)
+ << "Delay request for control id "
+ << utils::hex(param.first)
+ << " but control is not exposed by device "
+ << device_->deviceNode();
+ continue;
+ }
+
+ const ControlId *id = it->first;
+
+ controlParams_[id] = param.second;
+
+ LOG(RPiDelayedControls, Debug)
+ << "Set a delay of " << controlParams_[id].delay
+ << " and priority write flag " << controlParams_[id].priorityWrite
+ << " for " << id->name();
+
+ maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
+ }
+
+ reset(0);
+}
+
+/**
+ * \brief Reset state machine
+ * \param[in] cookie Opaque value to associate with the initial state
+ *
+ * Resets the state machine to a starting position based on control values
+ * retrieved from the device.
+ */
+void DelayedControls::reset(unsigned int cookie)
+{
+ queueCount_ = 1;
+ writeCount_ = 0;
+ cookies_[0] = cookie;
+
+ /* Retrieve control as reported by the device. */
+ std::vector<uint32_t> ids;
+ for (auto const &param : controlParams_)
+ ids.push_back(param.first->id());
+
+ ControlList controls = device_->getControls(ids);
+
+ /* Seed the control queue with the controls reported by the device. */
+ values_.clear();
+ for (const auto &ctrl : controls) {
+ const ControlId *id = device_->controls().idmap().at(ctrl.first);
+ /*
+		 * Do not mark this control value as updated, as it does not
+		 * need to be written to the device on startup.
+ */
+ values_[id][0] = Info(ctrl.second, false);
+ }
+}
+
+/**
+ * \brief Push a set of controls on the queue
+ * \param[in] controls List of controls to add to the device queue
+ * \param[in] cookie Opaque value returned by get() together with the controls
+ *
+ * Push a set of controls to the control queue. This increases the control queue
+ * depth by one.
+ *
+ * \returns true if \a controls are accepted, or false otherwise
+ */
+bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
+{
+ /* Copy state from previous frame. */
+ for (auto &ctrl : values_) {
+ Info &info = ctrl.second[queueCount_];
+ info = values_[ctrl.first][queueCount_ - 1];
+ info.updated = false;
+ }
+
+ /* Update with new controls. */
+ const ControlIdMap &idmap = device_->controls().idmap();
+ for (const auto &control : controls) {
+ const auto &it = idmap.find(control.first);
+ if (it == idmap.end()) {
+ LOG(RPiDelayedControls, Warning)
+ << "Unknown control " << control.first;
+ return false;
+ }
+
+ const ControlId *id = it->second;
+
+ if (controlParams_.find(id) == controlParams_.end())
+ return false;
+
+ Info &info = values_[id][queueCount_];
+
+ info = Info(control.second);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Queuing " << id->name()
+ << " to " << info.toString()
+ << " at index " << queueCount_;
+ }
+
+ cookies_[queueCount_] = cookie;
+ queueCount_++;
+
+ return true;
+}
+
+/**
+ * \brief Read back controls in effect at a sequence number
+ * \param[in] sequence The sequence number to get controls for
+ *
+ * Read back what controls were in effect at a specific sequence number. The
+ * history is a ring buffer of 16 entries where new and old values coexist. It
+ * is the caller's responsibility not to read sequence numbers so old that they
+ * have been pushed out of the history.
+ *
+ * Historic values are evicted by pushing new values onto the queue using
+ * push(). The maximum history depth from the current sequence number that
+ * yields valid values is thus 16 minus the number of controls pushed.
+ *
+ * \return The controls at \a sequence number
+ */
+std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
+{
+ unsigned int index = std::max<int>(0, sequence - maxDelay_);
+
+ ControlList out(device_->controls());
+ for (const auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ const Info &info = ctrl.second[index];
+
+ out.set(id->id(), info);
+
+ LOG(RPiDelayedControls, Debug)
+ << "Reading " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+ }
+
+ return { out, cookies_[index] };
+}
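+
+/*
+ * Illustrative note (values assumed): with maxDelay_ == 2, get(10) reads ring
+ * buffer index 8, i.e. the entry queued two slots earlier, whose values are
+ * the ones in effect for frame 10.
+ */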
+
+/**
+ * \brief Inform DelayedControls of the start of a new frame
+ * \param[in] sequence Sequence number of the frame that started
+ *
+ * Inform the state machine that a new frame has started and of its sequence
+ * number. Any user of these helpers is responsible for informing the helper
+ * about the start of every frame. This can easily be connected to the start
+ * of exposure (SOE) V4L2 event.
+ */
+void DelayedControls::applyControls(uint32_t sequence)
+{
+ LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
+
+ /*
+ * Create control list peeking ahead in the value queue to ensure
+ * values are set in time to satisfy the sensor delay.
+ */
+ ControlList out(device_->controls());
+ for (auto &ctrl : values_) {
+ const ControlId *id = ctrl.first;
+ unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
+ unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
+ Info &info = ctrl.second[index];
+
+ if (info.updated) {
+ if (controlParams_[id].priorityWrite) {
+ /*
+ * This control must be written now, it could
+ * affect validity of the other controls.
+ */
+ ControlList priority(device_->controls());
+ priority.set(id->id(), info);
+ device_->setControls(&priority);
+ } else {
+ /*
+ * Batch up the list of controls and write them
+ * at the end of the function.
+ */
+ out.set(id->id(), info);
+ }
+
+ LOG(RPiDelayedControls, Debug)
+ << "Setting " << id->name()
+ << " to " << info.toString()
+ << " at index " << index;
+
+ /* Done with this update, so mark as completed. */
+ info.updated = false;
+ }
+ }
+
+ writeCount_ = sequence + 1;
+
+ while (writeCount_ > queueCount_) {
+ LOG(RPiDelayedControls, Debug)
+ << "Queue is empty, auto queue no-op.";
+ push({}, cookies_[queueCount_ - 1]);
+ }
+
+ device_->setControls(&out);
+}
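+
+/*
+ * Worked example of the peek-ahead above (values assumed): with maxDelay_ == 2,
+ * a control with delay 1 has delayDiff == 1. When frame q + 1 starts
+ * (writeCount_ == q + 1), the value queued at slot q is written and takes
+ * effect from frame q + 2, aligning with a delay-2 control from the same slot
+ * written one frame earlier.
+ */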
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/delayed_controls.h b/src/libcamera/pipeline/rpi/common/delayed_controls.h
new file mode 100644
index 00000000..487b0057
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/delayed_controls.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper to deal with controls that take effect with a delay
+ *
+ * Note: This has been forked from the libcamera core implementation.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <unordered_map>
+#include <utility>
+
+#include <libcamera/controls.h>
+
+namespace libcamera {
+
+class V4L2Device;
+
+namespace RPi {
+
+class DelayedControls
+{
+public:
+ struct ControlParams {
+ unsigned int delay;
+ bool priorityWrite;
+ };
+
+ DelayedControls(V4L2Device *device,
+ const std::unordered_map<uint32_t, ControlParams> &controlParams);
+
+ void reset(unsigned int cookie);
+
+ bool push(const ControlList &controls, unsigned int cookie);
+ std::pair<ControlList, unsigned int> get(uint32_t sequence);
+
+ void applyControls(uint32_t sequence);
+
+private:
+ class Info : public ControlValue
+ {
+ public:
+ Info()
+ : updated(false)
+ {
+ }
+
+ Info(const ControlValue &v, bool updated_ = true)
+ : ControlValue(v), updated(updated_)
+ {
+ }
+
+ bool updated;
+ };
+
+ static constexpr int listSize = 16;
+ template<typename T>
+ class RingBuffer : public std::array<T, listSize>
+ {
+ public:
+ T &operator[](unsigned int index)
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+
+ const T &operator[](unsigned int index) const
+ {
+ return std::array<T, listSize>::operator[](index % listSize);
+ }
+ };
+
+ V4L2Device *device_;
+ std::unordered_map<const ControlId *, ControlParams> controlParams_;
+ unsigned int maxDelay_;
+
+ uint32_t queueCount_;
+ uint32_t writeCount_;
+ std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
+ RingBuffer<unsigned int> cookies_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
new file mode 100644
index 00000000..b2b1a0a6
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'delayed_controls.cpp',
+ 'pipeline_base.cpp',
+ 'rpi_stream.cpp',
+])
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 00000000..4b147fdb
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+PixelFormat mbusCodeToPixelFormat(unsigned int code,
+ BayerFormat::Packing packingReq)
+{
+ BayerFormat bayer = BayerFormat::fromMbusCode(code);
+
+ ASSERT(bayer.isValid());
+
+ bayer.packing = packingReq;
+ PixelFormat pix = bayer.toPixelFormat();
+
+ /*
+ * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+ * variants. So if the PixelFormat returns as invalid, use the non-packed
+ * conversion instead.
+ */
+ if (!pix.isValid()) {
+ bayer.packing = BayerFormat::Packing::None;
+ pix = bayer.toPixelFormat();
+ }
+
+ return pix;
+}
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+ unsigned int mbusCode = sensor->mbusCodes()[0];
+ const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+ return bayer.order == BayerFormat::Order::MONO;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+ ColorSpace::Sycc,
+ ColorSpace::Smpte170m,
+ ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+ for (auto cs : validColorSpaces) {
+ if (colourSpace.primaries == cs.primaries &&
+ colourSpace.transferFunction == cs.transferFunction)
+ return cs;
+ }
+
+ return std::nullopt;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ * non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ * quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+ Status status = Valid;
+ yuvColorSpace_.reset();
+
+ for (auto &cfg : config_) {
+ /* First fix up raw streams to have the "raw" colour space. */
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
+ /* If there was no value here, that doesn't count as "adjusted". */
+ if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+ status = Adjusted;
+ cfg.colorSpace = ColorSpace::Raw;
+ continue;
+ }
+
+ /* Next we need to find our shared colour space. The first valid one will do. */
+ if (cfg.colorSpace && !yuvColorSpace_)
+ yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+ }
+
+ /* If no colour space was given anywhere, choose sYCC. */
+ if (!yuvColorSpace_)
+ yuvColorSpace_ = ColorSpace::Sycc;
+
+ /* Note the version of this that any RGB streams will have to use. */
+ rgbColorSpace_ = yuvColorSpace_;
+ rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ rgbColorSpace_->range = ColorSpace::Range::Full;
+
+ /* Go through the streams again and force everyone to the same colour space. */
+ for (auto &cfg : config_) {
+ if (cfg.colorSpace == ColorSpace::Raw)
+ continue;
+
+ if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+ /* Again, no value means "not adjusted". */
+ if (cfg.colorSpace)
+ status = Adjusted;
+ cfg.colorSpace = yuvColorSpace_;
+ }
+ if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+ /* Be nice, and let the YUV version count as non-adjusted too. */
+ if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+ status = Adjusted;
+ cfg.colorSpace = rgbColorSpace_;
+ }
+ }
+
+ return status;
+}
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /*
+	 * Make sure that if a sensor configuration has been requested, it
+	 * is valid.
+ */
+ if (sensorConfig && !sensorConfig->isValid()) {
+ LOG(RPI, Error) << "Invalid sensor configuration request";
+ return Invalid;
+ }
+
+ status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+ /*
+ * Validate the requested transform against the sensor capabilities and
+ * rotation and store the final combined transform that configure() will
+ * need to apply to the sensor to save us working it out again.
+ */
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = data_->sensor_->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ rawStreams_.clear();
+ outStreams_.clear();
+ unsigned int rawStreamIndex = 0;
+ unsigned int outStreamIndex = 0;
+
+ for (auto &cfg : config_) {
+ if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
+ rawStreams_.emplace_back(rawStreamIndex++, &cfg);
+ else
+ outStreams_.emplace_back(outStreamIndex++, &cfg);
+ }
+
+ /* Sort the streams so the highest resolution is first. */
+ std::sort(rawStreams_.begin(), rawStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ std::sort(outStreams_.begin(), outStreams_.end(),
+ [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+ /* Compute the sensor's format then do any platform specific fixups. */
+ unsigned int bitDepth;
+ Size sensorSize;
+
+ if (sensorConfig) {
+ /* Use the application provided sensor configuration. */
+ bitDepth = sensorConfig->bitDepth;
+ sensorSize = sensorConfig->outputSize;
+ } else if (!rawStreams_.empty()) {
+ /* Use the RAW stream format and size. */
+ BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
+ bitDepth = bayerFormat.bitDepth;
+ sensorSize = rawStreams_[0].cfg->size;
+ } else {
+ bitDepth = defaultRawBitDepth;
+ sensorSize = outStreams_[0].cfg->size;
+ }
+
+ sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
+
+ /*
+ * If a sensor configuration has been requested, it should apply
+ * without modifications.
+ */
+ if (sensorConfig) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
+
+ if (bayer.bitDepth != sensorConfig->bitDepth ||
+ sensorFormat_.size != sensorConfig->outputSize) {
+ LOG(RPI, Error) << "Invalid sensor configuration: "
+ << "bitDepth/size mismatch";
+ return Invalid;
+ }
+ }
+
+ /* Start with some initial generic RAW stream adjustments. */
+ for (auto &raw : rawStreams_) {
+ StreamConfiguration *rawStream = raw.cfg;
+
+ /*
+ * Some sensors change their Bayer order when they are
+ * h-flipped or v-flipped, according to the transform. Adjust
+ * the RAW stream to match the computed sensor format by
+ * applying the sensor Bayer order resulting from the transform
+ * to the user request.
+ */
+
+ BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+ cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
+
+ if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
+ rawStream->pixelFormat = cfgBayer.toPixelFormat();
+ status = Adjusted;
+ }
+ }
+
+ /* Do any platform specific fixups. */
+ Status st = data_->platformValidate(this);
+ if (st == Invalid)
+ return Invalid;
+ else if (st == Adjusted)
+ status = Adjusted;
+
+ /* Further fixups on the RAW streams. */
+ for (auto &raw : rawStreams_) {
+ int ret = raw.dev->tryFormat(&raw.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
+ status = Adjusted;
+ }
+
+ /* Further fixups on the ISP output streams. */
+ for (auto &out : outStreams_) {
+
+ /*
+ * We want to send the associated YCbCr info through to the driver.
+ *
+ * But for RGB streams, the YCbCr info gets overwritten on the way back
+ * so we must check against what the stream cfg says, not what we actually
+ * requested (which carefully included the YCbCr info)!
+ */
+ out.format.colorSpace = yuvColorSpace_;
+
+ LOG(RPI, Debug)
+ << "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
+
+ int ret = out.dev->tryFormat(&out.format);
+ if (ret)
+ return Invalid;
+
+ if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
+ status = Adjusted;
+ }
+
+ return status;
+}
+
+bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
+{
+ /* The code below would return true for raw mono streams, so weed those out first. */
+ if (PipelineHandlerBase::isRaw(pixFmt))
+ return false;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+ return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
+{
+ /* This test works for both Bayer and raw mono formats. */
+ return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+/*
+ * Adjust a StreamConfiguration fields to match a video device format.
+ * Returns true if the StreamConfiguration has been adjusted.
+ */
+bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format)
+{
+ const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
+ bool adjusted = false;
+
+ if (stream->pixelFormat != pixFormat || stream->size != format.size) {
+ stream->pixelFormat = pixFormat;
+ stream->size = format.size;
+ adjusted = true;
+ }
+
+ if (stream->colorSpace != format.colorSpace) {
+		LOG(RPI, Debug)
+			<< "Color space changed from "
+			<< ColorSpace::toString(stream->colorSpace) << " to "
+			<< ColorSpace::toString(format.colorSpace);
+		stream->colorSpace = format.colorSpace;
+		adjusted = true;
+ }
+
+ stream->stride = format.planes[0].bpl;
+ stream->frameSize = format.planes[0].size;
+
+ return adjusted;
+}
+
+/*
+ * Populate and return a video device format using a StreamConfiguration.
+ */
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream)
+{
+ V4L2DeviceFormat deviceFormat;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
+ deviceFormat.planesCount = info.numPlanes();
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
+ deviceFormat.size = stream->size;
+ deviceFormat.planes[0].bpl = stream->stride;
+ deviceFormat.colorSpace = stream->colorSpace;
+
+ return deviceFormat;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq)
+{
+ unsigned int code = format.code;
+ const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
+ V4L2DeviceFormat deviceFormat;
+
+ deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+ deviceFormat.size = format.size;
+ deviceFormat.colorSpace = format.colorSpace;
+ return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ CameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<RPiCameraConfiguration>(data);
+ V4L2SubdeviceFormat sensorFormat;
+ unsigned int bufferCount;
+ PixelFormat pixelFormat;
+ V4L2VideoDevice::Formats fmts;
+ Size size;
+ std::optional<ColorSpace> colorSpace;
+
+ if (roles.empty())
+ return config;
+
+ Size sensorSize = data->sensor_->resolution();
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::Raw:
+ size = sensorSize;
+ sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
+ pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
+ BayerFormat::Packing::CSI2);
+ ASSERT(pixelFormat.isValid());
+ colorSpace = ColorSpace::Raw;
+ bufferCount = 2;
+ break;
+
+ case StreamRole::StillCapture:
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Still image codecs usually expect the sYCC color space.
+ * Even RGB codecs will be fine as the RGB we get with the
+ * sYCC color space is the same as sRGB.
+ */
+ colorSpace = ColorSpace::Sycc;
+ /* Return the largest sensor resolution. */
+ size = sensorSize;
+ bufferCount = 1;
+ break;
+
+ case StreamRole::VideoRecording:
+ /*
+ * The colour denoise algorithm requires the analysis
+ * image, produced by the second ISP output, to be in
+ * YUV420 format. Select this format as the default, to
+ * maximize chances that it will be picked by
+ * applications and enable usage of the colour denoise
+ * algorithm.
+ */
+ fmts = data->ispFormats();
+ pixelFormat = formats::YUV420;
+ /*
+ * Choose a color space appropriate for video recording.
+ * Rec.709 will be a good default for HD resolutions.
+ */
+ colorSpace = ColorSpace::Rec709;
+ size = { 1920, 1080 };
+ bufferCount = 4;
+ break;
+
+ case StreamRole::Viewfinder:
+ fmts = data->ispFormats();
+ pixelFormat = formats::XRGB8888;
+ colorSpace = ColorSpace::Sycc;
+ size = { 800, 600 };
+ bufferCount = 4;
+ break;
+
+ default:
+ LOG(RPI, Error) << "Requested stream role not supported: "
+ << role;
+ return nullptr;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ if (role == StreamRole::Raw) {
+ /* Translate the MBUS codes to a PixelFormat. */
+ for (const auto &format : data->sensorFormats_) {
+ PixelFormat pf = mbusCodeToPixelFormat(format.first,
+ BayerFormat::Packing::CSI2);
+ if (pf.isValid())
+ deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+ std::forward_as_tuple(format.second.begin(), format.second.end()));
+ }
+ } else {
+ /*
+ * Translate the V4L2PixelFormat to PixelFormat. Note that we
+ * limit the recommended largest ISP output size to match the
+ * sensor resolution.
+ */
+ for (const auto &format : fmts) {
+ PixelFormat pf = format.first.toPixelFormat();
+ /*
+ * Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
+ * both give YUV420). We must avoid duplicating the range in this case.
+ */
+ if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
+ const SizeRange &ispSizes = format.second[0];
+ deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+ ispSizes.hStep, ispSizes.vStep);
+ }
+ }
+ }
+
+ /* Add the stream format based on the device node used for the use case. */
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.colorSpace = colorSpace;
+ cfg.bufferCount = bufferCount;
+ config->addConfiguration(cfg);
+ }
+
+ return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Start by freeing all buffers and reset the stream states. */
+ data->freeBuffers();
+ for (auto const stream : data->streams_)
+ stream->clearFlags(StreamFlag::External);
+
+ /*
+ * Apply the format on the sensor with any cached transform.
+ *
+ * If the application has provided a sensor configuration apply it
+ * instead of just applying a format.
+ */
+ RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
+ V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
+
+ if (rpiConfig->sensorConfig) {
+ ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
+ rpiConfig->combinedTransform_,
+ sensorFormat);
+ } else {
+ ret = data->sensor_->setFormat(sensorFormat,
+ rpiConfig->combinedTransform_);
+ }
+ if (ret)
+ return ret;
+
+ /*
+ * Platform specific internal stream configuration. This also assigns
+ * external streams which get configured below.
+ */
+ data->cropParams_.clear();
+ ret = data->platformConfigure(rpiConfig);
+ if (ret)
+ return ret;
+
+ ipa::RPi::ConfigResult result;
+ ret = data->configureIPA(config, &result);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+ return ret;
+ }
+
+ /*
+ * Update the ScalerCropMaximum to the correct value for this camera mode.
+ * For us, it's the same as the "analogue crop".
+ *
+ * \todo Make this property the ScalerCrop maximum value when dynamic
+ * controls are available and set it at validate() time
+ */
+ data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+ /* Store the mode sensitivity for the application. */
+ data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+ /* Update the controls that the Raspberry Pi IPA can handle. */
+ ControlInfoMap::Map ctrlMap;
+ for (auto const &c : result.controlInfo)
+ ctrlMap.emplace(c.first, c.second);
+
+ const auto cropParamsIt = data->cropParams_.find(0);
+ if (cropParamsIt != data->cropParams_.end()) {
+ const CameraData::CropParams &cropParams = cropParamsIt->second;
+ /*
+ * Add the ScalerCrop control limits based on the current mode and
+ * the first configured stream.
+ */
+ Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(cropParams.ispMinCropSize));
+ ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop,
+ data->scaleIspCrop(cropParams.ispCrop));
+ if (data->cropParams_.size() == 2) {
+ /*
+ * The control map for rpi::ScalerCrops has the min value
+ * as the default crop for stream 0, max value as the default
+ * value for stream 1.
+ */
+ ctrlMap[&controls::rpi::ScalerCrops] =
+ ControlInfo(data->scaleIspCrop(data->cropParams_.at(0).ispCrop),
+ data->scaleIspCrop(data->cropParams_.at(1).ispCrop),
+ ctrlMap[&controls::ScalerCrop].def());
+ }
+ }
+
+ data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+ /* Setup the Video Mux/Bridge entities. */
+ for (auto &[device, link] : data->bridgeDevices_) {
+ /*
+ * Start by disabling all the sink pad links on the devices in the
+ * cascade, with the exception of the link connecting the device.
+ */
+ for (const MediaPad *p : device->entity()->pads()) {
+ if (!(p->flags() & MEDIA_PAD_FL_SINK))
+ continue;
+
+ for (MediaLink *l : p->links()) {
+ if (l != link)
+ l->setEnabled(false);
+ }
+ }
+
+ /*
+ * Next, enable the entity -> entity links, and setup the pad format.
+ *
+		 * \todo Some bridge devices may change the media bus code, so we
+ * ought to read the source pad format and propagate it to the sink pad.
+ */
+ link->setEnabled(true);
+ const MediaPad *sinkPad = link->sink();
+ ret = device->setFormat(sinkPad->index(), sensorFormat);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+ << " pad " << sinkPad->index()
+ << " with format " << *sensorFormat
+ << ": " << ret;
+ return ret;
+ }
+
+ LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+ << " on pad " << sinkPad->index();
+ }
+
+ return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+ unsigned int count = stream->configuration().bufferCount;
+ int ret = s->dev()->exportBuffers(count, buffers);
+
+ s->setExportedBuffers(buffers);
+
+ return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ /* Check if a ScalerCrop control was specified. */
+ if (controls)
+ data->applyScalerCrop(*controls);
+
+ /* Start the IPA. */
+ ipa::RPi::StartResult result;
+ data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+ &result);
+
+ /* Apply any gain/exposure settings that the IPA may have passed back. */
+ if (!result.controls.empty())
+ data->setSensorControls(result.controls);
+
+ /* Configure the number of dropped frames required on startup. */
+ data->dropFrameCount_ = data->config_.disableStartupFrameDrops
+ ? 0 : result.dropFrameCount;
+
+ for (auto const stream : data->streams_)
+ stream->resetBuffers();
+
+ if (!data->buffersAllocated_) {
+ /* Allocate buffers for internal pipeline usage. */
+ ret = prepareBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to allocate buffers";
+ data->freeBuffers();
+ stop(camera);
+ return ret;
+ }
+ data->buffersAllocated_ = true;
+ }
+
+ /* We need to set the dropFrameCount_ before queueing buffers. */
+ ret = queueAllBuffers(camera);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to queue buffers";
+ stop(camera);
+ return ret;
+ }
+
+ /*
+ * Reset the delayed controls with the gain and exposure values set by
+ * the IPA.
+ */
+ data->delayedCtrls_->reset(0);
+ data->state_ = CameraData::State::Idle;
+
+ /* Enable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(true);
+
+ data->platformStart();
+
+ /* Start all streams. */
+ for (auto const stream : data->streams_) {
+ ret = stream->dev()->streamOn();
+ if (ret) {
+ stop(camera);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+
+ data->state_ = CameraData::State::Stopped;
+ data->platformStop();
+
+ for (auto const stream : data->streams_)
+ stream->dev()->streamOff();
+
+ /* Disable SOF event generation. */
+ data->frontendDevice()->setFrameStartEnabled(false);
+
+ data->clearIncompleteRequests();
+
+ /* Stop the IPA. */
+ data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+ CameraData *data = cameraData(camera);
+
+ if (!data->isRunning())
+ return -EINVAL;
+
+ LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
+ << request->sequence();
+
+ /* Push all buffers supplied in the Request to the respective streams. */
+ for (auto stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External))
+ continue;
+
+ FrameBuffer *buffer = request->findBuffer(stream);
+ if (buffer && !stream->getBufferId(buffer)) {
+ /*
+ * This buffer is not recognised, so it must have been allocated
+ * outside the v4l2 device. Store it in the stream buffer list
+ * so we can track it.
+ */
+ stream->setExportedBuffer(buffer);
+ }
+
+ /*
+ * If no buffer is provided by the request for this stream, we
+ * queue a nullptr to the stream to signify that it must use an
+ * internally allocated buffer for this capture request. This
+ * buffer will not be given back to the application, but is used
+ * to support the internal pipeline flow.
+ *
+ * The below queueBuffer() call will do nothing if there are not
+ * enough internal buffers allocated, but this will be handled by
+ * queuing the request for buffers in the RPiStream object.
+ */
+ int ret = stream->queueBuffer(buffer);
+ if (ret)
+ return ret;
+ }
+
+ /* Push the request to the back of the queue. */
+ data->requestQueue_.push(request);
+ data->handleState();
+
+ return 0;
+}
+
+int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity)
+{
+ CameraData *data = cameraData.get();
+ int ret;
+
+ data->sensor_ = CameraSensorFactoryBase::create(sensorEntity);
+ if (!data->sensor_)
+ return -EINVAL;
+
+ /* Populate the map of sensor supported formats and sizes. */
+ for (auto const mbusCode : data->sensor_->mbusCodes())
+ data->sensorFormats_.emplace(mbusCode,
+ data->sensor_->sizes(mbusCode));
+
+ /*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor -> frontend
+ * chain. There may be a cascade of devices in this chain!
+ */
+ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+ data->enumerateVideoDevices(link, frontendName);
+
+ ipa::RPi::InitResult result;
+ if (data->loadIPA(&result)) {
+ LOG(RPI, Error) << "Failed to load a suitable IPA library";
+ return -EINVAL;
+ }
+
+ /*
+ * Setup our delayed control writer with the sensor default
+ * gain and exposure delays. Mark VBLANK for priority write.
+ */
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ { V4L2_CID_HBLANK, { delays.hblankDelay, false } },
+ { V4L2_CID_VBLANK, { delays.vblankDelay, true } }
+ };
+ data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+ data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+ /* Register initial controls that the Raspberry Pi IPA can handle. */
+ data->controlInfo_ = std::move(result.controlInfo);
+
+ /* Initialize the camera properties. */
+ data->properties_ = data->sensor_->properties();
+
+ /*
+ * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+ * sensor of the colour gains. It is defined to be a linear gain where
+ * the default value represents a gain of exactly one.
+ */
+ auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+ if (it != data->sensor_->controls().end())
+ data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+ /*
+ * Set a default value for the ScalerCropMaximum property to show
+ * that we support its use, however, initialise it to zero because
+ * it's not meaningful until a camera mode has been chosen.
+ */
+ data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+ ret = platformRegister(cameraData, frontend, backend);
+ if (ret)
+ return ret;
+
+ ret = data->loadPipelineConfiguration();
+ if (ret) {
+ LOG(RPI, Error) << "Unable to load pipeline configuration";
+ return ret;
+ }
+
+ /* Setup the general IPA signal handlers. */
+ data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+ data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+ data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+ data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+ data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
+
+ return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+ CameraData *data = cameraData(camera);
+ std::vector<IPABuffer> bufferIds;
+ /*
+ * Link the FrameBuffers with the id (key value) in the map stored in
+ * the RPi stream object - along with an identifier mask.
+ *
+ * This will allow us to identify buffers passed between the pipeline
+ * handler and the IPA.
+ */
+ for (auto const &it : buffers) {
+ bufferIds.push_back(IPABuffer(mask | it.first,
+ it.second.buffer->planes()));
+ data->bufferIds_.insert(mask | it.first);
+ }
+
+ data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+ CameraData *data = cameraData(camera);
+ int ret;
+
+ for (auto const stream : data->streams_) {
+ if (!(stream->getFlags() & StreamFlag::External)) {
+ ret = stream->queueAllBuffers();
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * For external streams, we must queue up a set of internal
+ * buffers to handle the number of drop frames requested by
+ * the IPA. This is done by passing nullptr in queueBuffer().
+ *
+ * The below queueBuffer() call will do nothing if there
+ * are not enough internal buffers allocated, but this will
+ * be handled by queuing the request for buffers in the
+ * RPiStream object.
+ */
+ unsigned int i;
+ for (i = 0; i < data->dropFrameCount_; i++) {
+ ret = stream->queueBuffer(nullptr);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+double CameraData::scoreFormat(double desired, double actual) const
+{
+ double score = desired - actual;
+ /* Smaller desired dimensions are preferred. */
+ if (score < 0.0)
+ score = (-score) / 8;
+ /* Penalise non-exact matches. */
+ if (actual != desired)
+ score *= 2;
+
+ return score;
+}
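+
+/*
+ * Worked example (numbers assumed): scoring a 1280-wide mode against a
+ * 1920-wide request gives (1920 - 1280) * 2 == 1280, while a larger 2028-wide
+ * mode gives (108 / 8) * 2 == 27, so modes larger than the request score far
+ * better than smaller ones.
+ */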
+
+V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
+{
+ double bestScore = std::numeric_limits<double>::max(), score;
+ V4L2SubdeviceFormat bestFormat;
+ bestFormat.colorSpace = ColorSpace::Raw;
+
+ constexpr float penaltyAr = 1500.0;
+ constexpr float penaltyBitDepth = 500.0;
+
+ /* Calculate the closest/best mode from the user requested size. */
+ for (const auto &iter : sensorFormats_) {
+ const unsigned int mbusCode = iter.first;
+ const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+ BayerFormat::Packing::None);
+ const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+ for (const Size &size : iter.second) {
+ double reqAr = static_cast<double>(req.width) / req.height;
+ double fmtAr = static_cast<double>(size.width) / size.height;
+
+ /* Score the dimensions for closeness. */
+ score = scoreFormat(req.width, size.width);
+ score += scoreFormat(req.height, size.height);
+ score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+ /* Add any penalties... this is not an exact science! */
+ score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+ if (score <= bestScore) {
+ bestScore = score;
+ bestFormat.code = mbusCode;
+ bestFormat.size = size;
+ }
+
+ LOG(RPI, Debug) << "Format: " << size
+ << " fmt " << format
+ << " Score: " << score
+ << " (best " << bestScore << ")";
+ }
+ }
+
+ return bestFormat;
+}
+
+void CameraData::freeBuffers()
+{
+ if (ipa_) {
+ /*
+ * Copy the buffer ids from the unordered_set to a vector to
+ * pass to the IPA.
+ */
+ std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+ bufferIds_.end());
+ ipa_->unmapBuffers(bufferIds);
+ bufferIds_.clear();
+ }
+
+ for (auto const stream : streams_)
+ stream->releaseBuffers();
+
+ platformFreeBuffers();
+
+ buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity-to-entity links.
+ *
+ * Entity pad configuration and link enabling happen at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enable the specific links connecting the sensor to the frontend
+ * across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ * +----------+
+ * | FE |
+ * +-----^----+
+ * |
+ * +---+---+
+ * | Mux1 |<------+
+ * +--^---- |
+ * | |
+ * +-----+---+ +---+---+
+ * | Sensor1 | | Mux2 |<--+
+ * +---------+ +-^-----+ |
+ * | |
+ * +-------+-+ +---+-----+
+ * | Sensor2 | | Sensor3 |
+ * +---------+ +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+ const MediaPad *sinkPad = link->sink();
+ const MediaEntity *entity = sinkPad->entity();
+ bool frontendFound = false;
+
+ /* We only deal with Video Mux and Bridge devices in cascade. */
+ if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+ entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+ return;
+
+ /* Find the source pad for this Video Mux or Bridge device. */
+ const MediaPad *sourcePad = nullptr;
+ for (const MediaPad *pad : entity->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ /*
+ * We can only deal with devices that have a single source
+ * pad. If this device has multiple source pads, ignore it
+ * and this branch in the cascade.
+ */
+ if (sourcePad)
+ return;
+
+ sourcePad = pad;
+ }
+ }
+
+ LOG(RPI, Debug) << "Found video mux device " << entity->name()
+ << " linked to sink pad " << sinkPad->index();
+
+ bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+ bridgeDevices_.back().first->open();
+
+ /*
+ * Iterate through all the sink pad links down the cascade to find any
+ * other Video Mux and Bridge devices.
+ */
+ for (MediaLink *l : sourcePad->links()) {
+ enumerateVideoDevices(l, frontend);
+ /* Once we reach the Frontend entity, we are done. */
+ if (l->sink()->entity()->name() == frontend) {
+ frontendFound = true;
+ break;
+ }
+ }
+
+ /* This identifies the end of our entity enumeration recursion. */
+ if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+ /*
+ * If the frontend is not at the end of this cascade, we cannot
+ * configure this topology automatically, so remove all entity
+ * references.
+ */
+ if (!frontendFound) {
+ LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+ bridgeDevices_.clear();
+ }
+ }
+}
+
+int CameraData::loadPipelineConfiguration()
+{
+ config_ = {
+ .disableStartupFrameDrops = false,
+ .cameraTimeoutValue = 0,
+ };
+
+ /* Initial configuration of the platform, in case no config file is present */
+ platformPipelineConfigure({});
+
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+ if (!configFromEnv || *configFromEnv == '\0')
+ return 0;
+
+ std::string filename = std::string(configFromEnv);
+ File file(filename);
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
+ << ", using defaults";
+ return 0;
+ }
+
+ LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root) {
+ LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+ return 0;
+ }
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Warning) << "Unexpected configuration file version reported: "
+				  << ver.value_or(0.0);
+ return 0;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+ config_.disableStartupFrameDrops =
+ phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+ config_.cameraTimeoutValue =
+ phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+ if (config_.cameraTimeoutValue) {
+ /* Disable the IPA signal to control timeout and set the user requested value. */
+ ipa_->setCameraTimeout.disconnect();
+ frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+ }
+
+ return platformPipelineConfigure(root);
+}
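+
+/*
+ * An illustrative configuration file matching the parsing above (the values
+ * shown are the built-in defaults, not a recommendation):
+ *
+ *	version: 1.0
+ *	pipeline_handler:
+ *	    disable_startup_frame_drops: false
+ *	    camera_timeout_value_ms: 0
+ */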
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+ int ret;
+
+ ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+ if (!ipa_)
+ return -ENOENT;
+
+ /*
+ * The configuration (tuning file) is made from the sensor name unless
+ * the environment variable overrides it.
+ */
+ std::string configurationFile;
+ char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+ if (!configFromEnv || *configFromEnv == '\0') {
+ std::string model = sensor_->model();
+ if (isMonoSensor(sensor_))
+ model += "_mono";
+ configurationFile = ipa_->configurationFile(model + ".json");
+ } else {
+ configurationFile = std::string(configFromEnv);
+ }
+
+ IPASettings settings(configurationFile, sensor_->model());
+ ipa::RPi::InitParams params;
+
+ ret = sensor_->sensorInfo(&params.sensorInfo);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ params.lensPresent = !!sensor_->focusLens();
+ ret = platformInitIpa(params);
+ if (ret)
+ return ret;
+
+ return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+ ipa::RPi::ConfigParams params;
+ int ret;
+
+ params.sensorControls = sensor_->controls();
+ if (sensor_->focusLens())
+ params.lensControls = sensor_->focusLens()->controls();
+
+ ret = platformConfigureIpa(params);
+ if (ret)
+ return ret;
+
+ /* We store the IPACameraSensorInfo for digital zoom calculations. */
+ ret = sensor_->sensorInfo(&sensorInfo_);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+ return ret;
+ }
+
+ /* Always send the user transform to the IPA. */
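+	/* Dividing orientations gives the Transform relating the two (here, relative to Rotate0). */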
+ Transform transform = config->orientation / Orientation::Rotate0;
+ params.transform = static_cast<unsigned int>(transform);
+
+ /* Ready the IPA - it must know about the sensor resolution. */
+ ret = ipa_->configure(sensorInfo_, params, result);
+ if (ret < 0) {
+ LOG(RPI, Error) << "IPA configuration failed!";
+ return -EPIPE;
+ }
+
+ if (!result->sensorControls.empty())
+ setSensorControls(result->sensorControls);
+ if (!result->lensControls.empty())
+ setLensControls(result->lensControls);
+
+ return 0;
+}
+
+void CameraData::metadataReady(const ControlList &metadata)
+{
+ if (!isRunning())
+ return;
+
+	/* The last thing to do is to fill the Request's metadata buffer with what the IPA has provided. */
+ Request *request = requestQueue_.front();
+ request->metadata().merge(metadata);
+
+ /*
+ * Inform the sensor of the latest colour gains if it has the
+ * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+ */
+ const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+ if (notifyGainsUnity_ && colourGains) {
+ /* The control wants linear gains in the order B, Gb, Gr, R. */
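+		/*
+		 * For example (illustrative values): with a unity gain value
+		 * of 128 and ColourGains = { 1.5, 2.0 } (red, blue), the
+		 * array below evaluates to { 256, 128, 128, 192 }.
+		 */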
+ ControlList ctrls(sensor_->controls());
+ std::array<int32_t, 4> gains{
+ static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+ *notifyGainsUnity_,
+ *notifyGainsUnity_,
+ static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+ };
+ ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+ sensor_->setControls(&ctrls);
+ }
+}
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+ if (!delayedCtrls_->push(controls, delayContext))
+ LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+ CameraLens *lens = sensor_->focusLens();
+
+ if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+ ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+ lens->setFocusPosition(focusValue.get<int32_t>());
+ }
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+ /*
+ * We need to ensure that if both VBLANK and EXPOSURE are present, the
+	 * former must be written ahead of, and separately from, EXPOSURE to avoid
+ * V4L2 rejecting the latter. This is identical to what DelayedControls
+ * does with the priority write flag.
+ *
+ * As a consequence of the below logic, VBLANK gets set twice, and we
+	 * rely on the V4L2 framework to not pass the second control set to the
+ * driver as the actual control value has not changed.
+ */
+ if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+ ControlList vblank_ctrl;
+
+ vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+ sensor_->setControls(&vblank_ctrl);
+ }
+
+ sensor_->setControls(&controls);
+}
+
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+ /*
+ * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+ * coordinates.
+ */
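+	/*
+	 * Worked example (illustrative numbers): with outputSize 1000x750
+	 * and analogCrop (120, 90)/2000x1500, an ispCrop of (0, 0)/500x375
+	 * scales up by 2 to (0, 0)/1000x750 and translates to
+	 * (120, 90)/1000x750 in native sensor coordinates.
+	 */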
+ Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+ sensorInfo_.outputSize);
+ nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+ return nativeCrop;
+}
+
+void CameraData::applyScalerCrop(const ControlList &controls)
+{
+ const auto &scalerCropRPi = controls.get<Span<const Rectangle>>(controls::rpi::ScalerCrops);
+ const auto &scalerCropCore = controls.get<Rectangle>(controls::ScalerCrop);
+ std::vector<Rectangle> scalerCrops;
+
+ /*
+ * First thing to do is create a vector of crops to apply to each ISP output
+ * based on either controls::ScalerCrop or controls::rpi::ScalerCrops if
+ * present.
+ *
+	 * If controls::rpi::ScalerCrops is present, apply the given crops to the
+ * ISP output streams, indexed by the same order in which they had been
+ * configured. This is not the same as the ISP output index. Otherwise
+ * if controls::ScalerCrop is present, apply the same crop to all ISP
+ * output streams.
+ */
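+	/*
+	 * For example, with two configured output streams and
+	 * rpi::ScalerCrops = { A, B }, crop A applies to the first stream
+	 * in the CameraConfiguration and B to the second, regardless of
+	 * which ISP output each stream is routed to.
+	 */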
+ for (unsigned int i = 0; i < cropParams_.size(); i++) {
+ if (scalerCropRPi && i < scalerCropRPi->size())
+ scalerCrops.push_back(scalerCropRPi->data()[i]);
+ else if (scalerCropCore)
+ scalerCrops.push_back(*scalerCropCore);
+ }
+
+ for (auto const &[i, scalerCrop] : utils::enumerate(scalerCrops)) {
+ Rectangle nativeCrop = scalerCrop;
+
+ if (!nativeCrop.width || !nativeCrop.height)
+ nativeCrop = { 0, 0, 1, 1 };
+
+ /* Create a version of the crop scaled to ISP (camera mode) pixels. */
+ Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+ ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+ /*
+ * The crop that we set must be:
+ * 1. At least as big as ispMinCropSize_, once that's been
+ * enlarged to the same aspect ratio.
+ * 2. With the same mid-point, if possible.
+ * 3. But it can't go outside the sensor area.
+ */
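+		/*
+		 * These constraints map, in order, onto the
+		 * expandedToAspectRatio(), expandedTo() and
+		 * centeredTo().enclosedIn() calls below.
+		 */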
+ Size minSize = cropParams_.at(i).ispMinCropSize.expandedToAspectRatio(nativeCrop.size());
+ Size size = ispCrop.size().expandedTo(minSize);
+ ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+ if (ispCrop != cropParams_.at(i).ispCrop) {
+ cropParams_.at(i).ispCrop = ispCrop;
+ platformSetIspCrop(cropParams_.at(i).ispIndex, ispCrop);
+ }
+ }
+}
+
+void CameraData::cameraTimeout()
+{
+ LOG(RPI, Error) << "Camera frontend has timed out!";
+ LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+ LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+ state_ = CameraData::State::Error;
+ platformStop();
+
+ /*
+ * To allow the application to attempt a recovery from this timeout,
+ * stop all devices streaming, and return any outstanding requests as
+ * incomplete and cancelled.
+ */
+ for (auto const stream : streams_)
+ stream->dev()->streamOff();
+
+ clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+ LOG(RPI, Debug) << "Frame start " << sequence;
+
+ /* Write any controls for the next frame as soon as we can. */
+ delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+ /*
+ * All outstanding requests (and associated buffers) must be returned
+ * back to the application.
+ */
+ while (!requestQueue_.empty()) {
+ Request *request = requestQueue_.front();
+
+ for (auto &b : request->buffers()) {
+ FrameBuffer *buffer = b.second;
+ /*
+ * Has the buffer already been handed back to the
+ * request? If not, do so now.
+ */
+ if (buffer->request()) {
+ buffer->_d()->cancel();
+ pipe()->completeBuffer(request, buffer);
+ }
+ }
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ }
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+ /*
+ * It is possible to be here without a pending request, so check
+	 * that we actually have one to action, otherwise we just return
+	 * the buffer back to the stream.
+ */
+ Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+ if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+ /*
+ * Tag the buffer as completed, returning it to the
+ * application.
+ */
+ LOG(RPI, Debug) << "Completing request buffer for stream "
+ << stream->name();
+ pipe()->completeBuffer(request, buffer);
+ } else {
+ /*
+ * This buffer was not part of the Request (which happens if an
+ * internal buffer was used for an external stream, or
+ * unconditionally for internal streams), or there is no pending
+ * request, so we can recycle it.
+ */
+ LOG(RPI, Debug) << "Returning buffer to stream "
+ << stream->name();
+ stream->returnBuffer(buffer);
+ }
+}
+
+void CameraData::handleState()
+{
+ switch (state_) {
+ case State::Stopped:
+ case State::Busy:
+ case State::Error:
+ break;
+
+ case State::IpaComplete:
+ /* If the request is completed, we will switch to Idle state. */
+ checkRequestCompleted();
+ /*
+ * No break here, we want to try running the pipeline again.
+ * The fallthrough clause below suppresses compiler warnings.
+ */
+ [[fallthrough]];
+
+ case State::Idle:
+ tryRunPipeline();
+ break;
+ }
+}
+
+void CameraData::checkRequestCompleted()
+{
+ bool requestCompleted = false;
+ /*
+ * If we are dropping this frame, do not touch the request, simply
+ * change the state to IDLE when ready.
+ */
+ if (!dropFrameCount_) {
+ Request *request = requestQueue_.front();
+ if (request->hasPendingBuffers())
+ return;
+
+ /* Must wait for metadata to be filled in before completing. */
+ if (state_ != State::IpaComplete)
+ return;
+
+ LOG(RPI, Debug) << "Completing request sequence: "
+ << request->sequence();
+
+ pipe()->completeRequest(request);
+ requestQueue_.pop();
+ requestCompleted = true;
+ }
+
+ /*
+	 * Make sure we have all the ISP outputs completed in the case of a
+	 * dropped frame.
+ */
+ if (state_ == State::IpaComplete &&
+ ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
+ requestCompleted)) {
+ LOG(RPI, Debug) << "Going into Idle state";
+ state_ = State::Idle;
+ if (dropFrameCount_) {
+ dropFrameCount_--;
+ LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+ << dropFrameCount_ << " left)";
+ }
+ }
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+ request->metadata().set(controls::SensorTimestamp,
+ bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+ if (cropParams_.size()) {
+ std::vector<Rectangle> crops;
+
+ for (auto const &[k, v] : cropParams_)
+ crops.push_back(scaleIspCrop(v.ispCrop));
+
+ request->metadata().set(controls::ScalerCrop, crops[0]);
+ if (crops.size() > 1) {
+ request->metadata().set(controls::rpi::ScalerCrops,
+ Span<const Rectangle>(crops.data(), crops.size()));
+ }
+ }
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 00000000..aae0c2f3
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class RPiCameraConfiguration;
+class CameraData : public Camera::Private
+{
+public:
+ CameraData(PipelineHandler *pipe)
+ : Camera::Private(pipe), state_(State::Stopped),
+ dropFrameCount_(0), buffersAllocated_(false),
+ ispOutputCount_(0), ispOutputTotal_(0)
+ {
+ }
+
+ virtual ~CameraData()
+ {
+ }
+
+ virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
+ virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
+ virtual void platformStart() = 0;
+ virtual void platformStop() = 0;
+
+ double scoreFormat(double desired, double actual) const;
+ V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
+
+ void freeBuffers();
+ virtual void platformFreeBuffers() = 0;
+
+ void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+ int loadPipelineConfiguration();
+ int loadIPA(ipa::RPi::InitResult *result);
+ int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+ virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+ virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+ void metadataReady(const ControlList &metadata);
+ void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+ void setLensControls(const ControlList &controls);
+ void setSensorControls(ControlList &controls);
+
+ Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+ void applyScalerCrop(const ControlList &controls);
+ virtual void platformSetIspCrop(unsigned int index, const Rectangle &ispCrop) = 0;
+
+ void cameraTimeout();
+ void frameStarted(uint32_t sequence);
+
+ void clearIncompleteRequests();
+ void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+ void handleState();
+
+ virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+ virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+ virtual V4L2VideoDevice *frontendDevice() = 0;
+
+ virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+ std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+ std::unique_ptr<CameraSensor> sensor_;
+ SensorFormats sensorFormats_;
+
+ /* The vector below is just for convenience when iterating over all streams. */
+ std::vector<Stream *> streams_;
+ /* Stores the ids of the buffers mapped in the IPA. */
+ std::unordered_set<unsigned int> bufferIds_;
+ /*
+ * Stores a cascade of Video Mux or Bridge devices between the sensor and
+	 * Unicam together with the media links across the entities.
+ */
+ std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+ bool sensorMetadata_;
+
+ /*
+ * All the functions in this class are called from a single calling
+ * thread. So, we do not need to have any mutex to protect access to any
+ * of the variables below.
+ */
+ enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+ State state_;
+
+ bool isRunning()
+ {
+ return state_ != State::Stopped && state_ != State::Error;
+ }
+
+ std::queue<Request *> requestQueue_;
+
+ /* For handling digital zoom. */
+ IPACameraSensorInfo sensorInfo_;
+
+ struct CropParams {
+ CropParams(Rectangle ispCrop_, Size ispMinCropSize_, unsigned int ispIndex_)
+ : ispCrop(ispCrop_), ispMinCropSize(ispMinCropSize_), ispIndex(ispIndex_)
+ {
+ }
+
+ /* Crop in ISP (camera mode) pixels */
+ Rectangle ispCrop;
+ /* Minimum crop size in ISP output pixels */
+ Size ispMinCropSize;
+ /* Index of the ISP output channel for this crop */
+ unsigned int ispIndex;
+ };
+
+ /* Mapping of CropParams keyed by the output stream order in CameraConfiguration */
+ std::map<unsigned int, CropParams> cropParams_;
+
+ unsigned int dropFrameCount_;
+
+ /*
+	 * If set, this stores the value that represents a gain of one for
+ * the V4L2_CID_NOTIFY_GAINS control.
+ */
+ std::optional<int32_t> notifyGainsUnity_;
+
+ /* Have internal buffers been allocated? */
+ bool buffersAllocated_;
+
+ struct Config {
+ /*
+ * Override any request from the IPA to drop a number of startup
+ * frames.
+ */
+ bool disableStartupFrameDrops;
+ /*
+ * Override the camera timeout value calculated by the IPA based
+ * on frame durations.
+ */
+ unsigned int cameraTimeoutValue;
+ };
+
+ Config config_;
+
+protected:
+ void fillRequestMetadata(const ControlList &bufferControls,
+ Request *request);
+
+ virtual void tryRunPipeline() = 0;
+
+ unsigned int ispOutputCount_;
+ unsigned int ispOutputTotal_;
+
+private:
+ void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+ PipelineHandlerBase(CameraManager *manager)
+ : PipelineHandler(manager)
+ {
+ }
+
+ virtual ~PipelineHandlerBase()
+ {
+ }
+
+ static bool isRgb(const PixelFormat &pixFmt);
+ static bool isYuv(const PixelFormat &pixFmt);
+ static bool isRaw(const PixelFormat &pixFmt);
+
+ static bool updateStreamConfig(StreamConfiguration *stream,
+ const V4L2DeviceFormat &format);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const StreamConfiguration *stream);
+ static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+ const V4L2SubdeviceFormat &format,
+ BayerFormat::Packing packingReq);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+ int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
+			   MediaDevice *frontend, const std::string &frontendName,
+ MediaDevice *backend, MediaEntity *sensorEntity);
+
+ void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+ virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) = 0;
+
+private:
+ CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<CameraData *>(camera->_d());
+ }
+
+ int queueAllBuffers(Camera *camera);
+ virtual int prepareBuffers(Camera *camera) = 0;
+};
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+ RPiCameraConfiguration(const CameraData *data)
+ : CameraConfiguration(), data_(data)
+ {
+ }
+
+ CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+ Status validate() override;
+
+ /* Cache the combinedTransform_ that will be applied to the sensor */
+ Transform combinedTransform_;
+ /* The sensor format computed in validate() */
+ V4L2SubdeviceFormat sensorFormat_;
+
+ struct StreamParams {
+ StreamParams()
+ : index(0), cfg(nullptr), dev(nullptr)
+ {
+ }
+
+ StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+ : index(index_), cfg(cfg_), dev(nullptr)
+ {
+ }
+
+ unsigned int index;
+ StreamConfiguration *cfg;
+ V4L2VideoDevice *dev;
+ V4L2DeviceFormat format;
+ };
+
+ std::vector<StreamParams> rawStreams_;
+ std::vector<StreamParams> outStreams_;
+
+ /*
+ * Store the colour spaces that all our streams will have. RGB format streams
+ * will have the same colorspace as YUV streams, with YCbCr field cleared and
+ * range set to full.
+ */
+ std::optional<ColorSpace> yuvColorSpace_;
+ std::optional<ColorSpace> rgbColorSpace_;
+
+private:
+ const CameraData *data_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.cpp b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
new file mode 100644
index 00000000..accf59eb
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.cpp
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+#include "rpi_stream.h"
+
+#include <algorithm>
+#include <tuple>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
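+/* This matches VIDEO_MAX_FRAME, the per-queue limit imposed by V4L2. */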
+static constexpr unsigned int maxV4L2BufferCount = 32;
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(RPISTREAM)
+
+namespace RPi {
+
+const BufferObject Stream::errorBufferObject{ nullptr, false };
+
+void Stream::setFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ |= flags;
+
+ /* Import streams cannot be external. */
+ ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
+}
+
+void Stream::clearFlags(StreamFlags flags)
+{
+ /* We don't want dynamic mmapping. */
+ ASSERT(!(flags & StreamFlag::RequiresMmap));
+
+ flags_ &= ~flags;
+}
+
+RPi::Stream::StreamFlags Stream::getFlags() const
+{
+ return flags_;
+}
+
+V4L2VideoDevice *Stream::dev() const
+{
+ return dev_.get();
+}
+
+const std::string &Stream::name() const
+{
+ return name_;
+}
+
+unsigned int Stream::swDownscale() const
+{
+ return swDownscale_;
+}
+
+void Stream::setSwDownscale(unsigned int swDownscale)
+{
+ swDownscale_ = swDownscale;
+}
+
+void Stream::resetBuffers()
+{
+ /* Add all internal buffers to the queue of usable buffers. */
+ availableBuffers_ = {};
+ for (auto const &buffer : internalBuffers_)
+ availableBuffers_.push(buffer.get());
+}
+
+void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ for (auto const &buffer : *buffers)
+ bufferEmplace(++id_, buffer.get());
+}
+
+const BufferMap &Stream::getBuffers() const
+{
+ return bufferMap_;
+}
+
+unsigned int Stream::getBufferId(FrameBuffer *buffer) const
+{
+ if (flags_ & StreamFlag::ImportOnly)
+ return 0;
+
+ /* Find the buffer in the map, and return the buffer id. */
+ auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
+ [&buffer](auto const &p) { return p.second.buffer == buffer; });
+
+ if (it == bufferMap_.end())
+ return 0;
+
+ return it->first;
+}
+
+void Stream::setExportedBuffer(FrameBuffer *buffer)
+{
+ bufferEmplace(++id_, buffer);
+}
+
+int Stream::prepareBuffers(unsigned int count)
+{
+ int ret;
+
+ if (!(flags_ & StreamFlag::ImportOnly)) {
+ /* Export some frame buffers for internal use. */
+ ret = dev_->exportBuffers(count, &internalBuffers_);
+ if (ret < 0)
+ return ret;
+
+ /* Add these exported buffers to the internal/external buffer list. */
+ setExportedBuffers(&internalBuffers_);
+ resetBuffers();
+ }
+
+ return dev_->importBuffers(maxV4L2BufferCount);
+}
+
+int Stream::queueBuffer(FrameBuffer *buffer)
+{
+ /*
+ * A nullptr buffer implies an external stream, but no external
+ * buffer has been supplied in the Request. So, pick one from the
+ * availableBuffers_ queue.
+ */
+ if (!buffer) {
+ if (availableBuffers_.empty()) {
+ LOG(RPISTREAM, Debug) << "No buffers available for "
+ << name_;
+ /*
+ * Note that we need to queue an internal buffer as soon
+ * as one becomes available.
+ */
+ requestBuffers_.push(nullptr);
+ return 0;
+ }
+
+ buffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ }
+
+ /*
+ * If no earlier requests are pending to be queued we can go ahead and
+ * queue this buffer into the device.
+ */
+ if (requestBuffers_.empty())
+ return queueToDevice(buffer);
+
+ /*
+ * There are earlier Request buffers to be queued, so this buffer must go
+ * on the waiting list.
+ */
+ requestBuffers_.push(buffer);
+
+ return 0;
+}
+
+void Stream::returnBuffer(FrameBuffer *buffer)
+{
+ if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
+ /* For internal buffers, simply requeue back to the device. */
+ queueToDevice(buffer);
+ return;
+ }
+
+ /* Push this buffer back into the queue to be used again. */
+ availableBuffers_.push(buffer);
+
+ /*
+ * Do we have any Request buffers that are waiting to be queued?
+ * If so, do it now as availableBuffers_ will not be empty.
+ */
+ while (!requestBuffers_.empty()) {
+ FrameBuffer *requestBuffer = requestBuffers_.front();
+
+ if (!requestBuffer) {
+ /*
+ * We want to queue an internal buffer, but none
+ * are available. Can't do anything, quit the loop.
+ */
+ if (availableBuffers_.empty())
+ break;
+
+ /*
+ * We want to queue an internal buffer, and at least one
+ * is available.
+ */
+ requestBuffer = availableBuffers_.front();
+ availableBuffers_.pop();
+ }
+
+ requestBuffers_.pop();
+ queueToDevice(requestBuffer);
+ }
+}
+
+const BufferObject &Stream::getBuffer(unsigned int id)
+{
+ auto const &it = bufferMap_.find(id);
+ if (it == bufferMap_.end())
+ return errorBufferObject;
+
+ return it->second;
+}
+
+const BufferObject &Stream::acquireBuffer()
+{
+ /* No id provided, so pick up the next available buffer if possible. */
+ if (availableBuffers_.empty())
+ return errorBufferObject;
+
+ unsigned int id = getBufferId(availableBuffers_.front());
+ availableBuffers_.pop();
+
+ return getBuffer(id);
+}
+
+int Stream::queueAllBuffers()
+{
+ int ret;
+
+ if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
+ return 0;
+
+ while (!availableBuffers_.empty()) {
+ ret = queueBuffer(availableBuffers_.front());
+ if (ret < 0)
+ return ret;
+
+ availableBuffers_.pop();
+ }
+
+ return 0;
+}
+
+void Stream::releaseBuffers()
+{
+ dev_->releaseBuffers();
+ clearBuffers();
+}
+
+void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
+{
+ if (flags_ & StreamFlag::RequiresMmap)
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, true));
+ else
+ bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
+ std::forward_as_tuple(buffer, false));
+}
+
+void Stream::clearBuffers()
+{
+ availableBuffers_ = std::queue<FrameBuffer *>{};
+ requestBuffers_ = std::queue<FrameBuffer *>{};
+ internalBuffers_.clear();
+ bufferMap_.clear();
+ id_ = 0;
+}
+
+int Stream::queueToDevice(FrameBuffer *buffer)
+{
+ LOG(RPISTREAM, Debug) << "Queuing buffer " << getBufferId(buffer)
+ << " for " << name_;
+
+ int ret = dev_->queueBuffer(buffer);
+ if (ret)
+ LOG(RPISTREAM, Error) << "Failed to queue buffer for "
+ << name_;
+ return ret;
+}
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/rpi_stream.h b/src/libcamera/pipeline/rpi/common/rpi_stream.h
new file mode 100644
index 00000000..a13d5dc0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/rpi_stream.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Raspberry Pi device stream abstraction class.
+ */
+
+#pragma once
+
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+namespace RPi {
+
+enum BufferMask {
+ MaskID = 0x00ffff,
+ MaskStats = 0x010000,
+ MaskEmbeddedData = 0x020000,
+ MaskBayerData = 0x040000,
+};
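+
+/*
+ * For example, a stats buffer with id 3 is advertised to the IPA as
+ * (3 | MaskStats), and the id is recovered with (value & MaskID), as
+ * done in the VC4 pipeline handler.
+ */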
+
+struct BufferObject {
+ BufferObject(FrameBuffer *b, bool requiresMmap)
+ : buffer(b), mapped(std::nullopt)
+ {
+ if (requiresMmap)
+ mapped = std::make_optional<MappedFrameBuffer>
+ (b, MappedFrameBuffer::MapFlag::ReadWrite);
+ }
+
+ FrameBuffer *buffer;
+ std::optional<MappedFrameBuffer> mapped;
+};
+
+using BufferMap = std::unordered_map<unsigned int, BufferObject>;
+
+/*
+ * Device stream abstraction for either an internal or external stream.
+ * Used for both Unicam and the ISP.
+ */
+class Stream : public libcamera::Stream
+{
+public:
+ enum class StreamFlag {
+ None = 0,
+ /*
+ * Indicates that this stream only imports buffers, e.g. the ISP
+ * input stream.
+ */
+ ImportOnly = (1 << 0),
+ /*
+ * Indicates that this stream is active externally, i.e. the
+ * buffers might be provided by (and returned to) the application.
+ */
+ External = (1 << 1),
+ /*
+		 * Indicates that the stream buffers need to be mmapped and returned
+ * to the pipeline handler when requested.
+ */
+ RequiresMmap = (1 << 2),
+ /*
+ * Indicates a stream that needs buffers recycled every frame internally
+ * in the pipeline handler, e.g. stitch, TDN, config. All buffer
+ * management will be handled by the pipeline handler.
+ */
+ Recurrent = (1 << 3),
+ /*
+ * Indicates that the output stream needs a software format conversion
+ * to be applied after ISP processing.
+ */
+ Needs32bitConv = (1 << 4),
+ };
+
+ using StreamFlags = Flags<StreamFlag>;
+
+ Stream()
+ : flags_(StreamFlag::None), id_(0), swDownscale_(0)
+ {
+ }
+
+ Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
+ : flags_(flags), name_(name),
+ dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
+ swDownscale_(0)
+ {
+ }
+
+ void setFlags(StreamFlags flags);
+ void clearFlags(StreamFlags flags);
+ StreamFlags getFlags() const;
+
+ V4L2VideoDevice *dev() const;
+ const std::string &name() const;
+ void resetBuffers();
+
+ unsigned int swDownscale() const;
+ void setSwDownscale(unsigned int swDownscale);
+
+ void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+ const BufferMap &getBuffers() const;
+ unsigned int getBufferId(FrameBuffer *buffer) const;
+
+ void setExportedBuffer(FrameBuffer *buffer);
+
+ int prepareBuffers(unsigned int count);
+ int queueBuffer(FrameBuffer *buffer);
+ void returnBuffer(FrameBuffer *buffer);
+
+ const BufferObject &getBuffer(unsigned int id);
+ const BufferObject &acquireBuffer();
+
+ int queueAllBuffers();
+ void releaseBuffers();
+
+ /* For error handling. */
+ static const BufferObject errorBufferObject;
+
+private:
+ void bufferEmplace(unsigned int id, FrameBuffer *buffer);
+ void clearBuffers();
+ int queueToDevice(FrameBuffer *buffer);
+
+ StreamFlags flags_;
+
+ /* Stream name identifier. */
+ std::string name_;
+
+ /* The actual device stream. */
+ std::unique_ptr<V4L2VideoDevice> dev_;
+
+ /* Tracks a unique id key for the bufferMap_ */
+ unsigned int id_;
+
+ /* Power of 2 greater than one if software downscaling will be required. */
+ unsigned int swDownscale_;
+
+ /* All frame buffers associated with this device stream. */
+ BufferMap bufferMap_;
+
+ /*
+ * List of frame buffers that we can use if none have been provided by
+ * the application for external streams. This is populated by the
+ * buffers exported internally.
+ */
+ std::queue<FrameBuffer *> availableBuffers_;
+
+ /*
+ * List of frame buffers that are to be queued into the device from a Request.
+ * A nullptr indicates any internal buffer can be used (from availableBuffers_),
+ * whereas a valid pointer indicates an external buffer to be queued.
+ *
+ * Ordering buffers to be queued is important here as it must match the
+ * requests coming from the application.
+ */
+ std::queue<FrameBuffer *> requestBuffers_;
+
+ /*
+ * This is a list of buffers exported internally. Need to keep this around
+ * as the stream needs to maintain ownership of these buffers.
+ */
+ std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
+};
+
+/*
+ * The following class is just a convenient (and typesafe) array of device
+ * streams indexed with an enum class.
+ */
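+/*
+ * Example usage (taken from the VC4 pipeline handler):
+ *   RPi::Device<Unicam, 2> unicam_;
+ *   unicam_[Unicam::Image].dev()->formats();
+ */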
+template<typename E, std::size_t N>
+class Device : public std::array<class Stream, N>
+{
+public:
+ Stream &operator[](E e)
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+ const Stream &operator[](E e) const
+ {
+ return std::array<class Stream, N>::operator[](utils::to_underlying(e));
+ }
+};
+
+} /* namespace RPi */
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/meson.build b/src/libcamera/pipeline/rpi/meson.build
new file mode 100644
index 00000000..2391b6a9
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('common')
+
+foreach pipeline : pipelines
+ pipeline = pipeline.split('/')
+ if pipeline.length() < 2 or pipeline[0] != 'rpi'
+ continue
+ endif
+
+ subdir(pipeline[1])
+endforeach
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
new file mode 100644
index 00000000..b8e01ade
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -0,0 +1,46 @@
+{
+ "version": 1.0,
+ "target": "bcm2835",
+
+ "pipeline_handler":
+ {
+ # The minimum number of internal buffers to be allocated for
+ # Unicam. This value must be greater than 0, but less than or
+ # equal to min_total_unicam_buffers.
+ #
+ # A larger number of internal buffers can reduce the occurrence
+ # of frame drops during high CPU loads, but might also cause
+ # additional latency in the system.
+ #
+ # Note that the pipeline handler might override this value and
+ # not allocate any internal buffers if it knows they will never
+ # be used. For example if the RAW stream is marked as mandatory
+ # and there are no dropped frames signalled for algorithm
+ # convergence.
+ #
+ # "min_unicam_buffers": 2,
+
+ # The minimum total (internal + external) buffer count used for
+ # Unicam. The number of internal buffers allocated for Unicam is
+ # given by:
+ #
+ # internal buffer count = max(min_unicam_buffers,
+ # min_total_unicam_buffers - external buffer count)
+ #
+ # "min_total_unicam_buffers": 4,
+
+ # Override any request from the IPA to drop a number of startup
+ # frames.
+ #
+ # "disable_startup_frame_drops": false,
+
+ # Custom timeout value (in ms) for camera to use. This overrides
+ # the value computed by the pipeline handler based on frame
+ # durations.
+ #
+ # Set this value to 0 to use the pipeline handler computed
+ # timeout value.
+ #
+ # "camera_timeout_value_ms": 0,
+ }
+}
diff --git a/src/libcamera/pipeline/rpi/vc4/data/meson.build b/src/libcamera/pipeline/rpi/vc4/data/meson.build
new file mode 100644
index 00000000..179feebc
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/data/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: CC0-1.0
+
+conf_files = files([
+ 'example.yaml',
+])
+
+install_data(conf_files,
+ install_dir : pipeline_data_dir / 'rpi' / 'vc4',
+ install_tag : 'runtime')
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
new file mode 100644
index 00000000..9b37c2f0
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vc4.cpp',
+])
+
+subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 00000000..fd8d84b1
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * Pipeline handler for VC4-based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+
+#include "../common/pipeline_base.h"
+#include "../common/rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+using StreamFlag = RPi::Stream::StreamFlag;
+using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+ Vc4CameraData(PipelineHandler *pipe)
+ : RPi::CameraData(pipe)
+ {
+ }
+
+ ~Vc4CameraData()
+ {
+ freeBuffers();
+ }
+
+ V4L2VideoDevice::Formats ispFormats() const override
+ {
+ return isp_[Isp::Output0].dev()->formats();
+ }
+
+ V4L2VideoDevice::Formats rawFormats() const override
+ {
+ return unicam_[Unicam::Image].dev()->formats();
+ }
+
+ V4L2VideoDevice *frontendDevice() override
+ {
+ return unicam_[Unicam::Image].dev();
+ }
+
+ void platformFreeBuffers() override
+ {
+ }
+
+ CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
+
+ int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+ void platformStart() override;
+ void platformStop() override;
+
+ void unicamBufferDequeue(FrameBuffer *buffer);
+ void ispInputDequeue(FrameBuffer *buffer);
+ void ispOutputDequeue(FrameBuffer *buffer);
+
+ void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+ void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
+ void setIspControls(const ControlList &controls);
+ void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+ /* Array of Unicam and ISP device streams and associated buffers/streams. */
+ RPi::Device<Unicam, 2> unicam_;
+ RPi::Device<Isp, 4> isp_;
+
+ /* DMAHEAP allocation helper. */
+ DmaBufAllocator dmaHeap_;
+ SharedFD lsTable_;
+
+ struct Config {
+ /*
+ * The minimum number of internal buffers to be allocated for
+ * the Unicam Image stream.
+ */
+ unsigned int minUnicamBuffers;
+ /*
+ * The minimum total (internal + external) buffer count used for
+ * the Unicam Image stream.
+ *
+ * Note that:
+ * minTotalUnicamBuffers must be >= 1, and
+ * minTotalUnicamBuffers >= minUnicamBuffers
+ */
+ unsigned int minTotalUnicamBuffers;
+ };
+
+ Config config_;
+
+private:
+ void platformSetIspCrop([[maybe_unused]] unsigned int index, const Rectangle &ispCrop) override
+ {
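+		/* Operate on a copy, as setSelection() may adjust the rectangle. */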
+ Rectangle crop = ispCrop;
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
+ }
+
+ int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
+ int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+ int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+ {
+ return 0;
+ }
+
+ struct BayerFrame {
+ FrameBuffer *buffer;
+ ControlList controls;
+ unsigned int delayContext;
+ };
+
+ void tryRunPipeline() override;
+ bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+ std::queue<BayerFrame> bayerQueue_;
+ std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+ PipelineHandlerVc4(CameraManager *manager)
+ : RPi::PipelineHandlerBase(manager)
+ {
+ }
+
+ ~PipelineHandlerVc4()
+ {
+ }
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ Vc4CameraData *cameraData(Camera *camera)
+ {
+ return static_cast<Vc4CameraData *>(camera->_d());
+ }
+
+ int prepareBuffers(Camera *camera) override;
+ int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+ MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
+ constexpr unsigned int numUnicamDevices = 2;
+
+ /*
+	 * Loop over all Unicam instances, but return once a match is found.
+	 * This is to ensure we correctly enumerate the camera when an instance
+ * of Unicam has registered with media controller, but has not registered
+ * device nodes due to a sensor subdevice failure.
+ */
+ for (unsigned int i = 0; i < numUnicamDevices; i++) {
+ DeviceMatch unicam("unicam");
+ MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+ if (!unicamDevice) {
+ LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+ continue;
+ }
+
+ DeviceMatch isp("bcm2835-isp");
+ MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+ if (!ispDevice) {
+ LOG(RPI, Debug) << "Unable to acquire ISP instance";
+ continue;
+ }
+
+ /*
+ * The loop below is used to register multiple cameras behind one or more
+ * video mux devices that are attached to a particular Unicam instance.
+ * Obviously these cameras cannot be used simultaneously.
+ */
+ unsigned int numCameras = 0;
+ for (MediaEntity *entity : unicamDevice->entities()) {
+ if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+
+ std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
+ int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
+ unicamDevice, "unicam-image",
+ ispDevice, entity);
+ if (ret)
+ LOG(RPI, Error) << "Failed to register camera "
+ << entity->name() << ": " << ret;
+ else
+ numCameras++;
+ }
+
+ if (numCameras)
+ return true;
+ }
+
+ return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+ Vc4CameraData *data = cameraData(camera);
+ unsigned int numRawBuffers = 0;
+ int ret;
+
+ for (Stream *s : camera->streams()) {
+ if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+ numRawBuffers = s->configuration().bufferCount;
+ break;
+ }
+ }
+
+ /* Decide how many internal buffers to allocate. */
+ for (auto const stream : data->streams_) {
+ unsigned int numBuffers;
+ /*
+ * For Unicam, allocate a minimum number of buffers for internal
+ * use as we want to avoid any frame drops.
+ */
+ const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+ if (stream == &data->unicam_[Unicam::Image]) {
+ /*
+ * If an application has configured a RAW stream, allocate
+ * additional buffers to make up the minimum, but ensure
+ * we have at least minUnicamBuffers of internal buffers
+ * to use to minimise frame drops.
+ */
+ numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+ } else if (stream == &data->isp_[Isp::Input]) {
+ /*
+ * ISP input buffers are imported from Unicam, so follow
+ * similar logic as above to count all the RAW buffers
+ * available.
+ */
+ numBuffers = numRawBuffers +
+ std::max<int>(data->config_.minUnicamBuffers,
+ minBuffers - numRawBuffers);
+
+ } else if (stream == &data->unicam_[Unicam::Embedded]) {
+ /*
+ * Embedded data buffers are (currently) for internal use, and
+ * are small enough (typically 1-2KB) that we can
+ * allocate them generously to avoid causing problems in the
+ * IPA when we cannot supply the metadata.
+ *
+ * 12 are allocated as a typical application will have 8-10
+ * input buffers, so allocating more embedded buffers than that
+ * is a sensible choice.
+ *
+ * The lifetimes of these buffers are smaller than those of the
+ * raw buffers, so allocating a fixed number will still suffice
+ * if the application requests a greater number of raw
+ * buffers, as these will be recycled quicker.
+ */
+ numBuffers = 12;
+ } else {
+ /*
+ * Since the ISP runs synchronous with the IPA and requests,
+ * we only ever need one set of internal buffers. Any buffers
+ * the application wants to hold onto will already be exported
+ * through PipelineHandlerRPi::exportFrameBuffers().
+ */
+ numBuffers = 1;
+ }
+
+ LOG(RPI, Debug) << "Preparing " << numBuffers
+ << " buffers for stream " << stream->name();
+
+ ret = stream->prepareBuffers(numBuffers);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Pass the stats and embedded data buffers to the IPA. No other
+ * buffers need to be passed.
+ */
+ mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+ if (data->sensorMetadata_)
+ mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+ RPi::MaskEmbeddedData);
+
+ return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+ Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+ if (!data->dmaHeap_.isValid())
+ return -ENOMEM;
+
+ MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+ MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+ MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+ MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+ MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+ if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+ return -ENOENT;
+
+ /* Locate and open the unicam video streams. */
+ data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+ /* An embedded data node will not be present if the sensor does not support it. */
+ MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+ if (unicamEmbedded) {
+ data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+ &Vc4CameraData::unicamBufferDequeue);
+ }
+
+ /* Tag the ISP input stream as an import stream. */
+ data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
+ data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+ data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+ data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+ /* Wire up all the buffer connections. */
+ data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+ data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+ data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+ data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
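+	/*
+	 * The XOR catches a mismatch where exactly one of the IPA's CamHelper
+	 * (sensorMetadata_) and the kernel driver (embedded data node) claims
+	 * embedded data support.
+	 */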
+ if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+ LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+ data->sensorMetadata_ = false;
+ if (data->unicam_[Unicam::Embedded].dev())
+ data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+ }
+
+ /*
+ * Open all Unicam and ISP streams. The exception is the embedded data
+ * stream, which only gets opened below if the IPA reports that the sensor
+ * supports embedded data.
+ *
+ * The below grouping is just for convenience so that we can easily
+ * iterate over all streams in one go.
+ */
+ data->streams_.push_back(&data->unicam_[Unicam::Image]);
+ if (data->sensorMetadata_)
+ data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+ for (auto &stream : data->isp_)
+ data->streams_.push_back(&stream);
+
+ for (auto stream : data->streams_) {
+ int ret = stream->dev()->open();
+ if (ret)
+ return ret;
+ }
+
+ if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+ LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+ return -EINVAL;
+ }
+
+	/* Wire up all the IPA connections. */
+ data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+ data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+ data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+ data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+ /*
+ * List the available streams an application may request. At present, we
+ * do not advertise Unicam Embedded and ISP Statistics streams, as there
+ * is no mechanism for the application to request non-image buffer formats.
+ */
+ std::set<Stream *> streams;
+ streams.insert(&data->unicam_[Unicam::Image]);
+ streams.insert(&data->isp_[Isp::Output0]);
+ streams.insert(&data->isp_[Isp::Output1]);
+
+ /* Create and register the camera. */
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(cameraData), id, streams);
+ PipelineHandler::registerCamera(std::move(camera));
+
+ LOG(RPI, Info) << "Registered camera " << id
+ << " to Unicam device " << unicam->deviceNode()
+ << " and ISP device " << isp->deviceNode();
+
+ return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
+{
+ std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+
+ CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+ /* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+ if (rawStreams.size() > 1 || outStreams.size() > 2) {
+ LOG(RPI, Error) << "Invalid number of streams requested";
+ return CameraConfiguration::Status::Invalid;
+ }
+
+ if (!rawStreams.empty()) {
+ rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+ /* Adjust the RAW stream to match the computed sensor format. */
+ StreamConfiguration *rawStream = rawStreams[0].cfg;
+ BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
+
+ /* Apply the sensor bitdepth. */
+ rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
+
+ /* Default to CSI2 packing if the user request is unsupported. */
+ if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
+ rawBayer.packing != BayerFormat::Packing::None)
+ rawBayer.packing = BayerFormat::Packing::CSI2;
+
+ PixelFormat rawFormat = rawBayer.toPixelFormat();
+
+ /*
+ * Try for an unpacked format if a packed one wasn't available.
+ * This catches 8 (and 16) bit formats which would otherwise
+ * fail.
+ */
+ if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
+ rawBayer.packing = BayerFormat::Packing::None;
+ rawFormat = rawBayer.toPixelFormat();
+ }
+
+ if (rawStream->pixelFormat != rawFormat ||
+ rawStream->size != rpiConfig->sensorFormat_.size) {
+ rawStream->pixelFormat = rawFormat;
+ rawStream->size = rpiConfig->sensorFormat_.size;
+
+			status = CameraConfiguration::Status::Adjusted;
+ }
+
+ rawStreams[0].format =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
+ }
+
+ /*
+ * For the two ISP outputs, one stream must be equal or smaller than the
+ * other in all dimensions.
+ *
+ * Index 0 contains the largest requested resolution.
+ */
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ Size size;
+
+ /*
+ * \todo Should we warn if upscaling, as it reduces the image
+ * quality and is usually undesired ?
+ */
+
+ size.width = std::min(outStreams[i].cfg->size.width,
+ outStreams[0].cfg->size.width);
+ size.height = std::min(outStreams[i].cfg->size.height,
+ outStreams[0].cfg->size.height);
+
+ if (outStreams[i].cfg->size != size) {
+ outStreams[i].cfg->size = size;
+ status = CameraConfiguration::Status::Adjusted;
+ }
+
+ /*
+ * Output 0 must be for the largest resolution. We will
+ * have that fixed up in the code above.
+ */
+ outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+
+ outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
+ }
+
+ return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+ config_ = {
+ .minUnicamBuffers = 2,
+ .minTotalUnicamBuffers = 4,
+ };
+
+ if (!root)
+ return 0;
+
+ std::optional<double> ver = (*root)["version"].get<double>();
+ if (!ver || *ver != 1.0) {
+ LOG(RPI, Error) << "Unexpected configuration file version reported";
+ return -EINVAL;
+ }
+
+ std::optional<std::string> target = (*root)["target"].get<std::string>();
+ if (!target || *target != "bcm2835") {
+ LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+				<< target.value_or("(missing)");
+ return -EINVAL;
+ }
+
+ const YamlObject &phConfig = (*root)["pipeline_handler"];
+ config_.minUnicamBuffers =
+ phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+ config_.minTotalUnicamBuffers =
+ phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+ if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+ return -EINVAL;
+ }
+
+ if (config_.minTotalUnicamBuffers < 1) {
+ LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
+{
+ const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
+ const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
+ int ret;
+
+ V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+ V4L2DeviceFormat unicamFormat;
+
+ /*
+ * See which streams are requested, and route the user
+ * StreamConfiguration appropriately.
+ */
+ if (!rawStreams.empty()) {
+ rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+ unicam_[Unicam::Image].setFlags(StreamFlag::External);
+ unicamFormat = rawStreams[0].format;
+ } else {
+ unicamFormat =
+ RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
+ rpiConfig->sensorFormat_,
+ BayerFormat::Packing::CSI2);
+ }
+
+ ret = unicam->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+ if (ret)
+ return ret;
+
+ LOG(RPI, Info) << "Sensor: " << sensor_->id()
+ << " - Selected sensor format: " << rpiConfig->sensorFormat_
+ << " - Selected unicam format: " << unicamFormat;
+
+ /* Use a sensible small default size if no output streams are configured. */
+ Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+ V4L2DeviceFormat format;
+
+ for (unsigned int i = 0; i < outStreams.size(); i++) {
+ StreamConfiguration *cfg = outStreams[i].cfg;
+
+ /* The largest resolution gets routed to the ISP Output 0 node. */
+ RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+ format = outStreams[i].format;
+
+ LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+ << format;
+
+ ret = stream->dev()->setFormat(&format);
+ if (ret)
+ return -EINVAL;
+
+ LOG(RPI, Debug)
+ << "Stream " << stream->name() << " has color space "
+ << ColorSpace::toString(cfg->colorSpace);
+
+ cfg->setStream(stream);
+ stream->setFlags(StreamFlag::External);
+ }
+
+ ispOutputTotal_ = outStreams.size();
+
+ /*
+ * If ISP::Output0 stream has not been configured by the application,
+ * we must allow the hardware to generate an output so that the data
+ * flow in the pipeline handler remains consistent, and we still generate
+ * statistics for the IPA to use. So enable the output at a very low
+ * resolution for internal use.
+ *
+ * \todo Allow the pipeline to work correctly without Output0 and only
+ * statistics coming from the hardware.
+ */
+ if (outStreams.empty()) {
+ V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+ format = {};
+ format.size = maxSize;
+ format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+ /* No one asked for output, so the color space doesn't matter. */
+ format.colorSpace = ColorSpace::Sycc;
+ ret = dev->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error)
+ << "Failed to set default format on ISP Output0: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+
+ LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+ << format;
+ }
+
+ /*
+ * If ISP::Output1 stream has not been requested by the application, we
+ * set it up for internal use now. This second stream will be used for
+ * fast colour denoise, and must be a quarter resolution of the ISP::Output0
+ * stream. However, also limit the maximum size to 1200 pixels in the
+ * larger dimension, just to avoid being wasteful with buffer allocations
+ * and memory bandwidth.
+ *
+ * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
+ * colour denoise will not run.
+ */
+ if (outStreams.size() <= 1) {
+ V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+ V4L2DeviceFormat output1Format;
+ constexpr Size maxDimensions(1200, 1200);
+ const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
+ output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+ output1Format.colorSpace = format.colorSpace;
+ output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+
+ LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+ << output1Format;
+
+ ret = dev->setFormat(&output1Format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+ << ret;
+ return -EINVAL;
+ }
+
+ ispOutputTotal_++;
+ }
+
+ /* ISP statistics output format. */
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+ ret = isp_[Isp::Stats].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+ << format;
+ return ret;
+ }
+
+ ispOutputTotal_++;
+
+ /*
+ * Configure the Unicam embedded data output format only if the sensor
+ * supports it.
+ */
+ if (sensorMetadata_) {
+ V4L2SubdeviceFormat embeddedFormat;
+
+ sensor_->device()->getFormat(1, &embeddedFormat);
+ format = {};
+ format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+ format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+ LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+ ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+ if (ret) {
+ LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+ << format;
+ return ret;
+ }
+ }
+
+ /* Figure out the smallest selection the ISP will allow. */
+ Rectangle testCrop(0, 0, 1, 1);
+ isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
+
+ /* Adjust aspect ratio by providing crops on the input image. */
+ Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+ Rectangle ispCrop = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+ platformSetIspCrop(0, ispCrop);
+ /*
+ * Set the scaler crop to the value we are using (scaled to native sensor
+ * coordinates).
+ */
+ cropParams_.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(ispCrop, testCrop.size(), 0));
+
+ return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+ params.ispControls = isp_[Isp::Input].dev()->controls();
+
+ /* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+ if (!lsTable_.isValid()) {
+ lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+ if (!lsTable_.isValid())
+ return -ENOMEM;
+
+ /* Allow the IPA to mmap the LS table via the file descriptor. */
+ /*
+ * \todo Investigate if mapping the lens shading table buffer
+ * could be handled with mapBuffers().
+ */
+ params.lsTableHandle = lsTable_;
+ }
+
+ return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
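+	/* Discard any bayer and embedded buffers still awaiting a match. */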
+ bayerQueue_ = {};
+ embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : unicam_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ if (stream == &unicam_[Unicam::Image]) {
+ /*
+ * Lookup the sensor controls used for this frame sequence from
+ * DelayedControl and queue them along with the frame buffer.
+ */
+ auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+ /*
+ * Add the frame timestamp to the ControlList for the IPA to use
+ * as it does not receive the FrameBuffer object.
+ */
+ ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+ bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+ } else {
+ embeddedQueue_.push(buffer);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+ if (!isRunning())
+ return;
+
+ LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+ << ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /* The ISP input buffer gets re-queued into Unicam. */
+ handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+ handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+ RPi::Stream *stream = nullptr;
+ unsigned int index = 0;
+
+ if (!isRunning())
+ return;
+
+ for (RPi::Stream &s : isp_) {
+ index = s.getBufferId(buffer);
+ if (index) {
+ stream = &s;
+ break;
+ }
+ }
+
+ /* The buffer must belong to one of our ISP output streams. */
+ ASSERT(stream);
+
+ LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+ << ", buffer id " << index
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ /*
+ * ISP statistics buffer must not be re-queued or sent back to the
+ * application until after the IPA signals so.
+ */
+ if (stream == &isp_[Isp::Stats]) {
+ ipa::RPi::ProcessParams params;
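+		/* Tag the buffer id so the IPA identifies it as a stats buffer. */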
+ params.buffers.stats = index | RPi::MaskStats;
+ params.ipaContext = requestQueue_.front()->sequence();
+ ipa_->processStats(params);
+ } else {
+ /* Any other ISP output can be handed back to the application now. */
+ handleStreamBuffer(buffer, stream);
+ }
+
+ /*
+ * Increment the number of ISP outputs generated.
+ * This is needed to track dropped frames.
+ */
+ ispOutputCount_++;
+
+ handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+ if (!isRunning())
+ return;
+
+ FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
+
+ handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+ state_ = State::IpaComplete;
+ handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
+ [[maybe_unused]] bool stitchSwapBuffers)
+{
+ unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+ unsigned int bayer = buffers.bayer & RPi::MaskID;
+ FrameBuffer *buffer;
+
+ if (!isRunning())
+ return;
+
+	buffer = unicam_[Unicam::Image].getBuffers().at(bayer).buffer;
+	LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bayer
+ << ", timestamp: " << buffer->metadata().timestamp;
+
+ isp_[Isp::Input].queueBuffer(buffer);
+ ispOutputCount_ = 0;
+
+ if (sensorMetadata_ && embeddedId) {
+		buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId).buffer;
+ handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+ }
+
+ handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+ ControlList ctrls = controls;
+
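+	/*
+	 * The lens shading control value wraps a bcm2835_isp_lens_shading
+	 * struct whose dmabuf field must reference our lens shading table.
+	 * Patch the value in place before handing the controls to the ISP.
+	 */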
+ if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+ ControlValue &value =
+ const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+ Span<uint8_t> s = value.data();
+ bcm2835_isp_lens_shading *ls =
+ reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+ ls->dmabuf = lsTable_.get();
+ }
+
+ isp_[Isp::Input].dev()->setControls(&ctrls);
+ handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+ /*
+	 * Set the dequeue timeout to 5x the maximum frame length the IPA has
+	 * reported over the last several frames, with a minimum value of 1s.
+ */
+ utils::Duration timeout =
+ std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+ LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+ unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+ FrameBuffer *embeddedBuffer;
+ BayerFrame bayerFrame;
+
+ /* If any of our request or buffer queues are empty, we cannot proceed. */
+ if (state_ != State::Idle || requestQueue_.empty() ||
+ bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+ return;
+
+ if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+ return;
+
+	/* Take the first request from the queue and run the IPA for it. */
+ Request *request = requestQueue_.front();
+
+ /* See if a new ScalerCrop value needs to be applied. */
+ applyScalerCrop(request->controls());
+
+ /*
+ * Clear the request metadata and fill it with some initial non-IPA
+ * related controls. We clear it first because the request metadata
+ * may have been populated if we have dropped the previous frame.
+ */
+ request->metadata().clear();
+ fillRequestMetadata(bayerFrame.controls, request);
+
+ /* Set our state to say the pipeline is active. */
+ state_ = State::Busy;
+
+ unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Bayer buffer id: " << bayer;
+
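+	/*
+	 * Assemble the IPA prepare parameters: buffer ids tagged with their
+	 * stream masks, plus the sensor and request controls for this frame.
+	 */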
+ ipa::RPi::PrepareParams params;
+ params.buffers.bayer = RPi::MaskBayerData | bayer;
+ params.sensorControls = std::move(bayerFrame.controls);
+ params.requestControls = request->controls();
+ params.ipaContext = request->sequence();
+ params.delayContext = bayerFrame.delayContext;
+ params.buffers.embedded = 0;
+
+ if (embeddedBuffer) {
+ unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+ params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+ LOG(RPI, Debug) << "Signalling prepareIsp:"
+ << " Embedded buffer id: " << embeddedId;
+ }
+
+ ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+ if (bayerQueue_.empty())
+ return false;
+
+ /*
+ * Find the embedded data buffer with a matching timestamp to pass to
+ * the IPA. Any embedded buffers with a timestamp lower than the
+ * current bayer buffer will be removed and re-queued to the driver.
+ */
+ uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+ embeddedBuffer = nullptr;
+ while (!embeddedQueue_.empty()) {
+ FrameBuffer *b = embeddedQueue_.front();
+ if (b->metadata().timestamp < ts) {
+ embeddedQueue_.pop();
+ unicam_[Unicam::Embedded].returnBuffer(b);
+ LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+ << unicam_[Unicam::Embedded].name();
+ } else if (b->metadata().timestamp == ts) {
+ /* Found a match! */
+ embeddedBuffer = b;
+ embeddedQueue_.pop();
+ break;
+ } else {
+ break; /* Only higher timestamps from here. */
+ }
+ }
+
+ if (!embeddedBuffer && sensorMetadata_) {
+ if (embeddedQueue_.empty()) {
+ /*
+ * If the embedded buffer queue is empty, wait for the next
+ * buffer to arrive - dequeue ordering may send the image
+ * buffer first.
+ */
+ LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+ return false;
+ }
+
+		/* Log that no matching embedded data buffer was found. */
+ LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+ }
+
+ bayerFrame = std::move(bayerQueue_.front());
+ bayerQueue_.pop();
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build
new file mode 100644
index 00000000..dda3de97
--- /dev/null
+++ b/src/libcamera/pipeline/simple/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'simple.cpp',
+])
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
new file mode 100644
index 00000000..8ac24e6e
--- /dev/null
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -0,0 +1,1768 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright (C) 2019, Martijn Braam
+ *
+ * Pipeline handler for simple pipelines
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <list>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stdint.h>
+#include <string.h>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <linux/media-bus-format.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/request.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/converter.h"
+#include "libcamera/internal/delayed_controls.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/software_isp/software_isp.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SimplePipeline)
+
+/* -----------------------------------------------------------------------------
+ *
+ * Overview
+ * --------
+ *
+ * The SimplePipelineHandler relies on generic kernel APIs to control a camera
+ * device, without any device-specific code and with limited device-specific
+ * static data.
+ *
+ * To qualify for support by the simple pipeline handler, a device shall
+ *
+ * - be supported by V4L2 drivers, exposing the Media Controller API, the V4L2
+ * subdev APIs and the media bus format-based enumeration extension for the
+ *   VIDIOC_ENUM_FMT ioctl;
+ * - not expose any device-specific API from drivers to userspace;
+ * - include one or more camera sensor media entities and one or more video
+ *   capture devices;
+ * - have a capture pipeline with linear paths from the camera sensors to the
+ *   video capture devices; and
+ * - have an optional memory-to-memory device to perform format conversion
+ * and/or scaling, exposed as a V4L2 M2M device.
+ *
+ * As devices that require a specific pipeline handler may still match the
+ * above characteristics, the simple pipeline handler doesn't attempt to
+ * automatically determine which devices it can support. It instead relies on
+ * an explicit list of supported devices, provided in the supportedDevices
+ * array.
+ *
+ * When matching a device, the pipeline handler enumerates all camera sensors
+ * and attempts, for each of them, to find a path to a video capture video node.
+ * It does so by using a breadth-first search to find the shortest path from the
+ * sensor device to a valid capture device. This is guaranteed to produce a
+ * valid path on devices with only one option and is a good heuristic on more
+ * complex devices to skip paths that aren't suitable for the simple pipeline
+ * handler. For instance, on the IPU-based i.MX6, the shortest path will skip
+ * encoders and image converters, and it will end in a CSI capture device.
+ * A more complex graph search algorithm could be implemented if a device that
+ * would otherwise be compatible with the pipeline handler isn't correctly
+ * handled by this heuristic.
+ *
+ * Once the camera data instances have been created, the match() function
+ * creates a V4L2VideoDevice or V4L2Subdevice instance for each entity used by
+ * any of the cameras and stores them in SimplePipelineHandler::entities_,
+ * accessible by the SimpleCameraData class through the
+ * SimplePipelineHandler::subdev() and SimplePipelineHandler::video() functions.
+ * This avoids duplication of subdev instances between different cameras when
+ * the same entity is used in multiple paths.
+ *
+ * Finally, all camera data instances are initialized to gather information
+ * about the possible pipeline configurations for the corresponding camera. If
+ * valid pipeline configurations are found, a Camera is registered for the
+ * SimpleCameraData instance.
+ *
+ * Pipeline Traversal
+ * ------------------
+ *
+ * During the breadth-first search, the pipeline is traversed from entity to
+ * entity, by following media graph links from source to sink, starting at the
+ * camera sensor.
+ *
+ * When reaching an entity (on its sink side), if the entity is a V4L2 subdev
+ * that supports the streams API, the subdev internal routes are followed to
+ * find the connected source pads. Otherwise all of the entity's source pads
+ * are considered to continue the graph traversal. The pipeline handler
+ * currently considers the default internal routes only and doesn't attempt to
+ * set up custom routes. This can be extended if needed.
+ *
+ * The shortest path between the camera sensor and a video node is stored in
+ * SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
+ * ordered along the data path from the camera sensor to the video node. The
+ * Entity structure stores a pointer to the MediaEntity, as well as information
+ * about how it is connected in that particular path for later usage when
+ * configuring the pipeline.
+ *
+ * Pipeline Configuration
+ * ----------------------
+ *
+ * The simple pipeline handler configures the pipeline by propagating V4L2
+ * subdev formats from the camera sensor to the video node. The format is first
+ * set on the camera sensor's output, picking a resolution supported by the
+ * sensor that best matches the needs of the requested streams. Then, on every
+ * link in the pipeline, the format is retrieved on the link source and set
+ * unmodified on the link sink.
+ *
+ * The best sensor resolution is selected using a heuristic that tries to
+ * minimize the required bus and memory bandwidth, as the simple pipeline
+ * handler is typically used on smaller, less powerful systems. To avoid the
+ * need to upscale, the pipeline handler picks the smallest sensor resolution
+ * large enough to accommodate the need of all streams. Resolutions that
+ * significantly restrict the field of view are ignored.
+ *
+ * When initializing the camera data, the above format propagation procedure
+ * is repeated for every media bus format and size supported by the camera
+ * sensor. Upon reaching the video node, the pixel formats compatible with the
+ * media bus format are enumerated. Each combination of the input media bus
+ * format, output pixel format and output size is recorded in an instance of
+ * the SimpleCameraData::Configuration structure, stored in the
+ * SimpleCameraData::configs_ vector.
+ *
+ * Format Conversion and Scaling
+ * -----------------------------
+ *
+ * The capture pipeline isn't expected to include a scaler, and if a scaler is
+ * available, it is ignored when configuring the pipeline. However, the simple
+ * pipeline handler supports optional memory-to-memory converters to scale the
+ * image and convert it to a different pixel format. If such a converter is
+ * present, the pipeline handler enumerates, for each pipeline configuration,
+ * the pixel formats and sizes that the converter can produce for the output of
+ * the capture video node, and stores the information in the outputFormats and
+ * outputSizes of the SimpleCameraData::Configuration structure.
+ *
+ * Concurrent Access to Cameras
+ * ----------------------------
+ *
+ * The cameras created by the same pipeline handler instance may share hardware
+ * resources. For instance, a platform may have multiple CSI-2 receivers but a
+ * single DMA engine, prohibiting usage of multiple cameras concurrently. This
+ * depends heavily on the hardware architecture, which the simple pipeline
+ * handler has no a priori knowledge of. The pipeline handler thus implements a
+ * heuristic to handle sharing of hardware resources in a generic fashion.
+ *
+ * Two cameras are considered to be mutually exclusive if they share common
+ * pads along the pipeline from the camera sensor to the video node. An entity
+ * can thus be used concurrently by multiple cameras, as long as pads are
+ * distinct.
+ *
+ * A resource reservation mechanism is implemented by the SimplePipelineHandler
+ * acquirePipeline() and releasePipeline() functions to manage exclusive access
+ * to pads. A camera reserves all the pads present in its pipeline when it is
+ * started, and the start() function returns an error if any of the required
+ * pads is already in use. When the camera is stopped, the pads it has reserved
+ * are released.
+ */
+
+class SimplePipelineHandler;
+
+struct SimplePipelineInfo {
+ const char *driver;
+ /*
+ * Each converter in the list contains the name
+ * and the number of streams it supports.
+ */
+ std::vector<std::pair<const char *, unsigned int>> converters;
+	/*
+	 * Whether the software ISP is enabled for this driver. The software
+	 * ISP can't be used together with a converter.
+	 */
+ bool swIspEnabled;
+};
+
+namespace {
+
+static const SimplePipelineInfo supportedDevices[] = {
+ { "dcmipp", {}, false },
+ { "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
+ { "j721e-csi2rx", {}, true },
+ { "mtk-seninf", { { "mtk-mdp", 3 } }, false },
+ { "mxc-isi", {}, false },
+ { "qcom-camss", {}, true },
+ { "sun6i-csi", {}, false },
+};
+
+} /* namespace */
+
+class SimpleCameraData : public Camera::Private
+{
+public:
+ SimpleCameraData(SimplePipelineHandler *pipe,
+ unsigned int numStreams,
+ MediaEntity *sensor);
+
+ bool isValid() const { return sensor_ != nullptr; }
+ SimplePipelineHandler *pipe();
+
+ int init();
+ int setupLinks();
+ int setupFormats(V4L2SubdeviceFormat *format,
+ V4L2Subdevice::Whence whence,
+ Transform transform = Transform::Identity);
+ void imageBufferReady(FrameBuffer *buffer);
+ void clearIncompleteRequests();
+
+ unsigned int streamIndex(const Stream *stream) const
+ {
+ return stream - &streams_.front();
+ }
+
+ struct Entity {
+ /* The media entity, always valid. */
+ MediaEntity *entity;
+ /*
+ * Whether or not the entity is a subdev that supports the
+ * routing API.
+ */
+ bool supportsRouting;
+ /*
+ * The local sink pad connected to the upstream entity, null for
+ * the camera sensor at the beginning of the pipeline.
+ */
+ const MediaPad *sink;
+ /*
+ * The local source pad connected to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ const MediaPad *source;
+ /*
+ * The link on the source pad, to the downstream entity, null
+ * for the video node at the end of the pipeline.
+ */
+ MediaLink *sourceLink;
+ };
+
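+	/*
+	 * A pipeline configuration candidate: the sensor media bus code and
+	 * size, the capture format and size produced at the video node, and
+	 * the formats and sizes the converter or software ISP can generate
+	 * from that capture output.
+	 */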
+ struct Configuration {
+ uint32_t code;
+ Size sensorSize;
+ PixelFormat captureFormat;
+ Size captureSize;
+ std::vector<PixelFormat> outputFormats;
+ SizeRange outputSizes;
+ };
+
+ std::vector<Stream> streams_;
+
+ /*
+ * All entities in the pipeline, from the camera sensor to the video
+ * node.
+ */
+ std::list<Entity> entities_;
+ std::unique_ptr<CameraSensor> sensor_;
+ V4L2VideoDevice *video_;
+
+ std::vector<Configuration> configs_;
+ std::map<PixelFormat, std::vector<const Configuration *>> formats_;
+
+ std::unique_ptr<DelayedControls> delayedCtrls_;
+
+ std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
+ struct RequestOutputs {
+ Request *request;
+ std::map<const Stream *, FrameBuffer *> outputs;
+ };
+ std::queue<RequestOutputs> conversionQueue_;
+ bool useConversion_;
+
+ std::unique_ptr<Converter> converter_;
+ std::unique_ptr<SoftwareIsp> swIsp_;
+
+private:
+ void tryPipeline(unsigned int code, const Size &size);
+ static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
+
+ void conversionInputDone(FrameBuffer *buffer);
+ void conversionOutputDone(FrameBuffer *buffer);
+
+ void ispStatsReady(uint32_t frame, uint32_t bufferId);
+ void setSensorControls(const ControlList &sensorControls);
+};
+
+class SimpleCameraConfiguration : public CameraConfiguration
+{
+public:
+ SimpleCameraConfiguration(Camera *camera, SimpleCameraData *data);
+
+ Status validate() override;
+
+ const SimpleCameraData::Configuration *pipeConfig() const
+ {
+ return pipeConfig_;
+ }
+
+ bool needConversion() const { return needConversion_; }
+ const Transform &combinedTransform() const { return combinedTransform_; }
+
+private:
+ /*
+ * The SimpleCameraData instance is guaranteed to be valid as long as
+ * the corresponding Camera instance is valid. In order to borrow a
+ * reference to the camera data, store a new reference to the camera.
+ */
+ std::shared_ptr<Camera> camera_;
+ SimpleCameraData *data_;
+
+ const SimpleCameraData::Configuration *pipeConfig_;
+ bool needConversion_;
+ Transform combinedTransform_;
+};
+
+class SimplePipelineHandler : public PipelineHandler
+{
+public:
+ SimplePipelineHandler(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+ V4L2VideoDevice *video(const MediaEntity *entity);
+ V4L2Subdevice *subdev(const MediaEntity *entity);
+ MediaDevice *converter() { return converter_; }
+ bool swIspEnabled() const { return swIspEnabled_; }
+
+protected:
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+private:
+ static constexpr unsigned int kNumInternalBuffers = 3;
+
+ struct EntityData {
+ std::unique_ptr<V4L2VideoDevice> video;
+ std::unique_ptr<V4L2Subdevice> subdev;
+ std::map<const MediaPad *, SimpleCameraData *> owners;
+ };
+
+ SimpleCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<SimpleCameraData *>(camera->_d());
+ }
+
+ std::vector<MediaEntity *> locateSensors(MediaDevice *media);
+ static int resetRoutingTable(V4L2Subdevice *subdev);
+
+ const MediaPad *acquirePipeline(SimpleCameraData *data);
+ void releasePipeline(SimpleCameraData *data);
+
+ std::map<const MediaEntity *, EntityData> entities_;
+
+ MediaDevice *converter_;
+ bool swIspEnabled_;
+};
+
+/* -----------------------------------------------------------------------------
+ * Camera Data
+ */
+
+SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
+ unsigned int numStreams,
+ MediaEntity *sensor)
+ : Camera::Private(pipe), streams_(numStreams)
+{
+ /*
+ * Find the shortest path from the camera sensor to a video capture
+ * device using the breadth-first search algorithm. This heuristic will
+ * be most likely to skip paths that aren't suitable for the simple
+ * pipeline handler on more complex devices, and is guaranteed to
+ * produce a valid path on all devices that have a single option.
+ *
+ * For instance, on the IPU-based i.MX6Q, the shortest path will skip
+ * encoders and image converters, and will end in a CSI capture device.
+ */
+ std::unordered_set<MediaEntity *> visited;
+ std::queue<std::tuple<MediaEntity *, MediaPad *>> queue;
+
+ /* Remember at each entity where we came from. */
+ std::unordered_map<MediaEntity *, Entity> parents;
+ MediaEntity *entity = nullptr;
+ MediaEntity *video = nullptr;
+ MediaPad *sinkPad;
+
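+	/* Start the search at the sensor, which has no upstream sink pad. */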
+ queue.push({ sensor, nullptr });
+
+ while (!queue.empty()) {
+ std::tie(entity, sinkPad) = queue.front();
+ queue.pop();
+
+ /* Found the capture device. */
+ if (entity->function() == MEDIA_ENT_F_IO_V4L) {
+ LOG(SimplePipeline, Debug)
+ << "Found capture device " << entity->name();
+ video = entity;
+ break;
+ }
+
+ visited.insert(entity);
+
+ /*
+ * Add direct downstream entities to the search queue. If the
+ * current entity supports the subdev internal routing API,
+ * restrict the search to downstream entities reachable through
+ * active routes.
+ */
+
+ std::vector<const MediaPad *> pads;
+ bool supportsRouting = false;
+
+ if (sinkPad) {
+ pads = routedSourcePads(sinkPad);
+ if (!pads.empty())
+ supportsRouting = true;
+ }
+
+ if (pads.empty()) {
+ for (const MediaPad *pad : entity->pads()) {
+ if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
+ continue;
+ pads.push_back(pad);
+ }
+ }
+
+ for (const MediaPad *pad : pads) {
+ for (MediaLink *link : pad->links()) {
+ MediaEntity *next = link->sink()->entity();
+ if (visited.find(next) == visited.end()) {
+ queue.push({ next, link->sink() });
+
+ Entity e{ entity, supportsRouting, sinkPad, pad, link };
+ parents.insert({ next, e });
+ }
+ }
+ }
+ }
+
+ if (!video)
+ return;
+
+ /*
+ * With the parents, we can follow back our way from the capture device
+ * to the sensor. Store all the entities in the pipeline, from the
+ * camera sensor to the video node, in entities_.
+ */
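+	/* 'entity' is the video node on which the search stopped. */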
+ entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
+
+ for (auto it = parents.find(entity); it != parents.end();
+ it = parents.find(entity)) {
+ const Entity &e = it->second;
+ entities_.push_front(e);
+ entity = e.entity;
+ }
+
+ /* Finally also remember the sensor. */
+ sensor_ = CameraSensorFactoryBase::create(sensor);
+ if (!sensor_)
+ return;
+
+ LOG(SimplePipeline, Debug)
+ << "Found pipeline: "
+ << utils::join(entities_, " -> ",
+ [](const Entity &e) {
+ std::string s = "[";
+ if (e.sink)
+ s += std::to_string(e.sink->index()) + "|";
+ s += e.entity->name();
+ if (e.source)
+ s += "|" + std::to_string(e.source->index());
+ s += "]";
+ return s;
+ });
+}
+
+SimplePipelineHandler *SimpleCameraData::pipe()
+{
+ return static_cast<SimplePipelineHandler *>(Camera::Private::pipe());
+}
+
+int SimpleCameraData::init()
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+ int ret;
+
+ /* Open the converter, if any. */
+ MediaDevice *converter = pipe->converter();
+ if (converter) {
+ converter_ = ConverterFactoryBase::create(converter);
+ if (!converter_) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create converter, disabling format conversion";
+ converter_.reset();
+ } else {
+ converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
+ converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ }
+ }
+
+	/*
+	 * Instantiate the software ISP if it is enabled for this driver and
+	 * no converter is used.
+	 */
+ if (!converter_ && pipe->swIspEnabled()) {
+ swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get(), &controlInfo_);
+ if (!swIsp_->isValid()) {
+ LOG(SimplePipeline, Warning)
+ << "Failed to create software ISP, disabling software debayering";
+ swIsp_.reset();
+ } else {
+ /*
+ * The inputBufferReady signal is emitted from the soft ISP thread,
+ * and needs to be handled in the pipeline handler thread. Signals
+ * implement queued delivery, but this works transparently only if
+ * the receiver is bound to the target thread. As the
+ * SimpleCameraData class doesn't inherit from the Object class, it
+ * is not bound to any thread, and the signal would be delivered
+ * synchronously. Instead, connect the signal to a lambda function
+ * bound explicitly to the pipe, which is bound to the pipeline
+ * handler thread. The function then simply forwards the call to
+ * conversionInputDone().
+ */
+ swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
+ this->conversionInputDone(buffer);
+ });
+ swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
+ swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
+ swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
+ }
+ }
+
+ video_ = pipe->video(entities_.back().entity);
+ ASSERT(video_);
+
+ /*
+ * Setup links first as some subdev drivers take active links into
+ * account to propagate TRY formats. Such is life :-(
+ */
+ ret = setupLinks();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Generate the list of possible pipeline configurations by trying each
+ * media bus format and size supported by the sensor.
+ */
+ for (unsigned int code : sensor_->mbusCodes()) {
+ for (const Size &size : sensor_->sizes(code))
+ tryPipeline(code, size);
+ }
+
+ if (configs_.empty()) {
+ LOG(SimplePipeline, Error) << "No valid configuration found";
+ return -EINVAL;
+ }
+
+ /* Map the pixel formats to configurations. */
+ for (const Configuration &config : configs_) {
+ formats_[config.captureFormat].push_back(&config);
+
+ for (PixelFormat fmt : config.outputFormats)
+ formats_[fmt].push_back(&config);
+ }
+
+ properties_ = sensor_->properties();
+
+ return 0;
+}
+
+/*
+ * Generate a list of supported pipeline configurations for a sensor media bus
+ * code and size.
+ *
+ * First propagate the media bus code and size through the pipeline from the
+ * camera sensor to the video node. Then, query the video node for all supported
+ * pixel formats compatible with the media bus code. For each pixel format, store
+ * a full pipeline configuration in the configs_ vector.
+ */
+void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
+{
+ /*
+ * Propagate the format through the pipeline, and enumerate the
+ * corresponding possible V4L2 pixel formats on the video node.
+ */
+ V4L2SubdeviceFormat format{};
+ format.code = code;
+ format.size = size;
+
+ int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
+ if (ret < 0) {
+ /* Pipeline configuration failed, skip this configuration. */
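+		/* Restore the original values, as setupFormats() may have adjusted them. */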
+ format.code = code;
+ format.size = size;
+ LOG(SimplePipeline, Debug)
+ << "Sensor format " << format
+ << " not supported for this pipeline";
+ return;
+ }
+
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
+
+ LOG(SimplePipeline, Debug)
+ << "Adding configuration for " << format.size
+ << " in pixel formats [ "
+ << utils::join(videoFormats, ", ",
+ [](const auto &f) {
+ return f.first.toString();
+ })
+ << " ]";
+
+ for (const auto &videoFormat : videoFormats) {
+ PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
+ if (!pixelFormat)
+ continue;
+
+ Configuration config;
+ config.code = code;
+ config.sensorSize = size;
+ config.captureFormat = pixelFormat;
+ config.captureSize = format.size;
+
+ if (converter_) {
+ config.outputFormats = converter_->formats(pixelFormat);
+ config.outputSizes = converter_->sizes(format.size);
+ } else if (swIsp_) {
+ config.outputFormats = swIsp_->formats(pixelFormat);
+ config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
+ if (config.outputFormats.empty()) {
+				/* Don't use swIsp_ for unsupported pixel formats. */
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+ } else {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ }
+
+ configs_.push_back(config);
+ }
+}
+
+int SimpleCameraData::setupLinks()
+{
+ int ret;
+
+ /*
+ * Configure all links along the pipeline. Some entities may not allow
+ * multiple sink links to be enabled together, even on different sink
+ * pads. We must thus start by disabling all sink links (but the one we
+ * want to enable) before enabling the pipeline link.
+ *
+ * The entities_ list stores entities along with their source link. We
+ * need to process the link in the context of the sink entity, so
+ * record the source link of the current entity as the sink link of the
+ * next entity, and skip the first entity in the loop.
+ */
+ MediaLink *sinkLink = nullptr;
+
+ for (SimpleCameraData::Entity &e : entities_) {
+ if (!sinkLink) {
+ sinkLink = e.sourceLink;
+ continue;
+ }
+
+ for (MediaPad *pad : e.entity->pads()) {
+ /*
+ * If the entity supports the V4L2 internal routing API,
+ * assume that it may carry multiple independent streams
+ * concurrently, and only disable links on the sink and
+ * source pads used by the pipeline.
+ */
+ if (e.supportsRouting && pad != e.sink && pad != e.source)
+ continue;
+
+ for (MediaLink *link : pad->links()) {
+ if (link == sinkLink)
+ continue;
+
+ if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
+ !(link->flags() & MEDIA_LNK_FL_IMMUTABLE)) {
+ ret = link->setEnabled(false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
+ ret = sinkLink->setEnabled(true);
+ if (ret < 0)
+ return ret;
+ }
+
+ sinkLink = e.sourceLink;
+ }
+
+ return 0;
+}
+
+int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
+ V4L2Subdevice::Whence whence,
+ Transform transform)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+ int ret;
+
+ /*
+ * Configure the format on the sensor output and propagate it through
+ * the pipeline.
+ */
+ ret = sensor_->setFormat(format, transform);
+ if (ret < 0)
+ return ret;
+
+ for (const Entity &e : entities_) {
+ if (!e.sourceLink)
+ break;
+
+ MediaLink *link = e.sourceLink;
+ MediaPad *source = link->source();
+ MediaPad *sink = link->sink();
+
+ if (source->entity() != sensor_->entity()) {
+ V4L2Subdevice *subdev = pipe->subdev(source->entity());
+ ret = subdev->getFormat(source->index(), format, whence);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (sink->entity()->function() != MEDIA_ENT_F_IO_V4L) {
+ V4L2SubdeviceFormat sourceFormat = *format;
+
+ V4L2Subdevice *subdev = pipe->subdev(sink->entity());
+ ret = subdev->setFormat(sink->index(), format, whence);
+ if (ret < 0)
+ return ret;
+
+ if (format->code != sourceFormat.code ||
+ format->size != sourceFormat.size) {
+ LOG(SimplePipeline, Debug)
+ << "Source '" << source->entity()->name()
+ << "':" << source->index()
+ << " produces " << sourceFormat
+ << ", sink '" << sink->entity()->name()
+ << "':" << sink->index()
+ << " requires " << *format;
+ return -EINVAL;
+ }
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Link " << *link << ": configured with format "
+ << *format;
+ }
+
+ return 0;
+}
+
+void SimpleCameraData::imageBufferReady(FrameBuffer *buffer)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+
+ /*
+ * If an error occurred during capture, or if the buffer was cancelled,
+ * complete the request, even if the converter is in use as there's no
+ * point converting an erroneous buffer.
+ */
+ if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
+ if (!useConversion_) {
+ /* No conversion, just complete the request. */
+ Request *request = buffer->request();
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+ return;
+ }
+
+ /*
+ * The converter or Software ISP is in use. Requeue the internal
+ * buffer for capture (unless the stream is being stopped), and
+ * complete the request with all the user-facing buffers.
+ */
+ if (buffer->metadata().status != FrameMetadata::FrameCancelled)
+ video_->queueBuffer(buffer);
+
+ if (conversionQueue_.empty())
+ return;
+
+ const RequestOutputs &outputs = conversionQueue_.front();
+ for (auto &[stream, buf] : outputs.outputs)
+ pipe->completeBuffer(outputs.request, buf);
+ pipe->completeRequest(outputs.request);
+ conversionQueue_.pop();
+
+ return;
+ }
+
+ /*
+ * Record the sensor's timestamp in the request metadata. The request
+ * needs to be obtained from the user-facing buffer, as internal
+ * buffers are free-wheeling and have no request associated with them.
+ *
+ * \todo The sensor timestamp should be better estimated by connecting
+ * to the V4L2Device::frameStart signal if the platform provides it.
+ */
+ Request *request = buffer->request();
+
+ if (useConversion_ && !conversionQueue_.empty()) {
+ const std::map<const Stream *, FrameBuffer *> &outputs =
+ conversionQueue_.front().outputs;
+ if (!outputs.empty()) {
+ FrameBuffer *outputBuffer = outputs.begin()->second;
+ if (outputBuffer)
+ request = outputBuffer->request();
+ }
+ }
+
+ if (request)
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ /*
+ * Queue the captured and the request buffer to the converter or Software
+ * ISP if format conversion is needed. If there's no queued request, just
+ * requeue the captured buffer for capture.
+ */
+ if (useConversion_) {
+ if (conversionQueue_.empty()) {
+ video_->queueBuffer(buffer);
+ return;
+ }
+
+ if (converter_)
+ converter_->queueBuffers(buffer, conversionQueue_.front().outputs);
+ else
+ /*
+			 * request->sequence() can't be retrieved from 'buffer'
+			 * inside queueBuffers(), as buffer->request() is no
+			 * longer valid at that point, so pass it explicitly.
+ */
+ swIsp_->queueBuffers(request->sequence(), buffer,
+ conversionQueue_.front().outputs);
+
+ conversionQueue_.pop();
+ return;
+ }
+
+ /* Otherwise simply complete the request. */
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+}
+
+void SimpleCameraData::clearIncompleteRequests()
+{
+ while (!conversionQueue_.empty()) {
+ pipe()->cancelRequest(conversionQueue_.front().request);
+ conversionQueue_.pop();
+ }
+}
+
+void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
+{
+ /* Queue the input buffer back for capture. */
+ video_->queueBuffer(buffer);
+}
+
+void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
+{
+ SimplePipelineHandler *pipe = SimpleCameraData::pipe();
+
+ /* Complete the buffer and the request. */
+ Request *request = buffer->request();
+ if (pipe->completeBuffer(request, buffer))
+ pipe->completeRequest(request);
+}
+
+void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId)
+{
+ swIsp_->processStats(frame, bufferId,
+ delayedCtrls_->get(frame));
+}
+
+void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
+{
+ delayedCtrls_->push(sensorControls);
+ ControlList ctrls(sensorControls);
+ sensor_->setControls(&ctrls);
+}
+
+/* Retrieve all source pads connected to a sink pad through active routes. */
+std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
+{
+ MediaEntity *entity = sink->entity();
+ std::unique_ptr<V4L2Subdevice> subdev =
+ std::make_unique<V4L2Subdevice>(entity);
+
+ int ret = subdev->open();
+ if (ret < 0)
+ return {};
+
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret < 0)
+ return {};
+
+ std::vector<const MediaPad *> pads;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (sink->index() != route.sink.pad ||
+ !(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ const MediaPad *pad = entity->getPadByIndex(route.source.pad);
+ if (!pad) {
+ LOG(SimplePipeline, Warning)
+ << "Entity " << entity->name()
+ << " has invalid route source pad "
+ << route.source.pad;
+			return {};
+		}
+
+		pads.push_back(pad);
+ }
+
+ return pads;
+}
+
+/* -----------------------------------------------------------------------------
+ * Camera Configuration
+ */
+
+SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
+ SimpleCameraData *data)
+ : CameraConfiguration(), camera_(camera->shared_from_this()),
+ data_(data), pipeConfig_(nullptr)
+{
+}
+
+namespace {
+
+static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
+{
+ ASSERT(supportedSizes.min <= supportedSizes.max);
+
+ if (supportedSizes.min == supportedSizes.max)
+ return supportedSizes.max;
+
+ unsigned int hStep = supportedSizes.hStep;
+ unsigned int vStep = supportedSizes.vStep;
+
+ if (hStep == 0)
+ hStep = supportedSizes.max.width - supportedSizes.min.width;
+ if (vStep == 0)
+ vStep = supportedSizes.max.height - supportedSizes.min.height;
+
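+	/*
+	 * Clamp the requested size to the supported range, then snap it down
+	 * to the closest size reachable from the minimum in whole steps.
+	 */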
+ Size adjusted = requestedSize.boundedTo(supportedSizes.max)
+ .expandedTo(supportedSizes.min);
+
+ return adjusted.shrunkBy(supportedSizes.min)
+ .alignedDownTo(hStep, vStep)
+ .grownBy(supportedSizes.min);
+}
+
+} /* namespace */
+
+CameraConfiguration::Status SimpleCameraConfiguration::validate()
+{
+ const CameraSensor *sensor = data_->sensor_.get();
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ Orientation requestedOrientation = orientation;
+ combinedTransform_ = sensor->computeTransform(&orientation);
+ if (orientation != requestedOrientation)
+ status = Adjusted;
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > data_->streams_.size()) {
+ config_.resize(data_->streams_.size());
+ status = Adjusted;
+ }
+
+ /* Find the largest stream size. */
+ Size maxStreamSize;
+ for (const StreamConfiguration &cfg : config_)
+ maxStreamSize.expandTo(cfg.size);
+
+ LOG(SimplePipeline, Debug)
+ << "Largest stream size is " << maxStreamSize;
+
+ /*
+ * Find the best configuration for the pipeline using a heuristic.
+ * First select the pixel format based on the streams (which are
+ * considered ordered from highest to lowest priority). Default to the
+ * first pipeline configuration if no streams request a supported pixel
+ * format.
+ */
+ const std::vector<const SimpleCameraData::Configuration *> *configs =
+ &data_->formats_.begin()->second;
+
+ for (const StreamConfiguration &cfg : config_) {
+ auto it = data_->formats_.find(cfg.pixelFormat);
+ if (it != data_->formats_.end()) {
+ configs = &it->second;
+ break;
+ }
+ }
+
+ /*
+ * \todo Pick the best sensor output media bus format when the
+ * requested pixel format can be produced from multiple sensor media
+ * bus formats.
+ */
+
+ /*
+ * Then pick, among the possible configuration for the pixel format,
+ * the smallest sensor resolution that can accommodate all streams
+ * without upscaling.
+ */
+ const SimpleCameraData::Configuration *maxPipeConfig = nullptr;
+ pipeConfig_ = nullptr;
+
+ for (const SimpleCameraData::Configuration *pipeConfig : *configs) {
+ const Size &size = pipeConfig->captureSize;
+
+ if (size.width >= maxStreamSize.width &&
+ size.height >= maxStreamSize.height) {
+ if (!pipeConfig_ || size < pipeConfig_->captureSize)
+ pipeConfig_ = pipeConfig;
+ }
+
+ if (!maxPipeConfig || maxPipeConfig->captureSize < size)
+ maxPipeConfig = pipeConfig;
+ }
+
+ /* If no configuration was large enough, select the largest one. */
+ if (!pipeConfig_)
+ pipeConfig_ = maxPipeConfig;
+
+ LOG(SimplePipeline, Debug)
+ << "Picked "
+ << V4L2SubdeviceFormat{ pipeConfig_->code, pipeConfig_->sensorSize, {} }
+ << " -> " << pipeConfig_->captureSize
+ << "-" << pipeConfig_->captureFormat
+ << " for max stream size " << maxStreamSize;
+
+ /*
+ * Adjust the requested streams.
+ *
+ * Enable usage of the converter when producing multiple streams, as
+ * the video capture device can't capture to multiple buffers.
+ *
+ * It is possible to produce up to one stream without conversion
+ * (provided the format and size match), at the expense of more complex
+ * buffer handling (including allocation of internal buffers to be used
+ * when a request doesn't contain a buffer for the stream that doesn't
+ * require any conversion, similar to raw capture use cases). This is
+ * left as a future improvement.
+ */
+ needConversion_ = config_.size() > 1;
+
+ for (unsigned int i = 0; i < config_.size(); ++i) {
+ StreamConfiguration &cfg = config_[i];
+
+ /* Adjust the pixel format and size. */
+ auto it = std::find(pipeConfig_->outputFormats.begin(),
+ pipeConfig_->outputFormats.end(),
+ cfg.pixelFormat);
+ if (it == pipeConfig_->outputFormats.end())
+ it = pipeConfig_->outputFormats.begin();
+
+ PixelFormat pixelFormat = *it;
+ if (cfg.pixelFormat != pixelFormat) {
+ LOG(SimplePipeline, Debug) << "Adjusting pixel format";
+ cfg.pixelFormat = pixelFormat;
+ status = Adjusted;
+ }
+
+ if (!pipeConfig_->outputSizes.contains(cfg.size)) {
+ Size adjustedSize = pipeConfig_->captureSize;
+ /*
+ * The converter (when present) may not be able to output
+ * a size identical to its input size. The capture size is thus
+ * not guaranteed to be a valid output size. In such cases, use
+			 * the valid output size closest to the requested one,
+			 * rounding down.
+ */
+ if (!pipeConfig_->outputSizes.contains(adjustedSize))
+ adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
+ LOG(SimplePipeline, Debug)
+ << "Adjusting size from " << cfg.size
+ << " to " << adjustedSize;
+ cfg.size = adjustedSize;
+ status = Adjusted;
+ }
+
+ /* \todo Create a libcamera core class to group format and size */
+ if (cfg.pixelFormat != pipeConfig_->captureFormat ||
+ cfg.size != pipeConfig_->captureSize)
+ needConversion_ = true;
+
+ /* Set the stride, frameSize and bufferCount. */
+ if (needConversion_) {
+ std::tie(cfg.stride, cfg.frameSize) =
+ data_->converter_
+ ? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size)
+ : data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
+ cfg.size);
+ if (cfg.stride == 0)
+ return Invalid;
+ } else {
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ int ret = data_->video_->tryFormat(&format);
+ if (ret < 0)
+ return Invalid;
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+ }
+
+ cfg.bufferCount = 4;
+ }
+
+ return status;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Handler
+ */
+
+SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
+ : PipelineHandler(manager), converter_(nullptr)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ SimpleCameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<SimpleCameraConfiguration>(camera, data);
+
+ if (roles.empty())
+ return config;
+
+ /* Create the formats map. */
+ std::map<PixelFormat, std::vector<SizeRange>> formats;
+
+ for (const SimpleCameraData::Configuration &cfg : data->configs_) {
+ for (PixelFormat format : cfg.outputFormats)
+ formats[format].push_back(cfg.outputSizes);
+ }
+
+ /* Sort the sizes and merge any consecutive overlapping ranges. */
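+	/*
+	 * E.g. the ranges [100x100, 200x200] and [150x150, 400x400] overlap
+	 * and get merged into a single [100x100, 400x400] range.
+	 */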
+ for (auto &[format, sizes] : formats) {
+ std::sort(sizes.begin(), sizes.end(),
+ [](SizeRange &a, SizeRange &b) {
+ return a.min < b.min;
+ });
+
+ auto cur = sizes.begin();
+ auto next = cur;
+
+ while (++next != sizes.end()) {
+ if (cur->max.width >= next->min.width &&
+ cur->max.height >= next->min.height)
+ cur->max = next->max;
+ else if (++cur != next)
+ *cur = *next;
+ }
+
+ sizes.erase(++cur, sizes.end());
+ }
+
+ /*
+ * Create the stream configurations. Take the first entry in the formats
+ * map as the default, for lack of a better option.
+ *
+ * \todo Implement a better way to pick the default format
+ */
+ for ([[maybe_unused]] StreamRole role : roles) {
+ StreamConfiguration cfg{ StreamFormats{ formats } };
+ cfg.pixelFormat = formats.begin()->first;
+ cfg.size = formats.begin()->second[0].max;
+
+ config->addConfiguration(cfg);
+ }
+
+ config->validate();
+
+ return config;
+}
+
+int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
+{
+ SimpleCameraConfiguration *config =
+ static_cast<SimpleCameraConfiguration *>(c);
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+ int ret;
+
+ /*
+ * Configure links on the pipeline and propagate formats from the
+ * sensor to the video node.
+ */
+ ret = data->setupLinks();
+ if (ret < 0)
+ return ret;
+
+ const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
+ V4L2SubdeviceFormat format{};
+ format.code = pipeConfig->code;
+ format.size = pipeConfig->sensorSize;
+
+ ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
+ config->combinedTransform());
+ if (ret < 0)
+ return ret;
+
+ /* Configure the video node. */
+ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
+
+ V4L2DeviceFormat captureFormat;
+ captureFormat.fourcc = videoFormat;
+ captureFormat.size = pipeConfig->captureSize;
+
+ ret = video->setFormat(&captureFormat);
+ if (ret)
+ return ret;
+
+ if (captureFormat.planesCount != 1) {
+ LOG(SimplePipeline, Error)
+ << "Planar formats using non-contiguous memory not supported";
+ return -EINVAL;
+ }
+
+ if (captureFormat.fourcc != videoFormat ||
+ captureFormat.size != pipeConfig->captureSize) {
+ LOG(SimplePipeline, Error)
+ << "Unable to configure capture in "
+ << pipeConfig->captureSize << "-" << videoFormat
+ << " (got " << captureFormat << ")";
+ return -EINVAL;
+ }
+
+ /* Configure the converter if needed. */
+ std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
+ data->useConversion_ = config->needConversion();
+
+ for (unsigned int i = 0; i < config->size(); ++i) {
+ StreamConfiguration &cfg = config->at(i);
+
+ cfg.setStream(&data->streams_[i]);
+
+ if (data->useConversion_)
+ outputCfgs.push_back(cfg);
+ }
+
+ if (outputCfgs.empty())
+ return 0;
+
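+	/* Map each delayed V4L2 control to the sensor's per-frame delay. */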
+ const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays();
+ std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
+ { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } },
+ { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } },
+ };
+ data->delayedCtrls_ =
+ std::make_unique<DelayedControls>(data->sensor_->device(),
+ params);
+ data->video_->frameStart.connect(data->delayedCtrls_.get(),
+ &DelayedControls::applyControls);
+
+ StreamConfiguration inputCfg;
+ inputCfg.pixelFormat = pipeConfig->captureFormat;
+ inputCfg.size = pipeConfig->captureSize;
+ inputCfg.stride = captureFormat.planes[0].bpl;
+ inputCfg.bufferCount = kNumInternalBuffers;
+
+ if (data->converter_) {
+ return data->converter_->configure(inputCfg, outputCfgs);
+ } else {
+ ipa::soft::IPAConfigInfo configInfo;
+ configInfo.sensorControls = data->sensor_->controls();
+ return data->swIsp_->configure(inputCfg, outputCfgs, configInfo);
+ }
+}
+
+int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ SimpleCameraData *data = cameraData(camera);
+ unsigned int count = stream->configuration().bufferCount;
+
+ /*
+ * Export buffers on the converter or capture video node, depending on
+ * whether the converter is used or not.
+ */
+ if (data->useConversion_)
+ return data->converter_
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
+ else
+ return data->video_->exportBuffers(count, buffers);
+}
+
+int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
+{
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+ int ret;
+
+ const MediaPad *pad = acquirePipeline(data);
+ if (pad) {
+ LOG(SimplePipeline, Info)
+ << "Failed to acquire pipeline, entity "
+ << pad->entity()->name() << " in use";
+ return -EBUSY;
+ }
+
+ if (data->useConversion_) {
+ /*
+ * When using the converter allocate a fixed number of internal
+ * buffers.
+ */
+ ret = video->allocateBuffers(kNumInternalBuffers,
+ &data->conversionBuffers_);
+ } else {
+ /* Otherwise, prepare for using buffers from the only stream. */
+ Stream *stream = &data->streams_[0];
+ ret = video->importBuffers(stream->configuration().bufferCount);
+ }
+ if (ret < 0) {
+ releasePipeline(data);
+ return ret;
+ }
+
+ video->bufferReady.connect(data, &SimpleCameraData::imageBufferReady);
+
+ ret = video->streamOn();
+ if (ret < 0) {
+ stop(camera);
+ return ret;
+ }
+
+ if (data->useConversion_) {
+ if (data->converter_)
+ ret = data->converter_->start();
+ else if (data->swIsp_)
+ ret = data->swIsp_->start();
+ else
+ ret = 0;
+
+ if (ret < 0) {
+ stop(camera);
+ return ret;
+ }
+
+ /* Queue all internal buffers for capture. */
+ for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
+ video->queueBuffer(buffer.get());
+ }
+
+ return 0;
+}
+
+void SimplePipelineHandler::stopDevice(Camera *camera)
+{
+ SimpleCameraData *data = cameraData(camera);
+ V4L2VideoDevice *video = data->video_;
+
+ if (data->useConversion_) {
+ if (data->converter_)
+ data->converter_->stop();
+ else if (data->swIsp_)
+ data->swIsp_->stop();
+ }
+
+ video->streamOff();
+ video->releaseBuffers();
+
+ video->bufferReady.disconnect(data, &SimpleCameraData::imageBufferReady);
+
+ data->clearIncompleteRequests();
+ data->conversionBuffers_.clear();
+
+ releasePipeline(data);
+}
+
+int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
+{
+ SimpleCameraData *data = cameraData(camera);
+ int ret;
+
+ std::map<const Stream *, FrameBuffer *> buffers;
+
+ for (auto &[stream, buffer] : request->buffers()) {
+ /*
+ * If conversion is needed, push the buffer to the converter
+ * queue, it will be handed to the converter in the capture
+ * completion handler.
+ */
+ if (data->useConversion_) {
+ buffers.emplace(stream, buffer);
+ } else {
+ ret = data->video_->queueBuffer(buffer);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (data->useConversion_) {
+ data->conversionQueue_.push({ request, std::move(buffers) });
+ if (data->swIsp_)
+ data->swIsp_->queueRequest(request->sequence(), request->controls());
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Match and Setup
+ */
+
+std::vector<MediaEntity *>
+SimplePipelineHandler::locateSensors(MediaDevice *media)
+{
+ std::vector<MediaEntity *> entities;
+
+ /*
+ * Gather all the camera sensor entities based on the function they
+ * expose.
+ */
+ for (MediaEntity *entity : media->entities()) {
+ if (entity->function() == MEDIA_ENT_F_CAM_SENSOR)
+ entities.push_back(entity);
+ }
+
+ if (entities.empty())
+ return {};
+
+ /*
+ * Sensors can be made of multiple entities. For instance, a raw sensor
+ * can be connected to an ISP, and the combination of both should be
+ * treated as one sensor. To support this, as a crude heuristic, check
+ * the downstream entity from the camera sensor, and if it is an ISP,
+ * use it instead of the sensor.
+ */
+ std::vector<MediaEntity *> sensors;
+
+ for (MediaEntity *entity : entities) {
+ /*
+ * Locate the downstream entity by following the first link
+ * from a source pad.
+ */
+ const MediaLink *link = nullptr;
+
+ for (const MediaPad *pad : entity->pads()) {
+ if ((pad->flags() & MEDIA_PAD_FL_SOURCE) &&
+ !pad->links().empty()) {
+ link = pad->links()[0];
+ break;
+ }
+ }
+
+ if (!link)
+ continue;
+
+ MediaEntity *remote = link->sink()->entity();
+ if (remote->function() == MEDIA_ENT_F_PROC_VIDEO_ISP)
+ sensors.push_back(remote);
+ else
+ sensors.push_back(entity);
+ }
+
+ /*
+ * Remove duplicates, in case multiple sensors are connected to the
+ * same ISP.
+ */
+ std::sort(sensors.begin(), sensors.end());
+ auto last = std::unique(sensors.begin(), sensors.end());
+ sensors.erase(last, sensors.end());
+
+ return sensors;
+}
+
+int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
+{
+ /* Reset the media entity routing table to its default state. */
+ V4L2Subdevice::Routing routing = {};
+
+ int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return ret;
+
+ ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
+ if (ret)
+ return ret;
+
+ /*
+ * If the routing table is empty we won't be able to meaningfully use
+ * the subdev.
+ */
+ if (routing.empty()) {
+ LOG(SimplePipeline, Error)
+ << "Default routing table of " << subdev->deviceNode()
+ << " is empty";
+ return -EINVAL;
+ }
+
+ LOG(SimplePipeline, Debug)
+ << "Routing table of " << subdev->deviceNode()
+ << " reset to " << routing;
+
+ return 0;
+}
+
+bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
+{
+ const SimplePipelineInfo *info = nullptr;
+ unsigned int numStreams = 1;
+ MediaDevice *media;
+
+ for (const SimplePipelineInfo &inf : supportedDevices) {
+ DeviceMatch dm(inf.driver);
+ media = acquireMediaDevice(enumerator, dm);
+ if (media) {
+ info = &inf;
+ break;
+ }
+ }
+
+ if (!media)
+ return false;
+
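+	/* Acquire the first available converter, if any, along with its stream count. */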
+ for (const auto &[name, streams] : info->converters) {
+ DeviceMatch converterMatch(name);
+ converter_ = acquireMediaDevice(enumerator, converterMatch);
+ if (converter_) {
+ numStreams = streams;
+ break;
+ }
+ }
+
+ swIspEnabled_ = info->swIspEnabled;
+
+ /* Locate the sensors. */
+ std::vector<MediaEntity *> sensors = locateSensors(media);
+ if (sensors.empty()) {
+ LOG(SimplePipeline, Info) << "No sensor found for " << media->deviceNode();
+ return false;
+ }
+
+ LOG(SimplePipeline, Debug) << "Sensor found for " << media->deviceNode();
+
+ /*
+ * Create one camera data instance for each sensor and gather all
+ * entities in all pipelines.
+ */
+ std::vector<std::unique_ptr<SimpleCameraData>> pipelines;
+ std::set<MediaEntity *> entities;
+
+ pipelines.reserve(sensors.size());
+
+ for (MediaEntity *sensor : sensors) {
+ std::unique_ptr<SimpleCameraData> data =
+ std::make_unique<SimpleCameraData>(this, numStreams, sensor);
+ if (!data->isValid()) {
+ LOG(SimplePipeline, Error)
+ << "No valid pipeline for sensor '"
+ << sensor->name() << "', skipping";
+ continue;
+ }
+
+ for (SimpleCameraData::Entity &entity : data->entities_)
+ entities.insert(entity.entity);
+
+ pipelines.push_back(std::move(data));
+ }
+
+ if (entities.empty())
+ return false;
+
+ /*
+ * Insert all entities in the global entities list. Create and open
+ * V4L2VideoDevice and V4L2Subdevice instances for the corresponding
+ * entities.
+ */
+ for (MediaEntity *entity : entities) {
+ std::unique_ptr<V4L2VideoDevice> video;
+ std::unique_ptr<V4L2Subdevice> subdev;
+ int ret;
+
+ switch (entity->type()) {
+ case MediaEntity::Type::V4L2VideoDevice:
+ video = std::make_unique<V4L2VideoDevice>(entity);
+ ret = video->open();
+ if (ret < 0) {
+ LOG(SimplePipeline, Error)
+ << "Failed to open " << video->deviceNode()
+ << ": " << strerror(-ret);
+ return false;
+ }
+ break;
+
+ case MediaEntity::Type::V4L2Subdevice:
+ subdev = std::make_unique<V4L2Subdevice>(entity);
+ ret = subdev->open();
+ if (ret < 0) {
+ LOG(SimplePipeline, Error)
+ << "Failed to open " << subdev->deviceNode()
+ << ": " << strerror(-ret);
+ return false;
+ }
+
+ if (subdev->caps().hasStreams()) {
+ /*
+ * Reset the routing table to its default state
+ * to make sure entities are enumerated according
+ * to the default routing configuration.
+ */
+ ret = resetRoutingTable(subdev.get());
+ if (ret) {
+ LOG(SimplePipeline, Error)
+ << "Failed to reset routes for "
+ << subdev->deviceNode() << ": "
+ << strerror(-ret);
+ return false;
+ }
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ entities_[entity] = { std::move(video), std::move(subdev), {} };
+ }
+
+ /* Initialize each pipeline and register a corresponding camera. */
+ bool registered = false;
+
+ for (std::unique_ptr<SimpleCameraData> &data : pipelines) {
+ int ret = data->init();
+ if (ret < 0)
+ continue;
+
+ std::set<Stream *> streams;
+ std::transform(data->streams_.begin(), data->streams_.end(),
+ std::inserter(streams, streams.end()),
+ [](Stream &stream) { return &stream; });
+
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
+ registered = true;
+ }
+
+ return registered;
+}
+
+V4L2VideoDevice *SimplePipelineHandler::video(const MediaEntity *entity)
+{
+ auto iter = entities_.find(entity);
+ if (iter == entities_.end())
+ return nullptr;
+
+ return iter->second.video.get();
+}
+
+V4L2Subdevice *SimplePipelineHandler::subdev(const MediaEntity *entity)
+{
+ auto iter = entities_.find(entity);
+ if (iter == entities_.end())
+ return nullptr;
+
+ return iter->second.subdev.get();
+}
+
+/**
+ * \brief Acquire all resources needed by the camera pipeline
+ * \return nullptr on success, a pointer to the contended pad on error
+ */
+const MediaPad *SimplePipelineHandler::acquirePipeline(SimpleCameraData *data)
+{
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ const EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink) {
+ auto iter = edata.owners.find(entity.sink);
+ if (iter != edata.owners.end() && iter->second != data)
+ return entity.sink;
+ }
+
+ if (entity.source) {
+ auto iter = edata.owners.find(entity.source);
+ if (iter != edata.owners.end() && iter->second != data)
+ return entity.source;
+ }
+ }
+
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink)
+ edata.owners[entity.sink] = data;
+ if (entity.source)
+ edata.owners[entity.source] = data;
+ }
+
+ return nullptr;
+}
+
+void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
+{
+ for (const SimpleCameraData::Entity &entity : data->entities_) {
+ EntityData &edata = entities_[entity.entity];
+
+ if (entity.sink) {
+ auto iter = edata.owners.find(entity.sink);
+ ASSERT(iter->second == data);
+ edata.owners.erase(iter);
+ }
+
+ if (entity.source) {
+ auto iter = edata.owners.find(entity.source);
+ ASSERT(iter->second == data);
+ edata.owners.erase(iter);
+ }
+ }
+}
+
+REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/uvcvideo/meson.build b/src/libcamera/pipeline/uvcvideo/meson.build
index c19ae238..a3a91074 100644
--- a/src/libcamera/pipeline/uvcvideo/meson.build
+++ b/src/libcamera/pipeline/uvcvideo/meson.build
@@ -1,3 +1,5 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'uvcvideo.cpp',
])
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index ffbddf27..8c2c6baf 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -2,58 +2,75 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * uvcvideo.cpp - Pipeline handler for uvcvideo devices
+ * Pipeline handler for uvcvideo devices
*/
#include <algorithm>
-#include <iomanip>
-#include <sys/sysmacros.h>
-#include <tuple>
+#include <cmath>
+#include <fstream>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_videodevice.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(UVC)
-class UVCCameraData : public CameraData
+class UVCCameraData : public Camera::Private
{
public:
UVCCameraData(PipelineHandler *pipe)
- : CameraData(pipe), video_(nullptr)
+ : Camera::Private(pipe)
{
}
- ~UVCCameraData()
- {
- delete video_;
- }
+ int init(MediaDevice *media);
+ void addControl(uint32_t cid, const ControlInfo &v4l2info,
+ ControlInfoMap::Map *ctrls);
+ void imageBufferReady(FrameBuffer *buffer);
- int init(MediaEntity *entity);
- void bufferReady(FrameBuffer *buffer);
+ const std::string &id() const { return id_; }
- V4L2VideoDevice *video_;
+ Mutex openLock_;
+ std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
+ std::map<PixelFormat, std::vector<SizeRange>> formats_;
+
+private:
+ bool generateId();
+
+ std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
{
public:
- UVCCameraConfiguration();
+ UVCCameraConfiguration(UVCCameraData *data);
Status validate() override;
+
+private:
+ UVCCameraData *data_;
};
class PipelineHandlerUVC : public PipelineHandler
@@ -61,32 +78,36 @@ class PipelineHandlerUVC : public PipelineHandler
public:
PipelineHandlerUVC(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
+ int processControl(ControlList *controls, unsigned int id,
+ const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
- UVCCameraData *cameraData(const Camera *camera)
+ bool acquireDevice(Camera *camera) override;
+ void releaseDevice(Camera *camera) override;
+
+ UVCCameraData *cameraData(Camera *camera)
{
- return static_cast<UVCCameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<UVCCameraData *>(camera->_d());
}
};
-UVCCameraConfiguration::UVCCameraConfiguration()
- : CameraConfiguration()
+UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
+ : CameraConfiguration(), data_(data)
{
}
@@ -97,6 +118,11 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (config_.empty())
return Invalid;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
+ status = Adjusted;
+ }
+
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
@@ -113,9 +139,8 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (iter == pixelFormats.end()) {
cfg.pixelFormat = pixelFormats.front();
LOG(UVC, Debug)
- << "Adjusting pixel format from "
- << pixelFormat.toString() << " to "
- << cfg.pixelFormat.toString();
+ << "Adjusting pixel format from " << pixelFormat
+ << " to " << cfg.pixelFormat;
status = Adjusted;
}
@@ -130,13 +155,48 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
if (cfg.size != size) {
LOG(UVC, Debug)
- << "Adjusting size from " << size.toString()
- << " to " << cfg.size.toString();
+ << "Adjusting size from " << size << " to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ /*
+ * For power-consumption reasons video_ is closed when the camera is not
+ * acquired. Open it here if necessary.
+ */
+ {
+ bool opened = false;
+
+ MutexLocker locker(data_->openLock_);
+
+ if (!data_->video_->isOpen()) {
+ int ret = data_->video_->open();
+ if (ret)
+ return Invalid;
+
+ opened = true;
+ }
+
+ int ret = data_->video_->tryFormat(&format);
+ if (opened)
+ data_->video_->close();
+ if (ret)
+ return Invalid;
+ }
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+
+ if (cfg.colorSpace != format.colorSpace) {
+ cfg.colorSpace = format.colorSpace;
+ status = Adjusted;
+ }
+
return status;
}
@@ -145,28 +205,18 @@ PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerUVC::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
- CameraConfiguration *config = new UVCCameraConfiguration();
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
- std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
- data->video_->formats();
- std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- std::transform(v4l2Formats.begin(), v4l2Formats.end(),
- std::inserter(deviceFormats, deviceFormats.begin()),
- [&](const decltype(v4l2Formats)::value_type &format) {
- return decltype(deviceFormats)::value_type{
- data->video_->toPixelFormat(format.first),
- format.second
- };
- });
-
- StreamFormats formats(deviceFormats);
+ StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
@@ -186,7 +236,7 @@ int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- V4L2DeviceFormat format = {};
+ V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
@@ -212,7 +262,7 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera, Stream *stream,
return data->video_->exportBuffers(count, buffers);
}
-int PipelineHandlerUVC::start(Camera *camera)
+int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
UVCCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
@@ -230,35 +280,101 @@ int PipelineHandlerUVC::start(Camera *camera)
return 0;
}
-void PipelineHandlerUVC::stop(Camera *camera)
+void PipelineHandlerUVC::stopDevice(Camera *camera)
{
UVCCameraData *data = cameraData(camera);
data->video_->streamOff();
data->video_->releaseBuffers();
}
-int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
+int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
+ const ControlValue &value)
{
- ControlList controls(data->video_->controls());
+ uint32_t cid;
+
+ if (id == controls::Brightness)
+ cid = V4L2_CID_BRIGHTNESS;
+ else if (id == controls::Contrast)
+ cid = V4L2_CID_CONTRAST;
+ else if (id == controls::Saturation)
+ cid = V4L2_CID_SATURATION;
+ else if (id == controls::AeEnable)
+ cid = V4L2_CID_EXPOSURE_AUTO;
+ else if (id == controls::ExposureTime)
+ cid = V4L2_CID_EXPOSURE_ABSOLUTE;
+ else if (id == controls::AnalogueGain)
+ cid = V4L2_CID_GAIN;
+ else
+ return -EINVAL;
+
+ const ControlInfo &v4l2Info = controls->infoMap()->at(cid);
+ int32_t min = v4l2Info.min().get<int32_t>();
+ int32_t def = v4l2Info.def().get<int32_t>();
+ int32_t max = v4l2Info.max().get<int32_t>();
+
+ /*
+ * See UVCCameraData::addControl() for explanations of the different
+ * value mappings.
+ */
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS: {
+ float scale = std::max(max - def, def - min);
+ float fvalue = value.get<float>() * scale + def;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
+
+ case V4L2_CID_SATURATION: {
+ float scale = def - min;
+ float fvalue = value.get<float>() * scale + min;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
- for (auto it : request->controls()) {
- unsigned int id = it.first;
- ControlValue &value = it.second;
-
- if (id == controls::Brightness) {
- controls.set(V4L2_CID_BRIGHTNESS, value);
- } else if (id == controls::Contrast) {
- controls.set(V4L2_CID_CONTRAST, value);
- } else if (id == controls::Saturation) {
- controls.set(V4L2_CID_SATURATION, value);
- } else if (id == controls::ManualExposure) {
- controls.set(V4L2_CID_EXPOSURE_AUTO, static_cast<int32_t>(1));
- controls.set(V4L2_CID_EXPOSURE_ABSOLUTE, value);
- } else if (id == controls::ManualGain) {
- controls.set(V4L2_CID_GAIN, value);
+ case V4L2_CID_EXPOSURE_AUTO: {
+ int32_t ivalue = value.get<bool>()
+ ? V4L2_EXPOSURE_APERTURE_PRIORITY
+ : V4L2_EXPOSURE_MANUAL;
+ controls->set(V4L2_CID_EXPOSURE_AUTO, ivalue);
+ break;
+ }
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ controls->set(cid, value.get<int32_t>() / 100);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_GAIN: {
+ float m = (4.0f - 1.0f) / (max - def);
+ float p = 1.0f - m * def;
+
+ if (m * min + p < 0.5f) {
+ m = (1.0f - 0.5f) / (def - min);
+ p = 1.0f - m * def;
}
+
+ float fvalue = (value.get<float>() - p) / m;
+ controls->set(cid, static_cast<int32_t>(std::lround(fvalue)));
+ break;
+ }
+
+ default: {
+ int32_t ivalue = value.get<int32_t>();
+ controls->set(cid, ivalue);
+ break;
+ }
}
+ return 0;
+}
+
+int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
+{
+ ControlList controls(data->video_->controls());
+
+ for (const auto &[id, value] : request->controls())
+ processControl(&controls, id, value);
+
for (const auto &ctrl : controls)
LOG(UVC, Debug)
<< "Setting control " << utils::hex(ctrl.first)
@@ -306,26 +422,15 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
std::unique_ptr<UVCCameraData> data = std::make_unique<UVCCameraData>(this);
- /* Locate and initialise the camera data with the default video node. */
- const std::vector<MediaEntity *> &entities = media->entities();
- auto entity = std::find_if(entities.begin(), entities.end(),
- [](MediaEntity *entity) {
- return entity->flags() & MEDIA_ENT_FL_DEFAULT;
- });
- if (entity == entities.end()) {
- LOG(UVC, Error) << "Could not find a default video device";
+ if (data->init(media))
return false;
- }
-
- if (data->init(*entity))
- return false;
-
- dev_t devnum = makedev((*entity)->deviceMajor(), (*entity)->deviceMinor());
/* Create and register the camera. */
+ std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
- std::shared_ptr<Camera> camera = Camera::create(this, media->model(), streams);
- registerCamera(std::move(camera), std::move(data), devnum);
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
/* Enable hot-unplug notifications. */
hotplugMediaDevice(media);
@@ -333,62 +438,327 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
return true;
}
-int UVCCameraData::init(MediaEntity *entity)
+bool PipelineHandlerUVC::acquireDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+
+ return data->video_->open() == 0;
+}
+
+void PipelineHandlerUVC::releaseDevice(Camera *camera)
+{
+ UVCCameraData *data = cameraData(camera);
+
+ MutexLocker locker(data->openLock_);
+ data->video_->close();
+}
+
+int UVCCameraData::init(MediaDevice *media)
{
int ret;
+ /* Locate and initialise the camera data with the default video node. */
+ const std::vector<MediaEntity *> &entities = media->entities();
+ auto entity = std::find_if(entities.begin(), entities.end(),
+ [](MediaEntity *e) {
+ return e->flags() & MEDIA_ENT_FL_DEFAULT;
+ });
+ if (entity == entities.end()) {
+ LOG(UVC, Error) << "Could not find a default video device";
+ return -ENODEV;
+ }
+
/* Create and open the video device. */
- video_ = new V4L2VideoDevice(entity);
+ video_ = std::make_unique<V4L2VideoDevice>(*entity);
ret = video_->open();
if (ret)
return ret;
- video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
+ video_->bufferReady.connect(this, &UVCCameraData::imageBufferReady);
+
+ /* Generate the camera ID. */
+ if (!generateId()) {
+ LOG(UVC, Error) << "Failed to generate camera ID";
+ return -EINVAL;
+ }
+
+ /*
+ * Populate the map of supported formats, and infer the camera sensor
+ * resolution from the largest size it advertises.
+ */
+ Size resolution;
+ for (const auto &format : video_->formats()) {
+ PixelFormat pixelFormat = format.first.toPixelFormat();
+ if (!pixelFormat.isValid())
+ continue;
+
+ formats_[pixelFormat] = format.second;
+
+ const std::vector<SizeRange> &sizeRanges = format.second;
+ for (const SizeRange &sizeRange : sizeRanges) {
+ if (sizeRange.max > resolution)
+ resolution = sizeRange.max;
+ }
+ }
+
+ if (formats_.empty()) {
+ LOG(UVC, Error)
+ << "Camera " << id_ << " (" << media->model()
+ << ") doesn't expose any supported format";
+ return -EINVAL;
+ }
+
+ /* Populate the camera properties. */
+ properties_.set(properties::Model, utils::toAscii(media->model()));
+
+ /*
+ * Derive the location from the device removable attribute in sysfs.
+ * Non-removable devices are assumed to be front as we lack detailed
+ * location information, and removable devices are considered external.
+ *
+ * The sysfs removable attribute is derived from the ACPI _UPC attribute
+ * if available, or from the USB hub descriptors otherwise. ACPI data
+ * may not be very reliable, and the USB hub descriptors may not be
+ * accurate on DT-based platforms. A heuristic may need to be
+ * implemented later if too many devices end up being miscategorized.
+ *
+ * \todo Find a way to tell front and back devices apart. This could
+ * come from the ACPI _PLD, but that may be even more unreliable than
+ * the _UPC.
+ */
+ properties::LocationEnum location = properties::CameraLocationExternal;
+ std::ifstream file(video_->devicePath() + "/../removable");
+ if (file.is_open()) {
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (value == "fixed")
+ location = properties::CameraLocationFront;
+ }
+
+ properties_.set(properties::Location, location);
+
+ properties_.set(properties::PixelArraySize, resolution);
+ properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
/* Initialise the supported controls. */
- const ControlInfoMap &controls = video_->controls();
ControlInfoMap::Map ctrls;
- for (const auto &ctrl : controls) {
+ for (const auto &ctrl : video_->controls()) {
+ uint32_t cid = ctrl.first->id();
const ControlInfo &info = ctrl.second;
- const ControlId *id;
- switch (ctrl.first->id()) {
- case V4L2_CID_BRIGHTNESS:
- id = &controls::Brightness;
- break;
- case V4L2_CID_CONTRAST:
- id = &controls::Contrast;
- break;
- case V4L2_CID_SATURATION:
- id = &controls::Saturation;
- break;
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- id = &controls::ManualExposure;
- break;
- case V4L2_CID_GAIN:
- id = &controls::ManualGain;
+ addControl(cid, info, &ctrls);
+ }
+
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+
+ /*
+ * Close to allow camera to go into runtime-suspend, video_ will be
+ * re-opened from acquireDevice() and validate().
+ */
+ video_->close();
+
+ return 0;
+}
+
+bool UVCCameraData::generateId()
+{
+ const std::string path = video_->devicePath();
+
+ /* Create a controller ID from the first device described in firmware. */
+ std::string controllerId;
+ std::string searchPath = path;
+ while (true) {
+ std::string::size_type pos = searchPath.rfind('/');
+ if (pos <= 1) {
+ LOG(UVC, Error) << "Can not find controller ID";
+ return false;
+ }
+
+ searchPath = searchPath.substr(0, pos);
+
+ controllerId = sysfs::firmwareNodePath(searchPath);
+ if (!controllerId.empty())
break;
- default:
- continue;
+ }
+
+ /*
+ * Create a USB ID from the device path which has the known format:
+ *
+ * path = bus, "-", ports, ":", config, ".", interface ;
+ * bus = number ;
+ * ports = port, [ ".", ports ] ;
+ * port = number ;
+ * config = number ;
+ * interface = number ;
+ *
+ * Example: 3-2.4:1.0
+ *
+ * The bus is not guaranteed to be stable and needs to be stripped from
+ * the USB ID. The final USB ID is built up of the ports, config and
+ * interface properties.
+ *
+ * Example: 2.4:1.0
+ */
+ std::string usbId = utils::basename(path.c_str());
+ usbId = usbId.substr(usbId.find('-') + 1);
+
+ /* Create a device ID from the USB device's vendor and product ID. */
+ std::string deviceId;
+ for (const char *name : { "idVendor", "idProduct" }) {
+ std::ifstream file(path + "/../" + name);
+
+ if (!file.is_open())
+ return false;
+
+ std::string value;
+ std::getline(file, value);
+ file.close();
+
+ if (!deviceId.empty())
+ deviceId += ":";
+
+ deviceId += value;
+ }
+
+ id_ = controllerId + "-" + usbId + "-" + deviceId;
+ return true;
+}
+
+void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
+ ControlInfoMap::Map *ctrls)
+{
+ const ControlId *id;
+ ControlInfo info;
+
+ /* Map the control ID. */
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS:
+ id = &controls::Brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ id = &controls::Contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ id = &controls::Saturation;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ id = &controls::AeEnable;
+ break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ id = &controls::ExposureTime;
+ break;
+ case V4L2_CID_GAIN:
+ id = &controls::AnalogueGain;
+ break;
+ default:
+ return;
+ }
+
+ /* Map the control info. */
+ int32_t min = v4l2Info.min().get<int32_t>();
+ int32_t max = v4l2Info.max().get<int32_t>();
+ int32_t def = v4l2Info.def().get<int32_t>();
+
+ switch (cid) {
+ case V4L2_CID_BRIGHTNESS: {
+ /*
+ * The Brightness control is a float, with 0.0 mapped to the
+ * default value. The control range is [-1.0, 1.0], but the V4L2
+ * default may not be in the middle of the V4L2 range.
+ * Accommodate this by restricting the range of the libcamera
+ * control, but always within the maximum limits.
+ */
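+ /*
+ * For instance (hypothetical values), min=0, def=192 and max=255
+ * give scale = 192, so the reported range becomes [-1.0, ~0.33]
+ * instead of the full [-1.0, 1.0].
+ */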
+ float scale = std::max(max - def, def - min);
+
+ info = ControlInfo{
+ { static_cast<float>(min - def) / scale },
+ { static_cast<float>(max - def) / scale },
+ { 0.0f }
+ };
+ break;
+ }
+
+ case V4L2_CID_SATURATION:
+ /*
+ * The Saturation control is a float, with 0.0 mapped to the
+ * minimum value (corresponding to a fully desaturated image)
+ * and 1.0 mapped to the default value. Calculate the maximum
+ * value accordingly.
+ */
+ info = ControlInfo{
+ { 0.0f },
+ { static_cast<float>(max - min) / (def - min) },
+ { 1.0f }
+ };
+ break;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ info = ControlInfo{ false, true, true };
+ break;
+
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ /*
+ * ExposureTime is in units of 1 µs, and UVC expects
+ * V4L2_CID_EXPOSURE_ABSOLUTE in units of 100 µs.
+ */
+ info = ControlInfo{
+ { min * 100 },
+ { max * 100 },
+ { def * 100 }
+ };
+ break;
+
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_GAIN: {
+ /*
+ * The Contrast and AnalogueGain controls are floats, with 1.0
+ * mapped to the default value. UVC doesn't specify units, and
+ * cameras have been seen to expose very different ranges for
+ * the controls. Arbitrarily assume that the minimum and
+ * maximum values are respectively no lower than 0.5 and no
+ * higher than 4.0.
+ */
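+ /*
+ * For instance (hypothetical values), min=0, def=32 and max=255
+ * give m = 3/223 and p ~= 0.57, so m * min + p >= 0.5, the initial
+ * mapping is kept, and the control covers roughly [0.57, 4.0].
+ */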
+ float m = (4.0f - 1.0f) / (max - def);
+ float p = 1.0f - m * def;
+
+ if (m * min + p < 0.5f) {
+ m = (1.0f - 0.5f) / (def - min);
+ p = 1.0f - m * def;
}
- ctrls.emplace(id, info);
+ info = ControlInfo{
+ { m * min + p },
+ { m * max + p },
+ { 1.0f }
+ };
+ break;
}
- controlInfo_ = std::move(ctrls);
+ default:
+ info = v4l2Info;
+ break;
+ }
- return 0;
+ ctrls->emplace(id, info);
}
-void UVCCameraData::bufferReady(FrameBuffer *buffer)
+void UVCCameraData::imageBufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
- pipe_->completeBuffer(camera_, request, buffer);
- pipe_->completeRequest(camera_, request);
+ /* \todo Use the UVC metadata to calculate a more precise timestamp */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe()->completeBuffer(request, buffer);
+ pipe()->completeRequest(request);
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/vimc/meson.build b/src/libcamera/pipeline/vimc/meson.build
index 615ecd20..868e2546 100644
--- a/src/libcamera/pipeline/vimc/meson.build
+++ b/src/libcamera/pipeline/vimc/meson.build
@@ -1,3 +1,5 @@
-libcamera_sources += files([
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
'vimc.cpp',
])
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index b04a9726..07273bd2 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -2,74 +2,84 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * vimc.cpp - Pipeline handler for the vimc device
+ * Pipeline handler for the vimc device
*/
#include <algorithm>
-#include <array>
+#include <cmath>
#include <iomanip>
+#include <map>
#include <tuple>
#include <linux/media-bus-format.h>
+#include <linux/version.h>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "camera_sensor.h"
-#include "device_enumerator.h"
-#include "ipa_manager.h"
-#include "log.h"
-#include "media_device.h"
-#include "pipeline_handler.h"
-#include "utils.h"
-#include "v4l2_controls.h"
-#include "v4l2_subdevice.h"
-#include "v4l2_videodevice.h"
+#include <libcamera/ipa/ipa_interface.h>
+#include <libcamera/ipa/ipa_module_info.h>
+#include <libcamera/ipa/vimc_ipa_interface.h>
+#include <libcamera/ipa/vimc_ipa_proxy.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(VIMC)
-class VimcCameraData : public CameraData
+class VimcCameraData : public Camera::Private
{
public:
- VimcCameraData(PipelineHandler *pipe)
- : CameraData(pipe), sensor_(nullptr), debayer_(nullptr),
- scaler_(nullptr), video_(nullptr), raw_(nullptr)
- {
- }
-
- ~VimcCameraData()
+ VimcCameraData(PipelineHandler *pipe, MediaDevice *media)
+ : Camera::Private(pipe), media_(media)
{
- delete sensor_;
- delete debayer_;
- delete scaler_;
- delete video_;
- delete raw_;
}
- int init(MediaDevice *media);
- void bufferReady(FrameBuffer *buffer);
-
- CameraSensor *sensor_;
- V4L2Subdevice *debayer_;
- V4L2Subdevice *scaler_;
- V4L2VideoDevice *video_;
- V4L2VideoDevice *raw_;
+ int init();
+ int allocateMockIPABuffers();
+ void imageBufferReady(FrameBuffer *buffer);
+ void paramsComputed(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
+
+ MediaDevice *media_;
+ std::unique_ptr<CameraSensor> sensor_;
+ std::unique_ptr<V4L2Subdevice> debayer_;
+ std::unique_ptr<V4L2Subdevice> scaler_;
+ std::unique_ptr<V4L2VideoDevice> video_;
+ std::unique_ptr<V4L2VideoDevice> raw_;
Stream stream_;
+
+ std::unique_ptr<ipa::vimc::IPAProxyVimc> ipa_;
+ std::vector<std::unique_ptr<FrameBuffer>> mockIPABufs_;
};
class VimcCameraConfiguration : public CameraConfiguration
{
public:
- VimcCameraConfiguration();
+ VimcCameraConfiguration(VimcCameraData *data);
Status validate() override;
+
+private:
+ VimcCameraData *data_;
};
class PipelineHandlerVimc : public PipelineHandler
@@ -77,15 +87,15 @@ class PipelineHandlerVimc : public PipelineHandler
public:
PipelineHandlerVimc(CameraManager *manager);
- CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) override;
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
- int start(Camera *camera) override;
- void stop(Camera *camera) override;
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
@@ -94,25 +104,26 @@ public:
private:
int processControls(VimcCameraData *data, Request *request);
- VimcCameraData *cameraData(const Camera *camera)
+ VimcCameraData *cameraData(Camera *camera)
{
- return static_cast<VimcCameraData *>(
- PipelineHandler::cameraData(camera));
+ return static_cast<VimcCameraData *>(camera->_d());
}
};
namespace {
-static const std::array<PixelFormat, 3> pixelformats{
- PixelFormat(DRM_FORMAT_RGB888),
- PixelFormat(DRM_FORMAT_BGR888),
- PixelFormat(DRM_FORMAT_BGRA8888),
+static const std::map<PixelFormat, uint32_t> pixelformats{
+ { formats::RGB888, MEDIA_BUS_FMT_BGR888_1X24 },
+ { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
};
+static constexpr Size kMinSize{ 16, 16 };
+static constexpr Size kMaxSize{ 4096, 2160 };
+
} /* namespace */
-VimcCameraConfiguration::VimcCameraConfiguration()
- : CameraConfiguration()
+VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data)
+ : CameraConfiguration(), data_(data)
{
}
@@ -123,6 +134,11 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
if (config_.empty())
return Invalid;
+ if (orientation != Orientation::Rotate0) {
+ orientation = Orientation::Rotate0;
+ status = Adjusted;
+ }
+
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
@@ -132,30 +148,51 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
StreamConfiguration &cfg = config_[0];
/* Adjust the pixel format. */
- if (std::find(pixelformats.begin(), pixelformats.end(), cfg.pixelFormat) ==
- pixelformats.end()) {
- LOG(VIMC, Debug) << "Adjusting format to RGB24";
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_BGR888);
+ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+ LOG(VIMC, Debug) << "Adjusting format to BGR888";
+ cfg.pixelFormat = formats::BGR888;
status = Adjusted;
}
/* Clamp the size based on the device limits. */
const Size size = cfg.size;
- /* The scaler hardcodes a x3 scale-up ratio. */
- cfg.size.width = std::max(48U, std::min(4096U, cfg.size.width));
- cfg.size.height = std::max(48U, std::min(2160U, cfg.size.height));
- cfg.size.width -= cfg.size.width % 3;
- cfg.size.height -= cfg.size.height % 3;
+ /*
+ * The sensor output size is aligned to two pixels in both directions.
+ * Additionally, prior to v5.16, the scaler hardcodes a x3 scale-up
+ * ratio, requiring the output width and height to be multiples of 6.
+ */
+ Size minSize{ kMinSize };
+ unsigned int alignment = 2;
+
+ if (data_->media_->version() < KERNEL_VERSION(5, 16, 0)) {
+ minSize *= 3;
+ alignment *= 3;
+ }
+
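+ /*
+ * On kernels older than v5.16 this yields a 48x48 minimum size
+ * and a 6-pixel alignment.
+ */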
+ cfg.size.expandTo(minSize).boundTo(kMaxSize)
+ .alignDownTo(alignment, alignment);
if (cfg.size != size) {
LOG(VIMC, Debug)
- << "Adjusting size to " << cfg.size.toString();
+ << "Adjusting size to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
+ V4L2DeviceFormat format;
+ format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ int ret = data_->video_->tryFormat(&format);
+ if (ret)
+ return Invalid;
+
+ cfg.stride = format.planes[0].bpl;
+ cfg.frameSize = format.planes[0].size;
+
return status;
}
@@ -164,27 +201,45 @@ PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
{
}
-CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
- const StreamRoles &roles)
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVimc::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
{
- CameraConfiguration *config = new VimcCameraConfiguration();
+ VimcCameraData *data = cameraData(camera);
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
std::map<PixelFormat, std::vector<SizeRange>> formats;
- for (PixelFormat pixelformat : pixelformats) {
- /* The scaler hardcodes a x3 scale-up ratio. */
- std::vector<SizeRange> sizes{
- SizeRange{ { 48, 48 }, { 4096, 2160 } }
- };
- formats[pixelformat] = sizes;
+ for (const auto &pixelformat : pixelformats) {
+ /*
+ * Kernels prior to v5.7 incorrectly report support for RGB888,
+ * but it isn't functional within the pipeline.
+ */
+ if (data->media_->version() < KERNEL_VERSION(5, 7, 0)) {
+ if (pixelformat.first != formats::BGR888) {
+ LOG(VIMC, Info)
+ << "Skipping unsupported pixel format "
+ << pixelformat.first;
+ continue;
+ }
+ }
+
+ /* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. */
+ Size minSize{ kMinSize };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ minSize *= 3;
+
+ std::vector<SizeRange> sizes{ { minSize, kMaxSize } };
+ formats[pixelformat.first] = sizes;
}
StreamConfiguration cfg(formats);
- cfg.pixelFormat = PixelFormat(DRM_FORMAT_BGR888);
+ cfg.pixelFormat = formats::BGR888;
cfg.size = { 1920, 1080 };
cfg.bufferCount = 4;
@@ -201,10 +256,18 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
StreamConfiguration &cfg = config->at(0);
int ret;
- /* The scaler hardcodes a x3 scale-up ratio. */
+ /*
+ * Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. For newer
+ * kernels, use a sensor resolution of 1920x1080 and let the scaler
+ * produce the requested stream size.
+ */
+ Size sensorSize{ 1920, 1080 };
+ if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
+ sensorSize = { cfg.size.width / 3, cfg.size.height / 3 };
+
V4L2SubdeviceFormat subformat = {};
- subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8;
- subformat.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ subformat.size = sensorSize;
ret = data->sensor_->setFormat(&subformat);
if (ret)
@@ -214,7 +277,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
- subformat.mbus_code = MEDIA_BUS_FMT_RGB888_1X24;
+ subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
@@ -223,12 +286,19 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
if (ret)
return ret;
+ if (data->media_->version() >= KERNEL_VERSION(5, 6, 0)) {
+ Rectangle crop{ 0, 0, subformat.size };
+ ret = data->scaler_->setSelection(0, V4L2_SEL_TGT_CROP, &crop);
+ if (ret)
+ return ret;
+ }
+
subformat.size = cfg.size;
ret = data->scaler_->setFormat(1, &subformat);
if (ret)
return ret;
- V4L2DeviceFormat format = {};
+ V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
@@ -245,7 +315,7 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
* vimc driver will fail pipeline validation.
*/
format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8);
- format.size = { cfg.size.width / 3, cfg.size.height / 3 };
+ format.size = sensorSize;
ret = data->raw_->setFormat(&format);
if (ret)
@@ -253,6 +323,22 @@ int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
cfg.setStream(&data->stream_);
+ if (data->ipa_) {
+ /* Inform IPA of stream configuration and sensor controls. */
+ std::map<unsigned int, IPAStream> streamConfig;
+ streamConfig.emplace(std::piecewise_construct,
+ std::forward_as_tuple(0),
+ std::forward_as_tuple(cfg.pixelFormat, cfg.size));
+
+ std::map<unsigned int, ControlInfoMap> entityControls;
+ entityControls.emplace(0, data->sensor_->controls());
+
+ IPACameraSensorInfo sensorInfo;
+ data->sensor_->sensorInfo(&sensorInfo);
+
+ data->ipa_->configure(sensorInfo, streamConfig, entityControls);
+ }
+
return 0;
}
@@ -265,7 +351,7 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera, Stream *stream,
return data->video_->exportBuffers(count, buffers);
}
-int PipelineHandlerVimc::start(Camera *camera)
+int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
VimcCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
@@ -274,8 +360,23 @@ int PipelineHandlerVimc::start(Camera *camera)
if (ret < 0)
return ret;
+ /* Map the mock IPA buffers to VIMC IPA to exercise IPC code paths. */
+ std::vector<IPABuffer> ipaBuffers;
+ for (auto [i, buffer] : utils::enumerate(data->mockIPABufs_)) {
+ buffer->setCookie(i + 1);
+ ipaBuffers.emplace_back(buffer->cookie(), buffer->planes());
+ }
+ data->ipa_->mapBuffers(ipaBuffers);
+
+ ret = data->ipa_->start();
+ if (ret) {
+ data->video_->releaseBuffers();
+ return ret;
+ }
+
ret = data->video_->streamOn();
if (ret < 0) {
+ data->ipa_->stop();
data->video_->releaseBuffers();
return ret;
}
@@ -283,10 +384,17 @@ int PipelineHandlerVimc::start(Camera *camera)
return 0;
}
-void PipelineHandlerVimc::stop(Camera *camera)
+void PipelineHandlerVimc::stopDevice(Camera *camera)
{
VimcCameraData *data = cameraData(camera);
data->video_->streamOff();
+
+ std::vector<unsigned int> ids;
+ for (const std::unique_ptr<FrameBuffer> &buffer : data->mockIPABufs_)
+ ids.push_back(buffer->cookie());
+ data->ipa_->unmapBuffers(ids);
+ data->ipa_->stop();
+
data->video_->releaseBuffers();
}
@@ -294,16 +402,26 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
- for (auto it : request->controls()) {
+ for (const auto &it : request->controls()) {
unsigned int id = it.first;
- ControlValue &value = it.second;
-
- if (id == controls::Brightness)
- controls.set(V4L2_CID_BRIGHTNESS, value);
- else if (id == controls::Contrast)
- controls.set(V4L2_CID_CONTRAST, value);
- else if (id == controls::Saturation)
- controls.set(V4L2_CID_SATURATION, value);
+ unsigned int offset;
+ uint32_t cid;
+
+ if (id == controls::Brightness) {
+ cid = V4L2_CID_BRIGHTNESS;
+ offset = 128;
+ } else if (id == controls::Contrast) {
+ cid = V4L2_CID_CONTRAST;
+ offset = 0;
+ } else if (id == controls::Saturation) {
+ cid = V4L2_CID_SATURATION;
+ offset = 0;
+ } else {
+ continue;
+ }
+
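+ /*
+ * Scale the float control value (e.g. Brightness in [-1.0, 1.0],
+ * Contrast and Saturation in [0.0, 2.0]) to the V4L2 [0, 255]
+ * range.
+ */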
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
+ controls.set(cid, std::clamp(value, 0, 255));
}
for (const auto &ctrl : controls)
@@ -339,6 +457,8 @@ int PipelineHandlerVimc::queueRequestDevice(Camera *camera, Request *request)
if (ret < 0)
return ret;
+ data->ipa_->queueRequest(request->sequence(), request->controls());
+
return 0;
}
@@ -360,36 +480,50 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
if (!media)
return false;
- std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this);
-
- data->ipa_ = IPAManager::instance()->createIPA(this, 0, 0);
- if (data->ipa_ == nullptr)
- LOG(VIMC, Warning) << "no matching IPA found";
- else
- data->ipa_->init();
+ std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this, media);
/* Locate and open the capture video node. */
- if (data->init(media))
+ if (data->init())
return false;
+ data->ipa_ = IPAManager::createIPA<ipa::vimc::IPAProxyVimc>(this, 0, 0);
+ if (!data->ipa_) {
+ LOG(VIMC, Error) << "no matching IPA found";
+ return false;
+ }
+
+ data->ipa_->paramsComputed.connect(data.get(), &VimcCameraData::paramsComputed);
+
+ std::string conf = data->ipa_->configurationFile("vimc.conf");
+ Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
+ Flags<ipa::vimc::TestFlag> outFlags;
+ data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
+ ipa::vimc::IPAOperationInit, inFlags, &outFlags);
+
+ LOG(VIMC, Debug)
+ << "Flag 1 was "
+ << (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
+ << "set";
+
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
- std::shared_ptr<Camera> camera = Camera::create(this, "VIMC Sensor B",
- streams);
- registerCamera(std::move(camera), std::move(data));
+ const std::string &id = data->sensor_->id();
+ std::shared_ptr<Camera> camera =
+ Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
return true;
}
-int VimcCameraData::init(MediaDevice *media)
+int VimcCameraData::init()
{
int ret;
- ret = media->disableLinks();
+ ret = media_->disableLinks();
if (ret < 0)
return ret;
- MediaLink *link = media->link("Debayer B", 1, "Scaler", 0);
+ MediaLink *link = media_->link("Debayer B", 1, "Scaler", 0);
if (!link)
return -ENODEV;
@@ -398,46 +532,54 @@ int VimcCameraData::init(MediaDevice *media)
return ret;
/* Create and open the camera sensor, debayer, scaler and video device. */
- sensor_ = new CameraSensor(media->getEntityByName("Sensor B"));
- ret = sensor_->init();
- if (ret)
- return ret;
+ sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor B"));
+ if (!sensor_)
+ return -ENODEV;
- debayer_ = new V4L2Subdevice(media->getEntityByName("Debayer B"));
+ debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
if (debayer_->open())
return -ENODEV;
- scaler_ = new V4L2Subdevice(media->getEntityByName("Scaler"));
+ scaler_ = V4L2Subdevice::fromEntityName(media_, "Scaler");
if (scaler_->open())
return -ENODEV;
- video_ = new V4L2VideoDevice(media->getEntityByName("RGB/YUV Capture"));
+ video_ = V4L2VideoDevice::fromEntityName(media_, "RGB/YUV Capture");
if (video_->open())
return -ENODEV;
- video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
+ video_->bufferReady.connect(this, &VimcCameraData::imageBufferReady);
- raw_ = new V4L2VideoDevice(media->getEntityByName("Raw Capture 1"));
+ raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
if (raw_->open())
return -ENODEV;
+ ret = allocateMockIPABuffers();
+ if (ret < 0) {
+ LOG(VIMC, Warning) << "Cannot allocate mock IPA buffers";
+ return ret;
+ }
+
/* Initialise the supported controls. */
const ControlInfoMap &controls = sensor_->controls();
ControlInfoMap::Map ctrls;
for (const auto &ctrl : controls) {
- const ControlInfo &info = ctrl.second;
const ControlId *id;
+ ControlInfo info;
switch (ctrl.first->id()) {
case V4L2_CID_BRIGHTNESS:
id = &controls::Brightness;
+ info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
break;
case V4L2_CID_CONTRAST:
id = &controls::Contrast;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
case V4L2_CID_SATURATION:
id = &controls::Saturation;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
default:
continue;
@@ -446,7 +588,7 @@ int VimcCameraData::init(MediaDevice *media)
ctrls.emplace(id, info);
}
- controlInfo_ = std::move(ctrls);
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
/* Initialize the camera properties. */
properties_ = sensor_->properties();
@@ -454,14 +596,54 @@ int VimcCameraData::init(MediaDevice *media)
return 0;
}
-void VimcCameraData::bufferReady(FrameBuffer *buffer)
+void VimcCameraData::imageBufferReady(FrameBuffer *buffer)
{
+ PipelineHandlerVimc *pipe =
+ static_cast<PipelineHandlerVimc *>(this->pipe());
Request *request = buffer->request();
- pipe_->completeBuffer(camera_, request, buffer);
- pipe_->completeRequest(camera_, request);
+ /* If the buffer is cancelled, force completion of the whole request. */
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
+ for (auto it : request->buffers()) {
+ FrameBuffer *b = it.second;
+ b->_d()->cancel();
+ pipe->completeBuffer(request, b);
+ }
+
+ pipe->completeRequest(request);
+ return;
+ }
+
+ /* Record the sensor's timestamp in the request metadata. */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe->completeBuffer(request, buffer);
+ pipe->completeRequest(request);
+
+ ipa_->computeParams(request->sequence(), mockIPABufs_[0]->cookie());
+}
+
+int VimcCameraData::allocateMockIPABuffers()
+{
+ constexpr unsigned int kBufCount = 2;
+
+ V4L2DeviceFormat format;
+ format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
+ format.size = Size(160, 120);
+
+ int ret = video_->setFormat(&format);
+ if (ret < 0)
+ return ret;
+
+ return video_->exportBuffers(kBufCount, &mockIPABufs_);
+}
+
+void VimcCameraData::paramsComputed([[maybe_unused]] unsigned int id,
+ [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
+{
}
-REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc);
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/README.md b/src/libcamera/pipeline/virtual/README.md
new file mode 100644
index 00000000..a9f39c15
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/README.md
@@ -0,0 +1,65 @@
+# Virtual Pipeline Handler
+
+The virtual pipeline handler emulates fake external cameras for testing.
+
+## Parse config file and register cameras
+
+- A sample config file is located at `src/libcamera/pipeline/virtual/data/virtual.yaml`.
+- If libcamera is installed, the config file should be installed at
+ `share/libcamera/pipeline/virtual/virtual.yaml`.
+
+### Config File Format
+The config file describes the properties of the cameras to register. It is a
+YAML file whose top level is a dictionary mapping camera IDs to their
+properties. A default value is applied whenever a property is omitted.
+
+Each camera block is a dictionary, containing the following keys:
+- `supported_formats` (list of `VirtualCameraData::Resolution`, optional):
+ List of supported resolutions and frame rates of the emulated camera
+ - `width` (`unsigned int`, default=1920): Width of the window resolution.
+ This needs to be even.
+ - `height` (`unsigned int`, default=1080): Height of the window resolution.
+ - `frame_rates` (list of `int`, default=`[30,60]`): Range of the frame
+ rate (frames per second). A single value is used as both the lower and
+ the upper bound; with two values, the first is the lower bound and the
+ second is the upper bound. No other number of values is allowed.
+- `test_pattern` (`string`): Which test pattern to use as frames. The options
+ are "bars" (color bars) and "lines" (diagonal lines). Cannot be set together
+ with `frames`.
+- `frames` (dictionary):
+ - `path` (`string`): Path to an image, or path to a directory of a series of
+ images. Cannot be set together with `test_pattern`.
+ - The path to an image has a ".jpg" extension.
+ - The path to a directory ends with "/". The images in the directory are
+ named "{n}.jpg", where {n} is the image sequence number starting at 0.
+- `location` (`string`, default="CameraLocationFront"): The location of the
+ camera. Supported values are "CameraLocationFront", "CameraLocationBack",
+ and "CameraLocationExternal".
+- `model` (`string`, default="Unknown"): The model name of the camera.
+
+See `data/virtual.yaml` for a sample config file.
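+
+For illustration, a minimal config using the keys above could look as follows
+(the camera IDs and property values here are hypothetical):
+
+```yaml
+"Virtual0":
+  supported_formats:
+    - width: 1920
+      height: 1080
+      frame_rates: [30, 60]
+  test_pattern: "bars"
+  location: "CameraLocationFront"
+  model: "Virtual Camera 0"
+"Virtual1":
+  frames:
+    path: "frames/"
+  location: "CameraLocationExternal"
+  model: "Virtual Camera 1"
+```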
+
+### Implementation
+
+The `ConfigParser` class provides methods to parse the config file and build
+the camera configurations that the Virtual Pipeline Handler registers.
+`parseConfigFile()` is the method exposed for use by the pipeline handler.
+
+The `ConfigParser` proceeds as follows (a usage sketch follows the list):
+1. `parseConfigFile()` parses the config file to a `YamlObject` using `YamlParser::parse()`.
+ - The top level of the config file, a dictionary of camera IDs, is parsed,
+ and the properties of each camera are examined.
+2. For each camera, `parseCameraConfigData()` returns a `VirtualCameraData`
+ instance holding the configuration.
+ - The methods in the next step fill in the data through a pointer to this
+ instance.
+ - If the config file contains an invalid configuration, this method returns
+ nullptr and the camera is skipped.
+3. Parse each property and register the data.
+ - `parseSupportedFormats()`: Parses `supported_formats` in the config, which
+ contains resolutions and frame rates.
+ - `parseFrameGenerator()`: Parses `test_pattern` or `frames` in the config.
+ - `parseLocation()`: Parses `location` in the config.
+ - `parseModel()`: Parses `model` in the config.
+4. Control returns to `parseConfigFile()`, which appends the camera
+ configuration to the list.
+5. The list of camera configurations is returned.
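+
+As a rough usage sketch (the `ConfigParser` API is the one declared in
+`config_parser.h`; the surrounding pipeline-handler code is hypothetical):
+
+```cpp
+/* Hypothetical caller inside the Virtual Pipeline Handler. */
+File file(configFilePath);
+if (!file.open(File::OpenModeFlag::ReadOnly))
+	return false;
+
+ConfigParser parser;
+std::vector<std::unique_ptr<VirtualCameraData>> configurations =
+	parser.parseConfigFile(file, this);
+
+/* Each VirtualCameraData is then used to create and register a Camera. */
+```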
diff --git a/src/libcamera/pipeline/virtual/config_parser.cpp b/src/libcamera/pipeline/virtual/config_parser.cpp
new file mode 100644
index 00000000..0cbfe39b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.cpp
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#include "config_parser.h"
+
+#include <string.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::vector<std::unique_ptr<VirtualCameraData>>
+ConfigParser::parseConfigFile(File &file, PipelineHandler *pipe)
+{
+ std::vector<std::unique_ptr<VirtualCameraData>> configurations;
+
+ std::unique_ptr<YamlObject> cameras = YamlParser::parse(file);
+ if (!cameras) {
+ LOG(Virtual, Error) << "Failed to pass config file.";
+ return configurations;
+ }
+
+ if (!cameras->isDictionary()) {
+ LOG(Virtual, Error) << "Config file is not a dictionary at the top level.";
+ return configurations;
+ }
+
+ /* Look into the configuration of each camera */
+ for (const auto &[cameraId, cameraConfigData] : cameras->asDict()) {
+ std::unique_ptr<VirtualCameraData> data =
+ parseCameraConfigData(cameraConfigData, pipe);
+ if (!data) {
+ /* Skip the camera if its config is invalid */
+ LOG(Virtual, Error) << "Failed to parse config of the camera: "
+ << cameraId;
+ continue;
+ }
+
+ data->config_.id = cameraId;
+ ControlInfoMap::Map controls;
+ /* \todo Check which resolution's frame rate should be reported */
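+ /*
+ * E.g. the default frame rates of [30, 60] fps map to frame
+ * duration limits of [16666, 33333] µs.
+ */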
+ controls[&controls::FrameDurationLimits] =
+ ControlInfo(1000000 / data->config_.resolutions[0].frameRates[1],
+ 1000000 / data->config_.resolutions[0].frameRates[0]);
+
+ std::vector<ControlValue> supportedFaceDetectModes{
+ static_cast<int32_t>(controls::draft::FaceDetectModeOff),
+ };
+ controls[&controls::draft::FaceDetectMode] = ControlInfo(supportedFaceDetectModes);
+
+ data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+ configurations.push_back(std::move(data));
+ }
+
+ return configurations;
+}
+
+std::unique_ptr<VirtualCameraData>
+ConfigParser::parseCameraConfigData(const YamlObject &cameraConfigData,
+ PipelineHandler *pipe)
+{
+ std::vector<VirtualCameraData::Resolution> resolutions;
+ if (parseSupportedFormats(cameraConfigData, &resolutions))
+ return nullptr;
+
+ std::unique_ptr<VirtualCameraData> data =
+ std::make_unique<VirtualCameraData>(pipe, resolutions);
+
+ if (parseFrameGenerator(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseLocation(cameraConfigData, data.get()))
+ return nullptr;
+
+ if (parseModel(cameraConfigData, data.get()))
+ return nullptr;
+
+ return data;
+}
+
+int ConfigParser::parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions)
+{
+ if (cameraConfigData.contains("supported_formats")) {
+ const YamlObject &supportedResolutions = cameraConfigData["supported_formats"];
+
+ for (const YamlObject &supportedResolution : supportedResolutions.asList()) {
+ unsigned int width = supportedResolution["width"].get<unsigned int>(1920);
+ unsigned int height = supportedResolution["height"].get<unsigned int>(1080);
+ if (width == 0 || height == 0) {
+ LOG(Virtual, Error) << "Invalid width or/and height";
+ return -EINVAL;
+ }
+ if (width % 2 != 0) {
+ LOG(Virtual, Error) << "Invalid width: width needs to be even";
+ return -EINVAL;
+ }
+
+ std::vector<int64_t> frameRates;
+ if (supportedResolution.contains("frame_rates")) {
+ auto frameRatesList =
+ supportedResolution["frame_rates"].getList<int>();
+ if (!frameRatesList || (frameRatesList->size() != 1 &&
+ frameRatesList->size() != 2)) {
+ LOG(Virtual, Error) << "Invalid frame_rates: either one or two values";
+ return -EINVAL;
+ }
+
+ if (frameRatesList->size() == 2 &&
+ frameRatesList.value()[0] > frameRatesList.value()[1]) {
+ LOG(Virtual, Error) << "frame_rates's first value(lower bound)"
+ << " is higher than the second value(upper bound)";
+ return -EINVAL;
+ }
+ /*
+ * Push the min and max framerates. A
+ * single rate is duplicated.
+ */
+ frameRates.push_back(frameRatesList.value().front());
+ frameRates.push_back(frameRatesList.value().back());
+ } else {
+ frameRates.push_back(30);
+ frameRates.push_back(60);
+ }
+
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ width, height },
+ frameRates });
+ }
+ } else {
+ resolutions->emplace_back(
+ VirtualCameraData::Resolution{ Size{ 1920, 1080 },
+ { 30, 60 } });
+ }
+
+ return 0;
+}
+
+int ConfigParser::parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ const std::string testPatternKey = "test_pattern";
+ const std::string framesKey = "frames";
+ if (cameraConfigData.contains(testPatternKey)) {
+ if (cameraConfigData.contains(framesKey)) {
+ LOG(Virtual, Error) << "A camera should use either "
+ << testPatternKey << " or " << framesKey;
+ return -EINVAL;
+ }
+
+ auto testPattern = cameraConfigData[testPatternKey].get<std::string>("");
+
+ if (testPattern == "bars") {
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (testPattern == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Debug) << "Test pattern: " << testPattern
+ << " is not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ const YamlObject &frames = cameraConfigData[framesKey];
+
+ /* When no frames are provided in the config file, use the color bar test pattern */
+ if (!frames) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ auto path = frames["path"].get<std::string>();
+
+ if (!path) {
+		LOG(Virtual, Error) << "Either a test pattern or a path to frames must be specified.";
+ return -EINVAL;
+ }
+
+ std::vector<std::filesystem::path> files;
+
+ switch (std::filesystem::symlink_status(*path).type()) {
+ case std::filesystem::file_type::regular:
+ files.push_back(*path);
+ break;
+
+ case std::filesystem::file_type::directory:
+ for (const auto &dentry : std::filesystem::directory_iterator{ *path }) {
+ if (dentry.is_regular_file())
+ files.push_back(dentry.path());
+ }
+
+ std::sort(files.begin(), files.end(), [](const auto &a, const auto &b) {
+ return ::strverscmp(a.c_str(), b.c_str()) < 0;
+ });
+
+ if (files.empty()) {
+ LOG(Virtual, Error) << "Directory has no files: " << *path;
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ LOG(Virtual, Error) << "Frame: " << *path << " is not supported";
+ return -EINVAL;
+ }
+
+ data->config_.frame = ImageFrames{ std::move(files) };
+
+ return 0;
+}
+
+int ConfigParser::parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string location = cameraConfigData["location"].get<std::string>("CameraLocationFront");
+
+ /* Default value is properties::CameraLocationFront */
+ auto it = properties::LocationNameValueMap.find(location);
+ if (it == properties::LocationNameValueMap.end()) {
+ LOG(Virtual, Error)
+ << "location: " << location << " is not supported";
+ return -EINVAL;
+ }
+
+ data->properties_.set(properties::Location, it->second);
+
+ return 0;
+}
+
+int ConfigParser::parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+ std::string model = cameraConfigData["model"].get<std::string>("Unknown");
+
+ data->properties_.set(properties::Model, model);
+
+ return 0;
+}
+
+} /* namespace libcamera */
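
For orientation, a minimal sketch of how a caller is expected to drive this
parser; the wrapper function and the literal path are illustrative only, the
real entry point being PipelineHandlerVirtual::match() later in this patch:

    #include <memory>
    #include <vector>

    #include <libcamera/base/file.h>

    #include "config_parser.h"

    using namespace libcamera;

    std::vector<std::unique_ptr<VirtualCameraData>>
    loadVirtualCameras(PipelineHandler *pipe)
    {
            File file("virtual.yaml"); /* hypothetical location */
            if (!file.open(File::OpenModeFlag::ReadOnly))
                    return {};

            /* Returns one VirtualCameraData per parsed camera entry. */
            ConfigParser parser;
            return parser.parseConfigFile(file, pipe);
    }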
diff --git a/src/libcamera/pipeline/virtual/config_parser.h b/src/libcamera/pipeline/virtual/config_parser.h
new file mode 100644
index 00000000..d2000de9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/config_parser.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/file.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+class ConfigParser
+{
+public:
+ std::vector<std::unique_ptr<VirtualCameraData>>
+ parseConfigFile(File &file, PipelineHandler *pipe);
+
+private:
+ std::unique_ptr<VirtualCameraData>
+ parseCameraConfigData(const YamlObject &cameraConfigData, PipelineHandler *pipe);
+
+ int parseSupportedFormats(const YamlObject &cameraConfigData,
+ std::vector<VirtualCameraData::Resolution> *resolutions);
+ int parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data);
+ int parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data);
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/data/virtual.yaml b/src/libcamera/pipeline/virtual/data/virtual.yaml
new file mode 100644
index 00000000..20471bb9
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/data/virtual.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: CC0-1.0
+%YAML 1.1
+---
+"Virtual0":
+ supported_formats:
+ - width: 1920
+ height: 1080
+ frame_rates:
+ - 30
+ - 60
+ - width: 1680
+ height: 1050
+ frame_rates:
+ - 70
+ - 80
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device"
+"Virtual1":
+ supported_formats:
+ - width: 800
+ height: 600
+ frame_rates:
+ - 60
+ test_pattern: "bars"
+ location: "CameraLocationBack"
+ model: "Virtual Video Device1"
+"Virtual2":
+ supported_formats:
+ - width: 400
+ height: 300
+ test_pattern: "lines"
+ location: "CameraLocationFront"
+ model: "Virtual Video Device2"
+"Virtual3":
+ test_pattern: "bars"
diff --git a/src/libcamera/pipeline/virtual/frame_generator.h b/src/libcamera/pipeline/virtual/frame_generator.h
new file mode 100644
index 00000000..a0658c45
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/frame_generator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to generate frames
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
+class FrameGenerator
+{
+public:
+ virtual ~FrameGenerator() = default;
+
+ virtual void configure(const Size &size) = 0;
+
+ virtual int generateFrame(const Size &size,
+ const FrameBuffer *buffer) = 0;
+
+protected:
+ FrameGenerator() {}
+};
+
+} /* namespace libcamera */
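
To illustrate the contract, a hypothetical generator that fills every frame
with a constant grey, assuming a two-plane NV12 buffer and the same
MappedFrameBuffer helper the real generators below rely on:

    #include <errno.h>
    #include <string.h>

    #include "libcamera/internal/mapped_framebuffer.h"

    #include "frame_generator.h"

    namespace libcamera {

    class GreyFrameGenerator : public FrameGenerator /* hypothetical */
    {
    public:
            void configure([[maybe_unused]] const Size &size) override
            {
                    /* Nothing to precompute for a constant fill. */
            }

            int generateFrame(const Size &size, const FrameBuffer *buffer) override
            {
                    MappedFrameBuffer mapped(buffer, MappedFrameBuffer::MapFlag::Write);
                    if (!mapped.isValid())
                            return -EINVAL;

                    const auto &planes = mapped.planes();

                    /* Mid-grey luma, neutral chroma (NV12: UV is half the Y size). */
                    memset(planes[0].begin(), 0x7f, size.width * size.height);
                    memset(planes[1].begin(), 0x80, size.width * size.height / 2);

                    return 0;
            }
    };

    } /* namespace libcamera */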
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
new file mode 100644
index 00000000..d1545b5d
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+/*
+ * Factory function to create an ImageFrameGenerator object.
+ * Read the images and convert them to buffers in NV12 format.
+ * Store the pointers to the buffers in a list (imageFrameDatas_).
+ */
+std::unique_ptr<ImageFrameGenerator>
+ImageFrameGenerator::create(ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each file in the directory, load the image,
+ * convert it to NV12, and store the pointer.
+ */
+ for (const auto &path : imageFrames.files) {
+ File file(path);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ auto fileSize = file.size();
+ auto buffer = std::make_unique<uint8_t[]>(fileSize);
+ if (file.read({ buffer.get(), static_cast<size_t>(fileSize) }) != fileSize) {
+ LOG(Virtual, Error) << "Failed to read file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(buffer.get(), fileSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(width * height / 2);
+ int ret = libyuv::MJPGToNV12(buffer.get(), fileSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+
+ ASSERT(!imageFrameGenerator->imageFrameDatas_.empty());
+
+ return imageFrameGenerator;
+}
+
+/*
+ * \var ImageFrameGenerator::frameRepeat
+ * \brief Number of times each image is repeated before advancing to the next
+ */
+
+/* Scale the buffers for image frames. */
+void ImageFrameGenerator::configure(const Size &size)
+{
+	/* Clear previously scaled frames so configure() can be called multiple times */
+ scaledFrameDatas_.clear();
+ frameIndex_ = 0;
+ parameter_ = 0;
+
+ for (unsigned int i = 0; i < imageFrameDatas_.size(); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ unsigned int halfSizeWidth = (size.width + 1) / 2;
+ unsigned int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+		/*
+		 * \todo Some platforms might enforce a stride alignment due
+		 * to GPU requirements. For now the buffer stride is assumed
+		 * to be equal to the width for this to work properly.
+		 */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buffer)
+{
+ ASSERT(!scaledFrameDatas_.empty());
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+	/* Wrap the frame index around the number of available images */
+ frameIndex_ %= imageFrameDatas_.size();
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameIndex_].Y.get(), size.width,
+ scaledFrameDatas_[frameIndex_].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+	/* Proceed to the next image every frameRepeat frames */
+	/* \todo Consider making frameRepeat configurable via the config file */
+ parameter_++;
+ if (parameter_ % frameRepeat == 0)
+ frameIndex_++;
+
+ return 0;
+}
+
+/*
+ * \var ImageFrameGenerator::imageFrameDatas_
+ * \brief List of the original, unscaled image buffers
+ */
+
+/*
+ * \var ImageFrameGenerator::scaledFrameDatas_
+ * \brief List of the image buffers scaled to the configured size
+ */
+
+/*
+ * \var ImageFrameGenerator::imageFrames_
+ * \brief Pointer to the ImageFrames configuration in VirtualCameraData
+ */
+
+/*
+ * \var ImageFrameGenerator::parameter_
+ * \brief Frame counter used to advance to the next image every frameRepeat frames
+ */
+
+} /* namespace libcamera */
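
In summary, the intended call sequence is create() once per camera,
configure() at stream configuration time, and generateFrame() for every
request. A condensed sketch, with the frame buffer assumed to come from the
pipeline's allocator:

    #include <errno.h>
    #include <memory>

    #include "image_frame_generator.h"

    using namespace libcamera;

    int renderOneFrame(ImageFrames &frames, const Size &size,
                       const FrameBuffer *buffer)
    {
            /*
             * Hold the generator through the base class: the overrides are
             * private in ImageFrameGenerator itself.
             */
            std::unique_ptr<FrameGenerator> gen = ImageFrameGenerator::create(frames);
            if (!gen)
                    return -EINVAL;

            gen->configure(size);                    /* scale every image once */
            return gen->generateFrame(size, buffer); /* copy one frame out */
    }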
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.h b/src/libcamera/pipeline/virtual/image_frame_generator.h
new file mode 100644
index 00000000..42a077ba
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <memory>
+#include <stdint.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::vector<std::filesystem::path> files;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ static constexpr unsigned int frameRepeat = 4;
+
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ void configure(const Size &size) override;
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+ std::vector<ImageFrameData> imageFrameDatas_;
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ ImageFrames *imageFrames_;
+ unsigned int frameIndex_;
+ unsigned int parameter_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/meson.build b/src/libcamera/pipeline/virtual/meson.build
new file mode 100644
index 00000000..4786fe2e
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'config_parser.cpp',
+ 'image_frame_generator.cpp',
+ 'test_pattern_generator.cpp',
+ 'virtual.cpp',
+])
+
+libjpeg = dependency('libjpeg', required : true)
+
+libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg]
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
new file mode 100644
index 00000000..745be83b
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#include "test_pattern_generator.h"
+
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <libyuv/convert_from_argb.h>
+
+namespace {
+
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+ if (size.width < 2)
+ return;
+
+ const size_t stride = size.width * SampleSize;
+ uint8_t first[SampleSize];
+
+ for (size_t i = 0; i < size.height; i++, image += stride) {
+ memcpy(first, &image[0], SampleSize);
+ memmove(&image[0], &image[SampleSize], stride - SampleSize);
+ memcpy(&image[stride - SampleSize], first, SampleSize);
+ }
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+static const unsigned int kARGBSize = 4;
+
+int TestPatternGenerator::generateFrame(const Size &size,
+ const FrameBuffer *buffer)
+{
+ MappedFrameBuffer mappedFrameBuffer(buffer,
+ MappedFrameBuffer::MapFlag::Write);
+
+ const auto &planes = mappedFrameBuffer.planes();
+
+ rotateLeft1Column<kARGBSize>(size, template_.get());
+
+ /* Convert the template_ to the frame buffer */
+ int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
+ planes[0].begin(), size.width,
+ planes[1].begin(), size.width,
+ size.width, size.height);
+ if (ret != 0)
+ LOG(Virtual, Error) << "ARGBToNV12() failed with " << ret;
+
+ return ret;
+}
+
+void ColorBarsGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[8][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0xff, 0xff, 0x00 }, /* Yellow */
+ { 0x00, 0xff, 0xff }, /* Cyan */
+ { 0x00, 0xff, 0x00 }, /* Green */
+ { 0xff, 0x00, 0xff }, /* Magenta */
+ { 0xff, 0x00, 0x00 }, /* Red */
+ { 0x00, 0x00, 0xff }, /* Blue */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int colorBarWidth = size.width / std::size(kColorBar);
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+ unsigned int index = (w / colorBarWidth) % std::size(kColorBar);
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+void DiagonalLinesGenerator::configure(const Size &size)
+{
+ constexpr uint8_t kColorBar[2][3] = {
+ /* R, G, B */
+ { 0xff, 0xff, 0xff }, /* White */
+ { 0x00, 0x00, 0x00 }, /* Black */
+ };
+
+ template_ = std::make_unique<uint8_t[]>(
+ size.width * size.height * kARGBSize);
+
+ unsigned int lineWidth = size.width / 10;
+
+ uint8_t *buf = template_.get();
+ for (size_t h = 0; h < size.height; h++) {
+ for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+ int index = ((w + h) / lineWidth) % 2;
+
+ *buf++ = kColorBar[index][2]; /* B */
+ *buf++ = kColorBar[index][1]; /* G */
+ *buf++ = kColorBar[index][0]; /* R */
+ *buf++ = 0x00; /* A */
+ }
+ }
+}
+
+} /* namespace libcamera */
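
The scrolling animation comes entirely from rotateLeft1Column(): each
generateFrame() call shifts the ARGB template one pixel to the left before
the NV12 conversion. A stand-alone rendition of the rotation on a single
three-pixel row (sample size 4, as for ARGB):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main()
    {
            /* Three 4-byte pixels: 1, 2, 3. */
            uint8_t row[12] = { 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 };
            uint8_t first[4];

            /* Save the first pixel, shift the rest left, append the saved one. */
            memcpy(first, &row[0], 4);
            memmove(&row[0], &row[4], sizeof(row) - 4);
            memcpy(&row[sizeof(row) - 4], first, 4);

            /* Pixel order is now 2, 3, 1: the row scrolled left by one. */
            assert(row[0] == 2 && row[4] == 3 && row[8] == 1);

            return 0;
    }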
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h
new file mode 100644
index 00000000..2a51bd31
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#pragma once
+
+#include <memory>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class TestPattern : char {
+ ColorBars = 0,
+ DiagonalLines = 1,
+};
+
+class TestPatternGenerator : public FrameGenerator
+{
+public:
+ int generateFrame(const Size &size, const FrameBuffer *buffer) override;
+
+protected:
+ /* Buffer of test pattern template */
+ std::unique_ptr<uint8_t[]> template_;
+};
+
+class ColorBarsGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the color bar test pattern. */
+ void configure(const Size &size) override;
+};
+
+class DiagonalLinesGenerator : public TestPatternGenerator
+{
+public:
+ /* Generate a template buffer of the diagonal lines test pattern. */
+ void configure(const Size &size) override;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp
new file mode 100644
index 00000000..e692a543
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.cpp
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#include "virtual.h"
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <errno.h>
+#include <map>
+#include <memory>
+#include <ostream>
+#include <set>
+#include <stdint.h>
+#include <string>
+#include <time.h>
+#include <utility>
+#include <vector>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "pipeline/virtual/config_parser.h"
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Virtual)
+
+namespace {
+
+uint64_t currentTimestamp()
+{
+ const auto now = std::chrono::steady_clock::now();
+ auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ now.time_since_epoch());
+
+ return nsecs.count();
+}
+
+} /* namespace */
+
+template<class... Ts>
+struct overloaded : Ts... {
+ using Ts::operator()...;
+};
+template<class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
+
+class VirtualCameraConfiguration : public CameraConfiguration
+{
+public:
+ static constexpr unsigned int kBufferCount = 4;
+
+ VirtualCameraConfiguration(VirtualCameraData *data);
+
+ Status validate() override;
+
+private:
+ const VirtualCameraData *data_;
+};
+
+class PipelineHandlerVirtual : public PipelineHandler
+{
+public:
+ PipelineHandlerVirtual(CameraManager *manager);
+ ~PipelineHandlerVirtual();
+
+ std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ static bool created_;
+
+ VirtualCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VirtualCameraData *>(camera->_d());
+ }
+
+ bool initFrameGenerator(Camera *camera);
+
+ DmaBufAllocator dmaBufAllocator_;
+
+ bool resetCreated_ = false;
+};
+
+VirtualCameraData::VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions)
+ : Camera::Private(pipe)
+{
+ config_.resolutions = supportedResolutions;
+ for (const auto &resolution : config_.resolutions) {
+ if (config_.minResolutionSize.isNull() || config_.minResolutionSize > resolution.size)
+ config_.minResolutionSize = resolution.size;
+
+ config_.maxResolutionSize = std::max(config_.maxResolutionSize, resolution.size);
+ }
+
+ properties_.set(properties::PixelArrayActiveAreas,
+ { Rectangle(config_.maxResolutionSize) });
+
+ /* \todo Support multiple streams and pass multi_stream_test */
+ streamConfigs_.resize(kMaxStream);
+}
+
+VirtualCameraConfiguration::VirtualCameraConfiguration(VirtualCameraData *data)
+ : CameraConfiguration(), data_(data)
+{
+}
+
+CameraConfiguration::Status VirtualCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty()) {
+ LOG(Virtual, Error) << "Empty config";
+ return Invalid;
+ }
+
+	/* Cap the number of streams at kMaxStream */
+ if (config_.size() > VirtualCameraData::kMaxStream) {
+ config_.resize(VirtualCameraData::kMaxStream);
+ status = Adjusted;
+ }
+
+ for (StreamConfiguration &cfg : config_) {
+ bool adjusted = false;
+ bool found = false;
+ for (const auto &resolution : data_->config_.resolutions) {
+ if (resolution.size.width == cfg.size.width &&
+ resolution.size.height == cfg.size.height) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * \todo It's a pipeline's decision to choose a
+ * resolution when the exact one is not supported.
+ * Defining the default logic in PipelineHandler to
+ * find the closest resolution would be nice.
+ */
+ cfg.size = data_->config_.maxResolutionSize;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (cfg.pixelFormat != formats::NV12) {
+ cfg.pixelFormat = formats::NV12;
+ status = Adjusted;
+ adjusted = true;
+ }
+
+ if (adjusted)
+ LOG(Virtual, Info)
+ << "Stream configuration adjusted to " << cfg.toString();
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+ cfg.stride = info.stride(cfg.size.width, 0, 1);
+ cfg.frameSize = info.frameSize(cfg.size, 1);
+
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+ }
+
+ return status;
+}
+
+/* static */
+bool PipelineHandlerVirtual::created_ = false;
+
+PipelineHandlerVirtual::PipelineHandlerVirtual(CameraManager *manager)
+ : PipelineHandler(manager),
+ dmaBufAllocator_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+}
+
+PipelineHandlerVirtual::~PipelineHandlerVirtual()
+{
+ if (resetCreated_)
+ created_ = false;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVirtual::generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles)
+{
+ VirtualCameraData *data = cameraData(camera);
+ auto config = std::make_unique<VirtualCameraConfiguration>(data);
+
+ if (roles.empty())
+ return config;
+
+ for (const StreamRole role : roles) {
+ switch (role) {
+ case StreamRole::StillCapture:
+ case StreamRole::VideoRecording:
+ case StreamRole::Viewfinder:
+ break;
+
+ case StreamRole::Raw:
+ default:
+ LOG(Virtual, Error)
+ << "Requested stream role not supported: " << role;
+ config.reset();
+ return config;
+ }
+
+ std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
+ PixelFormat pixelFormat = formats::NV12;
+ streamFormats[pixelFormat] = { { data->config_.minResolutionSize,
+ data->config_.maxResolutionSize } };
+ StreamFormats formats(streamFormats);
+ StreamConfiguration cfg(formats);
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = data->config_.maxResolutionSize;
+ cfg.bufferCount = VirtualCameraConfiguration::kBufferCount;
+
+ config->addConfiguration(cfg);
+ }
+
+ ASSERT(config->validate() != CameraConfiguration::Invalid);
+
+ return config;
+}
+
+int PipelineHandlerVirtual::configure(Camera *camera,
+ CameraConfiguration *config)
+{
+ VirtualCameraData *data = cameraData(camera);
+ for (auto [i, c] : utils::enumerate(*config)) {
+ c.setStream(&data->streamConfigs_[i].stream);
+ /* Start reading the images/generating test patterns */
+ data->streamConfigs_[i].frameGenerator->configure(c.size);
+ }
+
+ return 0;
+}
+
+int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera,
+ Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ if (!dmaBufAllocator_.isValid())
+ return -ENOBUFS;
+
+ const StreamConfiguration &config = stream->configuration();
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
+
+ std::vector<unsigned int> planeSizes;
+ for (size_t i = 0; i < info.numPlanes(); ++i)
+ planeSizes.push_back(info.planeSize(config.size, i));
+
+ return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers);
+}
+
+int PipelineHandlerVirtual::start([[maybe_unused]] Camera *camera,
+ [[maybe_unused]] const ControlList *controls)
+{
+ return 0;
+}
+
+void PipelineHandlerVirtual::stopDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+int PipelineHandlerVirtual::queueRequestDevice([[maybe_unused]] Camera *camera,
+ Request *request)
+{
+ VirtualCameraData *data = cameraData(camera);
+
+ for (auto const &[stream, buffer] : request->buffers()) {
+ bool found = false;
+ /* map buffer and fill test patterns */
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (stream == &streamConfig.stream) {
+ found = true;
+ if (streamConfig.frameGenerator->generateFrame(
+ stream->configuration().size, buffer))
+ buffer->_d()->cancel();
+
+ completeBuffer(request, buffer);
+ break;
+ }
+ }
+ ASSERT(found);
+ }
+
+ request->metadata().set(controls::SensorTimestamp, currentTimestamp());
+ completeRequest(request);
+
+ return 0;
+}
+
+bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator)
+{
+ if (created_)
+ return false;
+
+ created_ = true;
+
+ File file(configurationFile("virtual", "virtual.yaml"));
+ bool isOpen = file.open(File::OpenModeFlag::ReadOnly);
+ if (!isOpen) {
+ LOG(Virtual, Error) << "Failed to open config file: " << file.fileName();
+ return false;
+ }
+
+ ConfigParser parser;
+ auto configData = parser.parseConfigFile(file, this);
+	if (configData.empty()) {
+ LOG(Virtual, Error) << "Failed to parse any cameras from the config file: "
+ << file.fileName();
+ return false;
+ }
+
+ /* Configure and register cameras with configData */
+ for (auto &data : configData) {
+ std::set<Stream *> streams;
+ for (auto &streamConfig : data->streamConfigs_)
+ streams.insert(&streamConfig.stream);
+ std::string id = data->config_.id;
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+
+ if (!initFrameGenerator(camera.get())) {
+ LOG(Virtual, Error) << "Failed to initialize frame "
+ << "generator for camera: " << id;
+ continue;
+ }
+
+ registerCamera(std::move(camera));
+ }
+
+ resetCreated_ = true;
+
+ return true;
+}
+
+bool PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
+{
+ auto data = cameraData(camera);
+ auto &frame = data->config_.frame;
+ std::visit(overloaded{
+ [&](TestPattern &testPattern) {
+ for (auto &streamConfig : data->streamConfigs_) {
+ if (testPattern == TestPattern::DiagonalLines)
+ streamConfig.frameGenerator = std::make_unique<DiagonalLinesGenerator>();
+ else
+ streamConfig.frameGenerator = std::make_unique<ColorBarsGenerator>();
+ }
+ },
+ [&](ImageFrames &imageFrames) {
+ for (auto &streamConfig : data->streamConfigs_)
+ streamConfig.frameGenerator = ImageFrameGenerator::create(imageFrames);
+ } },
+ frame);
+
+ for (auto &streamConfig : data->streamConfigs_)
+ if (!streamConfig.frameGenerator)
+ return false;
+
+ return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
+
+} /* namespace libcamera */
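
The overloaded struct used by initFrameGenerator() is the usual C++17 idiom
for assembling an ad-hoc std::visit visitor from lambdas; a self-contained
toy example of the same pattern:

    #include <iostream>
    #include <string>
    #include <variant>

    template<class... Ts>
    struct overloaded : Ts... {
            using Ts::operator()...;
    };
    template<class... Ts>
    overloaded(Ts...) -> overloaded<Ts...>;

    int main()
    {
            std::variant<int, std::string> v = std::string("bars");

            /* One lambda per alternative; std::visit picks the matching one. */
            std::visit(overloaded{
                               [](int i) { std::cout << "int: " << i << "\n"; },
                               [](const std::string &s) { std::cout << "string: " << s << "\n"; },
                       },
                       v);

            return 0;
    }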
diff --git a/src/libcamera/pipeline/virtual/virtual.h b/src/libcamera/pipeline/virtual/virtual.h
new file mode 100644
index 00000000..92ad7d4a
--- /dev/null
+++ b/src/libcamera/pipeline/virtual/virtual.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Pipeline handler for virtual cameras
+ */
+
+#pragma once
+
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/pipeline_handler.h"
+
+#include "frame_generator.h"
+#include "image_frame_generator.h"
+#include "test_pattern_generator.h"
+
+namespace libcamera {
+
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
+class VirtualCameraData : public Camera::Private
+{
+public:
+ const static unsigned int kMaxStream = 3;
+
+ struct Resolution {
+ Size size;
+ std::vector<int64_t> frameRates;
+ };
+ struct StreamConfig {
+ Stream stream;
+ std::unique_ptr<FrameGenerator> frameGenerator;
+ };
+ /* The config file is parsed to the Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+
+ Size maxResolutionSize;
+ Size minResolutionSize;
+ };
+
+ VirtualCameraData(PipelineHandler *pipe,
+ const std::vector<Resolution> &supportedResolutions);
+
+ ~VirtualCameraData() = default;
+
+ Configuration config_;
+
+ std::vector<StreamConfig> streamConfigs_;
+};
+
+} /* namespace libcamera */
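
One subtlety worth noting: std::variant value-initializes its first
alternative, so a default-constructed Configuration::frame already selects
TestPattern::ColorBars (whose underlying value is 0). A small stand-alone
demonstration of this property, with the types re-declared locally:

    #include <assert.h>
    #include <variant>

    enum class TestPattern : char {
            ColorBars = 0,
            DiagonalLines = 1,
    };

    struct ImageFrames {};

    using VirtualFrame = std::variant<TestPattern, ImageFrames>;

    int main()
    {
            VirtualFrame frame;

            /* The first alternative, value-initialized: ColorBars. */
            assert(std::holds_alternative<TestPattern>(frame));
            assert(std::get<TestPattern>(frame) == TestPattern::ColorBars);

            return 0;
    }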
diff --git a/src/libcamera/pipeline/vivid/meson.build b/src/libcamera/pipeline/vivid/meson.build
new file mode 100644
index 00000000..513de9af
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'vivid.cpp',
+])
diff --git a/src/libcamera/pipeline/vivid/vivid.cpp b/src/libcamera/pipeline/vivid/vivid.cpp
new file mode 100644
index 00000000..0340a500
--- /dev/null
+++ b/src/libcamera/pipeline/vivid/vivid.cpp
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * vivid.cpp - Pipeline handler for the vivid capture device
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <map>
+#include <math.h>
+#include <memory>
+#include <set>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+
+/*
+ * Explicitly disable the unused-parameter warning in this pipeline handler.
+ *
+ * Parameters are left unused while they are introduced incrementally, so for
+ * documentation purposes only we disable this warning so that we can compile
+ * each commit independently without breaking the flow of the development
+ * additions.
+ *
+ * This is not recommended practice within libcamera, please listen to your
+ * compiler warnings.
+ */
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
+#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
+#define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0)
+#define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+#define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2)
+#define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3)
+#define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4)
+#define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5)
+#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
+#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
+#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(VIVID)
+
+class VividCameraData : public Camera::Private
+{
+public:
+ VividCameraData(PipelineHandler *pipe, MediaDevice *media)
+ : Camera::Private(pipe), media_(media), video_(nullptr)
+ {
+ }
+
+ ~VividCameraData()
+ {
+ delete video_;
+ }
+
+ int init();
+ void bufferReady(FrameBuffer *buffer);
+
+ MediaDevice *media_;
+ V4L2VideoDevice *video_;
+ Stream stream_;
+};
+
+class VividCameraConfiguration : public CameraConfiguration
+{
+public:
+ VividCameraConfiguration();
+
+ Status validate() override;
+};
+
+class PipelineHandlerVivid : public PipelineHandler
+{
+public:
+ PipelineHandlerVivid(CameraManager *manager);
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
+ int configure(Camera *camera, CameraConfiguration *config) override;
+
+ int exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+ int start(Camera *camera, const ControlList *controls) override;
+ void stopDevice(Camera *camera) override;
+
+ int queueRequestDevice(Camera *camera, Request *request) override;
+
+ bool match(DeviceEnumerator *enumerator) override;
+
+private:
+ int processControls(VividCameraData *data, Request *request);
+
+ VividCameraData *cameraData(Camera *camera)
+ {
+ return static_cast<VividCameraData *>(camera->_d());
+ }
+};
+
+VividCameraConfiguration::VividCameraConfiguration()
+ : CameraConfiguration()
+{
+}
+
+CameraConfiguration::Status VividCameraConfiguration::validate()
+{
+ Status status = Valid;
+
+ if (config_.empty())
+ return Invalid;
+
+ /* Cap the number of entries to the available streams. */
+ if (config_.size() > 1) {
+ config_.resize(1);
+ status = Adjusted;
+ }
+
+ StreamConfiguration &cfg = config_[0];
+
+ /* Adjust the pixel format. */
+ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
+ cfg.pixelFormat = cfg.formats().pixelformats()[0];
+ LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
+ status = Adjusted;
+ }
+
+ cfg.bufferCount = 4;
+
+ return status;
+}
+
+PipelineHandlerVivid::PipelineHandlerVivid(CameraManager *manager)
+ : PipelineHandler(manager)
+{
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerVivid::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
+{
+ std::unique_ptr<CameraConfiguration> config =
+ std::make_unique<VividCameraConfiguration>();
+ VividCameraData *data = cameraData(camera);
+
+ if (roles.empty())
+ return config;
+
+ std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
+ data->video_->formats();
+ std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+ std::transform(v4l2Formats.begin(), v4l2Formats.end(),
+ std::inserter(deviceFormats, deviceFormats.begin()),
+ [&](const decltype(v4l2Formats)::value_type &format) {
+ return decltype(deviceFormats)::value_type{
+ format.first.toPixelFormat(),
+ format.second
+ };
+ });
+
+ StreamFormats formats(deviceFormats);
+ StreamConfiguration cfg(formats);
+
+ cfg.pixelFormat = formats::BGR888;
+ cfg.size = { 1280, 720 };
+ cfg.bufferCount = 4;
+
+ config->addConfiguration(cfg);
+
+ config->validate();
+
+ return config;
+}
+
+int PipelineHandlerVivid::configure(Camera *camera, CameraConfiguration *config)
+{
+ VividCameraData *data = cameraData(camera);
+ StreamConfiguration &cfg = config->at(0);
+ int ret;
+
+ V4L2DeviceFormat format = {};
+ format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
+ format.size = cfg.size;
+
+ ret = data->video_->setFormat(&format);
+ if (ret)
+ return ret;
+
+ if (format.size != cfg.size ||
+ format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat)) {
+ LOG(VIVID, Error)
+ << "Requested " << cfg.toString() << ", got "
+ << format.size.toString() << "-"
+ << format.fourcc.toString();
+ return -EINVAL;
+ }
+
+ /* Set initial controls specific to VIVID */
+ ControlList controls(data->video_->controls());
+ controls.set(VIVID_CID_TEST_PATTERN, 0); /* Vertical Colour Bars */
+ controls.set(VIVID_CID_OSD_TEXT_MODE, 0); /* Display all OSD */
+
+ /* Ensure clear colours configured. */
+ controls.set(V4L2_CID_BRIGHTNESS, 128);
+ controls.set(V4L2_CID_CONTRAST, 128);
+ controls.set(V4L2_CID_SATURATION, 128);
+
+ /* Enable movement to visualise buffer updates. */
+ controls.set(VIVID_CID_HOR_MOVEMENT, 5);
+
+ ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ cfg.setStream(&data->stream_);
+ cfg.stride = format.planes[0].bpl;
+
+ return 0;
+}
+
+int PipelineHandlerVivid::exportFrameBuffers(Camera *camera, Stream *stream,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = stream->configuration().bufferCount;
+
+ return data->video_->exportBuffers(count, buffers);
+}
+
+int PipelineHandlerVivid::start(Camera *camera, const ControlList *controls)
+{
+ VividCameraData *data = cameraData(camera);
+ unsigned int count = data->stream_.configuration().bufferCount;
+
+ int ret = data->video_->importBuffers(count);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->streamOn();
+ if (ret < 0) {
+ data->video_->releaseBuffers();
+ return ret;
+ }
+
+ return 0;
+}
+
+void PipelineHandlerVivid::stopDevice(Camera *camera)
+{
+ VividCameraData *data = cameraData(camera);
+ data->video_->streamOff();
+ data->video_->releaseBuffers();
+}
+
+int PipelineHandlerVivid::processControls(VividCameraData *data, Request *request)
+{
+ ControlList controls(data->video_->controls());
+
+ for (auto it : request->controls()) {
+ unsigned int id = it.first;
+ unsigned int offset;
+ uint32_t cid;
+
+ if (id == controls::Brightness) {
+ cid = V4L2_CID_BRIGHTNESS;
+ offset = 128;
+ } else if (id == controls::Contrast) {
+ cid = V4L2_CID_CONTRAST;
+ offset = 0;
+ } else if (id == controls::Saturation) {
+ cid = V4L2_CID_SATURATION;
+ offset = 0;
+ } else {
+ continue;
+ }
+
+ int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ controls.set(cid, std::clamp(value, 0, 255));
+ }
+
+ for (const auto &ctrl : controls)
+ LOG(VIVID, Debug)
+ << "Setting control " << utils::hex(ctrl.first)
+ << " to " << ctrl.second.toString();
+
+ int ret = data->video_->setControls(&controls);
+ if (ret) {
+ LOG(VIVID, Error) << "Failed to set controls: " << ret;
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ return ret;
+}
+
+int PipelineHandlerVivid::queueRequestDevice(Camera *camera, Request *request)
+{
+ VividCameraData *data = cameraData(camera);
+ FrameBuffer *buffer = request->findBuffer(&data->stream_);
+ if (!buffer) {
+ LOG(VIVID, Error)
+ << "Attempt to queue request with invalid stream";
+
+ return -ENOENT;
+ }
+
+ int ret = processControls(data, request);
+ if (ret < 0)
+ return ret;
+
+ ret = data->video_->queueBuffer(buffer);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+bool PipelineHandlerVivid::match(DeviceEnumerator *enumerator)
+{
+ DeviceMatch dm("vivid");
+ dm.add("vivid-000-vid-cap");
+
+ MediaDevice *media = acquireMediaDevice(enumerator, dm);
+ if (!media)
+ return false;
+
+ std::unique_ptr<VividCameraData> data = std::make_unique<VividCameraData>(this, media);
+
+ /* Locate and open the capture video node. */
+ if (data->init())
+ return false;
+
+ /* Create and register the camera. */
+ std::set<Stream *> streams{ &data->stream_ };
+ const std::string id = data->video_->deviceName();
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+ registerCamera(std::move(camera));
+
+ return true;
+}
+
+int VividCameraData::init()
+{
+ video_ = new V4L2VideoDevice(media_->getEntityByName("vivid-000-vid-cap"));
+ if (video_->open())
+ return -ENODEV;
+
+ video_->bufferReady.connect(this, &VividCameraData::bufferReady);
+
+ /* Initialise the supported controls and properties. */
+ const ControlInfoMap &controls = video_->controls();
+ ControlInfoMap::Map ctrls;
+
+ for (const auto &ctrl : controls) {
+ const ControlId *id;
+ ControlInfo info;
+
+ switch (ctrl.first->id()) {
+ case V4L2_CID_BRIGHTNESS:
+ id = &controls::Brightness;
+ info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
+ break;
+ case V4L2_CID_CONTRAST:
+ id = &controls::Contrast;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ case V4L2_CID_SATURATION:
+ id = &controls::Saturation;
+ info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
+ break;
+ default:
+ continue;
+ }
+
+ ctrls.emplace(id, info);
+ }
+
+ controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
+
+ properties_.set(properties::Location, properties::CameraLocationExternal);
+ properties_.set(properties::Model, "Virtual Video Device");
+
+ return 0;
+}
+
+void VividCameraData::bufferReady(FrameBuffer *buffer)
+{
+ Request *request = buffer->request();
+
+ /* Record the sensor's timestamp in the request metadata. */
+ request->metadata().set(controls::SensorTimestamp,
+ buffer->metadata().timestamp);
+
+ pipe()->completeBuffer(request, buffer);
+ pipe()->completeRequest(request);
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
+
+} /* namespace libcamera */
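
The float-to-V4L2 mapping in processControls() above is linear:
value = lroundf(f * 128 + offset), clamped to [0, 255]. A quick check of the
end points under the offsets used in the code:

    #include <algorithm>
    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    static int32_t mapControl(float f, int32_t offset)
    {
            int32_t value = lroundf(f * 128 + offset);
            return std::clamp(value, 0, 255);
    }

    int main()
    {
            /* Brightness: [-1.0, 1.0] with offset 128 maps onto [0, 255]. */
            assert(mapControl(-1.0f, 128) == 0);
            assert(mapControl(1.0f, 128) == 255); /* 256, clamped to 255 */

            /* Contrast/Saturation: [0.0, 2.0] with offset 0 maps onto [0, 255]. */
            assert(mapControl(0.0f, 0) == 0);
            assert(mapControl(2.0f, 0) == 255); /* 256, clamped to 255 */

            return 0;
    }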
diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp
index 254d341f..caa5c20e 100644
--- a/src/libcamera/pipeline_handler.cpp
+++ b/src/libcamera/pipeline_handler.cpp
@@ -2,21 +2,29 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.cpp - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
-#include "pipeline_handler.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include <chrono>
+#include <sys/stat.h>
#include <sys/sysmacros.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/utils.h>
+
#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/property_ids.h>
-#include "device_enumerator.h"
-#include "log.h"
-#include "media_device.h"
-#include "utils.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_manager.h"
+#include "libcamera/internal/device_enumerator.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/request.h"
+#include "libcamera/internal/tracepoints.h"
/**
* \file pipeline_handler.h
@@ -32,89 +40,13 @@
* the REGISTER_PIPELINE_HANDLER() macro.
*/
+using namespace std::chrono_literals;
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Pipeline)
/**
- * \class CameraData
- * \brief Base class for platform-specific data associated with a camera
- *
- * The CameraData base abstract class represents platform specific-data
- * a pipeline handler might want to associate with a Camera to access them
- * at a later time.
- *
- * Pipeline handlers are expected to extend this base class with platform
- * specific implementation, associate instances of the derived classes
- * using the setCameraData() method, and access them at a later time
- * with cameraData().
- */
-
-/**
- * \fn CameraData::CameraData(PipelineHandler *pipe)
- * \brief Construct a CameraData instance for the given pipeline handler
- * \param[in] pipe The pipeline handler
- *
- * The reference to the pipeline handler is stored internally, the caller shall
- * guarantee that the pointer remains valid as long as the CameraData instance
- * exists.
- */
-
-/**
- * \var CameraData::camera_
- * \brief The camera related to this CameraData instance
- *
- * The camera_ pointer provides access to the Camera object that this instance
- * is related to. It is set when the Camera is registered with
- * PipelineHandler::registerCamera() and remains valid until the CameraData
- * instance is destroyed.
- */
-
-/**
- * \var CameraData::pipe_
- * \brief The pipeline handler related to this CameraData instance
- *
- * The pipe_ pointer provides access to the PipelineHandler object that this
- * instance is related to. It is set when the CameraData instance is created
- * and remains valid until the instance is destroyed.
- */
-
-/**
- * \var CameraData::queuedRequests_
- * \brief The list of queued and not yet completed request
- *
- * The list of queued request is used to track requests queued in order to
- * ensure completion of all requests when the pipeline handler is stopped.
- *
- * \sa PipelineHandler::queueRequest(), PipelineHandler::stop(),
- * PipelineHandler::completeRequest()
- */
-
-/**
- * \var CameraData::controlInfo_
- * \brief The set of controls supported by the camera
- *
- * The control information shall be initialised by the pipeline handler when
- * creating the camera, and shall not be modified afterwards.
- */
-
-/**
- * \var CameraData::properties_
- * \brief The list of properties supported by the camera
- *
- * The list of camera properties shall be initialised by the pipeline handler
- * when creating the camera, and shall not be modified afterwards.
- */
-
-/**
- * \var CameraData::ipa_
- * \brief The IPA module used by the camera
- *
- * Reference to the Image Processing Algorithms (IPA) operating on the camera's
- * stream(s). If no IPA exists for the camera, this field is set to nullptr.
- */
-
-/**
* \class PipelineHandler
* \brief Create and manage cameras based on a set of media devices
*
@@ -133,17 +65,16 @@ LOG_DEFINE_CATEGORY(Pipeline)
*
* In order to honour the std::enable_shared_from_this<> contract,
* PipelineHandler instances shall never be constructed manually, but always
- * through the PipelineHandlerFactory::create() method implemented by the
- * respective factories.
+ * through the PipelineHandlerFactoryBase::create() function.
*/
PipelineHandler::PipelineHandler(CameraManager *manager)
- : manager_(manager)
+ : manager_(manager), useCount_(0)
{
}
PipelineHandler::~PipelineHandler()
{
- for (std::shared_ptr<MediaDevice> media : mediaDevices_)
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
media->release();
}
@@ -181,7 +112,7 @@ PipelineHandler::~PipelineHandler()
*/
/**
- * \brief Search and acquire a MediDevice matching a device pattern
+ * \brief Search and acquire a MediaDevice matching a device pattern
* \param[in] enumerator Enumerator containing all media devices in the system
* \param[in] dm Device match pattern
*
@@ -212,66 +143,122 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
}
/**
- * \brief Lock all media devices acquired by the pipeline
+ * \brief Acquire exclusive access to the pipeline handler for the process
*
- * This method shall not be called from pipeline handler implementation, as the
- * Camera class handles locking directly.
+ * This function locks all the media devices used by the pipeline to ensure
+ * that no other process can access them concurrently.
*
- * \context This function is \threadsafe.
+ * Access to a pipeline handler may be acquired recursively from within the
+ * same process. Every successful acquire() call shall be matched with a
+ * release() call. This allows concurrent access to the same pipeline handler
+ * from different cameras within the same process.
+ *
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
+ *
+ * \context This function is called from the CameraManager thread.
*
- * \return True if the devices could be locked, false otherwise
- * \sa unlock()
- * \sa MediaDevice::lock()
+ * \return True if the pipeline handler was acquired, false if another process
+ * has already acquired it
+ * \sa release()
*/
-bool PipelineHandler::lock()
+bool PipelineHandler::acquire(Camera *camera)
{
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
- if (!media->lock()) {
- unlock();
- return false;
+ if (useCount_ == 0) {
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ if (!media->lock()) {
+ unlockMediaDevices();
+ return false;
+ }
}
}
+ if (!acquireDevice(camera)) {
+ if (useCount_ == 0)
+ unlockMediaDevices();
+
+ return false;
+ }
+
+ ++useCount_;
return true;
}
/**
- * \brief Unlock all media devices acquired by the pipeline
+ * \brief Release exclusive access to the pipeline handler
+ * \param[in] camera The camera for which to release data
*
- * This method shall not be called from pipeline handler implementation, as the
- * Camera class handles locking directly.
+ * This function releases access to the pipeline handler previously acquired by
+ * a call to acquire(). Every release() call shall match a previous successful
+ * acquire() call. Calling this function on a pipeline handler that hasn't been
+ * acquired results in undefined behaviour.
*
- * \context This function is \threadsafe.
+ * Pipeline handlers shall not call this function directly as the Camera class
+ * handles access internally.
+ *
+ * \context This function is called from the CameraManager thread.
*
- * \sa lock()
+ * \sa acquire()
*/
-void PipelineHandler::unlock()
+void PipelineHandler::release(Camera *camera)
{
- for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
- media->unlock();
+ ASSERT(useCount_);
+
+ releaseDevice(camera);
+
+ if (useCount_ == 1)
+ unlockMediaDevices();
+
+ --useCount_;
}
/**
- * \brief Retrieve the list of controls for a camera
- * \param[in] camera The camera
- * \context This function is \threadsafe.
- * \return A ControlInfoMap listing the controls support by \a camera
+ * \brief Acquire resources associated with this camera
+ * \param[in] camera The camera for which to acquire resources
+ *
+ * Pipeline handlers may override this in order to get resources such as opening
+ * devices and allocating buffers when a camera is acquired.
+ *
+ * This is used by the uvcvideo pipeline handler to delay opening /dev/video#
+ * until the camera is acquired to avoid excess power consumption. The delayed
+ * opening of /dev/video# is a special case because the kernel uvcvideo driver
+ * powers on the USB device as soon as /dev/video# is opened. This behavior
+ * should *not* be copied by other pipeline handlers.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \return True on success, false on failure
+ * \sa releaseDevice()
*/
-const ControlInfoMap &PipelineHandler::controls(Camera *camera)
+bool PipelineHandler::acquireDevice([[maybe_unused]] Camera *camera)
{
- CameraData *data = cameraData(camera);
- return data->controlInfo_;
+ return true;
}
/**
- * \brief Retrieve the list of properties for a camera
- * \param[in] camera The camera
- * \return A ControlList of properties supported by \a camera
+ * \brief Release resources associated with this camera
+ * \param[in] camera The camera for which to release resources
+ *
+ * Pipeline handlers may override this in order to perform cleanup operations
+ * when a camera is released, such as freeing memory.
+ *
+ * This is called once for every camera that is released. If there are resources
+ * shared by multiple cameras then the pipeline handler must take care to not
+ * release them until releaseDevice() has been called for all previously
+ * acquired cameras.
+ *
+ * \context This function is called from the CameraManager thread.
+ *
+ * \sa acquireDevice()
*/
-const ControlList &PipelineHandler::properties(Camera *camera)
+void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera)
+{
+}
+
+void PipelineHandler::unlockMediaDevices()
{
- CameraData *data = cameraData(camera);
- return data->properties_;
+ for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
+ media->unlock();
}
/**
@@ -294,8 +281,7 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* handler.
*
* \return A valid CameraConfiguration if the requested roles can be satisfied,
- * or a null pointer otherwise. The ownership of the returned configuration is
- * passed to the caller.
+ * or a null pointer otherwise.
*/
/**
@@ -310,13 +296,13 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* application.
*
* The configuration is guaranteed to have been validated with
- * CameraConfiguration::valid(). The pipeline handler implementation shall not
- * perform further validation and may rely on any custom field stored in its
+ * CameraConfiguration::validate(). The pipeline handler implementation shall
+ * not perform further validation and may rely on any custom field stored in its
* custom CameraConfiguration derived class.
*
* When configuring the camera the pipeline handler shall associate a Stream
* instance to each StreamConfiguration entry in the CameraConfiguration using
- * the StreamConfiguration::setStream() method.
+ * the StreamConfiguration::setStream() function.
*
* \context This function is called from the CameraManager thread.
*
@@ -330,13 +316,13 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* \param[in] stream The stream to allocate buffers for
* \param[out] buffers Array of buffers successfully allocated
*
- * This method allocates buffers for the \a stream from the devices associated
+ * This function allocates buffers for the \a stream from the devices associated
* with the stream in the corresponding pipeline handler. Those buffers shall be
* suitable to be added to a Request for the stream, and shall be mappable to
* the CPU through their associated dmabufs with mmap().
*
- * The method may only be called after the Camera has been configured and before
- * it gets started, or after it gets stopped. It shall be called only for
+ * The function may only be called after the Camera has been configured and
+ * before it gets started, or after it gets stopped. It shall be called only for
* streams that are part of the active camera configuration.
*
* The only intended caller is Camera::exportFrameBuffers().
@@ -351,10 +337,11 @@ const ControlList &PipelineHandler::properties(Camera *camera)
* \fn PipelineHandler::start()
* \brief Start capturing from a group of streams
* \param[in] camera The camera to start
+ * \param[in] controls Controls to be applied before starting the Camera
*
* Start the group of streams that have been configured for capture by
- * \a configure(). The intended caller of this method is the Camera class which
- * will in turn be called from the application to indicate that it has
+ * \a configure(). The intended caller of this function is the Camera class
+ * which will in turn be called from the application to indicate that it has
* configured the streams and is ready to capture.
*
* \context This function is called from the CameraManager thread.
@@ -363,44 +350,144 @@ const ControlList &PipelineHandler::properties(Camera *camera)
*/
/**
- * \fn PipelineHandler::stop()
- * \brief Stop capturing from all running streams
+ * \brief Stop capturing from all running streams and cancel pending requests
* \param[in] camera The camera to stop
*
- * This method stops capturing and processing requests immediately. All pending
- * requests are cancelled and complete immediately in an error state.
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete immediately in an error state.
*
* \context This function is called from the CameraManager thread.
*/
+void PipelineHandler::stop(Camera *camera)
+{
+ /* Stop the pipeline handler and let the queued requests complete. */
+ stopDevice(camera);
+
+ /* Cancel and signal as complete all waiting requests. */
+ while (!waitingRequests_.empty()) {
+ Request *request = waitingRequests_.front();
+ waitingRequests_.pop();
+ cancelRequest(request);
+ }
+
+ /* Make sure no requests are pending. */
+ Camera::Private *data = camera->_d();
+ ASSERT(data->queuedRequests_.empty());
+
+ data->requestSequence_ = 0;
+}
+
+/**
+ * \fn PipelineHandler::stopDevice()
+ * \brief Stop capturing from all running streams
+ * \param[in] camera The camera to stop
+ *
+ * This function stops capturing and processing requests immediately. All
+ * pending requests are cancelled and complete immediately in an error state.
+ */
+
+/**
+ * \brief Determine if the camera has any requests pending
+ * \param[in] camera The camera to check
+ *
+ * This function determines if there are any requests queued to the pipeline
+ * awaiting processing.
+ *
+ * \return True if there are pending requests, or false otherwise
+ */
+bool PipelineHandler::hasPendingRequests(const Camera *camera) const
+{
+ return !camera->_d()->queuedRequests_.empty();
+}
+
+/**
+ * \fn PipelineHandler::registerRequest()
+ * \brief Register a request for use by the pipeline handler
+ * \param[in] request The request to register
+ *
+ * This function is called when the request is created, and allows the pipeline
+ * handler to perform any one-time initialization it requires for the request.
+ */
+void PipelineHandler::registerRequest(Request *request)
+{
+ /*
+ * Connect the request prepared signal to notify the pipeline handler
+ * when a request is ready to be processed.
+ */
+ request->_d()->prepared.connect(this, &PipelineHandler::doQueueRequests);
+}
/**
* \fn PipelineHandler::queueRequest()
- * \brief Queue a request to the camera
- * \param[in] camera The camera to queue the request to
+ * \brief Queue a request
* \param[in] request The request to queue
*
- * This method queues a capture request to the pipeline handler for processing.
- * The request is first added to the internal list of queued requests, and
- * then passed to the pipeline handler with a call to queueRequestDevice().
+ * This function queues a capture request to the pipeline handler for
+ * processing. The request is first added to the internal list of waiting
+ * requests, which have to be prepared to make sure they are ready to be
+ * queued to the pipeline handler.
+ *
+ * The queue of waiting requests is iterated and all prepared requests are
+ * passed to the pipeline handler in the same order they have been queued by
+ * calling this function.
+ *
+ * If a Request fails during the preparation phase, or if the pipeline handler
+ * fails to queue the request to the hardware, the request is cancelled.
*
* Keeping track of queued requests ensures automatic completion of all requests
* when the pipeline handler is stopped with stop(). Request completion shall be
- * signalled by the pipeline handler using the completeRequest() method.
+ * signalled by the pipeline handler using the completeRequest() function.
*
* \context This function is called from the CameraManager thread.
- *
- * \return 0 on success or a negative error code otherwise
*/
-int PipelineHandler::queueRequest(Camera *camera, Request *request)
+void PipelineHandler::queueRequest(Request *request)
{
- CameraData *data = cameraData(camera);
+ LIBCAMERA_TRACEPOINT(request_queue, request);
+
+ waitingRequests_.push(request);
+
+ request->_d()->prepare(300ms);
+}
+
+/**
+ * \brief Queue one request to the device
+ */
+void PipelineHandler::doQueueRequest(Request *request)
+{
+ LIBCAMERA_TRACEPOINT(request_device_queue, request);
+
+ Camera *camera = request->_d()->camera();
+ Camera::Private *data = camera->_d();
data->queuedRequests_.push_back(request);
+ request->_d()->sequence_ = data->requestSequence_++;
+
+ if (request->_d()->cancelled_) {
+ completeRequest(request);
+ return;
+ }
+
int ret = queueRequestDevice(camera, request);
if (ret)
- data->queuedRequests_.remove(request);
+ cancelRequest(request);
+}
+
+/**
+ * \brief Queue prepared requests to the device
+ *
+ * Iterate the list of waiting requests and queue them to the device one by
+ * one, stopping at the first request that has not yet been prepared so that
+ * requests are queued to the device in order.
+ */
+void PipelineHandler::doQueueRequests()
+{
+ while (!waitingRequests_.empty()) {
+ Request *request = waitingRequests_.front();
+ if (!request->_d()->prepared_)
+ break;
- return ret;
+ doQueueRequest(request);
+ waitingRequests_.pop();
+ }
}
/**
@@ -409,7 +496,7 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
* \param[in] camera The camera to queue the request to
* \param[in] request The request to queue
*
- * This method queues a capture request to the device for processing. The
+ * This function queues a capture request to the device for processing. The
* request contains a set of buffers associated with streams and a set of
* parameters. The pipeline handler shall program the device to ensure that the
* parameters will be applied to the frames captured in the buffers provided in
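To make this contract concrete, a single-stream pipeline handler could implement queueRequestDevice() roughly as sketched below; MyPipelineHandler and MyCameraData, with its stream_ and video_ members, are illustrative assumptions.

	int MyPipelineHandler::queueRequestDevice(Camera *camera, Request *request)
	{
		MyCameraData *data = static_cast<MyCameraData *>(camera->_d());

		/* Locate the buffer the application provided for our stream. */
		FrameBuffer *buffer = request->findBuffer(&data->stream_);
		if (!buffer)
			return -ENOENT;

		/* Queue the buffer to the capture video device. */
		return data->video_->queueBuffer(buffer);
	}
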
@@ -422,12 +509,11 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
/**
* \brief Complete a buffer for a request
- * \param[in] camera The camera the request belongs to
* \param[in] request The request the buffer belongs to
* \param[in] buffer The buffer that has completed
*
- * This method shall be called by pipeline handlers to signal completion of the
- * \a buffer part of the \a request. It notifies applications of buffer
+ * This function shall be called by pipeline handlers to signal completion of
+ * the \a buffer part of the \a request. It notifies applications of buffer
* completion and updates the request's internal buffer tracking. The request
* is not completed automatically when the last buffer completes to give
* pipeline handlers a chance to perform any operation that may still be
@@ -438,33 +524,34 @@ int PipelineHandler::queueRequest(Camera *camera, Request *request)
* \return True if all buffers contained in the request have completed, false
* otherwise
*/
-bool PipelineHandler::completeBuffer(Camera *camera, Request *request,
- FrameBuffer *buffer)
+bool PipelineHandler::completeBuffer(Request *request, FrameBuffer *buffer)
{
+ Camera *camera = request->_d()->camera();
camera->bufferCompleted.emit(request, buffer);
- return request->completeBuffer(buffer);
+ return request->_d()->completeBuffer(buffer);
}
/**
* \brief Signal request completion
- * \param[in] camera The camera that the request belongs to
* \param[in] request The request that has completed
*
- * The pipeline handler shall call this method to notify the \a camera that the
- * request has completed. The request is deleted and shall not be accessed once
- * this method returns.
+ * The pipeline handler shall call this function to notify the \a camera that
+ * the request has completed. The request is no longer managed by the pipeline
+ * handler and shall not be accessed once this function returns.
*
- * This method ensures that requests will be returned to the application in
+ * This function ensures that requests will be returned to the application in
 * submission order; the pipeline handler may call it on any complete request
* without any ordering constraint.
*
* \context This function shall be called from the CameraManager thread.
*/
-void PipelineHandler::completeRequest(Camera *camera, Request *request)
+void PipelineHandler::completeRequest(Request *request)
{
- request->complete();
+ Camera *camera = request->_d()->camera();
+
+ request->_d()->complete();
- CameraData *data = cameraData(camera);
+ Camera::Private *data = camera->_d();
while (!data->queuedRequests_.empty()) {
Request *req = data->queuedRequests_.front();
@@ -478,31 +565,120 @@ void PipelineHandler::completeRequest(Camera *camera, Request *request)
}
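A common pattern, sketched here with a hypothetical bufferReady() slot connected to the capture device's bufferReady signal, is to complete the request as soon as its last buffer completes:

	void MyPipelineHandler::bufferReady(FrameBuffer *buffer)
	{
		Request *request = buffer->request();

		/* completeBuffer() returns true once all buffers have completed. */
		if (completeBuffer(request, buffer))
			completeRequest(request);
	}
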
/**
+ * \brief Cancel request and signal its completion
+ * \param[in] request The request to cancel
+ *
+ * This function cancels and completes the request. The same rules as for
+ * completeRequest() apply.
+ */
+void PipelineHandler::cancelRequest(Request *request)
+{
+ request->_d()->cancel();
+ completeRequest(request);
+}
+
+/**
+ * \brief Retrieve the absolute path to a platform configuration file
+ * \param[in] subdir The pipeline handler specific subdirectory name
+ * \param[in] name The configuration file name
+ *
+ * This function locates a named platform configuration file and returns
+ * its absolute path to the pipeline handler. It searches the following
+ * directories, in order:
+ *
+ * - If libcamera is not installed, the src/libcamera/pipeline/\<subdir\>/data/
+ *   directory within the source tree; otherwise
+ * - The system data (share/libcamera/pipeline/\<subdir\>) directory.
+ *
+ * The system directories are not searched if libcamera is not installed.
+ *
+ * \return The full path to the pipeline handler configuration file, or an empty
+ * string if no configuration file can be found
+ */
+std::string PipelineHandler::configurationFile(const std::string &subdir,
+ const std::string &name) const
+{
+ std::string confPath;
+ struct stat statbuf;
+ int ret;
+
+ std::string root = utils::libcameraSourcePath();
+ if (!root.empty()) {
+ /*
+ * When libcamera is used before it is installed, load
+ * configuration files from the source directory. The
+ * configuration files are then located in the 'data'
+ * subdirectory of the corresponding pipeline handler.
+ */
+ std::string confDir = root + "src/libcamera/pipeline/";
+ confPath = confDir + subdir + "/data/" + name;
+
+ LOG(Pipeline, Info)
+ << "libcamera is not installed. Loading platform configuration file from '"
+ << confPath << "'";
+ } else {
+ /* Else look in the system locations. */
+ confPath = std::string(LIBCAMERA_DATA_DIR)
+ + "/pipeline/" + subdir + '/' + name;
+ }
+
+ ret = stat(confPath.c_str(), &statbuf);
+ if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG)
+ return confPath;
+
+ LOG(Pipeline, Error)
+ << "Configuration file '" << confPath
+ << "' not found for pipeline handler '" << PipelineHandler::name() << "'";
+
+ return std::string();
+}
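Usage is straightforward. For example, a pipeline handler could locate a platform file from its match() implementation as follows; the subdirectory and file names are made up for illustration.

	std::string path = configurationFile("example", "tuning.yaml");
	if (path.empty())
		return false;	/* No configuration available for this platform. */
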
+
+/**
* \brief Register a camera to the camera manager and pipeline handler
* \param[in] camera The camera to be added
- * \param[in] data Pipeline-specific data for the camera
- * \param[in] devnum Device number of the camera (optional)
*
- * This method is called by pipeline handlers to register the cameras they
- * handle with the camera manager. It associates the pipeline-specific \a data
- * with the camera, for later retrieval with cameraData(). Ownership of \a data
- * is transferred to the PipelineHandler.
- *
- * \a devnum is the device number (as returned by makedev) that the \a camera
- * is to be associated with. This is for the V4L2 compatibility layer to map
- * device nodes to Camera instances based on the device number
- * registered by this method in \a devnum.
+ * This function is called by pipeline handlers to register the cameras they
+ * handle with the camera manager.
*
* \context This function shall be called from the CameraManager thread.
*/
-void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera,
- std::unique_ptr<CameraData> data,
- dev_t devnum)
+void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera)
{
- data->camera_ = camera.get();
- cameraData_[camera.get()] = std::move(data);
cameras_.push_back(camera);
- manager_->addCamera(std::move(camera), devnum);
+
+ if (mediaDevices_.empty()) {
+ /*
+ * For virtual devices with no MediaDevice, there are no system
+ * devices to register.
+ */
+ manager_->_d()->addCamera(std::move(camera));
+ return;
+ }
+
+ /*
+ * Walk the entity list and map the devnums of all capture video nodes
+ * to the camera.
+ */
+ std::vector<int64_t> devnums;
+ for (const std::shared_ptr<MediaDevice> &media : mediaDevices_) {
+ for (const MediaEntity *entity : media->entities()) {
+ if (entity->pads().size() == 1 &&
+ (entity->pads()[0]->flags() & MEDIA_PAD_FL_SINK) &&
+ entity->function() == MEDIA_ENT_F_IO_V4L) {
+ devnums.push_back(makedev(entity->deviceMajor(),
+ entity->deviceMinor()));
+ }
+ }
+ }
+
+ /*
+ * Store the associated devices as a property of the camera to allow
+ * systems to identify which devices are managed by libcamera.
+ */
+ Camera::Private *data = camera->_d();
+ data->properties_.set(properties::SystemDevices, devnums);
+
+ manager_->_d()->addCamera(std::move(camera));
}
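In a pipeline handler's match() implementation, registration then typically looks like the sketch below, assuming the Camera::create() overload that takes a unique pointer to the Camera::Private data; MyCameraData and the camera id are illustrative.

	std::unique_ptr<MyCameraData> data = std::make_unique<MyCameraData>(this);
	std::set<Stream *> streams{ &data->stream_ };

	std::shared_ptr<Camera> camera =
		Camera::create(std::move(data), "my-camera-id", streams);
	registerCamera(std::move(camera));
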
/**
@@ -519,7 +695,7 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera,
*/
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
- media->disconnected.connect(this, &PipelineHandler::mediaDeviceDisconnected);
+ media->disconnected.connect(this, [this, media] { mediaDeviceDisconnected(media); });
}
/**
@@ -549,30 +725,28 @@ void PipelineHandler::mediaDeviceDisconnected(MediaDevice *media)
*/
void PipelineHandler::disconnect()
{
- for (std::weak_ptr<Camera> ptr : cameras_) {
+ /*
+ * Each camera holds a reference to its associated pipeline handler
+ * instance. Hence, when the last camera is dropped, the pipeline
+ * handler will get destroyed by the last manager_->removeCamera(camera)
+ * call in the loop below.
+ *
+ * This is acceptable as long as we make sure that the code path does not
+ * access any member of the (already destroyed) pipeline handler instance
+ * afterwards. Therefore, we move the cameras_ vector to a local temporary
+ * container to avoid accessing freed memory later i.e. to explicitly run
+ * cameras_.clear().
+ */
+ std::vector<std::weak_ptr<Camera>> cameras{ std::move(cameras_) };
+
+ for (const std::weak_ptr<Camera> &ptr : cameras) {
std::shared_ptr<Camera> camera = ptr.lock();
if (!camera)
continue;
camera->disconnect();
- manager_->removeCamera(camera.get());
+ manager_->_d()->removeCamera(camera);
}
-
- cameras_.clear();
-}
-
-/**
- * \brief Retrieve the pipeline-specific data associated with a Camera
- * \param[in] camera The camera whose data to retrieve
- * \return A pointer to the pipeline-specific data passed to registerCamera().
- * The returned pointer is a borrowed reference and is guaranteed to remain
- * valid until the pipeline handler is destroyed. It shall not be deleted
- * manually by the caller.
- */
-CameraData *PipelineHandler::cameraData(const Camera *camera)
-{
- ASSERT(cameraData_.count(camera));
- return cameraData_[camera].get();
}
/**
@@ -592,27 +766,32 @@ CameraData *PipelineHandler::cameraData(const Camera *camera)
*/
/**
- * \class PipelineHandlerFactory
- * \brief Registration of PipelineHandler classes and creation of instances
+ * \fn PipelineHandler::cameraManager() const
+ * \brief Retrieve the CameraManager that this pipeline handler belongs to
+ * \context This function is \threadsafe.
+ * \return The CameraManager for this pipeline handler
+ */
+
+/**
+ * \class PipelineHandlerFactoryBase
+ * \brief Base class for pipeline handler factories
*
- * To facilitate discovery and instantiation of PipelineHandler classes, the
- * PipelineHandlerFactory class maintains a registry of pipeline handler
- * classes. Each PipelineHandler subclass shall register itself using the
- * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
- * instance of a PipelineHandlerFactory subclass and register it with the
- * static list of factories.
+ * The PipelineHandlerFactoryBase class is the base of all specializations of
+ * the PipelineHandlerFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
*/
/**
- * \brief Construct a pipeline handler factory
+ * \brief Construct a pipeline handler factory base
* \param[in] name Name of the pipeline handler class
*
- * Creating an instance of the factory registers is with the global list of
+ * Creating an instance of the factory base registers it with the global list of
* factories, accessible through the factories() function.
*
 * The factory \a name is used for debug purposes and shall be unique.
*/
-PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+PipelineHandlerFactoryBase::PipelineHandlerFactoryBase(const char *name)
: name_(name)
{
registerType(this);
@@ -625,15 +804,15 @@ PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
* \return A shared pointer to a new instance of the PipelineHandler subclass
* corresponding to the factory
*/
-std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
+std::shared_ptr<PipelineHandler> PipelineHandlerFactoryBase::create(CameraManager *manager) const
{
- PipelineHandler *handler = createInstance(manager);
+ std::unique_ptr<PipelineHandler> handler = createInstance(manager);
handler->name_ = name_.c_str();
- return std::shared_ptr<PipelineHandler>(handler);
+ return std::shared_ptr<PipelineHandler>(std::move(handler));
}
/**
- * \fn PipelineHandlerFactory::name()
+ * \fn PipelineHandlerFactoryBase::name()
* \brief Retrieve the factory name
* \return The factory name
*/
@@ -645,47 +824,89 @@ std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *m
 * The caller is responsible for guaranteeing the uniqueness of the pipeline
 * handler name.
*/
-void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
+void PipelineHandlerFactoryBase::registerType(PipelineHandlerFactoryBase *factory)
{
- std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();
+ std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
factories.push_back(factory);
-
- LOG(Pipeline, Debug)
- << "Registered pipeline handler \"" << factory->name() << "\"";
}
/**
* \brief Retrieve the list of all pipeline handler factories
- *
- * The static factories map is defined inside the function to ensures it gets
- * initialized on first use, without any dependency on link order.
- *
* \return the list of pipeline handler factories
*/
-std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
+std::vector<PipelineHandlerFactoryBase *> &PipelineHandlerFactoryBase::factories()
{
- static std::vector<PipelineHandlerFactory *> factories;
+ /*
+ * The static factories map is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on
+ * link order.
+ */
+ static std::vector<PipelineHandlerFactoryBase *> factories;
return factories;
}
/**
- * \fn PipelineHandlerFactory::createInstance()
- * \brief Create an instance of the PipelineHandler corresponding to the factory
- * \param[in] manager The camera manager
+ * \brief Return the factory for the pipeline handler with name \a name
+ * \param[in] name The pipeline handler name
+ * \return The factory of the pipeline handler with name \a name, or nullptr
+ * if not found
+ */
+const PipelineHandlerFactoryBase *PipelineHandlerFactoryBase::getFactoryByName(const std::string &name)
+{
+ const std::vector<PipelineHandlerFactoryBase *> &factories =
+ PipelineHandlerFactoryBase::factories();
+
+ auto iter = std::find_if(factories.begin(),
+ factories.end(),
+ [&name](const PipelineHandlerFactoryBase *f) {
+ return f->name() == name;
+ });
+
+ if (iter != factories.end())
+ return *iter;
+
+ return nullptr;
+}
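A minimal lookup-and-create sequence, assuming a factory registered under the name "example" and a CameraManager pointer in scope, might read:

	const PipelineHandlerFactoryBase *factory =
		PipelineHandlerFactoryBase::getFactoryByName("example");
	if (factory)
		std::shared_ptr<PipelineHandler> handler = factory->create(manager);
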
+
+/**
+ * \class PipelineHandlerFactory
+ * \brief Registration of PipelineHandler classes and creation of instances
+ * \tparam _PipelineHandler The pipeline handler class type for this factory
+ *
+ * To facilitate discovery and instantiation of PipelineHandler classes, the
+ * PipelineHandlerFactory class implements auto-registration of pipeline
+ * handlers. Each PipelineHandler subclass shall register itself using the
+ * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
+ * instance of a PipelineHandlerFactory and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
+ * \brief Construct a pipeline handler factory
+ * \param[in] name Name of the pipeline handler class
*
- * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
- * macro. It creates a pipeline handler instance associated with the camera
- * \a manager.
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the factories() function.
*
- * \return a pointer to a newly constructed instance of the PipelineHandler
- * subclass corresponding to the factory
+ * The factory \a name is used for debug purposes and shall be unique.
+ */
+
+/**
+ * \fn PipelineHandlerFactory::createInstance() const
+ * \brief Create an instance of the PipelineHandler corresponding to the factory
+ * \param[in] manager The camera manager
+ * \return A unique pointer to a newly constructed instance of the
+ * PipelineHandler subclass corresponding to the factory
*/
/**
* \def REGISTER_PIPELINE_HANDLER
* \brief Register a pipeline handler with the pipeline handler factory
* \param[in] handler Class name of PipelineHandler derived class to register
+ * \param[in] name Name assigned to the pipeline handler, matching the pipeline
+ * subdirectory name in the source tree.
*
* Register a PipelineHandler subclass with the factory and make it available to
* try and match devices.
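For instance, a pipeline handler class whose sources live in src/libcamera/pipeline/example/ would register itself with the following line; the class and subdirectory names are hypothetical.

	REGISTER_PIPELINE_HANDLER(PipelineHandlerExample, "example")
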
diff --git a/src/libcamera/pixel_format.cpp b/src/libcamera/pixel_format.cpp
new file mode 100644
index 00000000..314179a8
--- /dev/null
+++ b/src/libcamera/pixel_format.cpp
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * libcamera Pixel Format
+ */
+
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+
+/**
+ * \file pixel_format.h
+ * \brief libcamera pixel format
+ */
+
+namespace libcamera {
+
+/**
+ * \class PixelFormat
+ * \brief libcamera image pixel format
+ *
+ * The PixelFormat type describes the format of images in the public libcamera
+ * API. It stores a FourCC value as a 32-bit unsigned integer and a modifier.
+ * The FourCC and modifier values are defined in the Linux kernel DRM/KMS API
+ * (see linux/drm_fourcc.h). Constant expressions for all pixel formats
+ * supported by libcamera are available in libcamera/formats.h.
+ */
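A short usage sketch follows; it assumes the formats.h constants use the linear (0) modifier, which matches DRM_FORMAT_MOD_LINEAR from linux/drm_fourcc.h.

	#include <cassert>

	#include <linux/drm_fourcc.h>

	#include <libcamera/formats.h>
	#include <libcamera/pixel_format.h>

	using namespace libcamera;

	void example()
	{
		PixelFormat invalid;			/* isValid() == false */
		PixelFormat nv12 = formats::NV12;	/* constant from formats.h */
		PixelFormat raw(DRM_FORMAT_NV12, DRM_FORMAT_MOD_LINEAR);

		/* Both spellings denote the same NV12 format. */
		assert(nv12 == raw);
	}
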
+
+/**
+ * \fn PixelFormat::PixelFormat()
+ * \brief Construct a PixelFormat with an invalid format
+ *
+ * PixelFormat instances constructed with the default constructor are
+ * invalid, calling the isValid() function returns false.
+ */
+
+/**
+ * \fn PixelFormat::PixelFormat(uint32_t fourcc, uint64_t modifier)
+ * \brief Construct a PixelFormat from a DRM FourCC and a modifier
+ * \param[in] fourcc A DRM FourCC
+ * \param[in] modifier A DRM FourCC modifier
+ */
+
+/**
+ * \brief Compare pixel formats for equality
+ * \return True if the two pixel formats are equal, false otherwise
+ */
+bool PixelFormat::operator==(const PixelFormat &other) const
+{
+ return fourcc_ == other.fourcc() && modifier_ == other.modifier_;
+}
+
+/**
+ * \fn bool PixelFormat::operator!=(const PixelFormat &other) const
+ * \brief Compare pixel formats for inequality
+ * \return True if the two pixel formats are not equal, false otherwise
+ */
+
+/**
+ * \brief Compare pixel formats for smaller than order
+ * \return True if \a this is smaller than \a other, false otherwise
+ */
+bool PixelFormat::operator<(const PixelFormat &other) const
+{
+ if (fourcc_ < other.fourcc_)
+ return true;
+ if (fourcc_ > other.fourcc_)
+ return false;
+ return modifier_ < other.modifier_;
+}
+
+/**
+ * \fn bool PixelFormat::isValid() const
+ * \brief Check if the pixel format is valid
+ *
+ * PixelFormat instances constructed with the default constructor are
+ * invalid. Instances constructed with a FourCC defined in the DRM API
+ * are valid. The behaviour is undefined otherwise.
+ *
+ * \return True if the pixel format is valid, false otherwise
+ */
+
+/**
+ * \fn PixelFormat::operator uint32_t() const
+ * \brief Convert to the pixel format numerical value
+ * \return The pixel format numerical value
+ */
+
+/**
+ * \fn PixelFormat::fourcc() const
+ * \brief Retrieve the pixel format FourCC
+ * \return DRM FourCC
+ */
+
+/**
+ * \fn PixelFormat::modifier() const
+ * \brief Retrieve the pixel format modifier
+ * \return DRM modifier
+ */
+
+/**
+ * \brief Assemble and return a string describing the pixel format
+ * \return A string describing the pixel format
+ */
+std::string PixelFormat::toString() const
+{
+ const PixelFormatInfo &info = PixelFormatInfo::info(*this);
+
+ if (!info.isValid()) {
+ if (*this == PixelFormat())
+ return "<INVALID>";
+
+ char fourcc[7] = { '<',
+ static_cast<char>(fourcc_),
+ static_cast<char>(fourcc_ >> 8),
+ static_cast<char>(fourcc_ >> 16),
+ static_cast<char>(fourcc_ >> 24),
+ '>' };
+
+ for (unsigned int i = 1; i < 5; i++) {
+ if (!isprint(fourcc[i]))
+ fourcc[i] = '.';
+ }
+
+ return fourcc;
+ }
+
+ return info.name;
+}
+
+/**
+ * \brief Create a PixelFormat from a string
+ * \param[in] name The name of the pixel format
+ * \return The PixelFormat represented by \a name if known, or an
+ * invalid pixel format otherwise.
+ */
+PixelFormat PixelFormat::fromString(const std::string &name)
+{
+ return PixelFormatInfo::info(name).format;
+}
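Combined with toString() and the stream insertion operator defined below, this allows a simple round trip, convenient for command-line parsing (a fragment-level sketch):

	PixelFormat fmt = PixelFormat::fromString("NV12");
	if (fmt.isValid())
		std::cout << fmt << std::endl;	/* prints "NV12" */
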
+
+/**
+ * \brief Insert a text representation of a PixelFormat into an output stream
+ * \param[in] out The output stream
+ * \param[in] f The PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const PixelFormat &f)
+{
+ out << f.toString();
+ return out;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pixelformats.cpp b/src/libcamera/pixelformats.cpp
deleted file mode 100644
index 87557d98..00000000
--- a/src/libcamera/pixelformats.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * pixelformats.cpp - libcamera pixel formats
- */
-
-#include <libcamera/pixelformats.h>
-
-/**
- * \file pixelformats.h
- * \brief libcamera pixel formats
- */
-
-namespace libcamera {
-
-/**
- * \class PixelFormat
- * \brief libcamera image pixel format
- *
- * The PixelFormat type describes the format of images in the public libcamera
- * API. It stores a FourCC value as a 32-bit unsigned integer and a set of
- * modifiers. The FourCC and modifiers values are defined in the Linux kernel
- * DRM/KMS API (see linux/drm_fourcc.h).
- */
-
-/**
- * \brief Construct a PixelFormat with an invalid format
- *
- * PixelFormat instances constructed with the default constructor are
- * invalid, calling the isValid() function returns false.
- */
-PixelFormat::PixelFormat()
- : fourcc_(0)
-{
-}
-
-/**
- * \brief Construct a PixelFormat from a DRM FourCC and a set of modifiers
- * \param[in] fourcc A DRM FourCC
- * \param[in] modifiers A set of DRM FourCC modifiers
- */
-PixelFormat::PixelFormat(uint32_t fourcc, const std::set<uint64_t> &modifiers)
- : fourcc_(fourcc), modifiers_(modifiers)
-{
-}
-
-/**
- * \brief Compare pixel formats for equality
- * \return True if the two pixel formats are equal, false otherwise
- */
-bool PixelFormat::operator==(const PixelFormat &other) const
-{
- return fourcc_ == other.fourcc() && modifiers_ == other.modifiers_;
-}
-
-/**
- * \fn bool PixelFormat::operator!=(const PixelFormat &other) const
- * \brief Compare pixel formats for inequality
- * \return True if the two pixel formats are not equal, false otherwise
- */
-
-/**
- * \brief Compare pixel formats for smaller than order
- * \return True if \a this is smaller than \a other, false otherwise
- */
-bool PixelFormat::operator<(const PixelFormat &other) const
-{
- if (fourcc_ < other.fourcc_)
- return true;
- if (fourcc_ > other.fourcc_)
- return false;
- return modifiers_ < modifiers_;
-}
-
-/**
- * \fn bool PixelFormat::isValid() const
- * \brief Check if the pixel format is valid
- *
- * PixelFormat instances constructed with the default constructor are
- * invalid. Instances constructed with a FourCC defined in the DRM API
- * are valid. The behaviour is undefined otherwise.
- *
- * \return True if the pixel format is valid, false otherwise
- */
-
-/**
- * \fn PixelFormat::operator uint32_t() const
- * \brief Convert the the pixel format numerical value
- * \return The pixel format numerical value
- */
-
-/**
- * \fn PixelFormat::fourcc() const
- * \brief Retrieve the pixel format FourCC
- * \return DRM FourCC
- */
-
-/**
- * \fn PixelFormat::modifiers() const
- * \brief Retrieve the pixel format modifiers
- * \return Set of DRM modifiers
- */
-
-/**
- * \brief Assemble and return a string describing the pixel format
- * \return A string describing the pixel format
- */
-std::string PixelFormat::toString() const
-{
- char str[11];
- snprintf(str, 11, "0x%08x", fourcc_);
- return str;
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp
index 3b4d0f10..bc9833f4 100644
--- a/src/libcamera/process.cpp
+++ b/src/libcamera/process.cpp
@@ -2,15 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.cpp - Process object
+ * Process object
*/
-#include "process.h"
+#include "libcamera/internal/process.h"
#include <algorithm>
#include <dirent.h>
#include <fcntl.h>
-#include <iostream>
#include <list>
#include <signal.h>
#include <string.h>
@@ -20,10 +19,9 @@
#include <unistd.h>
#include <vector>
-#include <libcamera/event_notifier.h>
-
-#include "log.h"
-#include "utils.h"
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
/**
* \file process.h
@@ -41,28 +39,6 @@ LOG_DEFINE_CATEGORY(Process)
* The ProcessManager singleton keeps track of all created Process instances,
* and manages the signal handling involved in terminating processes.
*/
-class ProcessManager
-{
-public:
- void registerProcess(Process *proc);
-
- static ProcessManager *instance();
-
- int writePipe() const;
-
- const struct sigaction &oldsa() const;
-
-private:
- void sighandler(EventNotifier *notifier);
- ProcessManager();
- ~ProcessManager();
-
- std::list<Process *> processes_;
-
- struct sigaction oldsa_;
- EventNotifier *sigEvent_;
- int pipe_[2];
-};
namespace {
@@ -89,10 +65,10 @@ void sigact(int signal, siginfo_t *info, void *ucontext)
} /* namespace */
-void ProcessManager::sighandler(EventNotifier *notifier)
+void ProcessManager::sighandler()
{
char data;
- ssize_t ret = read(pipe_[0], &data, sizeof(data));
+ ssize_t ret = read(pipe_[0].get(), &data, sizeof(data));
if (ret < 0) {
LOG(Process, Error)
<< "Failed to read byte from signal handler pipe";
@@ -118,7 +94,7 @@ void ProcessManager::sighandler(EventNotifier *notifier)
* \brief Register process with process manager
* \param[in] proc Process to register
*
- * This method registers the \a proc with the process manager. It
+ * This function registers the \a proc with the process manager. It
* shall be called by the parent process after successfully forking, in
* order to let the parent signal process termination.
*/
@@ -127,8 +103,20 @@ void ProcessManager::registerProcess(Process *proc)
processes_.push_back(proc);
}
+ProcessManager *ProcessManager::self_ = nullptr;
+
+/**
+ * \brief Construct a ProcessManager instance
+ *
+ * The ProcessManager class is meant to only be instantiated once, by the
+ * CameraManager.
+ */
ProcessManager::ProcessManager()
{
+ if (self_)
+ LOG(Process, Fatal)
+ << "Multiple ProcessManager objects are not allowed";
+
sigaction(SIGCHLD, NULL, &oldsa_);
struct sigaction sa;
@@ -140,52 +128,58 @@ ProcessManager::ProcessManager()
sigaction(SIGCHLD, &sa, NULL);
- if (pipe2(pipe_, O_CLOEXEC | O_DIRECT | O_NONBLOCK))
+ int pipe[2];
+ if (pipe2(pipe, O_CLOEXEC | O_DIRECT | O_NONBLOCK))
LOG(Process, Fatal)
<< "Failed to initialize pipe for signal handling";
- sigEvent_ = new EventNotifier(pipe_[0], EventNotifier::Read);
+
+ pipe_[0] = UniqueFD(pipe[0]);
+ pipe_[1] = UniqueFD(pipe[1]);
+
+ sigEvent_ = new EventNotifier(pipe_[0].get(), EventNotifier::Read);
sigEvent_->activated.connect(this, &ProcessManager::sighandler);
+
+ self_ = this;
}
ProcessManager::~ProcessManager()
{
sigaction(SIGCHLD, &oldsa_, NULL);
+
delete sigEvent_;
- close(pipe_[0]);
- close(pipe_[1]);
+
+ self_ = nullptr;
}
/**
* \brief Retrieve the Process manager instance
*
- * The ProcessManager is a singleton and can't be constructed manually. This
- * method shall instead be used to retrieve the single global instance of the
- * manager.
+ * The ProcessManager is constructed by the CameraManager. This function shall
+ * be used to retrieve the single instance of the manager.
*
* \return The Process manager instance
*/
ProcessManager *ProcessManager::instance()
{
- static ProcessManager processManager;
- return &processManager;
+ return self_;
}
/**
* \brief Retrieve the Process manager's write pipe
*
- * This method is meant only to be used by the static signal handler.
+ * This function is meant only to be used by the static signal handler.
*
* \return Pipe for writing
*/
int ProcessManager::writePipe() const
{
- return pipe_[1];
+ return pipe_[1].get();
}
/**
 * \brief Retrieve the old signal action data
*
- * This method is meant only to be used by the static signal handler.
+ * This function is meant only to be used by the static signal handler.
*
* \return The old signal action data
*/
@@ -194,7 +188,6 @@ const struct sigaction &ProcessManager::oldsa() const
return oldsa_;
}
-
/**
* \class Process
* \brief Process object
@@ -268,14 +261,16 @@ int Process::start(const std::string &path,
closeAllFdsExcept(fds);
- unsetenv("LIBCAMERA_LOG_FILE");
+ const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
+ if (file && strcmp(file, "syslog"))
+ unsetenv("LIBCAMERA_LOG_FILE");
const char **argv = new const char *[args.size() + 2];
unsigned int len = args.size();
argv[0] = path.c_str();
for (unsigned int i = 0; i < len; i++)
- argv[i+1] = args[i].c_str();
- argv[len+1] = nullptr;
+ argv[i + 1] = args[i].c_str();
+ argv[len + 1] = nullptr;
execv(path.c_str(), (char **)argv);
@@ -326,7 +321,7 @@ int Process::isolate()
* \brief SIGCHLD handler
* \param[in] wstatus The status as output by waitpid()
*
- * This method is called when the process associated with Process terminates.
+ * This function is called when the process associated with Process terminates.
* It emits the Process::finished signal.
*/
void Process::died(int wstatus)
@@ -335,7 +330,7 @@ void Process::died(int wstatus)
exitStatus_ = WIFEXITED(wstatus) ? NormalExit : SignalExit;
exitCode_ = exitStatus_ == NormalExit ? WEXITSTATUS(wstatus) : -1;
- finished.emit(this, exitStatus_, exitCode_);
+ finished.emit(exitStatus_, exitCode_);
}
/**
@@ -355,7 +350,7 @@ void Process::died(int wstatus)
* \fn Process::exitCode()
* \brief Retrieve the exit code of the process
*
- * This method is only valid if exitStatus() returned NormalExit.
+ * This function is only valid if exitStatus() returned NormalExit.
*
* \return Exit code
*/
@@ -373,7 +368,8 @@ void Process::died(int wstatus)
*/
void Process::kill()
{
- ::kill(pid_, SIGKILL);
+ if (pid_ > 0)
+ ::kill(pid_, SIGKILL);
}
} /* namespace libcamera */
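To illustrate the external interface, a minimal usage sketch follows. The helper path, arguments and the HelperUser class are hypothetical, a ProcessManager (constructed by the CameraManager) is assumed to exist, and the Process member must outlive the child process.

	class HelperUser
	{
	public:
		int startHelper()
		{
			proc_.finished.connect(this, &HelperUser::processFinished);
			return proc_.start("/usr/libexec/my-helper", { "--verbose" });
		}

	private:
		void processFinished(enum Process::ExitStatus status, int code)
		{
			/* React to the child exiting. */
		}

		Process proc_;
	};
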
diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in
deleted file mode 100644
index bfdd823f..00000000
--- a/src/libcamera/property_ids.cpp.in
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * property_ids.cpp : Property ID list
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-/**
- * \file property_ids.h
- * \brief Camera property identifiers
- */
-
-namespace libcamera {
-
-/**
- * \brief Namespace for libcamera properties
- */
-namespace properties {
-
-${controls_doc}
-
-#ifndef __DOXYGEN__
-/*
- * Keep the properties definitions hidden from doxygen as it incorrectly parses
- * them as functions.
- */
-${controls_def}
-#endif
-
-/**
- * \brief List of all supported libcamera properties
- */
-extern const ControlIdMap properties {
-${controls_map}
-};
-
-} /* namespace properties */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids.yaml
deleted file mode 100644
index ce627fa0..00000000
--- a/src/libcamera/property_ids.yaml
+++ /dev/null
@@ -1,389 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1-or-later
-#
-# Copyright (C) 2019, Google Inc.
-#
-%YAML 1.2
----
-controls:
- - Location:
- type: int32_t
- description: |
- Camera mounting location
- enum:
- - name: CameraLocationFront
- value: 0
- description: |
- The camera is mounted on the front side of the device, facing the
- user
- - name: CameraLocationBack
- value: 1
- description: |
- The camera is mounted on the back side of the device, facing away
- from the user
- - name: CameraLocationExternal
- value: 2
- description: |
- The camera is attached to the device in a way that allows it to
- be moved freely
-
- - Rotation:
- type: int32_t
- description: |
- The camera rotation is expressed as the angular difference in degrees
- between two reference systems, one relative to the camera module, and
- one defined on the external world scene to be captured when projected
- on the image sensor pixel array.
-
- A camera sensor has a 2-dimensional reference system 'Rc' defined by
- its pixel array read-out order. The origin is set to the first pixel
- being read out, the X-axis points along the column read-out direction
- towards the last columns, and the Y-axis along the row read-out
- direction towards the last row.
-
- A typical example for a sensor with a 2592x1944 pixel array matrix
- observed from the front is
-
- 2591 X-axis 0
- <------------------------+ 0
- .......... ... ..........!
- .......... ... ..........! Y-axis
- ... !
- .......... ... ..........!
- .......... ... ..........! 1943
- V
-
-
- The external world scene reference system 'Rs' is a 2-dimensional
- reference system on the focal plane of the camera module. The origin is
- placed on the top-left corner of the visible scene, the X-axis points
- towards the right, and the Y-axis points towards the bottom of the
- scene. The top, bottom, left and right directions are intentionally not
- defined and depend on the environment in which the camera is used.
-
- A typical example of a (very common) picture of a shark swimming from
- left to right, as seen from the camera, is
-
- 0 X-axis
- 0 +------------------------------------->
- !
- !
- !
- ! |\____)\___
- ! ) _____ __`<
- ! |/ )/
- !
- !
- !
- V
- Y-axis
-
- With the reference system 'Rs' placed on the camera focal plane.
-
- ¸.·˙!
- ¸.·˙ !
- _ ¸.·˙ !
- +-/ \-+¸.·˙ !
- | (o) | ! Camera focal plane
- +-----+˙·.¸ !
- ˙·.¸ !
- ˙·.¸ !
- ˙·.¸!
-
- When projected on the sensor's pixel array, the image and the associated
- reference system 'Rs' are typically (but not always) inverted, due to
- the camera module's lens optical inversion effect.
-
- Assuming the above represented scene of the swimming shark, the lens
- inversion projects the scene and its reference system onto the sensor
- pixel array, seen from the front of the camera sensor, as follow
-
- Y-axis
- ^
- !
- !
- !
- ! |\_____)\__
- ! ) ____ ___.<
- ! |/ )/
- !
- !
- !
- 0 +------------------------------------->
- 0 X-axis
-
- Note the shark being upside-down.
-
- The resulting projected reference system is named 'Rp'.
-
- The camera rotation property is then defined as the angular difference
- in the counter-clockwise direction between the camera reference system
- 'Rc' and the projected scene reference system 'Rp'. It is expressed in
- degrees as a number in the range [0, 360[.
-
- Examples
-
- 0 degrees camera rotation
-
-
- Y-Rp
- ^
- Y-Rc !
- ^ !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- 0 +------------------------------------->
- 0 X-Rc
-
-
- X-Rc 0
- <------------------------------------+ 0
- X-Rp 0 !
- <------------------------------------+ 0 !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rc
- V
- Y-Rp
-
- 90 degrees camera rotation
-
- 0 Y-Rc
- 0 +-------------------->
- ! Y-Rp
- ! ^
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- !
- !
- !
- !
- V
- X-Rc
-
- 180 degrees camera rotation
-
- 0
- <------------------------------------+ 0
- X-Rc !
- Y-Rp !
- ^ !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rc
- 0 +------------------------------------->
- 0 X-Rp
-
- 270 degrees camera rotation
-
- 0 Y-Rc
- 0 +-------------------->
- ! 0
- ! <-----------------------------------+ 0
- ! X-Rp !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! !
- ! V
- ! Y-Rp
- !
- !
- !
- !
- V
- X-Rc
-
-
- Example one - Webcam
-
- A camera module installed on the user facing part of a laptop screen
- casing used for video calls. The captured images are meant to be
- displayed in landscape mode (width > height) on the laptop screen.
-
- The camera is typically mounted upside-down to compensate the lens
- optical inversion effect.
-
- Y-Rp
- Y-Rc ^
- ^ !
- ! !
- ! ! |\_____)\__
- ! ! ) ____ ___.<
- ! ! |/ )/
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- 0 +------------------------------------->
- 0 X-Rc
-
- The two reference systems are aligned, the resulting camera rotation is
- 0 degrees, no rotation correction needs to be applied to the resulting
- image once captured to memory buffers to correctly display it to users.
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! |\____)\___ !
- ! ) _____ __`< !
- ! |/ )/ !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- If the camera sensor is not mounted upside-down to compensate for the
- lens optical inversion, the two reference systems will not be aligned,
- with 'Rp' being rotated 180 degrees relatively to 'Rc'.
-
-
- X-Rc 0
- <------------------------------------+ 0
- !
- Y-Rp !
- ^ !
- ! !
- ! |\_____)\__ !
- ! ) ____ ___.< !
- ! |/ )/ !
- ! !
- ! !
- ! V
- ! Y-Rc
- 0 +------------------------------------->
- 0 X-Rp
-
- The image once captured to memory will then be rotated by 180 degrees
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! __/(_____/| !
- ! >.___ ____ ( !
- ! \( \| !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- A software rotation correction of 180 degrees should be applied to
- correctly display the image.
-
- +--------------------------------------+
- ! !
- ! !
- ! !
- ! |\____)\___ !
- ! ) _____ __`< !
- ! |/ )/ !
- ! !
- ! !
- ! !
- +--------------------------------------+
-
- Example two - Phone camera
-
- A camera installed on the back side of a mobile device facing away from
- the user. The captured images are meant to be displayed in portrait mode
- (height > width) to match the device screen orientation and the device
- usage orientation used when taking the picture.
-
- The camera sensor is typically mounted with its pixel array longer side
- aligned to the device longer side, upside-down mounted to compensate for
- the lens optical inversion effect.
-
- 0 Y-Rc
- 0 +-------------------->
- ! Y-Rp
- ! ^
- ! !
- ! !
- ! !
- ! ! |\_____)\__
- ! ! ) ____ ___.<
- ! ! |/ )/
- ! !
- ! !
- ! !
- ! 0 +------------------------------------->
- ! 0 X-Rp
- !
- !
- !
- !
- V
- X-Rc
-
- The two reference systems are not aligned and the 'Rp' reference
- system is rotated by 90 degrees in the counter-clockwise direction
- relatively to the 'Rc' reference system.
-
- The image once captured to memory will be rotated.
-
- +-------------------------------------+
- | _ _ |
- | \ / |
- | | | |
- | | | |
- | | > |
- | < | |
- | | | |
- | . |
- | V |
- +-------------------------------------+
-
- A correction of 90 degrees in counter-clockwise direction has to be
- applied to correctly display the image in portrait mode on the device
- screen.
-
- +--------------------+
- | |
- | |
- | |
- | |
- | |
- | |
- | |\____)\___ |
- | ) _____ __`< |
- | |/ )/ |
- | |
- | |
- | |
- | |
- | |
- +--------------------+
-...
diff --git a/src/libcamera/property_ids_core.yaml b/src/libcamera/property_ids_core.yaml
new file mode 100644
index 00000000..834454a4
--- /dev/null
+++ b/src/libcamera/property_ids_core.yaml
@@ -0,0 +1,704 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: libcamera
+controls:
+ - Location:
+ type: int32_t
+ description: |
+ Camera mounting location
+ enum:
+ - name: CameraLocationFront
+ value: 0
+ description: |
+ The camera is mounted on the front side of the device, facing the
+ user
+ - name: CameraLocationBack
+ value: 1
+ description: |
+ The camera is mounted on the back side of the device, facing away
+ from the user
+ - name: CameraLocationExternal
+ value: 2
+ description: |
+ The camera is attached to the device in a way that allows it to
+ be moved freely
+
+ - Rotation:
+ type: int32_t
+ description: |
+ The camera physical mounting rotation. It is expressed as the angular
+ difference in degrees between two reference systems, one relative to the
+ camera module, and one defined on the external world scene to be
+ captured when projected on the image sensor pixel array.
+
+ A camera sensor has a 2-dimensional reference system 'Rc' defined by
+ its pixel array read-out order. The origin is set to the first pixel
+ being read out, the X-axis points along the column read-out direction
+ towards the last columns, and the Y-axis along the row read-out
+ direction towards the last row.
+
+ A typical example for a sensor with a 2592x1944 pixel array matrix
+ observed from the front is
+
+ 2591 X-axis 0
+ <------------------------+ 0
+ .......... ... ..........!
+ .......... ... ..........! Y-axis
+ ... !
+ .......... ... ..........!
+ .......... ... ..........! 1943
+ V
+
+
+ The external world scene reference system 'Rs' is a 2-dimensional
+ reference system on the focal plane of the camera module. The origin is
+ placed on the top-left corner of the visible scene, the X-axis points
+ towards the right, and the Y-axis points towards the bottom of the
+ scene. The top, bottom, left and right directions are intentionally not
+ defined and depend on the environment in which the camera is used.
+
+ A typical example of a (very common) picture of a shark swimming from
+ left to right, as seen from the camera, is
+
+ 0 X-axis
+ 0 +------------------------------------->
+ !
+ !
+ !
+ ! |\____)\___
+ ! ) _____ __`<
+ ! |/ )/
+ !
+ !
+ !
+ V
+ Y-axis
+
+ With the reference system 'Rs' placed on the camera focal plane.
+
+ ¸.·˙!
+ ¸.·˙ !
+ _ ¸.·˙ !
+ +-/ \-+¸.·˙ !
+ | (o) | ! Camera focal plane
+ +-----+˙·.¸ !
+ ˙·.¸ !
+ ˙·.¸ !
+ ˙·.¸!
+
+ When projected on the sensor's pixel array, the image and the associated
+ reference system 'Rs' are typically (but not always) inverted, due to
+ the camera module's lens optical inversion effect.
+
+ Assuming the above represented scene of the swimming shark, the lens
+ inversion projects the scene and its reference system onto the sensor
+ pixel array, seen from the front of the camera sensor, as follow
+
+ Y-axis
+ ^
+ !
+ !
+ !
+ ! |\_____)\__
+ ! ) ____ ___.<
+ ! |/ )/
+ !
+ !
+ !
+ 0 +------------------------------------->
+ 0 X-axis
+
+ Note the shark being upside-down.
+
+ The resulting projected reference system is named 'Rp'.
+
+ The camera rotation property is then defined as the angular difference
+ in the counter-clockwise direction between the camera reference system
+ 'Rc' and the projected scene reference system 'Rp'. It is expressed in
+ degrees as a number in the range [0, 360[.
+
+ Examples
+
+ 0 degrees camera rotation
+
+
+ Y-Rp
+ ^
+ Y-Rc !
+ ^ !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ 0 +------------------------------------->
+ 0 X-Rc
+
+
+ X-Rc 0
+ <------------------------------------+ 0
+ X-Rp 0 !
+ <------------------------------------+ 0 !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ V
+ Y-Rp
+
+ 90 degrees camera rotation
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! Y-Rp
+ ! ^
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+ 180 degrees camera rotation
+
+ 0
+ <------------------------------------+ 0
+ X-Rc !
+ Y-Rp !
+ ^ !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ 0 +------------------------------------->
+ 0 X-Rp
+
+ 270 degrees camera rotation
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! 0
+ ! <-----------------------------------+ 0
+ ! X-Rp !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! !
+ ! V
+ ! Y-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+
+ Example one - Webcam
+
+ A camera module installed on the user facing part of a laptop screen
+ casing used for video calls. The captured images are meant to be
+ displayed in landscape mode (width > height) on the laptop screen.
+
+ The camera is typically mounted upside-down to compensate the lens
+ optical inversion effect.
+
+ Y-Rp
+ Y-Rc ^
+ ^ !
+ ! !
+ ! ! |\_____)\__
+ ! ! ) ____ ___.<
+ ! ! |/ )/
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ 0 +------------------------------------->
+ 0 X-Rc
+
+ The two reference systems are aligned, the resulting camera rotation is
+ 0 degrees, no rotation correction needs to be applied to the resulting
+ image once captured to memory buffers to correctly display it to users.
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! |\____)\___ !
+ ! ) _____ __`< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ If the camera sensor is not mounted upside-down to compensate for the
+ lens optical inversion, the two reference systems will not be aligned,
+ with 'Rp' being rotated 180 degrees relatively to 'Rc'.
+
+
+ X-Rc 0
+ <------------------------------------+ 0
+ !
+ Y-Rp !
+ ^ !
+ ! !
+ ! |\_____)\__ !
+ ! ) ____ ___.< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! V
+ ! Y-Rc
+ 0 +------------------------------------->
+ 0 X-Rp
+
+ The image once captured to memory will then be rotated by 180 degrees
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! __/(_____/| !
+ ! >.___ ____ ( !
+ ! \( \| !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ A software rotation correction of 180 degrees should be applied to
+ correctly display the image.
+
+ +--------------------------------------+
+ ! !
+ ! !
+ ! !
+ ! |\____)\___ !
+ ! ) _____ __`< !
+ ! |/ )/ !
+ ! !
+ ! !
+ ! !
+ +--------------------------------------+
+
+ Example two - Phone camera
+
+ A camera installed on the back side of a mobile device facing away from
+ the user. The captured images are meant to be displayed in portrait mode
+ (height > width) to match the device screen orientation and the device
+ usage orientation used when taking the picture.
+
+ The camera sensor is typically mounted with its pixel array longer side
+ aligned to the device longer side, upside-down mounted to compensate for
+ the lens optical inversion effect.
+
+ 0 Y-Rc
+ 0 +-------------------->
+ ! Y-Rp
+ ! ^
+ ! !
+ ! !
+ ! !
+ ! ! |\_____)\__
+ ! ! ) ____ ___.<
+ ! ! |/ )/
+ ! !
+ ! !
+ ! !
+ ! 0 +------------------------------------->
+ ! 0 X-Rp
+ !
+ !
+ !
+ !
+ V
+ X-Rc
+
+ The two reference systems are not aligned and the 'Rp' reference
+ system is rotated by 90 degrees in the counter-clockwise direction
+ relatively to the 'Rc' reference system.
+
+ The image once captured to memory will be rotated.
+
+ +-------------------------------------+
+ | _ _ |
+ | \ / |
+ | | | |
+ | | | |
+ | | > |
+ | < | |
+ | | | |
+ | . |
+ | V |
+ +-------------------------------------+
+
+ A correction of 90 degrees in counter-clockwise direction has to be
+ applied to correctly display the image in portrait mode on the device
+ screen.
+
+ +--------------------+
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |\____)\___ |
+ | ) _____ __`< |
+ | |/ )/ |
+ | |
+ | |
+ | |
+ | |
+ | |
+ +--------------------+
+
+ - Model:
+ type: string
+ description: |
+        The model name shall, to the extent possible, describe the sensor. For
+        most devices this is the model name of the sensor. For some devices,
+        however, the sensor model is unavailable, as the sensor or the entire
+        camera is part of a larger unit and exposed as a black box to the
+        system. In such cases the model name of the smallest device that
+        contains the camera sensor shall be used.
+
+ The model name is not meant to be a camera name displayed to the
+ end-user, but may be combined with other camera information to create a
+ camera name.
+
+ The model name is not guaranteed to be unique in the system nor is
+ it guaranteed to be stable or have any other properties required to make
+ it a good candidate to be used as a permanent identifier of a camera.
+
+ The model name shall describe the camera in a human readable format and
+ shall be encoded in ASCII.
+
+ Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'.
+
+ - UnitCellSize:
+ type: Size
+ description: |
+ The pixel unit cell physical size, in nanometers.
+
+        The UnitCellSize property defines the horizontal and vertical sizes of
+ a single pixel unit, including its active and non-active parts. In
+ other words, it expresses the horizontal and vertical distance between
+ the top-left corners of adjacent pixels.
+
+ The property can be used to calculate the physical size of the sensor's
+ pixel array area and for calibration purposes.
+
+ - PixelArraySize:
+ type: Size
+ description: |
+ The camera sensor pixel array readable area vertical and horizontal
+ sizes, in pixels.
+
+ The PixelArraySize property defines the size in pixel units of the
+        readable part of the full pixel array matrix, including optical black
+ pixels used for calibration, pixels which are not considered valid for
+ capture and active pixels containing valid image data.
+
+ The property describes the maximum size of the raw data captured by the
+ camera, which might not correspond to the physical size of the sensor
+ pixel array matrix, as some portions of the physical pixel array matrix
+ are not accessible and cannot be transmitted out.
+
+ For example, let's consider a pixel array matrix assembled as follows
+
+ +--------------------------------------------------+
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ ... ... ... ... ...
+
+ ... ... ... ... ...
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+ +--------------------------------------------------+
+
+ starting with two lines of non-readable pixels (x), followed by N lines
+ of readable data (D) surrounded by two columns of non-readable pixels on
+ each side, and ending with two more lines of non-readable pixels. Only
+ the readable portion is transmitted to the receiving side, defining the
+ sizes of the largest possible buffer of raw data that can be presented
+ to applications.
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ +----------------------------------------------+ /
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height
+ ... ... ... ... ...
+ ... ... ... ... ...
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+ +----------------------------------------------+ /
+
+ This defines a rectangle whose top-left corner is placed in position (0,
+ 0) and whose vertical and horizontal sizes are defined by this property.
+ All other rectangles that describe portions of the pixel array, such as
+ the optical black pixels rectangles and active pixel areas, are defined
+ relatively to this rectangle.
+
+ All the coordinates are expressed relative to the default sensor readout
+ direction, without any transformation (such as horizontal and vertical
+ flipping) applied. When mapping them to the raw pixel buffer,
+ applications shall take any configured transformation into account.
+
+        \todo Rename this property to Size once we have property
+        categories (i.e. Properties::PixelArray::Size)
+
+ - PixelArrayOpticalBlackRectangles:
+ type: Rectangle
+ size: [n]
+ description: |
+ The pixel array region(s) which contain optical black pixels
+ considered valid for calibration purposes.
+
+ This property describes the position and size of optical black pixel
+ regions in the raw data buffer as stored in memory, which might differ
+ from their actual physical location in the pixel array matrix.
+
+ It is important to note, in fact, that camera sensors might
+ automatically reorder or skip portions of their pixels array matrix when
+        automatically reorder or skip portions of their pixel array matrix when
+ top and bottom optical black rectangles into a single rectangle,
+ transmitted at the beginning of the frame.
+
+ The pixel array contains several areas with different purposes,
+ interleaved by lines and columns which are said not to be valid for
+ capturing purposes. Invalid lines and columns are defined as invalid as
+        capturing purposes. Lines and columns are considered invalid because
+ black shielding placed on top of optical black pixels.
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ x1 x2
+ +--o---------------------------------------o---+ /
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ y1 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ y2 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ y3 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | PixelArraySize.height
+ |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ ... ... ... ... ...
+ ... ... ... ... ...
+ y4 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+ +----------------------------------------------+ /
+
+        The readable pixel array matrix is composed of
+ 2 invalid lines (I)
+ 4 lines of valid optical black pixels (O)
+ 2 invalid lines (I)
+ n lines of valid pixel data (P)
+ 2 invalid lines (I)
+
+ And the position of the optical black pixel rectangles is defined by
+
+ PixelArrayOpticalBlackRectangles = {
+ { x1, y1, x2 - x1 + 1, y2 - y1 + 1 },
+ { x1, y3, 2, y4 - y3 + 1 },
+ { x2, y3, 2, y4 - y3 + 1 },
+ };
+
+        If the camera, when capturing the full pixel array matrix,
+        automatically skips the invalid lines and columns, it produces the
+        following data buffer when captured to memory
+
+ PixelArraySize.width
+ /----------------------------------------------/
+ x1
+ +--------------------------------------------o-+ /
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
+ y1 oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | PixelArraySize.height
+ ... ... ... ... ... |
+ ... ... ... ... ... |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+ +----------------------------------------------+ /
+
+        then the invalid lines and columns should not be reported as part of
+        the PixelArraySize property in the first place.
+
+ In this case, the position of the black pixel rectangles will be
+
+ PixelArrayOpticalBlackRectangles = {
+                 { 0, 0, PixelArraySize[0], y1 },
+                 { 0, y1, 2, PixelArraySize[1] - y1 },
+                 { x1, y1, 2, PixelArraySize[1] - y1 },
+ };
+
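+        As an example of how applications may use this property, the optical
+        black rectangles allow estimating the sensor black level from the raw
+        data at capture time. A minimal sketch, where raw, stride and
+        blackRectangles are illustrative names:
+
+              uint64_t sum = 0;
+              uint64_t count = 0;
+
+              for (const Rectangle &rect : blackRectangles) {
+                      for (int y = rect.y; y < rect.y + static_cast<int>(rect.height); y++)
+                              for (int x = rect.x; x < rect.x + static_cast<int>(rect.width); x++)
+                                      sum += raw[y * stride + x];
+                      count += rect.width * rect.height;
+              }
+
+              uint64_t blackLevel = sum / count;
+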
+        \todo Rename this property to OpticalBlackRectangles once we have
+        property categories (i.e. Properties::PixelArray::OpticalBlackRectangles)
+
+ - PixelArrayActiveAreas:
+ type: Rectangle
+ size: [n]
+ description: |
+ The PixelArrayActiveAreas property defines the (possibly multiple and
+ overlapping) portions of the camera sensor readable pixel matrix
+ which are considered valid for image acquisition purposes.
+
+ This property describes an arbitrary number of overlapping rectangles,
+ with each rectangle representing the maximum image size that the camera
+        sensor can produce for a particular aspect ratio. They are defined
+        relative to the PixelArraySize rectangle.
+
+ When multiple rectangles are reported, they shall be ordered from the
+ tallest to the shortest.
+
+ Example 1
+        A camera sensor which only produces images with a 4:3 aspect ratio
+        will report a single PixelArrayActiveAreas rectangle, from which all
+        other image formats are obtained by cropping the field of view and/or
+        applying pixel sub-sampling techniques such as pixel skipping or
+        binning.
+
+ PixelArraySize.width
+ /----------------/
+ x1 x2
+ (0,0)-> +-o------------o-+ /
+ y1 o +------------+ | |
+ | |////////////| | |
+ | |////////////| | | PixelArraySize.height
+ | |////////////| | |
+ y2 o +------------+ | |
+ +----------------+ /
+
+ The property reports a single rectangle
+
+ PixelArrayActiveAreas = (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
+
+ Example 2
+ A camera sensor which can produce images in different native
+ resolutions will report several overlapping rectangles, one for each
+ natively supported resolution.
+
+ PixelArraySize.width
+ /------------------/
+ x1 x2 x3 x4
+ (0,0)-> +o---o------o---o+ /
+ y1 o +------+ | |
+ | |//////| | |
+ y2 o+---+------+---+| |
+ ||///|//////|///|| | PixelArraySize.height
+ y3 o+---+------+---+| |
+ | |//////| | |
+ y4 o +------+ | |
+ +----+------+----+ /
+
+ The property reports two rectangles
+
+ PixelArrayActiveAreas = ((x2, y1, x3 - x2 + 1, y4 - y1 + 1),
+ (x1, y2, x4 - x1 + 1, y3 - y2 + 1))
+
+        The first rectangle describes the maximum field of view of all image
+        formats with a 4:3 aspect ratio, while the second one describes the
+        maximum field of view of all image formats with a 16:9 aspect ratio.
+
+ Multiple rectangles shall only be reported when the sensor can't capture
+ the pixels in the corner regions. If all the pixels in the (x1,y1) -
+ (x4,y4) area can be captured, the PixelArrayActiveAreas property shall
+        contain the single rectangle (x1,y1) - (x4,y4).
+
+        \todo Rename this property to ActiveAreas once we have property
+        categories (i.e. Properties::PixelArray::ActiveAreas)
+
+ - ScalerCropMaximum:
+ type: Rectangle
+ description: |
+ The maximum valid rectangle for the controls::ScalerCrop control. This
+ reflects the minimum mandatory cropping applied in the camera sensor and
+      the rest of the pipeline. Like the ScalerCrop control, it defines a
+ rectangle taken from the sensor's active pixel array.
+
+ This property is valid only after the camera has been successfully
+ configured and its value may change whenever a new configuration is
+ applied.
+
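+      For example, an application can clamp a candidate crop rectangle to
+      this maximum before setting the control. A minimal sketch, where
+      requestedCrop and scalerCropMaximum are illustrative variables:
+
+        Rectangle crop = requestedCrop.boundedTo(scalerCropMaximum);
+        controls.set(controls::ScalerCrop, crop);
+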
+ \todo Turn this property into a "maximum control value" for the
+ ScalerCrop control once "dynamic" controls have been implemented.
+
+ - SensorSensitivity:
+ type: float
+ description: |
+ The relative sensitivity of the chosen sensor mode.
+
+ Some sensors have readout modes with different sensitivities. For example,
+ a binned camera mode might, with the same exposure and gains, produce
+      twice the signal level of the full resolution readout. When the binned
+      mode is chosen, this is signalled by reporting a value here that is
+      twice that of the full resolution mode. This value will be valid after
+      the configure function has returned successfully.
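+
+      For example, to maintain comparable image brightness across modes, an
+      application switching from a mode with sensitivity 1.0 to one with
+      sensitivity 2.0 may halve its exposure time or gain to obtain
+      approximately the same signal level.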
+
+ - SystemDevices:
+ type: int64_t
+ size: [n]
+ description: |
+ A list of integer values of type dev_t denoting the major and minor
+ device numbers of the underlying devices used in the operation of this
+ camera.
+
+ Different cameras may report identical devices.
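+
+      Each value can be decoded into its major and minor device numbers with
+      the standard macros from <sys/sysmacros.h>. A minimal sketch, where
+      devices holds the property's values:
+
+            dev_t dev = static_cast<dev_t>(devices[i]);
+            unsigned int majorNum = major(dev);
+            unsigned int minorNum = minor(dev);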
+
+...
diff --git a/src/libcamera/property_ids_draft.yaml b/src/libcamera/property_ids_draft.yaml
new file mode 100644
index 00000000..62f0e242
--- /dev/null
+++ b/src/libcamera/property_ids_draft.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# Copyright (C) 2019, Google Inc.
+#
+%YAML 1.1
+---
+vendor: draft
+controls:
+ - ColorFilterArrangement:
+ type: int32_t
+ vendor: draft
+ description: |
+      The arrangement of color filters on the sensor; represents the colors in the
+ top-left 2x2 section of the sensor, in reading order. Currently
+ identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+ enum:
+ - name: RGGB
+ value: 0
+ description: RGGB Bayer pattern
+ - name: GRBG
+ value: 1
+ description: GRBG Bayer pattern
+ - name: GBRG
+ value: 2
+ description: GBRG Bayer pattern
+ - name: BGGR
+ value: 3
+ description: BGGR Bayer pattern
+ - name: RGB
+ value: 4
+ description: |
+ Sensor is not Bayer; output has 3 16-bit values for each pixel,
+ instead of just 1 16-bit value per pixel.
+ - name: MONO
+ value: 5
+ description: |
+ Sensor is not Bayer; output consists of a single colour channel.
+
+...
diff --git a/src/libcamera/proxy/ipa_proxy_linux.cpp b/src/libcamera/proxy/ipa_proxy_linux.cpp
deleted file mode 100644
index c7218fb4..00000000
--- a/src/libcamera/proxy/ipa_proxy_linux.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy_linux.cpp - Default Image Processing Algorithm proxy for Linux
- */
-
-#include <vector>
-
-#include <ipa/ipa_interface.h>
-#include <ipa/ipa_module_info.h>
-
-#include "ipa_module.h"
-#include "ipa_proxy.h"
-#include "ipc_unixsocket.h"
-#include "log.h"
-#include "process.h"
-
-namespace libcamera {
-
-LOG_DECLARE_CATEGORY(IPAProxy)
-
-class IPAProxyLinux : public IPAProxy
-{
-public:
- IPAProxyLinux(IPAModule *ipam);
- ~IPAProxyLinux();
-
- int init() override { return 0; }
- void configure(const std::map<unsigned int, IPAStream> &streamConfig,
- const std::map<unsigned int, const ControlInfoMap &> &entityControls) override {}
- void mapBuffers(const std::vector<IPABuffer> &buffers) override {}
- void unmapBuffers(const std::vector<unsigned int> &ids) override {}
- void processEvent(const IPAOperationData &event) override {}
-
-private:
- void readyRead(IPCUnixSocket *ipc);
-
- Process *proc_;
-
- IPCUnixSocket *socket_;
-};
-
-IPAProxyLinux::IPAProxyLinux(IPAModule *ipam)
- : proc_(nullptr), socket_(nullptr)
-{
- LOG(IPAProxy, Debug)
- << "initializing dummy proxy: loading IPA from "
- << ipam->path();
-
- std::vector<int> fds;
- std::vector<std::string> args;
- args.push_back(ipam->path());
- const std::string path = resolvePath("ipa_proxy_linux");
- if (path.empty()) {
- LOG(IPAProxy, Error)
- << "Failed to get proxy worker path";
- return;
- }
-
- socket_ = new IPCUnixSocket();
- int fd = socket_->create();
- if (fd < 0) {
- LOG(IPAProxy, Error)
- << "Failed to create socket";
- return;
- }
- socket_->readyRead.connect(this, &IPAProxyLinux::readyRead);
- args.push_back(std::to_string(fd));
- fds.push_back(fd);
-
- proc_ = new Process();
- int ret = proc_->start(path, args, fds);
- if (ret) {
- LOG(IPAProxy, Error)
- << "Failed to start proxy worker process";
- return;
- }
-
- valid_ = true;
-}
-
-IPAProxyLinux::~IPAProxyLinux()
-{
- delete proc_;
- delete socket_;
-}
-
-void IPAProxyLinux::readyRead(IPCUnixSocket *ipc)
-{
-}
-
-REGISTER_IPA_PROXY(IPAProxyLinux)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/proxy/meson.build b/src/libcamera/proxy/meson.build
index efc11323..8bd1b135 100644
--- a/src/libcamera/proxy/meson.build
+++ b/src/libcamera/proxy/meson.build
@@ -1,3 +1,20 @@
-libcamera_sources += files([
- 'ipa_proxy_linux.cpp',
-])
+# SPDX-License-Identifier: CC0-1.0
+
+# generate {pipeline}_ipa_proxy.cpp
+foreach mojom : ipa_mojoms
+ proxy = custom_target(mojom['name'] + '_proxy_cpp',
+ input : mojom['mojom'],
+ output : mojom['name'] + '_ipa_proxy.cpp',
+ depends : mojom_templates,
+ command : [
+ mojom_generator, 'generate',
+ '-g', 'libcamera',
+ '--bytecode_path', mojom_templates_dir,
+ '--libcamera_generate_proxy_cpp',
+ '--libcamera_output_path=@OUTPUT@',
+ './' + '@INPUT@'
+ ],
+ env : py_build_env)
+
+ libcamera_internal_sources += proxy
+endforeach
diff --git a/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp b/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp
deleted file mode 100644
index 7d6287c7..00000000
--- a/src/libcamera/proxy/worker/ipa_proxy_linux_worker.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * ipa_proxy_linux_worker.cpp - Default Image Processing Algorithm proxy worker for Linux
- */
-
-#include <iostream>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <ipa/ipa_interface.h>
-#include <libcamera/event_dispatcher.h>
-#include <libcamera/logging.h>
-
-#include "ipa_module.h"
-#include "ipc_unixsocket.h"
-#include "log.h"
-#include "thread.h"
-
-using namespace libcamera;
-
-LOG_DEFINE_CATEGORY(IPAProxyLinuxWorker)
-
-void readyRead(IPCUnixSocket *ipc)
-{
- IPCUnixSocket::Payload message;
- int ret;
-
- ret = ipc->receive(&message);
- if (ret) {
- LOG(IPAProxyLinuxWorker, Error)
- << "Receive message failed: " << ret;
- return;
- }
-
- LOG(IPAProxyLinuxWorker, Debug) << "Received a message!";
-}
-
-int main(int argc, char **argv)
-{
- /* Uncomment this for debugging. */
-#if 0
- std::string logPath = "/tmp/libcamera.worker." +
- std::to_string(getpid()) + ".log";
- logSetFile(logPath.c_str());
-#endif
-
- if (argc < 3) {
- LOG(IPAProxyLinuxWorker, Debug)
- << "Tried to start worker with no args";
- return EXIT_FAILURE;
- }
-
- int fd = std::stoi(argv[2]);
- LOG(IPAProxyLinuxWorker, Debug)
- << "Starting worker for IPA module " << argv[1]
- << " with IPC fd = " << fd;
-
- std::unique_ptr<IPAModule> ipam = std::make_unique<IPAModule>(argv[1]);
- if (!ipam->isValid() || !ipam->load()) {
- LOG(IPAProxyLinuxWorker, Error)
- << "IPAModule " << argv[1] << " should be valid but isn't";
- return EXIT_FAILURE;
- }
-
- IPCUnixSocket socket;
- if (socket.bind(fd) < 0) {
- LOG(IPAProxyLinuxWorker, Error) << "IPC socket binding failed";
- return EXIT_FAILURE;
- }
- socket.readyRead.connect(&readyRead);
-
- struct ipa_context *ipac = ipam->createContext();
- if (!ipac) {
- LOG(IPAProxyLinuxWorker, Error) << "Failed to create IPA context";
- return EXIT_FAILURE;
- }
-
- LOG(IPAProxyLinuxWorker, Debug) << "Proxy worker successfully started";
-
- /* \todo upgrade listening loop */
- EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
- while (1)
- dispatcher->processEvents();
-
- ipac->ops->destroy(ipac);
-
- return 0;
-}
diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build
index 839156f7..8c54a2e2 100644
--- a/src/libcamera/proxy/worker/meson.build
+++ b/src/libcamera/proxy/worker/meson.build
@@ -1,16 +1,32 @@
-ipa_proxy_sources = [
- ['ipa_proxy_linux', 'ipa_proxy_linux_worker.cpp']
-]
+# SPDX-License-Identifier: CC0-1.0
-proxy_install_dir = join_paths(get_option('libexecdir'), 'libcamera')
+proxy_install_dir = libcamera_libexecdir
-foreach t : ipa_proxy_sources
- proxy = executable(t[0], t[1],
- include_directories : libcamera_internal_includes,
+# generate {pipeline}_ipa_proxy_worker.cpp
+foreach mojom : ipa_mojoms
+ worker = custom_target(mojom['name'] + '_proxy_worker',
+ input : mojom['mojom'],
+ output : mojom['name'] + '_ipa_proxy_worker.cpp',
+ depends : mojom_templates,
+ command : [
+ mojom_generator, 'generate',
+ '-g', 'libcamera',
+ '--bytecode_path', mojom_templates_dir,
+ '--libcamera_generate_proxy_worker',
+ '--libcamera_output_path=@OUTPUT@',
+ './' + '@INPUT@'
+ ],
+ env : py_build_env)
+
+ proxy = executable(mojom['name'] + '_ipa_proxy', worker,
install : true,
install_dir : proxy_install_dir,
- dependencies : libcamera_dep)
+ dependencies : libcamera_private)
endforeach
config_h.set('IPA_PROXY_DIR',
- '"' + join_paths(get_option('prefix'), proxy_install_dir) + '"')
+ '"' + get_option('prefix') / proxy_install_dir + '"')
+
+summary({
+ 'IPA_PROXY_DIR' : config_h.get('IPA_PROXY_DIR'),
+ }, section : 'Paths')
diff --git a/src/libcamera/pub_key.cpp b/src/libcamera/pub_key.cpp
new file mode 100644
index 00000000..f1d73a5c
--- /dev/null
+++ b/src/libcamera/pub_key.cpp
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Public key signature verification
+ */
+
+#include "libcamera/internal/pub_key.h"
+
+#if HAVE_CRYPTO
+#include <openssl/evp.h>
+#include <openssl/rsa.h>
+#include <openssl/sha.h>
+#include <openssl/x509.h>
+#elif HAVE_GNUTLS
+#include <gnutls/abstract.h>
+#endif
+
+/**
+ * \file pub_key.h
+ * \brief Public key signature verification
+ */
+
+namespace libcamera {
+
+/**
+ * \class PubKey
+ * \brief Public key wrapper for signature verification
+ *
+ * The PubKey class wraps a public key and implements signature verification. It
+ * only supports RSA keys and the RSA-SHA256 signature algorithm.
+ */
+
+/**
+ * \brief Construct a PubKey from key data
+ * \param[in] key Key data encoded in DER format
+ */
+PubKey::PubKey([[maybe_unused]] Span<const uint8_t> key)
+ : valid_(false)
+{
+#if HAVE_CRYPTO
+ const uint8_t *data = key.data();
+ pubkey_ = d2i_PUBKEY(nullptr, &data, key.size());
+ if (!pubkey_)
+ return;
+
+ valid_ = true;
+#elif HAVE_GNUTLS
+ int ret = gnutls_pubkey_init(&pubkey_);
+ if (ret < 0)
+ return;
+
+ const gnutls_datum_t gnuTlsKey{
+ const_cast<unsigned char *>(key.data()),
+ static_cast<unsigned int>(key.size())
+ };
+ ret = gnutls_pubkey_import(pubkey_, &gnuTlsKey, GNUTLS_X509_FMT_DER);
+ if (ret < 0)
+ return;
+
+ valid_ = true;
+#endif
+}
+
+PubKey::~PubKey()
+{
+#if HAVE_CRYPTO
+ EVP_PKEY_free(pubkey_);
+#elif HAVE_GNUTLS
+ gnutls_pubkey_deinit(pubkey_);
+#endif
+}
+
+/**
+ * \fn bool PubKey::isValid() const
+ * \brief Check if the public key is valid
+ * \return True if the public key is valid, false otherwise
+ */
+
+/**
+ * \brief Verify signature on data
+ * \param[in] data The signed data
+ * \param[in] sig The signature
+ *
+ * Verify that the signature \a sig matches the signed \a data for the public
+ * key. The signature algorithm is hardcoded to RSA-SHA256.
+ *
+ * \return True if the signature is valid, false otherwise
+ */
+bool PubKey::verify([[maybe_unused]] Span<const uint8_t> data,
+ [[maybe_unused]] Span<const uint8_t> sig) const
+{
+ if (!valid_)
+ return false;
+
+#if HAVE_CRYPTO
+ /*
+ * Create and initialize a public key algorithm context for signature
+ * verification.
+ */
+ EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pubkey_, nullptr);
+ if (!ctx)
+ return false;
+
+ if (EVP_PKEY_verify_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PADDING) <= 0 ||
+ EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
+ return false;
+ }
+
+ /* Calculate the SHA256 digest of the data. */
+ uint8_t digest[SHA256_DIGEST_LENGTH];
+ SHA256(data.data(), data.size(), digest);
+
+ /* Decrypt the signature and verify it matches the digest. */
+ int ret = EVP_PKEY_verify(ctx, sig.data(), sig.size(), digest,
+ SHA256_DIGEST_LENGTH);
+ EVP_PKEY_CTX_free(ctx);
+ return ret == 1;
+#elif HAVE_GNUTLS
+ const gnutls_datum_t gnuTlsData{
+ const_cast<unsigned char *>(data.data()),
+ static_cast<unsigned int>(data.size())
+ };
+
+ const gnutls_datum_t gnuTlsSig{
+ const_cast<unsigned char *>(sig.data()),
+ static_cast<unsigned int>(sig.size())
+ };
+
+ int ret = gnutls_pubkey_verify_data2(pubkey_, GNUTLS_SIGN_RSA_SHA256, 0,
+ &gnuTlsData, &gnuTlsSig);
+ return ret >= 0;
+#else
+ return false;
+#endif
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp
index ea33736f..8c56ed30 100644
--- a/src/libcamera/request.cpp
+++ b/src/libcamera/request.cpp
@@ -2,30 +2,313 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.cpp - Capture request handling
+ * Capture request handling
*/
-#include <libcamera/request.h>
+#include "libcamera/internal/request.h"
#include <map>
+#include <sstream>
+
+#include <libcamera/base/log.h>
-#include <libcamera/buffer.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
+#include <libcamera/fence.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>
-#include "camera_controls.h"
-#include "log.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_controls.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/tracepoints.h"
/**
- * \file request.h
+ * \file libcamera/request.h
* \brief Describes a frame capture request to be processed by a camera
*/
+/**
+ * \internal
+ * \file libcamera/internal/request.h
+ * \brief Internal support for request handling
+ */
+
namespace libcamera {
LOG_DEFINE_CATEGORY(Request)
+#ifndef __DOXYGEN_PUBLIC__
+/**
+ * \class Request::Private
+ * \brief Request private data
+ *
+ * The Request::Private class stores all private data associated with a
+ * request. It implements the d-pointer design pattern to hide core
+ * Request data from the public API, and exposes utility functions to
+ * internal users of the request (namely the PipelineHandler class and its
+ * subclasses).
+ */
+
+/**
+ * \brief Create a Request::Private
+ * \param camera The Camera that creates the request
+ */
+Request::Private::Private(Camera *camera)
+ : camera_(camera), cancelled_(false)
+{
+}
+
+Request::Private::~Private()
+{
+ doCancelRequest();
+}
+
+/**
+ * \fn Request::Private::camera()
+ * \brief Retrieve the camera this request has been queued to
+ * \return The Camera this request has been queued to, or nullptr if the
+ * request hasn't been queued
+ */
+
+/**
+ * \brief Check if a request has buffers yet to be completed
+ *
+ * \return True if the request has buffers pending for completion, false
+ * otherwise
+ */
+bool Request::Private::hasPendingBuffers() const
+{
+ return !pending_.empty();
+}
+
+/**
+ * \brief Complete a buffer for the request
+ * \param[in] buffer The buffer that has completed
+ *
+ * A request tracks the status of all buffers it contains through a set of
+ * pending buffers. This function removes the \a buffer from the set to mark it
+ * as complete. All buffers associated with the request shall be marked as
+ * complete by calling this function once and once only before reporting the
+ * request as complete with the complete() function.
+ *
+ * \return True if all buffers contained in the request have completed, false
+ * otherwise
+ */
+bool Request::Private::completeBuffer(FrameBuffer *buffer)
+{
+ LIBCAMERA_TRACEPOINT(request_complete_buffer, this, buffer);
+
+ int ret = pending_.erase(buffer);
+ ASSERT(ret == 1);
+
+ buffer->_d()->setRequest(nullptr);
+
+ if (buffer->metadata().status == FrameMetadata::FrameCancelled)
+ cancelled_ = true;
+
+ return !hasPendingBuffers();
+}
+
+/**
+ * \brief Complete a queued request
+ *
+ * Mark the request as complete by updating its status to RequestComplete,
+ * unless buffers have been cancelled in which case the status is set to
+ * RequestCancelled.
+ */
+void Request::Private::complete()
+{
+ Request *request = _o<Request>();
+
+ ASSERT(request->status() == RequestPending);
+ ASSERT(!hasPendingBuffers());
+
+ request->status_ = cancelled_ ? RequestCancelled : RequestComplete;
+
+ LOG(Request, Debug) << request->toString();
+
+ LIBCAMERA_TRACEPOINT(request_complete, this);
+}
+
+void Request::Private::doCancelRequest()
+{
+ Request *request = _o<Request>();
+
+ for (FrameBuffer *buffer : pending_) {
+ buffer->_d()->cancel();
+ camera_->bufferCompleted.emit(request, buffer);
+ }
+
+ cancelled_ = true;
+ pending_.clear();
+ notifiers_.clear();
+ timer_.reset();
+}
+
+/**
+ * \brief Cancel a queued request
+ *
+ * Mark the request and its associated buffers as cancelled and complete it.
+ *
+ * Set each pending buffer in error state and emit the buffer completion signal
+ * before completing the Request.
+ */
+void Request::Private::cancel()
+{
+ LIBCAMERA_TRACEPOINT(request_cancel, this);
+
+ Request *request = _o<Request>();
+ ASSERT(request->status() == RequestPending);
+
+ doCancelRequest();
+}
+
+/**
+ * \brief Reset the request internal data to default values
+ *
+ * After calling this function, all request internal data will have default
+ * values as if the Request::Private instance had just been constructed.
+ */
+void Request::Private::reset()
+{
+ sequence_ = 0;
+ cancelled_ = false;
+ prepared_ = false;
+ pending_.clear();
+ notifiers_.clear();
+ timer_.reset();
+}
+
+/*
+ * Helper function to save some lines of code and make sure prepared_ is set
+ * to true before emitting the signal.
+ */
+void Request::Private::emitPrepareCompleted()
+{
+ prepared_ = true;
+ prepared.emit();
+}
+
+/**
+ * \brief Prepare the Request to be queued to the device
+ * \param[in] timeout Optional expiration timeout
+ *
+ * Prepare a Request to be queued to the hardware device by ensuring it is
+ * ready for the incoming memory transfers.
+ *
+ * This currently means waiting on each frame buffer acquire fence to be
+ * signalled. An optional expiration timeout can be specified. If not all the
+ * fences have been signalled correctly before the timeout expires the Request
+ * is cancelled.
+ *
+ * The function immediately emits the prepared signal if all the prepare
+ * operations have been completed synchronously. If instead the prepare
+ * operations require waiting for the completion of asynchronous events, such
+ * as fence notifications or timer expiration, the prepared signal is emitted
+ * upon completion of the asynchronous event.
+ *
+ * As we currently only handle fences, the function emits the prepared signal
+ * immediately if there are no fences to wait on. Otherwise the prepared signal
+ * is emitted when all fences have been signalled or the optional timeout has
+ * expired.
+ *
+ * If not all the fences have been correctly signalled or the optional timeout
+ * has expired the Request will be cancelled and the Request::prepared signal
+ * emitted.
+ *
+ * The intended user of this function is the PipelineHandler base class, which
+ * 'prepares' a Request before queuing it to the hardware device.
+ */
+void Request::Private::prepare(std::chrono::milliseconds timeout)
+{
+ /* Create and connect one notifier for each synchronization fence. */
+ for (FrameBuffer *buffer : pending_) {
+ const Fence *fence = buffer->_d()->fence();
+ if (!fence)
+ continue;
+
+ std::unique_ptr<EventNotifier> notifier =
+ std::make_unique<EventNotifier>(fence->fd().get(),
+ EventNotifier::Read);
+
+ notifier->activated.connect(this, [this, buffer] {
+ notifierActivated(buffer);
+ });
+
+ notifiers_[buffer] = std::move(notifier);
+ }
+
+ if (notifiers_.empty()) {
+ emitPrepareCompleted();
+ return;
+ }
+
+ /*
+ * In case a timeout is specified, create a timer and set it up.
+ *
+ * The timer must be created here instead of in the Request constructor,
+ * in order to be bound to the pipeline handler thread.
+ */
+ if (timeout != 0ms) {
+ timer_ = std::make_unique<Timer>();
+ timer_->timeout.connect(this, &Request::Private::timeout);
+ timer_->start(timeout);
+ }
+}
+
+/**
+ * \var Request::Private::prepared
+ * \brief Request preparation completed signal
+ *
+ * The signal is emitted once the request preparation has completed and the
+ * request is ready to be queued. The Request might complete with errors, in
+ * which case it is cancelled.
+ *
+ * The intended slot for this signal is the PipelineHandler::doQueueRequests()
+ * function, which queues requests after they have been prepared or cancels
+ * them if preparation has failed.
+ */
+
+void Request::Private::notifierActivated(FrameBuffer *buffer)
+{
+ /* Close the fence if successfully signalled. */
+ ASSERT(buffer);
+ buffer->releaseFence();
+
+ /* Remove the entry from the map and check if other fences are pending. */
+ auto it = notifiers_.find(buffer);
+ ASSERT(it != notifiers_.end());
+ notifiers_.erase(it);
+
+ Request *request = _o<Request>();
+ LOG(Request, Debug)
+ << "Request " << request->cookie() << " buffer " << buffer
+ << " fence signalled";
+
+ if (!notifiers_.empty())
+ return;
+
+ /* All fences completed, delete the timer and emit the prepared signal. */
+ timer_.reset();
+ emitPrepareCompleted();
+}
+
+void Request::Private::timeout()
+{
+ /* A timeout can only happen if there are fences not yet signalled. */
+ ASSERT(!notifiers_.empty());
+ notifiers_.clear();
+
+ Request *request = _o<Request>();
+ LOG(Request, Debug) << "Request prepare timeout: " << request->cookie();
+
+ cancel();
+
+ emitPrepareCompleted();
+}
+#endif /* __DOXYGEN_PUBLIC__ */
+
/**
* \enum Request::Status
* Request completion status
@@ -38,6 +321,20 @@ LOG_DEFINE_CATEGORY(Request)
*/
/**
+ * \enum Request::ReuseFlag
+ * Flags to control the behavior of Request::reuse()
+ * \var Request::Default
+ * Don't reuse buffers
+ * \var Request::ReuseBuffers
+ * Reuse the buffers that were previously added by addBuffer()
+ */
+
+/**
+ * \typedef Request::BufferMap
+ * \brief A map of Stream to FrameBuffer pointers
+ */
+
+/**
* \class Request
* \brief A frame capture request
*
@@ -51,33 +348,65 @@ LOG_DEFINE_CATEGORY(Request)
* \param[in] cookie Opaque cookie for application use
*
* The \a cookie is stored in the request and is accessible through the
- * cookie() method at any time. It is typically used by applications to map the
- * request to an external resource in the request completion handler, and is
+ * cookie() function at any time. It is typically used by applications to map
+ * the request to an external resource in the request completion handler, and is
* completely opaque to libcamera.
- *
*/
Request::Request(Camera *camera, uint64_t cookie)
- : camera_(camera), cookie_(cookie), status_(RequestPending),
- cancelled_(false)
+ : Extensible(std::make_unique<Private>(camera)),
+ cookie_(cookie), status_(RequestPending)
{
- /**
- * \todo Should the Camera expose a validator instance, to avoid
- * creating a new instance for each request?
- */
- validator_ = new CameraControlValidator(camera);
- controls_ = new ControlList(controls::controls, validator_);
+ controls_ = new ControlList(controls::controls,
+ camera->_d()->validator());
/**
- * \todo: Add a validator for metadata controls.
+ * \todo Add a validator for metadata controls.
*/
metadata_ = new ControlList(controls::controls);
+
+ LIBCAMERA_TRACEPOINT(request_construct, this);
+
+ LOG(Request, Debug) << "Created request - cookie: " << cookie_;
}
Request::~Request()
{
+ LIBCAMERA_TRACEPOINT(request_destroy, this);
+
delete metadata_;
delete controls_;
- delete validator_;
+}
+
+/**
+ * \brief Reset the request for reuse
+ * \param[in] flags Indicate whether or not to reuse the buffers
+ *
+ * Reset the status and controls associated with the request, to allow it to
+ * be reused and requeued without destruction. This function shall be called
+ * prior to queueing the request to the camera, in lieu of constructing a new
+ * request. The application can reuse the buffers that were previously added
+ * to the request via addBuffer() by setting \a flags to ReuseBuffers.
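+ *
+ * A typical requeue sequence in a request completion handler, shown as a
+ * minimal sketch where camera is an illustrative pointer to the Camera in
+ * use:
+ *
+ * \code{.cpp}
+ * void requestComplete(Request *request)
+ * {
+ *         if (request->status() == Request::RequestCancelled)
+ *                 return;
+ *
+ *         // ... consume the completed buffers ...
+ *
+ *         request->reuse(Request::ReuseBuffers);
+ *         camera->queueRequest(request);
+ * }
+ * \endcode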
+ */
+void Request::reuse(ReuseFlag flags)
+{
+ LIBCAMERA_TRACEPOINT(request_reuse, this);
+
+ _d()->reset();
+
+ if (flags & ReuseBuffers) {
+ for (auto pair : bufferMap_) {
+ FrameBuffer *buffer = pair.second;
+ buffer->_d()->setRequest(this);
+ _d()->pending_.insert(buffer);
+ }
+ } else {
+ bufferMap_.clear();
+ }
+
+ status_ = RequestPending;
+
+ controls_->clear();
+ metadata_->clear();
}
/**
@@ -86,8 +415,8 @@ Request::~Request()
*
* Requests store a list of controls to be applied to all frames captured for
* the request. They are created with an empty list of controls that can be
- * accessed through this method and updated with ControlList::operator[]() or
- * ControlList::update().
+ * accessed through this function. Control values can be retrieved using
+ * ControlList::get() and updated using ControlList::set().
*
* Only controls supported by the camera to which this request will be
* submitted shall be included in the controls list. Attempting to add an
@@ -110,19 +439,36 @@ Request::~Request()
* \brief Add a FrameBuffer with its associated Stream to the Request
* \param[in] stream The stream the buffer belongs to
* \param[in] buffer The FrameBuffer to add to the request
+ * \param[in] fence The optional fence
*
* A reference to the buffer is stored in the request. The caller is responsible
* for ensuring that the buffer will remain valid until the request complete
* callback is called.
*
* A request can only contain one buffer per stream. If a buffer has already
- * been added to the request for the same stream, this method returns -EEXIST.
+ * been added to the request for the same stream, this function returns -EEXIST.
+ *
+ * A Fence can be optionally associated with the \a buffer.
+ *
+ * When a valid Fence is provided to this function, \a fence is moved to \a
+ * buffer and this Request will only be queued to the device once the
+ * fences of all its buffers have been correctly signalled.
+ *
+ * If the \a fence associated with \a buffer isn't signalled, the request will
+ * fail after a timeout. The buffer will still contain the fence, which
+ * applications must retrieve with FrameBuffer::releaseFence() before the buffer
+ * can be reused in another request. Attempting to add a buffer that still
+ * contains a fence to a request will result in this function returning -EEXIST.
+ *
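+ * A minimal sketch of adding a buffer with a fence, where fenceFd is an
+ * illustrative UniqueFD wrapping a synchronization fence file descriptor:
+ *
+ * \code{.cpp}
+ * auto fence = std::make_unique<Fence>(std::move(fenceFd));
+ * int ret = request->addBuffer(stream, buffer, std::move(fence));
+ * \endcode
+ *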
+ * \sa FrameBuffer::releaseFence()
*
* \return 0 on success or a negative error code otherwise
* \retval -EEXIST The request already contains a buffer for the stream
+ * or the buffer still references a fence
* \retval -EINVAL The buffer does not reference a valid Stream
*/
-int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
+int Request::addBuffer(const Stream *stream, FrameBuffer *buffer,
+ std::unique_ptr<Fence> fence)
{
if (!stream) {
LOG(Request, Error) << "Invalid stream reference";
@@ -135,10 +481,22 @@ int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
return -EEXIST;
}
- buffer->request_ = this;
- pending_.insert(buffer);
+	/*
+	 * Make sure the fence has been extracted from the buffer
+	 * to avoid waiting on a stale fence.
+	 */
+	if (buffer->_d()->fence()) {
+		LOG(Request, Error) << "Can't add buffer that still references a fence";
+		return -EEXIST;
+	}
+
+	buffer->_d()->setRequest(this);
+	_d()->pending_.insert(buffer);
 	bufferMap_[stream] = buffer;
 
+	if (fence && fence->isValid())
+		buffer->_d()->setFence(std::move(fence));
+
return 0;
}
@@ -157,9 +515,9 @@ int Request::addBuffer(Stream *stream, FrameBuffer *buffer)
* \return The buffer associated with the stream, or nullptr if the stream is
* not part of this request
*/
-FrameBuffer *Request::findBuffer(Stream *stream) const
+FrameBuffer *Request::findBuffer(const Stream *stream) const
{
- auto it = bufferMap_.find(stream);
+ const auto it = bufferMap_.find(stream);
if (it == bufferMap_.end())
return nullptr;
@@ -175,6 +533,26 @@ FrameBuffer *Request::findBuffer(Stream *stream) const
*/
/**
+ * \brief Retrieve the sequence number for the request
+ *
+ * When requests are queued, they are given a sequential number to track the
+ * order in which requests are queued to a camera. This number counts all
+ * requests given to a camera and is reset to zero between camera stop/start
+ * sequences.
+ *
+ * It can be used to support debugging and identifying the flow of requests
+ * through a pipeline, but is not guaranteed to represent the sequence number
+ * of any image in the stream. The sequence number is stored as an unsigned
+ * integer and will wrap when overflowed.
+ *
+ * \return The request sequence number
+ */
+uint32_t Request::sequence() const
+{
+ return _d()->sequence_;
+}
+
+/**
* \fn Request::cookie()
* \brief Retrieve the cookie set when the request was created
* \return The request cookie
@@ -194,50 +572,49 @@ FrameBuffer *Request::findBuffer(Stream *stream) const
*/
/**
- * \fn Request::hasPendingBuffers()
* \brief Check if a request has buffers yet to be completed
*
* \return True if the request has buffers pending for completion, false
* otherwise
*/
+bool Request::hasPendingBuffers() const
+{
+ return !_d()->pending_.empty();
+}
/**
- * \brief Complete a queued request
+ * \brief Generate a string representation of the Request internals
*
- * Mark the request as complete by updating its status to RequestComplete,
- * unless buffers have been cancelled in which case the status is set to
- * RequestCancelled.
+ * This function facilitates debugging of Request state while it is used
+ * internally within libcamera.
+ *
+ * \return A string representing the current state of the request
*/
-void Request::complete()
+std::string Request::toString() const
{
- ASSERT(!hasPendingBuffers());
- status_ = cancelled_ ? RequestCancelled : RequestComplete;
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
}
/**
- * \brief Complete a buffer for the request
- * \param[in] buffer The buffer that has completed
- *
- * A request tracks the status of all buffers it contains through a set of
- * pending buffers. This function removes the \a buffer from the set to mark it
- * as complete. All buffers associate with the request shall be marked as
- * complete by calling this function once and once only before reporting the
- * request as complete with the complete() method.
- *
- * \return True if all buffers contained in the request have completed, false
- * otherwise
+ * \brief Insert a text representation of a Request into an output stream
+ * \param[in] out The output stream
+ * \param[in] r The Request
+ * \return The output stream \a out
*/
-bool Request::completeBuffer(FrameBuffer *buffer)
+std::ostream &operator<<(std::ostream &out, const Request &r)
{
- int ret = pending_.erase(buffer);
- ASSERT(ret == 1);
-
- buffer->request_ = nullptr;
+ /* Pending, Completed, Cancelled(X). */
+ static const char *statuses = "PCX";
- if (buffer->metadata().status == FrameMetadata::FrameCancelled)
- cancelled_ = true;
+ /* Example Output: Request(55:P:1/2:6523524) */
+ out << "Request(" << r.sequence() << ":" << statuses[r.status()] << ":"
+ << r._d()->pending_.size() << "/" << r.buffers().size() << ":"
+ << r.cookie() << ")";
- return !hasPendingBuffers();
+ return out;
}
} /* namespace libcamera */
diff --git a/src/libcamera/semaphore.cpp b/src/libcamera/semaphore.cpp
deleted file mode 100644
index ce1eae49..00000000
--- a/src/libcamera/semaphore.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * semaphore.cpp - General-purpose counting semaphore
- */
-
-#include "semaphore.h"
-#include "thread.h"
-
-/**
- * \file semaphore.h
- * \brief General-purpose counting semaphore
- */
-
-namespace libcamera {
-
-/**
- * \class Semaphore
- * \brief General-purpose counting semaphore
- *
- * A semaphore is a locking primitive that protects resources. It is created
- * with an initial number of resources (which may be 0), and offers two
- * primitives to acquire and release resources. The acquire() method tries to
- * acquire a number of resources, and blocks if not enough resources are
- * available until they get released. The release() method releases a number of
- * resources, waking up any consumer blocked on an acquire() call.
- */
-
-/**
- * \brief Construct a semaphore with \a n resources
- * \param[in] n The resource count
- */
-Semaphore::Semaphore(unsigned int n)
- : available_(n)
-{
-}
-
-/**
- * \brief Retrieve the number of available resources
- * \return The number of available resources
- */
-unsigned int Semaphore::available()
-{
- MutexLocker locker(mutex_);
- return available_;
-}
-
-/**
- * \brief Acquire \a n resources
- * \param[in] n The resource count
- *
- * This method attempts to acquire \a n resources. If \a n is higher than the
- * number of available resources, the call will block until enough resources
- * become available.
- */
-void Semaphore::acquire(unsigned int n)
-{
- MutexLocker locker(mutex_);
- cv_.wait(locker, [&] { return available_ >= n; });
- available_ -= n;
-}
-
-/**
- * \brief Try to acquire \a n resources without blocking
- * \param[in] n The resource count
- *
- * This method attempts to acquire \a n resources. If \a n is higher than the
- * number of available resources, it returns false immediately without
- * acquiring any resource. Otherwise it acquires the resources and returns
- * true.
- *
- * \return True if the resources have been acquired, false otherwise
- */
-bool Semaphore::tryAcquire(unsigned int n)
-{
- MutexLocker locker(mutex_);
- if (available_ < n)
- return false;
-
- available_ -= n;
- return true;
-}
-
-/**
- * \brief Release \a n resources
- * \param[in] n The resource count
- *
- * This method releases \a n resources, increasing the available resource count
- * by \a n. If the number of available resources becomes large enough for any
- * consumer blocked on an acquire() call, those consumers get woken up.
- */
-void Semaphore::release(unsigned int n)
-{
- {
- MutexLocker locker(mutex_);
- available_ += n;
- }
-
- cv_.notify_all();
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
new file mode 100644
index 00000000..d19b5e2e
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * A camera sensor
+ */
+
+#include "libcamera/internal/camera_sensor.h"
+
+#include <algorithm>
+#include <memory>
+#include <variant>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/media_object.h"
+
+/**
+ * \file camera_sensor.h
+ * \brief A camera sensor
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensor)
+
+/**
+ * \class CameraSensor
+ * \brief An abstract camera sensor
+ *
+ * The CameraSensor class eases handling of sensors for pipeline handlers by
+ * hiding the details of the kernel API and caching sensor information.
+ */
+
+/**
+ * \brief Destroy a CameraSensor
+ */
+CameraSensor::~CameraSensor() = default;
+
+/**
+ * \fn CameraSensor::model()
+ * \brief Retrieve the sensor model name
+ *
+ * The sensor model name is a free-form string that uniquely identifies the
+ * sensor model.
+ *
+ * \return The sensor model name
+ */
+
+/**
+ * \fn CameraSensor::id()
+ * \brief Retrieve the sensor ID
+ *
+ * The sensor ID is a free-form string that uniquely identifies the sensor in
+ * the system. The ID satisfies the requirements to be used as a camera ID.
+ *
+ * \return The sensor ID
+ */
+
+/**
+ * \fn CameraSensor::entity()
+ * \brief Retrieve the sensor media entity
+ * \return The sensor media entity
+ */
+
+/**
+ * \fn CameraSensor::device()
+ * \brief Retrieve the camera sensor device
+ * \todo Remove this function by integrating DelayedControl with CameraSensor
+ * \return The camera sensor device
+ */
+
+/**
+ * \fn CameraSensor::focusLens()
+ * \brief Retrieve the focus lens controller
+ *
+ * \return The focus lens controller. nullptr if no focus lens controller is
+ * connected to the sensor
+ */
+
+/**
+ * \fn CameraSensor::mbusCodes()
+ * \brief Retrieve the media bus codes supported by the camera sensor
+ *
+ * Any Bayer formats are listed using the sensor's native Bayer order,
+ * that is, with the effect of V4L2_CID_HFLIP and V4L2_CID_VFLIP undone
+ * (where these controls exist).
+ *
+ * \return The supported media bus codes sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::sizes()
+ * \brief Retrieve the supported frame sizes for a media bus code
+ * \param[in] mbusCode The media bus code for which sizes are requested
+ *
+ * \return The supported frame sizes for \a mbusCode sorted in increasing order
+ */
+
+/**
+ * \fn CameraSensor::resolution()
+ * \brief Retrieve the camera sensor resolution
+ *
+ * The camera sensor resolution is the active pixel area size, clamped to the
+ * maximum frame size the sensor can produce if it is smaller than the active
+ * pixel area.
+ *
+ * \todo Consider if it is desirable to distinguish between the maximum resolution
+ * the sensor can produce (also including upscaled ones) and the actual pixel
+ * array size by splitting this function in two.
+ *
+ * \return The camera sensor resolution in pixels
+ */
+
+/**
+ * \fn CameraSensor::getFormat()
+ * \brief Retrieve the best sensor format for a desired output
+ * \param[in] mbusCodes The list of acceptable media bus codes
+ * \param[in] size The desired size
+ * \param[in] maxSize The maximum size
+ *
+ * Media bus codes are selected from \a mbusCodes, which lists all acceptable
+ * codes in decreasing order of preference. Media bus codes supported by the
+ * sensor but not listed in \a mbusCodes are ignored. If none of the desired
+ * codes is supported, it returns an error.
+ *
+ * \a size indicates the desired size at the output of the sensor. This function
+ * selects the best media bus code and size supported by the sensor according
+ * to the following criteria.
+ *
+ * - The desired \a size shall fit in the sensor output size to avoid the need
+ * to up-scale.
+ * - The sensor output size shall match the desired aspect ratio to avoid the
+ * need to crop the field of view.
+ * - The sensor output size shall be as small as possible to lower the required
+ * bandwidth.
+ * - The desired \a size shall be supported by one of the media bus codes listed
+ * in \a mbusCodes.
+ * - The desired \a size shall fit into the maximum size \a maxSize if it is not
+ * null.
+ *
+ * When multiple media bus codes can produce the same size, the code at the
+ * lowest position in \a mbusCodes is selected.
+ *
+ * The use of this function is optional, as the above criteria may not match the
+ * needs of all pipeline handlers. Pipeline handlers may implement custom
+ * sensor format selection when needed.
+ *
+ * The returned sensor output format is guaranteed to be acceptable by the
+ * setFormat() function without any modification.
+ *
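+ * A selection sketch, with illustrative media bus codes and target size:
+ *
+ * \code{.cpp}
+ * std::vector<unsigned int> codes{ MEDIA_BUS_FMT_SRGGB10_1X10,
+ *                                  MEDIA_BUS_FMT_SRGGB8_1X8 };
+ * V4L2SubdeviceFormat format = sensor->getFormat(codes, Size(1920, 1080));
+ * \endcode
+ *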
+ * \return The best sensor output format matching the desired media bus codes
+ * and size on success, or an empty format otherwise.
+ */
+
+/**
+ * \fn CameraSensor::setFormat()
+ * \brief Set the sensor output format
+ * \param[in] format The desired sensor output format
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity.
+ *
+ * If flips are writable they are configured according to the desired Transform.
+ * Transform::Identity always corresponds to H/V flip being disabled if the
+ * controls are writable. Flips are set before the new format is applied as
+ * they can effectively change the Bayer pattern ordering.
+ *
+ * The ranges of any controls associated with the sensor are also updated.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::tryFormat()
+ * \brief Try the sensor output format
+ * \param[in] format The desired sensor output format
+ *
+ * The ranges of any controls associated with the sensor are not updated.
+ *
+ * \todo Add support for Transform by changing the format's Bayer ordering
+ * before calling subdev_->setFormat().
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::applyConfiguration()
+ * \brief Apply a sensor configuration to the camera sensor
+ * \param[in] config The sensor configuration
+ * \param[in] transform The transform to be applied on the sensor.
+ * Defaults to Identity
+ * \param[out] sensorFormat Format applied to the sensor (optional)
+ *
+ * Apply to the camera sensor the configuration \a config.
+ *
+ * \todo The configuration shall be fully populated and if any of the fields
+ * specified cannot be applied exactly, an error code is returned.
+ *
+ * \return 0 if \a config is applied correctly to the camera sensor, a negative
+ * error code otherwise
+ */
+
+/**
+ * \brief Retrieve the image source stream
+ *
+ * Sensors that produce multiple streams do not guarantee that the image stream
+ * is always assigned number 0. This function allows callers to retrieve the
+ * image stream on the sensor's source pad, in order to configure the receiving
+ * side accordingly.
+ *
+ * \return The image source stream
+ */
+V4L2Subdevice::Stream CameraSensor::imageStream() const
+{
+ return { 0, 0 };
+}
+
+/**
+ * \brief Retrieve the embedded data source stream
+ *
+ * Some sensors produce embedded data in a stream separate from the image
+ * stream. This function indicates if the sensor supports this feature by
+ * returning the embedded data stream on the sensor's source pad if available,
+ * or an std::optional<> without a value otherwise.
+ *
+ * \return The embedded data source stream
+ */
+std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const
+{
+ return {};
+}
+
+/**
+ * \brief Retrieve the format on the embedded data stream
+ *
+ * When an embedded data stream is available, this function returns the
+ * corresponding format on the sensor's source pad. The format may vary with
+ * the image stream format, and should therefore be retrieved after configuring
+ * the image stream.
+ *
+ * If the sensor doesn't support embedded data, this function returns a
+ * default-constructed format.
+ *
+ * \return The format on the embedded data stream
+ */
+V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const
+{
+ return {};
+}
+
+/**
+ * \brief Enable or disable the embedded data stream
+ * \param[in] enable True to enable the embedded data stream, false to disable it
+ *
+ * For sensors that support embedded data, this function enables or disables
+ * generation of embedded data. Some such sensors always produce embedded
+ * data, in which case this function returns -EISCONN if the caller attempts
+ * to disable embedded data.
+ *
+ * If the sensor doesn't support embedded data, this function returns 0 when \a
+ * enable is false, and -ENOSTR otherwise.
+ *
+ * \return 0 on success, or a negative error code otherwise
+ */
+int CameraSensor::setEmbeddedDataEnabled(bool enable)
+{
+ return enable ? -ENOSTR : 0;
+}
+
+/**
+ * \fn CameraSensor::properties()
+ * \brief Retrieve the camera sensor properties
+ * \return The list of camera sensor properties
+ */
+
+/**
+ * \fn CameraSensor::sensorInfo()
+ * \brief Assemble and return the camera sensor info
+ * \param[out] info The camera sensor info
+ *
+ * This function fills \a info with information that describes the camera sensor
+ * and its current configuration. The information combines static data (such as
+ * the sensor model or active pixel array size) and data specific to the
+ * current sensor configuration (such as the line length and pixel rate).
+ *
+ * Sensor information is only available for raw sensors. When called for a YUV
+ * sensor, this function returns -EINVAL.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+
+/**
+ * \fn CameraSensor::computeTransform()
+ * \brief Compute the Transform that gives the requested \a orientation
+ * \param[inout] orientation The desired image orientation
+ *
+ * This function computes the Transform that the pipeline handler should apply
+ * to the CameraSensor to obtain the requested \a orientation.
+ *
+ * The intended caller of this function is the validate() implementation of
+ * pipeline handlers, that pass in the application requested
+ * CameraConfiguration::orientation and obtain a Transform to apply to the
+ * camera sensor, likely at configure() time.
+ *
+ * If the requested \a orientation cannot be obtained, the \a orientation
+ * parameter is adjusted to report the current image orientation and
+ * Transform::Identity is returned.
+ *
+ * If the requested \a orientation can be obtained, the function computes a
+ * Transform and does not adjust \a orientation.
+ *
+ * Pipeline handlers are expected to verify if \a orientation has been
+ * adjusted by this function and set the CameraConfiguration::status to
+ * Adjusted accordingly.
+ *
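+ * A sketch of the intended usage in a pipeline handler's validate()
+ * implementation, where config and status are illustrative variables:
+ *
+ * \code{.cpp}
+ * Orientation requested = config->orientation;
+ * Transform transform = sensor->computeTransform(&requested);
+ * if (requested != config->orientation)
+ *         status = CameraConfiguration::Adjusted;
+ * \endcode
+ *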
+ * \return A Transform instance that applied to the CameraSensor produces images
+ * with \a orientation
+ */
+
+/**
+ * \fn CameraSensor::bayerOrder()
+ * \brief Compute the Bayer order that results from the given Transform
+ * \param[in] t The Transform to apply to the sensor
+ *
+ * Some sensors change their Bayer order when they are h-flipped or v-flipped.
+ * This function computes and returns the Bayer order that would result from the
+ * given transform applied to the sensor.
+ *
+ * This function is valid only when the sensor produces raw Bayer formats.
+ *
+ * \return The Bayer order produced by the sensor when the Transform is applied
+ */
+
+/**
+ * \fn CameraSensor::controls()
+ * \brief Retrieve the supported V4L2 controls and their information
+ *
+ * Control information is updated automatically to reflect the current sensor
+ * configuration when the setFormat() function is called, without invalidating
+ * any iterator on the ControlInfoMap.
+ *
+ * \return A map of the V4L2 controls supported by the sensor
+ */
+
+/**
+ * \fn CameraSensor::getControls()
+ * \brief Read V4L2 controls from the sensor
+ * \param[in] ids The list of controls to read, specified by their ID
+ *
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList. The control identifiers are defined by
+ * the V4L2 specification (V4L2_CID_*).
+ *
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
+ *
+ * \sa V4L2Device::getControls()
+ *
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
+ */
+
+/**
+ * \fn CameraSensor::setControls()
+ * \brief Write V4L2 controls to the sensor
+ * \param[in] ctrls The list of controls to write
+ *
+ * This function writes the value of all controls contained in \a ctrls, and
+ * stores the values actually applied to the device in the corresponding \a
+ * ctrls entry. The control identifiers are defined by the V4L2 specification
+ * (V4L2_CID_*).
+ *
+ * If any control in \a ctrls is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
+ *
+ * If an error occurs while writing the controls, the index of the first
+ * control that couldn't be written is returned. All controls below that index
+ * are written and their values are updated in \a ctrls, while all other
+ * controls are not written and their values are not changed.
+ *
+ * \sa V4L2Device::setControls()
+ *
+ * \return 0 on success or an error code otherwise
+ * \retval -EINVAL One of the controls is not supported or not accessible
+ * \retval i The index of the control that failed
+ */
+
+/**
+ * \fn CameraSensor::testPatternModes()
+ * \brief Retrieve all the supported test pattern modes of the camera sensor
+ *
+ * The test pattern mode values correspond to the controls::TestPattern control.
+ *
+ * \return The list of test pattern modes
+ */
+
+/**
+ * \fn CameraSensor::setTestPatternMode()
+ * \brief Set the test pattern mode for the camera sensor
+ * \param[in] mode The test pattern mode
+ *
+ * The new \a mode is applied to the sensor if it differs from the active test
+ * pattern mode. Otherwise, this function is a no-op. Setting the same test
+ * pattern mode for every frame thus incurs no performance penalty.
+ */
+
+/**
+ * \fn CameraSensor::sensorDelays()
+ * \brief Fetch the sensor delay values
+ *
+ * This function retrieves the delays that the sensor applies to controls. If
+ * the static properties database doesn't specify control delay values for the
+ * sensor, default delays that may be suitable are returned and a warning is
+ * logged.
+ *
+ * \return The sensor delay values
+ */
+
+/**
+ * \class CameraSensorFactoryBase
+ * \brief Base class for camera sensor factories
+ *
+ * The CameraSensorFactoryBase class is the base of all specializations of
+ * the CameraSensorFactory class template. It implements the factory
+ * registration, maintains a registry of factories, and provides access to the
+ * registered factories.
+ */
+
+/**
+ * \brief Construct a camera sensor factory base
+ * \param[in] name The camera sensor factory name
+ * \param[in] priority Priority order for factory selection
+ *
+ * Creating an instance of the factory base registers it with the global list of
+ * factories, accessible through the factories() function.
+ */
+CameraSensorFactoryBase::CameraSensorFactoryBase(const char *name, int priority)
+ : name_(name), priority_(priority)
+{
+ registerFactory(this);
+}
+
+/**
+ * \brief Create an instance of the CameraSensor corresponding to a media entity
+ * \param[in] entity The media entity on the source end of the sensor
+ *
+ * When multiple factories match the same \a entity, this function selects the
+ * matching factory with the highest priority as specified to the
+ * REGISTER_CAMERA_SENSOR() macro at factory registration time. If multiple
+ * matching factories have the same highest priority value, which factory gets
+ * selected is undefined and may vary between runs.
+ *
+ * \return A unique pointer to a new instance of the CameraSensor subclass
+ * matching the entity, or a null pointer if no such factory exists
+ */
+std::unique_ptr<CameraSensor> CameraSensorFactoryBase::create(MediaEntity *entity)
+{
+ const std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ for (const CameraSensorFactoryBase *factory : factories) {
+ std::variant<std::unique_ptr<CameraSensor>, int> result =
+ factory->match(entity);
+
+ if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result)) {
+ LOG(CameraSensor, Debug)
+ << "Entity '" << entity->name() << "' matched by "
+ << factory->name();
+ return std::get<std::unique_ptr<CameraSensor>>(std::move(result));
+ }
+
+ if (std::get<int>(result)) {
+ LOG(CameraSensor, Error)
+ << "Failed to create sensor for '"
+				<< entity->name() << "': " << std::get<int>(result);
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+/**
+ * \fn CameraSensorFactoryBase::name()
+ * \brief Retrieve the camera sensor factory name
+ * \return The name of the factory
+ */
+
+/**
+ * \fn CameraSensorFactoryBase::priority()
+ * \brief Retrieve the priority value for the factory
+ * \return The priority value for the factory
+ */
+
+/**
+ * \brief Retrieve the list of all camera sensor factories
+ *
+ * The factories are sorted in decreasing priority order.
+ *
+ * \return The list of camera sensor factories
+ */
+std::vector<CameraSensorFactoryBase *> &CameraSensorFactoryBase::factories()
+{
+ /*
+	 * The static factories list is defined inside the function to ensure
+ * it gets initialized on first use, without any dependency on link
+ * order.
+ */
+ static std::vector<CameraSensorFactoryBase *> factories;
+ return factories;
+}
+
+/**
+ * \brief Add a camera sensor class to the registry
+ * \param[in] factory Factory to use to construct the camera sensor
+ */
+void CameraSensorFactoryBase::registerFactory(CameraSensorFactoryBase *factory)
+{
+ std::vector<CameraSensorFactoryBase *> &factories =
+ CameraSensorFactoryBase::factories();
+
+ auto pos = std::upper_bound(factories.begin(), factories.end(), factory,
+ [](const CameraSensorFactoryBase *value,
+ const CameraSensorFactoryBase *elem) {
+ return value->priority() > elem->priority();
+ });
+ factories.insert(pos, factory);
+}
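+
+/*
+ * As an illustration, registering three factories with priorities 100, 0 and
+ * -100, in any order, leaves factories() sorted as { 100, 0, -100 }, so
+ * create() tries the highest-priority factory first.
+ */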
+
+/**
+ * \class CameraSensorFactory
+ * \brief Registration of CameraSensorFactory classes and creation of instances
+ * \tparam _CameraSensor The camera sensor class type for this factory
+ *
+ * To facilitate discovery and instantiation of CameraSensor classes, the
+ * CameraSensorFactory class implements auto-registration of camera sensors.
+ * Each CameraSensor subclass shall register itself using the
+ * REGISTER_CAMERA_SENSOR() macro, which will create a corresponding instance
+ * of a CameraSensorFactory subclass and register it with the static list of
+ * factories.
+ */
+
+/**
+ * \fn CameraSensorFactory::CameraSensorFactory()
+ * \brief Construct a camera sensor factory
+ *
+ * Creating an instance of the factory registers it with the global list of
+ * factories, accessible through the CameraSensorFactoryBase::factories()
+ * function.
+ */
+
+/**
+ * \def REGISTER_CAMERA_SENSOR(sensor, priority)
+ * \brief Register a camera sensor type with the sensor factory
+ * \param[in] sensor Class name of the CameraSensor derived class to register
+ * \param[in] priority Priority order for factory selection
+ *
+ * Register a CameraSensor subclass with the factory and make it available to
+ * try and match sensors. The subclass needs to implement a static match
+ * function:
+ *
+ * \code{.cpp}
+ * static std::variant<std::unique_ptr<CameraSensor>, int> match(MediaEntity *entity);
+ * \endcode
+ *
+ * The function tests if the sensor class supports the camera sensor identified
+ * by a MediaEntity. If so, it creates a new instance of the sensor class. The
+ * return value is a variant that contains
+ *
+ * - A new instance of the camera sensor class if the entity matched and
+ *   creation succeeded;
+ * - A non-zero error code if the entity matched and the creation failed; or
+ * - A zero error code if the entity didn't match.
+ *
+ * When multiple factories can support the same MediaEntity (that is, when the
+ * match() functions of multiple factories succeed for the same entity), the \a
+ * priority argument selects which factory will be used. See
+ * CameraSensorFactoryBase::create() for more information.
+ */
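+
+/*
+ * A minimal registration sketch (MySensor is hypothetical and abridged, with
+ * all other CameraSensor overrides elided), following the pattern used by the
+ * sensor classes in this directory:
+ *
+ * \code{.cpp}
+ * class MySensor : public CameraSensor
+ * {
+ * public:
+ *	static std::variant<std::unique_ptr<CameraSensor>, int>
+ *	match(MediaEntity *entity);
+ * };
+ *
+ * REGISTER_CAMERA_SENSOR(MySensor, 0)
+ * \endcode
+ */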
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_legacy.cpp b/src/libcamera/sensor/camera_sensor_legacy.cpp
new file mode 100644
index 00000000..32989c19
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_legacy.cpp
@@ -0,0 +1,1045 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ *
+ * camera_sensor_legacy.cpp - A V4L2-backed camera sensor
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorLegacy : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorLegacy(const MediaEntity *entity);
+ ~CameraSensorLegacy();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorLegacy)
+
+ int init();
+ int generateId();
+ int validateSensorDriver();
+ void initVimcDefaultProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int initProperties();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+ int discoverAncillaryDevices();
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+ unsigned int pad_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ const BayerFormat *bayerFormat_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorLegacy
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * The implementation is currently limited to sensors that expose a single V4L2
+ * subdevice with a single pad. It will be extended to support more complex
+ * devices as the need arises.
+ */
+
+CameraSensorLegacy::CameraSensorLegacy(const MediaEntity *entity)
+ : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
+ bayerFormat_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorLegacy::~CameraSensorLegacy() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorLegacy::match(MediaEntity *entity)
+{
+ std::unique_ptr<CameraSensorLegacy> sensor =
+ std::make_unique<CameraSensorLegacy>(entity);
+
+ int ret = sensor->init();
+ if (ret)
+ return { ret };
+
+ return { std::move(sensor) };
+}
+
+int CameraSensorLegacy::init()
+{
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ pad_ = pad->index();
+ break;
+ }
+ }
+
+ if (pad_ == UINT_MAX) {
+ LOG(CameraSensor, Error)
+			<< "No source pad found";
+ return -EINVAL;
+ }
+
+ switch (entity_->function()) {
+ case MEDIA_ENT_F_CAM_SENSOR:
+ case MEDIA_ENT_F_PROC_VIDEO_ISP:
+ break;
+
+ default:
+ LOG(CameraSensor, Error)
+ << "Invalid sensor function "
+ << utils::hex(entity_->function());
+ return -EINVAL;
+ }
+
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clear any flips to be sure we get the "native" Bayer order. This is
+ * harmless for sensors where the flips don't affect the Bayer order.
+ */
+ ControlList ctrls(subdev_->controls());
+ if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_HFLIP, 0);
+ if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end())
+ ctrls.set(V4L2_CID_VFLIP, 0);
+ subdev_->setControls(&ctrls);
+
+ /* Enumerate, sort and cache media bus codes and sizes. */
+ formats_ = subdev_->formats(pad_);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return -EINVAL;
+ }
+
+ mbusCodes_ = utils::map_keys(formats_);
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+
+ for (const auto &format : formats_) {
+ const std::vector<SizeRange> &ranges = format.second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /* Remove duplicates. */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+	 * VIMC is a bit special, as it does not yet meet all the mandatory
+ * requirements regular sensors have to respect.
+ *
+ * Do not validate the driver if it's VIMC and initialize the sensor
+ * properties with static information.
+ *
+ * \todo Remove the special case once the VIMC driver has been
+ * updated in all test platforms.
+ */
+ if (entity_->device()->driver() == "vimc") {
+ initVimcDefaultProperties();
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ return discoverAncillaryDevices();
+ }
+
+ /* Get the color filter array pattern (only for RAW sensors). */
+ for (unsigned int mbusCode : mbusCodes_) {
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode);
+ if (bayerFormat.isValid()) {
+ bayerFormat_ = &bayerFormat;
+ break;
+ }
+ }
+
+ ret = validateSensorDriver();
+ if (ret)
+ return ret;
+
+ ret = initProperties();
+ if (ret)
+ return ret;
+
+ ret = discoverAncillaryDevices();
+ if (ret)
+ return ret;
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+}
+
+int CameraSensorLegacy::generateId()
+{
+ const std::string devPath = subdev_->devicePath();
+
+ /* Try to get ID from firmware description. */
+ id_ = sysfs::firmwareNodePath(devPath);
+ if (!id_.empty())
+ return 0;
+
+ /*
+ * Virtual sensors not described in firmware
+ *
+ * Verify it's a platform device and construct ID from the device path
+ * and model of sensor.
+ */
+ if (devPath.find("/sys/devices/platform/", 0) == 0) {
+ id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
+ return 0;
+ }
+
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+}
+
+int CameraSensorLegacy::validateSensorDriver()
+{
+ int err = 0;
+
+ /*
+ * Optional controls are used to register optional sensor properties. If
+ * not present, some values will be defaulted.
+ */
+ static constexpr uint32_t optionalControls[] = {
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ };
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+ for (uint32_t ctrl : optionalControls) {
+ if (!controls.count(ctrl))
+ LOG(CameraSensor, Debug)
+ << "Optional V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ }
+
+ /*
+ * Recommended controls are similar to optional controls, but will
+ * become mandatory in the near future. Be loud if they're missing.
+ */
+ static constexpr uint32_t recommendedControls[] = {
+ V4L2_CID_CAMERA_ORIENTATION,
+ };
+
+ for (uint32_t ctrl : recommendedControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Warning)
+ << "Recommended V4L2 control " << utils::hex(ctrl)
+ << " not supported";
+ err = -EINVAL;
+ }
+ }
+
+ /*
+ * Verify if sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * Make sure the required selection targets are supported.
+ *
+ * Failures in reading any of the targets are not deemed to be fatal,
+ * but some properties and features, like constructing a
+ * IPACameraSensorInfo for the IPA module, won't be supported.
+ *
+ * \todo Make support for selection targets mandatory as soon as all
+ * test platforms have been updated.
+ */
+ Rectangle rect;
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect);
+ if (ret) {
+ /*
+ * Default the pixel array size to the largest size supported
+ * by the sensor. The sizes_ vector is sorted in ascending
+ * order, the largest size is thus the last element.
+ */
+ pixelArraySize_ = sizes_.back();
+
+ LOG(CameraSensor, Warning)
+ << "The PixelArraySize property has been defaulted to "
+ << pixelArraySize_;
+ err = -EINVAL;
+ } else {
+ pixelArraySize_ = rect.size();
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_);
+ if (ret) {
+ activeArea_ = Rectangle(pixelArraySize_);
+ LOG(CameraSensor, Warning)
+ << "The PixelArrayActiveAreas property has been defaulted to "
+ << activeArea_;
+ err = -EINVAL;
+ }
+
+ ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect);
+ if (ret) {
+ LOG(CameraSensor, Warning)
+ << "Failed to retrieve the sensor crop rectangle";
+ err = -EINVAL;
+ }
+
+ if (err) {
+ LOG(CameraSensor, Warning)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Warning)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ }
+
+ if (!bayerFormat_)
+ return 0;
+
+ /*
+ * For raw sensors, make sure the sensor driver supports the controls
+ * required by the CameraSensor class.
+ */
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ err = 0;
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return err;
+ }
+
+ return 0;
+}
+
+void CameraSensorLegacy::initVimcDefaultProperties()
+{
+ /* Use the largest supported size. */
+ pixelArraySize_ = sizes_.back();
+ activeArea_ = Rectangle(pixelArraySize_);
+}
+
+void CameraSensorLegacy::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorLegacy::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorLegacy::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+		 * The camera sensor supports test patterns, but we don't know
+		 * how to map them; this should be fixed.
+ */
+		LOG(CameraSensor, Debug) << "No static test pattern map for '"
+				 << model() << "'";
+ return;
+ }
+
+ /*
+	 * Create a map that associates the V4L2 control index with the test
+	 * pattern mode by reversing the testPatternModes map provided by the
+	 * camera sensor properties. This makes it easier to verify if the
+	 * control index is supported in the loop below that creates the
+	 * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+int CameraSensorLegacy::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ int ret = generateId();
+ if (ret)
+ return ret;
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern, register only for RAW sensors. */
+ if (bayerFormat_) {
+ int32_t cfa;
+ switch (bayerFormat_->order) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+ }
+
+ return 0;
+}
+
+int CameraSensorLegacy::discoverAncillaryDevices()
+{
+ int ret;
+
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+std::vector<Size> CameraSensorLegacy::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorLegacy::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorLegacy::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
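+
+/*
+ * A lookup sketch (sensor is a hypothetical CameraSensor pointer): request
+ * the smallest supported size of at least 1920x1080 with the closest aspect
+ * ratio, capped at the full sensor resolution:
+ *
+ * \code{.cpp}
+ * V4L2SubdeviceFormat format = sensor->getFormat(sensor->mbusCodes(),
+ *						   Size(1920, 1080),
+ *						   sensor->resolution());
+ * \endcode
+ */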
+
+int CameraSensorLegacy::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(pad_, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorLegacy::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(pad_, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorLegacy::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+ if (bayer.bitDepth == config.bitDepth)
+ return true;
+ return false;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling will look identical and
+	 * only the first one found will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+	 * This is relevant if the transform has changed the Bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
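+
+/*
+ * A caller-side sketch using only the fields consumed above (bitDepth and
+ * outputSize); validity checks and error handling are elided:
+ *
+ * \code{.cpp}
+ * SensorConfiguration config;
+ * config.bitDepth = 10;
+ * config.outputSize = { 1920, 1080 };
+ *
+ * V4L2SubdeviceFormat sensorFormat;
+ * int ret = sensor->applyConfiguration(config, Transform::Identity,
+ *					 &sensorFormat);
+ * \endcode
+ */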
+
+int CameraSensorLegacy::sensorInfo(IPACameraSensorInfo *info) const
+{
+ if (!bayerFormat_)
+ return -EINVAL;
+
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ /*
+	 * \todo Support for retrieving the crop rectangle is scheduled to
+ * become mandatory. For the time being use the default value if it has
+ * been initialized at sensor driver validation time.
+ */
+ int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
+ if (ret) {
+ info->analogCrop = activeArea_;
+ LOG(CameraSensor, Warning)
+ << "The analogue crop rectangle has been defaulted to the active area size";
+ }
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+	 *
+	 * Compensate by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(pad_, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorLegacy::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
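+
+/*
+ * Worked example (informative): with mountingOrientation_ = Rotate180 and a
+ * requested *orientation of Rotate0, a flip-capable sensor yields
+ * Transform::Rot180 (an H and V flip); without flip support, *orientation is
+ * adjusted to Rotate180 and Identity is returned.
+ */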
+
+BayerFormat::Order CameraSensorLegacy::bayerOrder(Transform t) const
+{
+	/* Return a defined but meaningless value for non-Bayer sensors. */
+ if (!bayerFormat_)
+ return BayerFormat::Order::BGGR;
+
+ if (!flipsAlterBayerOrder_)
+ return bayerFormat_->order;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order,
+ * using the rest of the Bayer format supplied by the caller.
+ */
+ return bayerFormat_->transform(t).order;
+}
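+
+/*
+ * For instance (informative), on a sensor whose flips alter the Bayer order
+ * and whose native order is RGGB, bayerOrder(Transform::HFlip) yields GRBG
+ * and bayerOrder(Transform::VFlip) yields GBRG.
+ */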
+
+const ControlInfoMap &CameraSensorLegacy::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorLegacy::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorLegacy::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorLegacy::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorLegacy::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorLegacy::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
new file mode 100644
index 00000000..e2f518f9
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Database of camera sensor properties
+ */
+
+#include "libcamera/internal/camera_sensor_properties.h"
+
+#include <map>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+/**
+ * \file camera_sensor_properties.h
+ * \brief Database of camera sensor properties
+ *
+ * The database of camera sensor properties collects static information about
+ * camera sensors that is not possible or desirable to retrieve from the device
+ * at run time.
+ *
+ * The database is indexed using the camera sensor model, as reported by the
+ * properties::Model property, and for each supported sensor it contains a
+ * list of properties.
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(CameraSensorProperties)
+
+/**
+ * \struct CameraSensorProperties
+ * \brief Database of camera sensor properties
+ *
+ * \var CameraSensorProperties::unitCellSize
+ * \brief The physical size of a pixel, including pixel edges, in nanometers.
+ *
+ * \var CameraSensorProperties::testPatternModes
+ * \brief Map that associates the TestPattern control value with the indexes of
+ * the corresponding sensor test pattern modes as returned by
+ * V4L2_CID_TEST_PATTERN.
+ *
+ * \var CameraSensorProperties::sensorDelays
+ * \brief Sensor control application delays
+ *
+ * This structure may be defined as empty if the actual sensor delays are not
+ * available or have not been measured.
+ */
+
+/**
+ * \struct CameraSensorProperties::SensorDelays
+ * \brief Sensor control application delay values
+ *
+ * This structure holds delay values, expressed in number of frames, between the
+ * time a control value is applied to the sensor and the time that value is
+ * reflected in the output. For example "2 frames delay" means that parameters
+ * set during frame N will take effect for frame N+2 (and by extension a delay
+ * of 0 would mean the parameter is applied immediately to the current frame).
+ *
+ * \var CameraSensorProperties::SensorDelays::exposureDelay
+ * \brief Number of frames between application of exposure control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::gainDelay
+ * \brief Number of frames between application of analogue gain control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::vblankDelay
+ * \brief Number of frames between application of vblank control and effect
+ *
+ * \var CameraSensorProperties::SensorDelays::hblankDelay
+ * \brief Number of frames between application of hblank control and effect
+ */
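+
+/*
+ * Informative example: with exposureDelay = 2, an exposure value that shall
+ * take effect in frame N must be applied while frame N-2 is being produced.
+ * A caller could compute the frame at which to apply a control as follows
+ * (sensor is a hypothetical CameraSensor pointer and targetFrame an
+ * illustrative variable, not part of this API):
+ *
+ * \code{.cpp}
+ * const CameraSensorProperties::SensorDelays &delays =
+ *	sensor->sensorDelays();
+ * unsigned int applyFrame = targetFrame - delays.exposureDelay;
+ * \endcode
+ */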
+
+/**
+ * \brief Retrieve the properties associated with a sensor
+ * \param sensor The sensor model name as reported by properties::Model
+ * \return A pointer to the CameraSensorProperties instance associated with a sensor
+ * or nullptr if the sensor is not supported
+ */
+const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
+{
+ static const std::map<std::string, const CameraSensorProperties> sensorProps = {
+ { "ar0144", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ar0521", {
+ .unitCellSize = { 2200, 2200 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ },
+ .sensorDelays = { },
+ } },
+ { "gc05a2", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "gc08a3", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "hi846", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * No corresponding test pattern mode for:
+ * 5: "Gradient Horizontal"
+ * 6: "Gradient Vertical"
+ * 7: "Check Board"
+ * 8: "Slant Pattern"
+ * 9: "Resolution Pattern"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "imx214", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx219", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx258", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 1 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = { },
+ } },
+ { "imx283", {
+ .unitCellSize = { 2400, 2400 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx290", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx296", {
+ .unitCellSize = { 3450, 3450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx327", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx335", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx415", {
+ .unitCellSize = { 1450, 1450 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "imx462", {
+ .unitCellSize = { 2900, 2900 },
+ .testPatternModes = {},
+ .sensorDelays = { },
+ } },
+ { "imx477", {
+ .unitCellSize = { 1550, 1550 },
+ .testPatternModes = {},
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx519", {
+ .unitCellSize = { 1220, 1220 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModePn9, 4 },
+ /*
+ * The driver reports ColorBars and ColorBarsFadeToGray as well but
+ * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "imx708", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeSolidColor, 2 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
+ { controls::draft::TestPatternModePn9, 4 },
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 3,
+ .hblankDelay = 3
+ },
+ } },
+ { "ov2685", {
+ .unitCellSize = { 1750, 1750 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 3: "Random Data"
+ * 4: "Black White Square"
+ * 5: "Color Square"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov2740", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov4689", {
+ .unitCellSize = { 2000, 2000 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+				{ controls::draft::TestPatternModeColorBars, 1 },
+				{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+				/*
+				 * No corresponding test patterns in the
+				 * MIPI CCS specification for the sensor's
+				 * colorBarType2 and colorBarType3.
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5640", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5647", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {},
+ /*
+ * We run this sensor in a mode where the gain delay is
+ * bumped up to 2. It seems to be the only way to make
+ * the delays "predictable".
+ *
+ * \todo Verify these delays properly, as the upstream
+ * driver appears to configure _no_ delay.
+ */
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov5670", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5675", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ { "ov5693", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for
+ * 1: "Random data" and 3: "Colour Bars with
+ * Rolling Bar".
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov64a40", {
+ .unitCellSize = { 1008, 1008 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov7251", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov8858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ { controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
+ /*
+				 * No corresponding test pattern mode for:
+ * 3: "Vertical Color Bar Type 3",
+ * 4: "Vertical Color Bar Type 4"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov8865", {
+ .unitCellSize = { 1400, 1400 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 2 },
+ /*
+ * No corresponding test pattern mode for:
+ * 1: "Random data"
+ * 3: "Color bars with rolling bar"
+ * 4: "Color squares"
+ * 5: "Color squares with rolling bar"
+ */
+ },
+ .sensorDelays = { },
+ } },
+ { "ov9281", {
+ .unitCellSize = { 3000, 3000 },
+ .testPatternModes = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
+ } },
+ { "ov13858", {
+ .unitCellSize = { 1120, 1120 },
+ .testPatternModes = {
+ { controls::draft::TestPatternModeOff, 0 },
+ { controls::draft::TestPatternModeColorBars, 1 },
+ },
+ .sensorDelays = { },
+ } },
+ };
+
+ const auto it = sensorProps.find(sensor);
+ if (it == sensorProps.end()) {
+ LOG(CameraSensorProperties, Warning)
+ << "No static properties available for '" << sensor << "'";
+ LOG(CameraSensorProperties, Warning)
+ << "Please consider updating the camera sensor properties database";
+ return nullptr;
+ }
+
+ return &it->second;
+}
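+
+/*
+ * A lookup sketch ("imx219" is one of the entries above; properties_ stands
+ * for a caller's ControlList, as in the CameraSensor implementations):
+ *
+ * \code{.cpp}
+ * const CameraSensorProperties *props = CameraSensorProperties::get("imx219");
+ * if (props)
+ *	properties_.set(properties::UnitCellSize, props->unitCellSize);
+ * \endcode
+ */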
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp
new file mode 100644
index 00000000..ab75b1f8
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_raw.cpp
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy.
+ *
+ * camera_sensor_raw.cpp - A raw camera sensor using the V4L2 streams API
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorRaw : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorRaw(const MediaEntity *entity);
+ ~CameraSensorRaw();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ V4L2Subdevice::Stream imageStream() const override;
+ std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override;
+ V4L2SubdeviceFormat embeddedDataFormat() const override;
+ int setEmbeddedDataEnabled(bool enable) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorRaw)
+
+ std::optional<int> init();
+ int initProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+
+ struct Streams {
+ V4L2Subdevice::Stream sink;
+ V4L2Subdevice::Stream source;
+ };
+
+ struct {
+ Streams image;
+ std::optional<Streams> edata;
+ } streams_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ BayerFormat::Order cfaPattern_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorRaw
+ * \brief A raw camera sensor using the V4L2 streams API
+ *
+ * This class supports single-subdev sensors with a single source pad and one
+ * or two internal sink pads (for the image and embedded data streams).
+ */
+
+CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity)
+ : entity_(entity), staticProps_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorRaw::~CameraSensorRaw() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorRaw::match(MediaEntity *entity)
+{
+ /* Check the entity type. */
+ if (entity->type() != MediaEntity::Type::V4L2Subdevice ||
+ entity->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported entity type ("
+ << utils::to_underlying(entity->type())
+ << ") or function (" << utils::hex(entity->function()) << ")";
+ return { 0 };
+ }
+
+ /* Count and check the number of pads. */
+ static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_INTERNAL;
+ unsigned int numSinks = 0;
+ unsigned int numSources = 0;
+
+ for (const MediaPad *pad : entity->pads()) {
+ switch (pad->flags() & kPadFlagsMask) {
+ case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL:
+ numSinks++;
+ break;
+
+ case MEDIA_PAD_FL_SOURCE:
+ numSources++;
+ break;
+
+ default:
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported pad " << pad->index()
+ << " type " << utils::hex(pad->flags());
+ return { 0 };
+ }
+ }
+
+ if (numSinks < 1 || numSinks > 2 || numSources != 1) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported number of sinks ("
+ << numSinks << ") or sources (" << numSources << ")";
+ return { 0 };
+ }
+
+ /*
+ * The entity matches. Create the camera sensor and initialize it. The
+ * init() function will perform further match checks.
+ */
+ std::unique_ptr<CameraSensorRaw> sensor =
+ std::make_unique<CameraSensorRaw>(entity);
+
+ std::optional<int> err = sensor->init();
+ if (err)
+ return { *err };
+
+ return { std::move(sensor) };
+}
+
+std::optional<int> CameraSensorRaw::init()
+{
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret)
+ return { ret };
+
+ /*
+ * 1. Identify the pads.
+ */
+
+ /*
+ * First locate the source pad. The match() function guarantees there
+ * is one and only one source pad.
+ */
+ unsigned int sourcePad = UINT_MAX;
+
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ sourcePad = pad->index();
+ break;
+ }
+ }
+
+ /*
+ * Iterate over the routes to identify the streams on the source pad,
+ * and the internal sink pads.
+ */
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return { ret };
+
+ bool imageStreamFound = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source.pad != sourcePad) {
+ LOG(CameraSensor, Error) << "Invalid route " << route;
+ return { -EINVAL };
+ }
+
+ /* Identify the stream type based on the supported formats. */
+ V4L2Subdevice::Formats formats = subdev_->formats(route.source);
+
+ std::optional<MediaBusFormatInfo::Type> type;
+
+ for (const auto &[code, sizes] : formats) {
+ const MediaBusFormatInfo &info =
+ MediaBusFormatInfo::info(code);
+ if (info.isValid()) {
+ type = info.type;
+ break;
+ }
+ }
+
+ if (!type) {
+ LOG(CameraSensor, Warning)
+ << "No known format on pad " << route.source;
+ continue;
+ }
+
+ switch (*type) {
+ case MediaBusFormatInfo::Type::Image:
+ if (imageStreamFound) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal image streams ("
+ << streams_.image.sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ imageStreamFound = true;
+ streams_.image.sink = route.sink;
+ streams_.image.source = route.source;
+ break;
+
+ case MediaBusFormatInfo::Type::Metadata:
+ /*
+ * Skip metadata streams that are not sensor embedded
+ * data. The source stream reports a generic metadata
+ * format, check the sink stream for the exact format.
+ */
+ formats = subdev_->formats(route.sink);
+ if (formats.size() != 1)
+ continue;
+
+ if (MediaBusFormatInfo::info(formats.cbegin()->first).type !=
+ MediaBusFormatInfo::Type::EmbeddedData)
+ continue;
+
+ if (streams_.edata) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal embedded data streams ("
+ << streams_.edata->sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ streams_.edata = { route.sink, route.source };
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!imageStreamFound) {
+ LOG(CameraSensor, Error) << "No image stream found";
+ return { -EINVAL };
+ }
+
+ LOG(CameraSensor, Debug)
+ << "Found image stream " << streams_.image.sink
+ << " -> " << streams_.image.source;
+
+ if (streams_.edata)
+ LOG(CameraSensor, Debug)
+ << "Found embedded data stream " << streams_.edata->sink
+ << " -> " << streams_.edata->source;
+
+ /*
+ * 2. Enumerate and cache the media bus codes, sizes and colour filter
+ * array order for the image stream.
+ */
+
+ /*
+ * Get the native sensor CFA pattern. It is simpler to retrieve it from
+ * the internal image sink pad as it is guaranteed to expose a single
+ * format, and is not affected by flips.
+ */
+ V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink);
+ if (formats.size() != 1) {
+ LOG(CameraSensor, Error)
+ << "Image pad has " << formats.size()
+ << " formats, expected 1";
+ return { -EINVAL };
+ }
+
+ uint32_t nativeFormat = formats.cbegin()->first;
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat);
+ if (!bayerFormat.isValid()) {
+ LOG(CameraSensor, Error)
+ << "Invalid native format " << nativeFormat;
+ return { 0 };
+ }
+
+ cfaPattern_ = bayerFormat.order;
+
+ /*
+ * Retrieve and cache the media bus codes and sizes on the source image
+ * stream.
+ */
+ formats_ = subdev_->formats(streams_.image.source);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return { -EINVAL };
+ }
+
+ /* Populate and sort the media bus codes and the sizes. */
+ for (const auto &[code, ranges] : formats_) {
+ /* Drop non-raw formats (in case we have a hybrid sensor). */
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ mbusCodes_.push_back(code);
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ if (mbusCodes_.empty()) {
+ LOG(CameraSensor, Debug) << "No raw image formats found";
+ return { 0 };
+ }
+
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /*
+ * Remove duplicate sizes. There are no duplicate media bus codes as
+ * they are the keys in the formats map.
+ */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+	 * 3. Query the selection rectangles to retrieve the pixel array
+	 * properties, and verify that all the expected rectangles are supported.
+ */
+
+ Rectangle rect;
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop bounds";
+ return { ret };
+ }
+
+ pixelArraySize_ = rect.size();
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT,
+ &activeArea_);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop default";
+ return { ret };
+ }
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop rectangle";
+ return { ret };
+ }
+
+ /*
+ * 4. Verify that all required controls are present.
+ */
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ ret = 0;
+
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return { ret };
+ }
+
+ /*
+	 * Verify whether the sensor supports horizontal/vertical flips.
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * 5. Discover ancillary devices.
+ *
+ * \todo This code may be shared by different V4L2 sensor classes.
+ */
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ /*
+ * 6. Initialize properties.
+ */
+
+ ret = initProperties();
+ if (ret)
+ return { ret };
+
+ /*
+ * 7. Initialize controls.
+ */
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+			return { ret };
+ }
+
+ ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return { ret };
+
+ return {};
+}
+
+int CameraSensorRaw::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ id_ = sysfs::firmwareNodePath(subdev_->devicePath());
+ if (id_.empty()) {
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+ }
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern. */
+ uint32_t cfa;
+
+ switch (cfaPattern_) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ default:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+
+ return 0;
+}
+
+void CameraSensorRaw::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorRaw::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+ * The camera sensor supports test patterns but we don't know
+ * how to map them so this should be fixed.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorRaw::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(streams_.image.source, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(streams_.image.source, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+				       return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling will look identical and
+ * only the first found one will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+		}
+
+		/* Use the first code found, as the \todo above notes. */
+		if (subdevFormat.code)
+			break;
+	}
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+V4L2Subdevice::Stream CameraSensorRaw::imageStream() const
+{
+ return streams_.image.source;
+}
+
+std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const
+{
+ if (!streams_.edata)
+ return {};
+
+ return { streams_.edata->source };
+}
+
+V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const
+{
+ if (!streams_.edata)
+ return {};
+
+ V4L2SubdeviceFormat format;
+ int ret = subdev_->getFormat(streams_.edata->source, &format);
+ if (ret)
+ return {};
+
+ return format;
+}
+
+int CameraSensorRaw::setEmbeddedDataEnabled(bool enable)
+{
+ if (!streams_.edata)
+ return enable ? -ENOSTR : 0;
+
+ V4L2Subdevice::Routing routing{ 2 };
+
+ routing[0].sink = streams_.image.sink;
+ routing[0].source = streams_.image.source;
+ routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+
+ routing[1].sink = streams_.edata->sink;
+ routing[1].source = streams_.edata->source;
+ routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0;
+
+ int ret = subdev_->setRouting(&routing);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the embedded data stream has been enabled or disabled
+ * correctly. Assume at least one route will match the embedded data
+ * source stream, as there would be something seriously wrong
+ * otherwise.
+ */
+ bool enabled = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source != streams_.edata->source)
+ continue;
+
+ enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+ break;
+ }
+
+ if (enabled != enable)
+ return enabled ? -EISCONN : -ENOSTR;
+
+ return 0;
+}
+
+int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const
+{
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &info->analogCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+ *
+ * Compensate it by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(streams_.image.source, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorRaw::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If we cannot do any flips we cannot change the native camera mounting
+ * orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If transform contains any Transpose we cannot do it, so adjust
+ * 'orientation' to report the image native orientation and return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const
+{
+ if (!flipsAlterBayerOrder_)
+ return cfaPattern_;
+
+ /*
+	 * Apply the transform to the native (i.e. untransformed) Bayer order.
+	 * The bit depth and packing are arbitrary, as only the resulting order
+	 * is used.
+ */
+ BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None };
+ return format.transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorRaw::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorRaw::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
+int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorRaw::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
new file mode 100644
index 00000000..dce74ed6
--- /dev/null
+++ b/src/libcamera/sensor/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_sources += files([
+ 'camera_sensor.cpp',
+ 'camera_sensor_legacy.cpp',
+ 'camera_sensor_properties.cpp',
+ 'camera_sensor_raw.cpp',
+])
diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp
new file mode 100644
index 00000000..d9b61d37
--- /dev/null
+++ b/src/libcamera/shared_mem_object.cpp
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ * Copyright (C) 2024 Ideas on Board Oy
+ *
+ * Helpers for shared memory allocations
+ */
+
+#include "libcamera/internal/shared_mem_object.h"
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <libcamera/base/memfd.h>
+
+/**
+ * \file shared_mem_object.cpp
+ * \brief Helpers for shared memory allocations
+ */
+
+namespace libcamera {
+
+/**
+ * \class SharedMem
+ * \brief Helper class to allocate and manage memory shareable between processes
+ *
+ * SharedMem manages memory suitable for sharing between processes. When an
+ * instance is constructed, it allocates a memory buffer of the requested size
+ * backed by an anonymous file, using the memfd API.
+ *
+ * The allocated memory is exposed by the mem() function. If memory allocation
+ * fails, the function returns an empty Span. This can also be checked using
+ * the bool() operator.
+ *
+ * The file descriptor for the backing file is exposed as a SharedFD by the fd()
+ * function. It can be shared with other processes across IPC boundaries, which
+ * can then map the memory with mmap().
+ *
+ * A single memfd is created for every SharedMem. If there is a need to allocate
+ * a large number of objects in shared memory, these objects should be grouped
+ * together and use the shared memory allocated by a single SharedMem object if
+ * possible. This will help to minimize the number of created memfds.
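+ *
+ * A minimal usage sketch, where the producer writes through the local mapping
+ * and shares the file descriptor over an IPC channel (sendFdToWorker() is a
+ * hypothetical helper, not libcamera API):
+ *
+ * \code{.cpp}
+ * SharedMem shm("ipa-scratch", 4096);
+ * if (!shm)
+ *         return -ENOMEM;
+ *
+ * Span<uint8_t> data = shm.mem();
+ * data[0] = 0xaa;
+ * sendFdToWorker(shm.fd());
+ * \endcode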
+ */
+
+SharedMem::SharedMem() = default;
+
+/**
+ * \brief Construct a SharedMem with memory of the given \a size
+ * \param[in] name Name of the SharedMem
+ * \param[in] size Size of the shared memory to allocate and map
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMem instances
+ * can have the same name.
+ */
+SharedMem::SharedMem(const std::string &name, std::size_t size)
+{
+ UniqueFD memfd = MemFd::create(name.c_str(), size,
+ MemFd::Seal::Shrink | MemFd::Seal::Grow);
+ if (!memfd.isValid())
+ return;
+
+ fd_ = SharedFD(std::move(memfd));
+
+ void *mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd_.get(), 0);
+ if (mem == MAP_FAILED) {
+ fd_ = SharedFD();
+ return;
+ }
+
+ mem_ = { static_cast<uint8_t *>(mem), size };
+}
+
+/**
+ * \brief Move constructor for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem::SharedMem(SharedMem &&rhs)
+{
+ this->fd_ = std::move(rhs.fd_);
+ this->mem_ = rhs.mem_;
+ rhs.mem_ = {};
+}
+
+/**
+ * \brief Destroy the SharedMem instance
+ *
+ * Destroying an instance invalidates the memory mapping exposed with mem().
+ * Other mappings of the backing file, created in this or other processes with
+ * mmap(), remain valid.
+ *
+ * Similarly, other references to the backing file descriptor created by copying
+ * the SharedFD returned by fd() remain valid. The underlying memory will be
+ * freed only when all file descriptors that reference the anonymous file get
+ * closed.
+ */
+SharedMem::~SharedMem()
+{
+ if (!mem_.empty())
+ munmap(mem_.data(), mem_.size_bytes());
+}
+
+/**
+ * \brief Move assignment operator for SharedMem
+ * \param[in] rhs The object to move
+ */
+SharedMem &SharedMem::operator=(SharedMem &&rhs)
+{
+	/*
+	 * Release any mapping this instance already holds before adopting the
+	 * one from rhs, to avoid leaking it.
+	 */
+	if (!mem_.empty())
+		munmap(mem_.data(), mem_.size_bytes());
+
+	this->fd_ = std::move(rhs.fd_);
+	this->mem_ = rhs.mem_;
+	rhs.mem_ = {};
+	return *this;
+}
+
+/**
+ * \fn const SharedFD &SharedMem::fd() const
+ * \brief Retrieve the file descriptor for the underlying shared memory
+ * \return The file descriptor, or an invalid SharedFD if allocation failed
+ */
+
+/**
+ * \fn Span<uint8_t> SharedMem::mem() const
+ * \brief Retrieve the underlying shared memory
+ * \return The memory buffer, or an empty Span if allocation failed
+ */
+
+/**
+ * \fn SharedMem::operator bool()
+ * \brief Check if the shared memory allocation succeeded
+ * \return True if allocation of the shared memory succeeded, false otherwise
+ */
+
+/**
+ * \class SharedMemObject
+ * \brief Helper class to allocate an object in shareable memory
+ * \tparam T The object type
+ *
+ * The SharedMemObject class is a specialization of the SharedMem class that
+ * wraps an object of type \a T and constructs it in shareable memory. It uses
+ * the same underlying memory allocation and sharing mechanism as the SharedMem
+ * class.
+ *
+ * The wrapped object is constructed at the same time as the SharedMemObject
+ * instance, by forwarding the arguments passed to the SharedMemObject
+ * constructor. The underlying memory allocation is sized to the object \a T
+ * size. The bool() operator should be used to check the allocation was
+ * successful. The object can be accessed using the dereference operators
+ * operator*() and operator->().
+ *
+ * While no restriction on the type \a T is enforced, not all types are suitable
+ * for sharing between multiple processes. Most notably, any object type that
+ * contains pointer or reference members will likely cause issues. Even if those
+ * members refer to other members of the same object, the shared memory will be
+ * mapped at different addresses in different processes, and the pointers will
+ * not be valid.
+ *
+ * A new anonymous file is created for every SharedMemObject instance. If there
+ * is a need to share a large number of small objects, these objects should be
+ * grouped into a single larger object to limit the number of file descriptors.
+ *
+ * To share the object with other processes, see the SharedMem documentation.
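+ *
+ * A brief usage sketch (the Stats type is purely illustrative):
+ *
+ * \code{.cpp}
+ * struct Stats {
+ *         uint32_t frameCount = 0;
+ * };
+ *
+ * SharedMemObject<Stats> stats("stats");
+ * if (!stats)
+ *         return -ENOMEM;
+ *
+ * stats->frameCount++;
+ * \endcode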
+ */
+
+/**
+ * \var SharedMemObject::kSize
+ * \brief The size of the object stored in shared memory
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(const std::string &name, Args &&...args)
+ * \brief Construct a SharedMemObject
+ * \param[in] name Name of the SharedMemObject
+ * \param[in] args Arguments to pass to the constructor of the object T
+ *
+ * The \a name is used for debugging purposes only. Multiple SharedMemObject
+ * instances can have the same name.
+ */
+
+/**
+ * \fn SharedMemObject::SharedMemObject(SharedMemObject<T> &&rhs)
+ * \brief Move constructor for SharedMemObject
+ * \param[in] rhs The object to move
+ */
+
+/**
+ * \fn SharedMemObject::~SharedMemObject()
+ * \brief Destroy the SharedMemObject instance
+ *
+ * Destroying a SharedMemObject calls the wrapped T object's destructor. While
+ * the underlying memory may not be freed immediately if other mappings have
+ * been created manually (see SharedMem::~SharedMem() for more information), the
+ * stored object may be modified. Depending on the ~T() destructor, accessing
+ * the object after destruction of the SharedMemObject causes undefined
+ * behaviour. It is the responsibility of the user of this class to synchronize
+ * with other users who have access to the shared object.
+ */
+
+/**
+ * \fn SharedMemObject::operator=(SharedMemObject<T> &&rhs)
+ * \brief Move assignment operator for SharedMemObject
+ * \param[in] rhs The SharedMemObject object to take the data from
+ *
+ * Moving a SharedMemObject does not affect the stored object.
+ */
+
+/**
+ * \fn SharedMemObject::operator->()
+ * \brief Dereference the stored object
+ * \return Pointer to the stored object
+ */
+
+/**
+ * \fn const T *SharedMemObject::operator->() const
+ * \copydoc SharedMemObject::operator->
+ */
+
+/**
+ * \fn SharedMemObject::operator*()
+ * \brief Dereference the stored object
+ * \return Reference to the stored object
+ */
+
+/**
+ * \fn const T &SharedMemObject::operator*() const
+ * \copydoc SharedMemObject::operator*
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/signal.cpp b/src/libcamera/signal.cpp
deleted file mode 100644
index 6eab1fa7..00000000
--- a/src/libcamera/signal.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * signal.cpp - Signal & slot implementation
- */
-
-#include <libcamera/signal.h>
-
-#include "thread.h"
-
-/**
- * \file signal.h
- * \brief Signal & slot implementation
- */
-
-namespace libcamera {
-
-namespace {
-
-/*
- * Mutex to protect the SignalBase::slots_ and Object::signals_ lists. If lock
- * contention needs to be decreased, this could be replaced with locks in
- * Object and SignalBase, or with a mutex pool.
- */
-Mutex signalsLock;
-
-} /* namespace */
-
-void SignalBase::connect(BoundMethodBase *slot)
-{
- MutexLocker locker(signalsLock);
-
- Object *object = slot->object();
- if (object)
- object->connect(this);
- slots_.push_back(slot);
-}
-
-void SignalBase::disconnect(Object *object)
-{
- disconnect([object](SlotList::iterator &iter) {
- return (*iter)->match(object);
- });
-}
-
-void SignalBase::disconnect(std::function<bool(SlotList::iterator &)> match)
-{
- MutexLocker locker(signalsLock);
-
- for (auto iter = slots_.begin(); iter != slots_.end(); ) {
- if (match(iter)) {
- Object *object = (*iter)->object();
- if (object)
- object->disconnect(this);
-
- delete *iter;
- iter = slots_.erase(iter);
- } else {
- ++iter;
- }
- }
-}
-
-SignalBase::SlotList SignalBase::slots()
-{
- MutexLocker locker(signalsLock);
- return slots_;
-}
-
-/**
- * \class Signal
- * \brief Generic signal and slot communication mechanism
- *
- * Signals and slots are a language construct aimed at communication between
- * objects through the observer pattern without the need for boilerplate code.
- * See http://doc.qt.io/qt-5/signalsandslots.html for more information.
- *
- * Signals model events that can be observed from objects unrelated to the event
- * source. Slots are functions that are called in response to a signal. Signals
- * can be connected to and disconnected from slots dynamically at runtime. When
- * a signal is emitted, all connected slots are called sequentially in the order
- * they have been connected.
- *
- * Signals are defined with zero, one or more typed parameters. They are emitted
- * with a value for each of the parameters, and those values are passed to the
- * connected slots.
- *
- * Slots are normal static or class member functions. In order to be connected
- * to a signal, their signature must match the signal type (taking the same
- * arguments as the signal and returning void).
- *
- * Connecting a signal to a slot results in the slot being called with the
- * arguments passed to the emit() function when the signal is emitted. Multiple
- * slots can be connected to the same signal, and multiple signals can connected
- * to the same slot. Duplicate connections between a signal and a slot are
- * allowed and result in the slot being called multiple times for the same
- * signal emission.
- *
- * When a slot belongs to an instance of the Object class, the slot is called
- * in the context of the thread that the object is bound to. If the signal is
- * emitted from the same thread, the slot will be called synchronously, before
- * Signal::emit() returns. If the signal is emitted from a different thread,
- * the slot will be called asynchronously from the object's thread's event
- * loop, after the Signal::emit() method returns, with a copy of the signal's
- * arguments. The emitter shall thus ensure that any pointer or reference
- * passed through the signal will remain valid after the signal is emitted.
- */
-
-/**
- * \fn Signal::connect(T *object, R (T::*func)(Args...))
- * \brief Connect the signal to a member function slot
- * \param[in] object The slot object pointer
- * \param[in] func The slot member function
- *
- * If the typename T inherits from Object, the signal will be automatically
- * disconnected from the \a func slot of \a object when \a object is destroyed.
- * Otherwise the caller shall disconnect signals manually before destroying \a
- * object.
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::connect(R (*func)(Args...))
- * \brief Connect the signal to a static function slot
- * \param[in] func The slot static function
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::disconnect()
- * \brief Disconnect the signal from all slots
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::disconnect(T *object)
- * \brief Disconnect the signal from all slots of the \a object
- * \param[in] object The object pointer whose slots to disconnect
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::disconnect(T *object, R (T::*func)(Args...))
- * \brief Disconnect the signal from the \a object slot member function \a func
- * \param[in] object The object pointer whose slots to disconnect
- * \param[in] func The slot member function to disconnect
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::disconnect(R (*func)(Args...))
- * \brief Disconnect the signal from the slot static function \a func
- * \param[in] func The slot static function to disconnect
- *
- * \context This function is \threadsafe.
- */
-
-/**
- * \fn Signal::emit(Args... args)
- * \brief Emit the signal and call all connected slots
- * \param args The arguments passed to the connected slots
- *
- * Emitting a signal calls all connected slots synchronously and sequentially in
- * the order the slots have been connected. The arguments passed to the emit()
- * function are passed to the slot functions unchanged. If a slot modifies one
- * of the arguments (when passed by pointer or reference), the modification is
- * thus visible to all subsequently called slots.
- *
- * This function is not \threadsafe, but thread-safety is guaranteed against
- * concurrent connect() and disconnect() calls.
- */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO
new file mode 100644
index 00000000..a50db668
--- /dev/null
+++ b/src/libcamera/software_isp/TODO
@@ -0,0 +1,208 @@
+2. Reconsider stats sharing
+
+>>> +void SwStatsCpu::finishFrame(void)
+>>> +{
+>>> + *sharedStats_ = stats_;
+>>
+>> Is it more efficient to copy the stats instead of operating directly on
+>> the shared memory ?
+>
+> I inherited doing things this way from Andrey. I kept this because
+> we don't really have any synchronization with the IPA reading this.
+>
+> So the idea is to only touch this when the next set of statistics
+> is ready since we don't know when the IPA is done with accessing
+> the previous set of statistics ...
+>
+> This is both something which seems mostly a theoretic problem,
+> yet also definitely something which I think we need to fix.
+>
+> Maybe use a ringbuffer of stats buffers and pass the index into
+> the ringbuffer to the emit signal ?
+
+That would match how we deal with hardware ISPs, and I think that's a
+good idea. It will help decouple the processing side from the IPA.
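+
+A rough sketch of the ringbuffer idea (names are illustrative; statsReady
+would become a Signal<unsigned int> carrying the buffer index, and
+sharedStats_ would wrap an array instead of a single SwIspStats):
+
+    static constexpr unsigned int kStatsBufferCount = 4;
+    SharedMemObject<std::array<SwIspStats, kStatsBufferCount>> sharedStats_;
+    unsigned int statsIndex_ = 0;
+
+    void SwStatsCpu::finishFrame()
+    {
+            (*sharedStats_)[statsIndex_] = stats_;
+            statsReady.emit(statsIndex_);
+            statsIndex_ = (statsIndex_ + 1) % kStatsBufferCount;
+    }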
+
+---
+
+3. Remove statsReady signal
+
+> class SwStatsCpu
+> {
+> /**
+> * \brief Signals that the statistics are ready
+> */
+> Signal<> statsReady;
+
+But better, I wonder if the signal could be dropped completely. The
+SwStatsCpu class does not operate asynchronously. Shouldn't whoever
+calls the finishFrame() function then handle emitting the signal ?
+
+Now, the trouble is that this would be the DebayerCpu class, whose name
+doesn't mark it as a prime candidate to handle stats. However, it
+already exposes a getStatsFD() function, so we're already calling for
+trouble :-) Either that should be moved to somewhere else, or the class
+should be renamed. Considering that the class applies colour gains in
+addition to performing the interpolation, it may be more of a naming
+issue.
+
+Removing the signal and refactoring those classes doesn't have to be
+addressed now, I think it would be part of a larger refactoring
+(possibly also considering platforms that have no ISP but can produce
+stats in hardware, such as the i.MX7), but please keep it on your radar.
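+
+A sketch of the suggested inversion (illustrative only, with the signal
+moving to whoever calls finishFrame()):
+
+    /* In DebayerCpu::process(), after the last line has been processed */
+    stats_->finishFrame();
+    statsReady.emit();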
+
+---
+
+5. Store ISP parameters in per-frame buffers
+
+> /**
+> * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> * \brief Process the bayer data into the requested format.
+> * \param[in] input The input buffer.
+> * \param[in] output The output buffer.
+> * \param[in] params The parameters to be used in debayering.
+> *
+> * \note DebayerParams is passed by value deliberately so that a copy is passed
+> * when this is run in another thread by invokeMethod().
+> */
+
+Possibly something to address later, by storing ISP parameters in
+per-frame buffers like we do for hardware ISPs.
+
+---
+
+6. Input buffer copying configuration
+
+> DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+> : stats_(std::move(stats)), gammaCorrection_(1.0)
+> {
+> enableInputMemcpy_ = true;
+
+Set this appropriately and/or make it configurable.
+
+---
+
+7. Performance measurement configuration
+
+> void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+> /* Measure before emitting signals */
+> if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+> ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+> timespec frameEndTime = {};
+> clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+> frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+> if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+> const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+> DebayerCpu::kFramesToSkip;
+> LOG(Debayer, Info)
+> << "Processed " << measuredFrames
+> << " frames in " << frameProcessTime_ / 1000 << "us, "
+> << frameProcessTime_ / (1000 * measuredFrames)
+> << " us/frame";
+> }
+> }
+
+I wonder if there would be a way to control at runtime when/how to
+perform those measurements. Maybe that's a bit overkill.
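+
+One possible direction (a sketch; the variable and member names are made up)
+would be to gate the measurement behind an environment variable read once at
+construction time:
+
+    /* In DebayerCpu::DebayerCpu() */
+    measureFrameTimes_ = std::getenv("LIBCAMERA_SOFTISP_MEASURE") != nullptr;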
+
+---
+
+8. DebayerCpu cleanups
+
+> >> class DebayerCpu : public Debayer, public Object
+> >> const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+> >
+> > This,
+>
+> Note the statistics pass-through stuff is sort of a necessary evil
+> since we want one main loop going over the data line by line and
+> doing both debayering as well as stats while the line is still
+> hot in the l2 cache. And things like the process2() and process4()
+> loops are highly CPU debayering specific so I don't think we should
+> move those out of the CpuDebayer code.
+
+Yes, that I understood from the review. "necessary evil" is indeed the
+right term :-) I expect it will take quite some design skills to balance
+the need for performance and the need for a maintainable architecture.
+
+> > plus the fact that this class handles colour gains and gamma,
+> > makes me thing we have either a naming issue, or an architecture issue.
+>
+> I agree that this does a bit more then debayering, although
+> the debayering really is the main thing it does.
+>
+> I guess the calculation of the rgb lookup tables which do the
+> color gains and gamma could be moved outside of this class,
+> that might even be beneficial for GPU based debayering assuming
+> that that is going to use rgb lookup tables too (it could
+> implement actual color gains + gamma correction in some different
+> way).
+>
+> I think this falls under the lets wait until we have a GPU
+> based SoftISP MVP/POC and then do some refactoring to see which
+> bits should go where.
+
+---
+
+9. Decouple pipeline and IPA naming
+
+> The current src/ipa/meson.build assumes the IPA name to match the
+> pipeline name. For this reason "-Dipas=simple" is used for the
+> Soft IPA module.
+
+This should be addressed.
+
+---
+
+10. Doxyfile cleanup
+
+>> diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile.in
+>> index a86ea6c1..2be8d47b 100644
+>> --- a/Documentation/Doxyfile.in
+>> +++ b/Documentation/Doxyfile.in
+>> @@ -44,6 +44,7 @@ EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+>> @TOP_SRCDIR@/src/libcamera/pipeline/ \
+>> @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+>> @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+>> + @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+> Why is this needed ?
+>
+>> @TOP_BUILDDIR@/src/libcamera/proxy/
+>> EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
+>> diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
+>> index f3b4881c..3352d08f 100644
+>> --- a/include/libcamera/ipa/meson.build
+>> +++ b/include/libcamera/ipa/meson.build
+>> @@ -65,6 +65,7 @@ pipeline_ipa_mojom_mapping = {
+>> 'ipu3': 'ipu3.mojom',
+>> 'rkisp1': 'rkisp1.mojom',
+>> 'rpi/vc4': 'raspberrypi.mojom',
+>> + 'simple': 'soft.mojom',
+>> 'vimc': 'vimc.mojom',
+>> }
+>> diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
+>> new file mode 100644
+>> index 00000000..c249bd75
+>> --- /dev/null
+>> +++ b/include/libcamera/ipa/soft.mojom
+>> @@ -0,0 +1,28 @@
+>> +/* SPDX-License-Identifier: LGPL-2.1-or-later */
+>> +
+>> +/*
+>> + * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+> Ah that's why.
+
+Yes, because, well... all the other IPAs were doing that...
+
+> It doesn't have to be done before merging, but could you
+> address this sooner than later ?
+
+---
+
+13. Improve black level and colour gains application
+
+I think the black level should eventually be moved before debayering, and
+ideally the colour gains as well. I understand the need for optimizations to
+lower the CPU consumption, but at the same time I don't feel comfortable
+building up on top of an implementation that may work a bit more by chance than
+by correctness, as that's not very maintainable.
diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp
new file mode 100644
index 00000000..f0b83261
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.cpp
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, 2024 Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayer base class
+ */
+
+#include "debayer.h"
+
+namespace libcamera {
+
+/**
+ * \struct DebayerParams
+ * \brief Struct to hold the debayer parameters.
+ */
+
+/**
+ * \var DebayerParams::kRGBLookupSize
+ * \brief Size of a color lookup table
+ */
+
+/**
+ * \typedef DebayerParams::ColorLookupTable
+ * \brief Type of the lookup tables for red, green, blue values
+ */
+
+/**
+ * \var DebayerParams::red
+ * \brief Lookup table for red color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::green
+ * \brief Lookup table for green color, mapping input values to output values
+ */
+
+/**
+ * \var DebayerParams::blue
+ * \brief Lookup table for blue color, mapping input values to output values
+ */
+
+/**
+ * \class Debayer
+ * \brief Base debayering class
+ *
+ * Base class that provides functions for setting up the debayering process.
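+ *
+ * A typical (illustrative) call sequence, where the owner class and slot
+ * names are placeholders; in practice process() is usually invoked in a
+ * separate thread through invokeMethod():
+ *
+ * \code{.cpp}
+ * debayer_->configure(inputCfg, outputCfgs);
+ * debayer_->outputBufferReady.connect(this, &SoftIsp::outputReady);
+ * debayer_->process(frame, input, output, params);
+ * \endcode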
+ */
+
+LOG_DEFINE_CATEGORY(Debayer)
+
+Debayer::~Debayer()
+{
+}
+
+/**
+ * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+ * \brief Configure the debayer object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ *
+ * \return 0 on success, a negative errno on failure
+ */
+
+/**
+ * \fn Size Debayer::patternSize(PixelFormat inputFormat)
+ * \brief Get the width and height at which the bayer pattern repeats
+ * \param[in] inputFormat The input format
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ *
+ * \return Pattern size or an empty size for unsupported inputFormats
+ */
+
+/**
+ * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
+ * \brief Get the supported output formats
+ * \param[in] inputFormat The input format
+ *
+ * \return All supported output formats or an empty vector if there are none
+ */
+
+/**
+ * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+ * \brief Get the stride and the frame size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size
+ *
+ * \return A tuple of the stride and the frame size, or a tuple with 0,0 if
+ * there is no valid output config
+ */
+
+/**
+ * \fn void Debayer::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+ * \brief Process the bayer data into the requested format
+ * \param[in] frame The frame number
+ * \param[in] input The input buffer
+ * \param[in] output The output buffer
+ * \param[in] params The parameters to be used in debayering
+ *
+ * \note DebayerParams is passed by value deliberately so that a copy is passed
+ * when this is run in another thread by invokeMethod().
+ */
+
+/**
+ * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input size
+ *
+ * \return The valid size ranges or an empty range if there are none
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::inputBufferReady
+ * \brief Signals when the input buffer is ready
+ */
+
+/**
+ * \var Signal<FrameBuffer *> Debayer::outputBufferReady
+ * \brief Signals when the output buffer is ready
+ */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h
new file mode 100644
index 00000000..d7ca060d
--- /dev/null
+++ b/src/libcamera/software_isp/debayer.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * debayering base class
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+
+LOG_DECLARE_CATEGORY(Debayer)
+
+class Debayer
+{
+public:
+ virtual ~Debayer() = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
+
+ virtual void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
+
+ virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+private:
+ virtual Size patternSize(PixelFormat inputFormat) = 0;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp
new file mode 100644
index 00000000..31ab96ab
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.cpp
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering class
+ */
+
+#include "debayer_cpu.h"
+
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+
+#include <linux/dma-buf.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/dma_buf_allocator.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+namespace libcamera {
+
+/**
+ * \class DebayerCpu
+ * \brief Class for debayering on the CPU
+ *
+ * Implementation for CPU based debayering
+ */
+
+/**
+ * \brief Constructs a DebayerCpu object
+ * \param[in] stats Pointer to the stats object to use
+ */
+DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
+ : stats_(std::move(stats))
+{
+ /*
+ * Reading from uncached buffers may be very slow.
+ * In such a case, it's better to copy input buffer data to normal memory.
+ * But in case of cached buffers, copying the data is unnecessary overhead.
+	 * enableInputMemcpy_ makes this behavior configurable. At the moment, we
+	 * always set it to true as the safer choice, but this should be changed
+	 * in the future.
+ */
+ enableInputMemcpy_ = true;
+
+ /* Initialize color lookup tables */
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++)
+ red_[i] = green_[i] = blue_[i] = i;
+}
+
+DebayerCpu::~DebayerCpu() = default;
+
+#define DECLARE_SRC_POINTERS(pixel_t) \
+ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
+ const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
+ const pixel_t *next = (const pixel_t *)src[2] + xShift_;
+
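+/*
+ * Each of the following macros produces one output pixel. The 3x3 diagram
+ * preceding each macro shows the Bayer neighbourhood around the centre pixel
+ * being converted, on the previous, current and next input lines.
+ */
+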
+/*
+ * RGR
+ * GBG
+ * RGR
+ */
+#define BGGR_BGR888(p, n, div) \
+ *dst++ = blue_[curr[x] / (div)]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GBG
+ * RGR
+ * GBG
+ */
+#define GRBG_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * GRG
+ * BGB
+ * GRG
+ */
+#define GBRG_BGR888(p, n, div) \
+ *dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
+ *dst++ = green_[curr[x] / (div)]; \
+ *dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+/*
+ * BGB
+ * GRG
+ * BGB
+ */
+#define RGGB_BGR888(p, n, div) \
+ *dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
+ *dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
+ *dst++ = red_[curr[x] / (div)]; \
+ if constexpr (addAlphaByte) \
+ *dst++ = 255; \
+ x++;
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint8_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 1, 1)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 4)
+ GBRG_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 4 for 10 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 4)
+ RGGB_BGR888(1, 1, 4)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ BGGR_BGR888(1, 1, 16)
+ GBRG_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ DECLARE_SRC_POINTERS(uint16_t)
+
+ for (int x = 0; x < (int)window_.width;) {
+ /* divide values by 16 for 12 -> 8 bpp value */
+ GRBG_BGR888(1, 1, 16)
+ RGGB_BGR888(1, 1, 16)
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ /*
+	 * For the first pixel, getting a pixel from the previous column uses
+	 * x - 2 to skip the 5th byte with least-significant bits for 4 pixels.
+	 * The same applies to the last pixel, which uses x + 2 to look at the
+	 * next column.
+ */
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ BGGR_BGR888(2, 1, 1)
+ /* Second pixel BGGR -> GBRG */
+ GBRG_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ BGGR_BGR888(1, 1, 1)
+ GBRG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* First pixel */
+ GRBG_BGR888(2, 1, 1)
+ /* Second pixel GRBG -> RGGB */
+ RGGB_BGR888(1, 1, 1)
+ /* Same thing for third and fourth pixels */
+ GRBG_BGR888(1, 1, 1)
+ RGGB_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ GBRG_BGR888(2, 1, 1)
+		/* Odd pixel GBRG -> BGGR */
+ BGGR_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ GBRG_BGR888(1, 1, 1)
+ BGGR_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+template<bool addAlphaByte>
+void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
+{
+ const int widthInBytes = window_.width * 5 / 4;
+ const uint8_t *prev = src[0];
+ const uint8_t *curr = src[1];
+ const uint8_t *next = src[2];
+
+ for (int x = 0; x < widthInBytes;) {
+ /* Even pixel */
+ RGGB_BGR888(2, 1, 1)
+ /* Odd pixel RGGB -> GRBG */
+ GRBG_BGR888(1, 1, 1)
+ /* Same thing for next 2 pixels */
+ RGGB_BGR888(1, 1, 1)
+ GRBG_BGR888(1, 2, 1)
+ /* Skip 5th src byte with 4 x 2 least-significant-bits */
+ x++;
+ }
+}
+
+static bool isStandardBayerOrder(BayerFormat::Order order)
+{
+ return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
+ order == BayerFormat::GRBG || order == BayerFormat::RGGB;
+}
+
+/*
+ * Set up the Debayer object according to the passed-in parameters.
+ * Return 0 on success, a negative errno value on failure
+ * (unsupported parameters).
+ */
+int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = (bayerFormat.bitDepth + 7) & ~7;
+ config.patternSize.width = 2;
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2 &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ config.bpp = 10;
+ config.patternSize.width = 4; /* 5 bytes per *4* pixels */
+ config.patternSize.height = 2;
+ config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
+ formats::XRGB8888,
+ formats::ARGB8888,
+ formats::BGR888,
+ formats::XBGR8888,
+ formats::ABGR8888 });
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported input format " << inputFormat.toString();
+ return -EINVAL;
+}
+
+int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
+{
+ if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
+ config.bpp = 24;
+ return 0;
+ }
+
+ if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 ||
+ outputFormat == formats::XBGR8888 || outputFormat == formats::ABGR8888) {
+ config.bpp = 32;
+ return 0;
+ }
+
+ LOG(Debayer, Info)
+ << "Unsupported output format " << outputFormat.toString();
+ return -EINVAL;
+}
+
+/*
+ * Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
+ * a single pair of BGGR debayer functions can be used for all 4 standard orders.
+ */
+int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ break;
+ case BayerFormat::GRBG:
+ std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
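+
+/*
+ * The mapping above in tabular form; columns show the first two pixels of
+ * the first two lines of each order:
+ *
+ *   BGGR: B G / G R   xShift_ = 0, no swap
+ *   GBRG: G B / R G   xShift_ = 1, no swap
+ *   GRBG: G R / B G   xShift_ = 0, debayer0/1 swapped
+ *   RGGB: R G / G B   xShift_ = 1, debayer0/1 swapped
+ */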
+
+int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputFormat);
+ bool addAlphaByte = false;
+
+ xShift_ = 0;
+ swapRedBlueGains_ = false;
+
+ auto invalidFmt = []() -> int {
+ LOG(Debayer, Error) << "Unsupported input/output format combination";
+ return -EINVAL;
+ };
+
+ switch (outputFormat) {
+ case formats::XRGB8888:
+ case formats::ARGB8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::RGB888:
+ break;
+ case formats::XBGR8888:
+ case formats::ABGR8888:
+ addAlphaByte = true;
+ [[fallthrough]];
+ case formats::BGR888:
+ /* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
+ swapRedBlueGains_ = true;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ bayerFormat.order = BayerFormat::RGGB;
+ break;
+ case BayerFormat::GBRG:
+ bayerFormat.order = BayerFormat::GRBG;
+ break;
+ case BayerFormat::GRBG:
+ bayerFormat.order = BayerFormat::GBRG;
+ break;
+ case BayerFormat::RGGB:
+ bayerFormat.order = BayerFormat::BGGR;
+ break;
+ default:
+ return invalidFmt();
+ }
+ break;
+ default:
+ return invalidFmt();
+ }
+
+ if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
+ bayerFormat.packing == BayerFormat::Packing::None &&
+ isStandardBayerOrder(bayerFormat.order)) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer8_BGBG_BGR888<true> : &DebayerCpu::debayer8_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer8_GRGR_BGR888<true> : &DebayerCpu::debayer8_GRGR_BGR888<false>;
+ break;
+ case 10:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10_BGBG_BGR888<true> : &DebayerCpu::debayer10_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10_GRGR_BGR888<true> : &DebayerCpu::debayer10_GRGR_BGR888<false>;
+ break;
+ case 12:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer12_BGBG_BGR888<true> : &DebayerCpu::debayer12_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer12_GRGR_BGR888<true> : &DebayerCpu::debayer12_GRGR_BGR888<false>;
+ break;
+ }
+ setupStandardBayerOrder(bayerFormat.order);
+ return 0;
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ return 0;
+ case BayerFormat::GBRG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ return 0;
+ case BayerFormat::GRBG:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
+ return 0;
+ case BayerFormat::RGGB:
+ debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
+ debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ return invalidFmt();
+}
+
+int DebayerCpu::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
+{
+ if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
+ return -EINVAL;
+
+ if (stats_->configure(inputCfg) != 0)
+ return -EINVAL;
+
+ const Size &statsPatternSize = stats_->patternSize();
+ if (inputConfig_.patternSize.width != statsPatternSize.width ||
+ inputConfig_.patternSize.height != statsPatternSize.height) {
+ LOG(Debayer, Error)
+ << "mismatching stats and debayer pattern sizes for "
+ << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+ }
+
+ inputConfig_.stride = inputCfg.stride;
+
+ if (outputCfgs.size() != 1) {
+ LOG(Debayer, Error)
+ << "Unsupported number of output streams: "
+ << outputCfgs.size();
+ return -EINVAL;
+ }
+
+ const StreamConfiguration &outputCfg = outputCfgs[0];
+ SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
+ std::tie(outputConfig_.stride, outputConfig_.frameSize) =
+ strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
+
+ if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
+ LOG(Debayer, Error)
+ << "Invalid output size/stride: "
+ << "\n " << outputCfg.size << " (" << outSizeRange << ")"
+ << "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
+ return -EINVAL;
+ }
+
+ if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
+ return -EINVAL;
+
+ window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
+ ~(inputConfig_.patternSize.width - 1);
+ window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
+ ~(inputConfig_.patternSize.height - 1);
+ window_.width = outputCfg.size.width;
+ window_.height = outputCfg.size.height;
+
+ /* Don't pass x,y since process() already adjusts src before passing it */
+ stats_->setWindow(Rectangle(window_.size()));
+
+ /* Pad with patternSize.width on both the left and right sides */
+ lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
+ lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
+ 2 * lineBufferPadding_;
+
+ if (enableInputMemcpy_) {
+ for (unsigned int i = 0; i <= inputConfig_.patternSize.height; i++)
+ lineBuffers_[i].resize(lineBufferLength_);
+ }
+
+ measuredFrames_ = 0;
+ frameProcessTime_ = 0;
+
+ return 0;
+}
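+
+/*
+ * A worked example of the window alignment in configure() above, with
+ * hypothetical sizes: centering a 640x480 window in a 1286x723 frame yields
+ * x = 323 and y = 121, both rounded down onto the 2x2 Bayer grid.
+ */
+static_assert((((1286 - 640) / 2) & ~(2 - 1)) == 322, "x aligned down to 2");
+static_assert((((723 - 480) / 2) & ~(2 - 1)) == 120, "y aligned down to 2");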
+
+/*
+ * Get width and height at which the bayer-pattern repeats.
+ * Return pattern-size or an empty Size for an unsupported inputFormat.
+ */
+Size DebayerCpu::patternSize(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return {};
+
+ return config.patternSize;
+}
+
+std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
+{
+ DebayerCpu::DebayerInputConfig config;
+
+ if (getInputConfig(inputFormat, config) != 0)
+ return std::vector<PixelFormat>();
+
+ return config.outputFormats;
+}
+
+std::tuple<unsigned int, unsigned int>
+DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ DebayerCpu::DebayerOutputConfig config;
+
+ if (getOutputConfig(outputFormat, config) != 0)
+ return std::make_tuple(0, 0);
+
+ /* Round up to a multiple of 8 for 64-bit alignment */
+ unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
+
+ return std::make_tuple(stride, stride * size.height);
+}
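+
+/*
+ * A worked example of the stride rounding above (hypothetical width): a
+ * 1022-pixel wide RGB888 (24 bpp) line needs 3066 bytes and is padded to
+ * 3072 bytes.
+ */
+static_assert(((1022 * 24 / 8 + 7) & ~7) == 3072, "stride rounded up to 8");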
+
+void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ for (unsigned int i = 0; i < patternHeight; i++) {
+ memcpy(lineBuffers_[i].data(),
+ linePointers[i + 1] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[i + 1] = lineBuffers_[i].data() + lineBufferPadding_;
+ }
+
+ /* Point lineBufferIndex_ to first unused lineBuffer */
+ lineBufferIndex_ = patternHeight;
+}
+
+void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ for (unsigned int i = 0; i < patternHeight; i++)
+ linePointers[i] = linePointers[i + 1];
+
+ linePointers[patternHeight] = src +
+ (patternHeight / 2) * (int)inputConfig_.stride;
+}
+
+void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
+{
+ const unsigned int patternHeight = inputConfig_.patternSize.height;
+
+ if (!enableInputMemcpy_)
+ return;
+
+ memcpy(lineBuffers_[lineBufferIndex_].data(),
+ linePointers[patternHeight] - lineBufferPadding_,
+ lineBufferLength_);
+ linePointers[patternHeight] = lineBuffers_[lineBufferIndex_].data() + lineBufferPadding_;
+
+ lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
+}
+
+void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
+{
+ unsigned int yEnd = window_.y + window_.height;
+ /* Holds [0] previous- [1] current- [2] next-line */
+ const uint8_t *linePointers[3];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ if (window_.y) {
+ linePointers[1] = src - inputConfig_.stride; /* previous-line */
+ linePointers[2] = src;
+ } else {
+ /* window_.y == 0, use the next line as prev line */
+ linePointers[1] = src + inputConfig_.stride;
+ linePointers[2] = src;
+ /* Last 2 lines also need special handling */
+ yEnd -= 2;
+ }
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 2) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+
+ if (window_.y == 0) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(yEnd, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ /* The next line may point outside src, use the previous line instead */
+ linePointers[2] = linePointers[0];
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
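+
+/*
+ * Sliding window illustration for the loop above (2x2 patterns): at each
+ * step linePointers holds { prev, curr, next }; shiftLinePointers() drops
+ * the oldest pointer and appends the new next line:
+ *
+ *   before: { y - 1, y,     y + 1 }
+ *   after:  { y,     y + 1, y + 2 }
+ */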
+
+void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
+{
+ const unsigned int yEnd = window_.y + window_.height;
+ /*
+ * This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
+ * [3] 1-line-down [4] 2-lines-down.
+ */
+ const uint8_t *linePointers[5];
+
+ /* Adjust src to top left corner of the window */
+ src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
+
+ /* [x] becomes [x - 1] after initial shiftLinePointers() call */
+ linePointers[1] = src - 2 * inputConfig_.stride;
+ linePointers[2] = src - inputConfig_.stride;
+ linePointers[3] = src;
+ linePointers[4] = src + inputConfig_.stride;
+
+ setupInputMemcpy(linePointers);
+
+ for (unsigned int y = window_.y; y < yEnd; y += 4) {
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine0(y, linePointers);
+ (this->*debayer0_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer1_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ stats_->processLine2(y, linePointers);
+ (this->*debayer2_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+
+ shiftLinePointers(linePointers, src);
+ memcpyNextLine(linePointers);
+ (this->*debayer3_)(dst, linePointers);
+ src += inputConfig_.stride;
+ dst += outputConfig_.stride;
+ }
+}
+
+namespace {
+
+inline int64_t timeDiff(timespec &after, timespec &before)
+{
+ return (after.tv_sec - before.tv_sec) * 1000000000LL +
+ (int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
+}
+
+} /* namespace */
+
+void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params)
+{
+ timespec frameStartTime;
+
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
+ frameStartTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
+ }
+
+ std::vector<DmaSyncer> dmaSyncers;
+ for (const FrameBuffer::Plane &plane : input->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read);
+
+ for (const FrameBuffer::Plane &plane : output->planes())
+ dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write);
+
+ green_ = params.green;
+ red_ = swapRedBlueGains_ ? params.blue : params.red;
+ blue_ = swapRedBlueGains_ ? params.red : params.blue;
+
+ /* Copy metadata from the input buffer */
+ FrameMetadata &metadata = output->_d()->metadata();
+ metadata.status = input->metadata().status;
+ metadata.sequence = input->metadata().sequence;
+ metadata.timestamp = input->metadata().timestamp;
+
+ MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
+ MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
+ if (!in.isValid() || !out.isValid()) {
+ LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
+ metadata.status = FrameMetadata::FrameError;
+ return;
+ }
+
+ stats_->startFrame();
+
+ if (inputConfig_.patternSize.height == 2)
+ process2(in.planes()[0].data(), out.planes()[0].data());
+ else
+ process4(in.planes()[0].data(), out.planes()[0].data());
+
+ metadata.planes()[0].bytesused = out.planes()[0].size();
+
+ dmaSyncers.clear();
+
+ /* Measure before emitting signals */
+ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
+ ++measuredFrames_ > DebayerCpu::kFramesToSkip) {
+ timespec frameEndTime = {};
+ clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
+ frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
+ if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
+ const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
+ DebayerCpu::kFramesToSkip;
+ LOG(Debayer, Info)
+ << "Processed " << measuredFrames
+ << " frames in " << frameProcessTime_ / 1000 << "us, "
+ << frameProcessTime_ / (1000 * measuredFrames)
+ << " us/frame";
+ }
+ }
+
+ /*
+ * Buffer IDs are currently not used, so pass zero as the parameter.
+ *
+ * \todo Pass real bufferId once stats buffer passing is changed.
+ */
+ stats_->finishFrame(frame, 0);
+ outputBufferReady.emit(output);
+ inputBufferReady.emit(input);
+}
+
+SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ Size patternSize = this->patternSize(inputFormat);
+ unsigned int borderHeight = patternSize.height;
+
+ if (patternSize.isNull())
+ return {};
+
+ /* No need for top/bottom border with a pattern height of 2 */
+ if (patternSize.height == 2)
+ borderHeight = 0;
+
+ /*
+ * For debayer interpolation a border is kept around the entire image
+ * and the minimum output size is pattern-width x pattern-height.
+ */
+ if (inputSize.width < (3 * patternSize.width) ||
+ inputSize.height < (2 * borderHeight + patternSize.height)) {
+ LOG(Debayer, Warning)
+ << "Input format size too small: " << inputSize.toString();
+ return {};
+ }
+
+ return SizeRange(Size(patternSize.width, patternSize.height),
+ Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
+ (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
+ patternSize.width, patternSize.height);
+}
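+
+/*
+ * Example with hypothetical sizes: for a 1284x722 input with a 2x2 pattern
+ * (no top/bottom border needed), valid output sizes range from 2x2 up to
+ * (1284 - 2 * 2) & ~1 = 1280 by 722 & ~1 = 722, in steps of 2.
+ */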
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h
new file mode 100644
index 00000000..2c47e7c6
--- /dev/null
+++ b/src/libcamera/software_isp/debayer_cpu.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based debayering header
+ */
+
+#pragma once
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include <libcamera/base/object.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+#include "debayer.h"
+#include "swstats_cpu.h"
+
+namespace libcamera {
+
+class DebayerCpu : public Debayer, public Object
+{
+public:
+ DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
+ ~DebayerCpu();
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
+ Size patternSize(PixelFormat inputFormat);
+ std::vector<PixelFormat> formats(PixelFormat input);
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+ void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params);
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ /**
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor pointing to the statistics
+ */
+ const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
+
+ /**
+ * \brief Get the output frame size
+ *
+ * \return The output frame size
+ */
+ unsigned int frameSize() { return outputConfig_.frameSize; }
+
+private:
+ /**
+ * \brief Called to debayer 1 line of Bayer input data to output format
+ * \param[out] dst Pointer to the start of the output line to write
+ * \param[in] src The input data
+ *
+ * Input data is an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the Bayer source. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * These functions take an array of src pointers, rather than
+ * a single src pointer + a stride for the source, so that when the src
+ * is slow uncached memory it can be copied to faster memory before
+ * debayering. Debayering a standard 2x2 Bayer pattern requires access
+ * to the previous and next src lines for interpolating the missing
+ * colors. To copy the src lines only once, 3 temporary buffers, each
+ * holding a single line, are used; the oldest buffer is re-used for
+ * the next line and the pointers are swizzled so that:
+ * src[0] = previous-line, src[1] = current-line, src[2] = next-line.
+ * This way the 3 pointers passed to the debayer functions form
+ * a sliding window over the src avoiding the need to copy each
+ * line more than once.
+ *
+ * Similarly for bayer patterns which repeat every 4 lines, 5 src
+ * pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
+ * src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
+ */
+ using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
+
+ /* 8-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 10-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* unpacked 12-bit raw bayer format */
+ template<bool addAlphaByte>
+ void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ /* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
+ template<bool addAlphaByte>
+ void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
+ template<bool addAlphaByte>
+ void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
+
+ struct DebayerInputConfig {
+ Size patternSize;
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ std::vector<PixelFormat> outputFormats;
+ };
+
+ struct DebayerOutputConfig {
+ unsigned int bpp; /* Memory used per pixel, not precision */
+ unsigned int stride;
+ unsigned int frameSize;
+ };
+
+ int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
+ int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
+ void setupInputMemcpy(const uint8_t *linePointers[]);
+ void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
+ void memcpyNextLine(const uint8_t *linePointers[]);
+ void process2(const uint8_t *src, uint8_t *dst);
+ void process4(const uint8_t *src, uint8_t *dst);
+
+ /* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
+ static constexpr unsigned int kMaxLineBuffers = 5;
+
+ DebayerParams::ColorLookupTable red_;
+ DebayerParams::ColorLookupTable green_;
+ DebayerParams::ColorLookupTable blue_;
+ debayerFn debayer0_;
+ debayerFn debayer1_;
+ debayerFn debayer2_;
+ debayerFn debayer3_;
+ Rectangle window_;
+ DebayerInputConfig inputConfig_;
+ DebayerOutputConfig outputConfig_;
+ std::unique_ptr<SwStatsCpu> stats_;
+ std::vector<uint8_t> lineBuffers_[kMaxLineBuffers];
+ unsigned int lineBufferLength_;
+ unsigned int lineBufferPadding_;
+ unsigned int lineBufferIndex_;
+ unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
+ bool enableInputMemcpy_;
+ bool swapRedBlueGains_;
+ unsigned int measuredFrames_;
+ int64_t frameProcessTime_;
+ /* Skip 30 frames for things to stabilize then measure 30 frames */
+ static constexpr unsigned int kFramesToSkip = 30;
+ static constexpr unsigned int kLastFrameToMeasure = 60;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build
new file mode 100644
index 00000000..aac7eda7
--- /dev/null
+++ b/src/libcamera/software_isp/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: CC0-1.0
+
+softisp_enabled = pipelines.contains('simple')
+summary({'SoftISP support' : softisp_enabled}, section : 'Configuration')
+
+if not softisp_enabled
+ subdir_done()
+endif
+
+libcamera_internal_sources += files([
+ 'debayer.cpp',
+ 'debayer_cpu.cpp',
+ 'software_isp.cpp',
+ 'swstats_cpu.cpp',
+])
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
new file mode 100644
index 00000000..2bea64d9
--- /dev/null
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#include "libcamera/internal/software_isp/software_isp.h"
+
+#include <cmath>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+#include "debayer_cpu.h"
+
+/**
+ * \file software_isp.cpp
+ * \brief Simple software ISP implementation
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SoftwareIsp)
+
+/**
+ * \class SoftwareIsp
+ * \brief Class for the Software ISP
+ */
+
+/**
+ * \var SoftwareIsp::inputBufferReady
+ * \brief A signal emitted when the input frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::outputBufferReady
+ * \brief A signal emitted when the output frame buffer completes
+ */
+
+/**
+ * \var SoftwareIsp::ispStatsReady
+ * \brief A signal emitted when the statistics for IPA are ready
+ */
+
+/**
+ * \var SoftwareIsp::setSensorControls
+ * \brief A signal emitted when the values to write to the sensor controls are
+ * ready
+ */
+
+/**
+ * \brief Constructs SoftwareIsp object
+ * \param[in] pipe The pipeline handler in use
+ * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
+ * handler
+ * \param[out] ipaControls The IPA controls to update
+ */
+SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
+ ControlInfoMap *ipaControls)
+ : dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
+ DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
+{
+ /*
+ * debayerParams_ must be initialized because the initial value is used for
+ * the first two frames, i.e. until stats processing starts providing its
+ * own parameters.
+ *
+ * \todo This should be handled in the same place as the related
+ * operations, in the IPA module.
+ */
+ std::array<uint8_t, 256> gammaTable;
+ for (unsigned int i = 0; i < 256; i++)
+ gammaTable[i] = UINT8_MAX * std::pow(i / 256.0, 0.5);
+ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
+ debayerParams_.red[i] = gammaTable[i];
+ debayerParams_.green[i] = gammaTable[i];
+ debayerParams_.blue[i] = gammaTable[i];
+ }
+
+ if (!dmaHeap_.isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create DmaBufAllocator object";
+ return;
+ }
+
+ sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
+ if (!sharedParams_) {
+ LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
+ return;
+ }
+
+ auto stats = std::make_unique<SwStatsCpu>();
+ if (!stats->isValid()) {
+ LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
+ return;
+ }
+ stats->statsReady.connect(this, &SoftwareIsp::statsReady);
+
+ debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
+ debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
+ debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
+
+ ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
+ if (!ipa_) {
+ LOG(SoftwareIsp, Error)
+ << "Creating IPA for software ISP failed";
+ debayer_.reset();
+ return;
+ }
+
+ /*
+ * The IPA tuning file is named after the sensor model. If the tuning file
+ * isn't found, fall back to the 'uncalibrated' file.
+ */
+ std::string ipaTuningFile =
+ ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml");
+
+ int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
+ debayer_->getStatsFD(),
+ sharedParams_.fd(),
+ sensor->controls(),
+ ipaControls);
+ if (ret) {
+ LOG(SoftwareIsp, Error) << "IPA init failed";
+ debayer_.reset();
+ return;
+ }
+
+ ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
+ ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
+
+ debayer_->moveToThread(&ispWorkerThread_);
+}
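+
+/*
+ * A worked example of the gamma 0.5 table initialized above: a quarter-scale
+ * input maps to roughly half-scale output, gammaTable[64] =
+ * 255 * sqrt(64 / 256.0) = 127 (after truncation).
+ */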
+
+SoftwareIsp::~SoftwareIsp()
+{
+ /* make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
+ debayer_.reset();
+}
+
+/**
+ * \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
+ * \brief Load a configuration from a file
+ * \param[in] filename The file to load the configuration data from
+ *
+ * This is currently a stub that does nothing and always returns "success".
+ *
+ * \return 0 on success
+ */
+
+/**
+ * \brief Process the statistics gathered
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ * \param[in] sensorControls The sensor controls
+ *
+ * Requests the IPA to calculate new parameters for ISP and new control
+ * values for the sensor.
+ */
+void SoftwareIsp::processStats(const uint32_t frame, const uint32_t bufferId,
+ const ControlList &sensorControls)
+{
+ ASSERT(ipa_);
+ ipa_->processStats(frame, bufferId, sensorControls);
+}
+
+/**
+ * \brief Check the validity of the SoftwareIsp object
+ * \return True if the SoftwareIsp is valid, false otherwise
+ */
+bool SoftwareIsp::isValid() const
+{
+ return !!debayer_;
+}
+
+/**
+ * \brief Get the output formats supported for the given input format
+ * \param[in] inputFormat The input format
+ * \return All the supported output formats or an empty vector if there are none
+ */
+std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
+{
+ ASSERT(debayer_);
+
+ return debayer_->formats(inputFormat);
+}
+
+/**
+ * \brief Get the supported output sizes for the given input format and size
+ * \param[in] inputFormat The input format
+ * \param[in] inputSize The input frame size
+ * \return The valid size range or an empty range if there are none
+ */
+SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
+{
+ ASSERT(debayer_);
+
+ return debayer_->sizes(inputFormat, inputSize);
+}
+
+/**
+ * Get the output stride and the frame size in bytes for the given output format and size
+ * \param[in] outputFormat The output format
+ * \param[in] size The output size (width and height in pixels)
+ * \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
+ * if there is no valid output config
+ */
+std::tuple<unsigned int, unsigned int>
+SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
+{
+ ASSERT(debayer_);
+
+ return debayer_->strideAndFrameSize(outputFormat, size);
+}
+
+/**
+ * \brief Configure the SoftwareIsp object according to the passed in parameters
+ * \param[in] inputCfg The input configuration
+ * \param[in] outputCfgs The output configurations
+ * \param[in] configInfo The IPA configuration data, received from the pipeline
+ * handler
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ipa::soft::IPAConfigInfo &configInfo)
+{
+ ASSERT(ipa_ && debayer_);
+
+ int ret = ipa_->configure(configInfo);
+ if (ret < 0)
+ return ret;
+
+ return debayer_->configure(inputCfg, outputCfgs);
+}
+
+/**
+ * \brief Export the buffers from the Software ISP
+ * \param[in] stream Output stream exporting the buffers
+ * \param[in] count Number of buffers to allocate
+ * \param[out] buffers Vector to store the allocated buffers
+ * \return The number of allocated buffers on success or a negative error code
+ * otherwise
+ */
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+ ASSERT(debayer_ != nullptr);
+
+ /* single output for now */
+ if (stream == nullptr)
+ return -EINVAL;
+
+ return dmaHeap_.exportBuffers(count, { debayer_->frameSize() }, buffers);
+}
+
+/**
+ * \brief Queue a request and process the control list from the application
+ * \param[in] frame The number of the frame which will be processed next
+ * \param[in] controls The controls for the \a frame
+ */
+void SoftwareIsp::queueRequest(const uint32_t frame, const ControlList &controls)
+{
+ ipa_->queueRequest(frame, controls);
+}
+
+/**
+ * \brief Queue buffers to Software ISP
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[in] outputs The container holding the output stream pointers and
+ * their respective frame buffer outputs
+ * \return 0 on success, a negative errno on failure
+ */
+int SoftwareIsp::queueBuffers(uint32_t frame, FrameBuffer *input,
+ const std::map<const Stream *, FrameBuffer *> &outputs)
+{
+ /*
+ * Validate the outputs as a sanity check: at least one output is
+ * required, all outputs must reference a valid stream.
+ */
+ if (outputs.empty())
+ return -EINVAL;
+
+ for (auto [stream, buffer] : outputs) {
+ if (!buffer)
+ return -EINVAL;
+ if (outputs.size() != 1) /* only a single stream for now */
+ return -EINVAL;
+ }
+
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++)
+ process(frame, input, iter->second);
+
+ return 0;
+}
+
+/**
+ * \brief Starts the Software ISP streaming operation
+ * \return 0 on success, any other value indicates an error
+ */
+int SoftwareIsp::start()
+{
+ int ret = ipa_->start();
+ if (ret)
+ return ret;
+
+ ispWorkerThread_.start();
+ return 0;
+}
+
+/**
+ * \brief Stops the Software ISP streaming operation
+ */
+void SoftwareIsp::stop()
+{
+ ispWorkerThread_.exit();
+ ispWorkerThread_.wait();
+
+ ipa_->stop();
+}
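+
+/*
+ * An illustrative call sequence from a pipeline handler; the names and
+ * configuration values below are hypothetical:
+ *
+ *   SoftwareIsp isp(pipe, sensor, &ipaControls);
+ *   if (!isp.isValid())
+ *       return -ENODEV;
+ *   isp.configure(inputCfg, { std::ref(outputCfg) }, configInfo);
+ *   isp.start();
+ *   isp.queueBuffers(frame, rawBuffer, { { &stream, outputBuffer } });
+ *   ...complete the request when outputBufferReady is emitted...
+ *   isp.stop();
+ */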
+
+/**
+ * \brief Passes the input framebuffer to the ISP worker to process
+ * \param[in] frame The frame number
+ * \param[in] input The input framebuffer
+ * \param[out] output The framebuffer to write the processed frame to
+ */
+void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output)
+{
+ ipa_->computeParams(frame);
+ debayer_->invokeMethod(&DebayerCpu::process,
+ ConnectionTypeQueued, frame, input, output, debayerParams_);
+}
+
+void SoftwareIsp::saveIspParams()
+{
+ debayerParams_ = *sharedParams_;
+}
+
+void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
+{
+ setSensorControls.emit(sensorControls);
+}
+
+void SoftwareIsp::statsReady(uint32_t frame, uint32_t bufferId)
+{
+ ispStatsReady.emit(frame, bufferId);
+}
+
+void SoftwareIsp::inputReady(FrameBuffer *input)
+{
+ inputBufferReady.emit(input);
+}
+
+void SoftwareIsp::outputReady(FrameBuffer *output)
+{
+ outputBufferReady.emit(output);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp
new file mode 100644
index 00000000..c520c806
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.cpp
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#include "swstats_cpu.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/stream.h>
+
+#include "libcamera/internal/bayer_format.h"
+
+namespace libcamera {
+
+/**
+ * \class SwStatsCpu
+ * \brief Class for gathering statistics on the CPU
+ *
+ * CPU based software ISP statistics implementation.
+ *
+ * This class offers a configure function + functions to gather statistics on a
+ * line-by-line basis. This allows CPU based software debayering to interleave
+ * debayering and statistics gathering on a line-by-line basis while the input
+ * data is still hot in the cache.
+ *
+ * It is also possible to specify a window over which to gather statistics
+ * instead of processing the whole frame.
+ */
+
+/**
+ * \fn bool SwStatsCpu::isValid() const
+ * \brief Gets whether the statistics object is valid
+ *
+ * \return True if it's valid, false otherwise
+ */
+
+/**
+ * \fn const SharedFD &SwStatsCpu::getStatsFD()
+ * \brief Get the file descriptor for the statistics
+ *
+ * \return The file descriptor
+ */
+
+/**
+ * \fn const Size &SwStatsCpu::patternSize()
+ * \brief Get the pattern size
+ *
+ * For some input-formats, e.g. Bayer data, processing is done multiple lines
+ * and/or columns at a time. Get width and height at which the (bayer) pattern
+ * repeats. Window values are rounded down to a multiple of this and the height
+ * also indicates if processLine2() should be called or not.
+ * This may only be called after a successful configure() call.
+ *
+ * \return The pattern size
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
+ * \brief Process line 0
+ * \param[in] y The y coordinate.
+ * \param[in] src The input data.
+ *
+ * This function processes line 0 for input formats with
+ * patternSize height == 1.
+ * It will process lines 0 and 1 for input formats with patternSize height >= 2.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
+ * \brief Process line 2 and 3
+ * \param[in] y The y coordinate.
+ * \param[in] src The input data.
+ *
+ * This function processes lines 2 and 3 for input formats with
+ * patternSize height == 4.
+ * This function may only be called after a successful setWindow() call.
+ */
+
+/**
+ * \var Signal<> SwStatsCpu::statsReady
+ * \brief Signals that the statistics are ready
+ */
+
+/**
+ * \typedef SwStatsCpu::statsProcessFn
+ * \brief Called when there is data to get statistics from
+ * \param[in] src The input data
+ *
+ * These functions take an array of (patternSize_.height + 1) src
+ * pointers each pointing to a line in the source image. The middle
+ * element of the array will point to the actual line being processed.
+ * Earlier element(s) will point to the previous line(s) and later
+ * element(s) to the next line(s).
+ *
+ * See the documentation of DebayerCpu::debayerFn for more details.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::ySkipMask_
+ * \brief Skip lines where this bitmask is set in y
+ */
+
+/**
+ * \var Rectangle SwStatsCpu::window_
+ * \brief Statistics window, set by setWindow(), used every line
+ */
+
+/**
+ * \var Size SwStatsCpu::patternSize_
+ * \brief The size of the bayer pattern
+ *
+ * Valid sizes are: 2x2, 4x2 or 4x4.
+ */
+
+/**
+ * \var unsigned int SwStatsCpu::xShift_
+ * \brief The offset of x, applied to window_.x for bayer variants
+ *
+ * This can either be 0 or 1.
+ */
+
+LOG_DEFINE_CATEGORY(SwStatsCpu)
+
+SwStatsCpu::SwStatsCpu()
+ : sharedStats_("softIsp_stats")
+{
+ if (!sharedStats_)
+ LOG(SwStatsCpu, Error)
+ << "Failed to create shared memory for statistics";
+}
+
+static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
+static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
+static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
+
+#define SWSTATS_START_LINE_STATS(pixel_t) \
+ pixel_t r, g, g2, b; \
+ uint64_t yVal; \
+ \
+ uint64_t sumR = 0; \
+ uint64_t sumG = 0; \
+ uint64_t sumB = 0;
+
+#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
+ sumR += r; \
+ sumG += g; \
+ sumB += b; \
+ \
+ yVal = r * kRedYMul; \
+ yVal += g * kGreenYMul; \
+ yVal += b * kBlueYMul; \
+ stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
+
+#define SWSTATS_FINISH_LINE_STATS() \
+ stats_.sumR_ += sumR; \
+ stats_.sumG_ += sumG; \
+ stats_.sumB_ += sumB;
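+
+/*
+ * A worked example of the histogram bucketing above, assuming
+ * SwIspStats::kYHistogramSize == 16 (illustrative): for 8-bit data (div = 1)
+ * a pure white 2x2 block gives yVal = 255 * (77 + 150 + 29) = 65280, landing
+ * in bin 65280 * 16 / 65536 = 15, the last bin.
+ */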
+
+void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x;
+ const uint8_t *src1 = src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 4 for 10 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(4)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
+{
+ const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
+ const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
+
+ SWSTATS_START_LINE_STATS(uint16_t)
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ /* x += 4 sample every other 2x2 block */
+ for (int x = 0; x < (int)window_.width; x += 4) {
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+
+ g = (g + g2) / 2;
+
+ /* divide Y by 16 for 12 -> 8 bpp value */
+ SWSTATS_ACCUMULATE_LINE_STATS(16)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* BGGR */
+ b = src0[x];
+ g = src0[x + 1];
+ g2 = src1[x];
+ r = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
+{
+ const uint8_t *src0 = src[1] + window_.x * 5 / 4;
+ const uint8_t *src1 = src[2] + window_.x * 5 / 4;
+ const int widthInBytes = window_.width * 5 / 4;
+
+ if (swapLines_)
+ std::swap(src0, src1);
+
+ SWSTATS_START_LINE_STATS(uint8_t)
+
+ /* x += 5 sample every other 2x2 block */
+ for (int x = 0; x < widthInBytes; x += 5) {
+ /* GBRG */
+ g = src0[x];
+ b = src0[x + 1];
+ r = src1[x];
+ g2 = src1[x + 1];
+ g = (g + g2) / 2;
+ /* Data is already 8 bits, divide by 1 */
+ SWSTATS_ACCUMULATE_LINE_STATS(1)
+ }
+
+ SWSTATS_FINISH_LINE_STATS()
+}
+
+/**
+ * \brief Reset state to start statistics gathering for a new frame
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::startFrame(void)
+{
+ if (window_.width == 0)
+ LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
+
+ stats_.sumR_ = 0;
+ stats_.sumB_ = 0;
+ stats_.sumG_ = 0;
+ stats_.yHistogram.fill(0);
+}
+
+/**
+ * \brief Finish statistics calculation for the current frame
+ * \param[in] frame The frame number
+ * \param[in] bufferId ID of the statistics buffer
+ *
+ * This may only be called after a successful setWindow() call.
+ */
+void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId)
+{
+ *sharedStats_ = stats_;
+ statsReady.emit(frame, bufferId);
+}
+
+/**
+ * \brief Setup SwStatsCpu object for standard Bayer orders
+ * \param[in] order The Bayer order
+ *
+ * Check if order is a standard Bayer order and setup xShift_ and swapLines_
+ * so that a single BGGR stats function can be used for all 4 standard orders.
+ */
+int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
+{
+ switch (order) {
+ case BayerFormat::BGGR:
+ xShift_ = 0;
+ swapLines_ = false;
+ break;
+ case BayerFormat::GBRG:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = false;
+ break;
+ case BayerFormat::GRBG:
+ xShift_ = 0;
+ swapLines_ = true; /* BGGR -> GRBG */
+ break;
+ case BayerFormat::RGGB:
+ xShift_ = 1; /* BGGR -> GBRG */
+ swapLines_ = true; /* GBRG -> RGGB */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ patternSize_.height = 2;
+ patternSize_.width = 2;
+ ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
+ return 0;
+}
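+
+/*
+ * With ySkipMask_ = 0x02, (y & ySkipMask_) is non-zero exactly for the 3rd
+ * and 4th line of every group of 4, so processLine0() samples lines 0-1,
+ * skips lines 2-3, samples lines 4-5, and so on.
+ */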
+
+/**
+ * \brief Configure the statistics object for the passed-in input format
+ * \param[in] inputCfg The input stream configuration
+ *
+ * \return 0 on success, a negative errno value on failure
+ */
+int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
+{
+ BayerFormat bayerFormat =
+ BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
+
+ if (bayerFormat.packing == BayerFormat::Packing::None &&
+ setupStandardBayerOrder(bayerFormat.order) == 0) {
+ switch (bayerFormat.bitDepth) {
+ case 8:
+ stats0_ = &SwStatsCpu::statsBGGR8Line0;
+ return 0;
+ case 10:
+ stats0_ = &SwStatsCpu::statsBGGR10Line0;
+ return 0;
+ case 12:
+ stats0_ = &SwStatsCpu::statsBGGR12Line0;
+ return 0;
+ }
+ }
+
+ if (bayerFormat.bitDepth == 10 &&
+ bayerFormat.packing == BayerFormat::Packing::CSI2) {
+ patternSize_.height = 2;
+ patternSize_.width = 4; /* 5 bytes per *4* pixels */
+ /* Skip every 3rd and 4th line, sample every other 2x2 block */
+ ySkipMask_ = 0x02;
+ xShift_ = 0;
+
+ switch (bayerFormat.order) {
+ case BayerFormat::BGGR:
+ case BayerFormat::GRBG:
+ stats0_ = &SwStatsCpu::statsBGGR10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::GRBG;
+ return 0;
+ case BayerFormat::GBRG:
+ case BayerFormat::RGGB:
+ stats0_ = &SwStatsCpu::statsGBRG10PLine0;
+ swapLines_ = bayerFormat.order == BayerFormat::RGGB;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ LOG(SwStatsCpu, Info)
+ << "Unsupported input format " << inputCfg.pixelFormat.toString();
+ return -EINVAL;
+}
+
+/**
+ * \brief Specify window coordinates over which to gather statistics
+ * \param[in] window The window object.
+ */
+void SwStatsCpu::setWindow(const Rectangle &window)
+{
+ window_ = window;
+
+ window_.x &= ~(patternSize_.width - 1);
+ window_.x += xShift_;
+ window_.y &= ~(patternSize_.height - 1);
+
+ /* width_ - xShift_ to make sure the window fits */
+ window_.width -= xShift_;
+ window_.width &= ~(patternSize_.width - 1);
+ window_.height &= ~(patternSize_.height - 1);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h
new file mode 100644
index 00000000..26a2f462
--- /dev/null
+++ b/src/libcamera/software_isp/swstats_cpu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * CPU based software statistics implementation
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/swisp_stats.h"
+
+namespace libcamera {
+
+class PixelFormat;
+struct StreamConfiguration;
+
+class SwStatsCpu
+{
+public:
+ SwStatsCpu();
+ ~SwStatsCpu() = default;
+
+ bool isValid() const { return sharedStats_.fd().isValid(); }
+
+ const SharedFD &getStatsFD() { return sharedStats_.fd(); }
+
+ const Size &patternSize() { return patternSize_; }
+
+ int configure(const StreamConfiguration &inputCfg);
+ void setWindow(const Rectangle &window);
+ void startFrame();
+ void finishFrame(uint32_t frame, uint32_t bufferId);
+
+ void processLine0(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats0_)(src);
+ }
+
+ void processLine2(unsigned int y, const uint8_t *src[])
+ {
+ if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
+ y >= (window_.y + window_.height))
+ return;
+
+ (this->*stats2_)(src);
+ }
+
+ Signal<uint32_t, uint32_t> statsReady;
+
+private:
+ using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
+
+ int setupStandardBayerOrder(BayerFormat::Order order);
+ /* Bayer 8 bpp unpacked */
+ void statsBGGR8Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp unpacked */
+ void statsBGGR10Line0(const uint8_t *src[]);
+ /* Bayer 12 bpp unpacked */
+ void statsBGGR12Line0(const uint8_t *src[]);
+ /* Bayer 10 bpp packed */
+ void statsBGGR10PLine0(const uint8_t *src[]);
+ void statsGBRG10PLine0(const uint8_t *src[]);
+
+ /* Variables set by configure(), used every line */
+ statsProcessFn stats0_;
+ statsProcessFn stats2_;
+ bool swapLines_;
+
+ unsigned int ySkipMask_;
+
+ Rectangle window_;
+
+ Size patternSize_;
+
+ unsigned int xShift_;
+
+ SharedMemObject<SwIspStats> sharedStats_;
+ SwIspStats stats_;
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/source_paths.cpp b/src/libcamera/source_paths.cpp
new file mode 100644
index 00000000..1af5386a
--- /dev/null
+++ b/src/libcamera/source_paths.cpp
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Identify libcamera source and build paths
+ */
+
+#include "libcamera/internal/source_paths.h"
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <link.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <libcamera/base/utils.h>
+
+/**
+ * \file source_paths.h
+ * \brief Identify the build and source path of a not-yet-installed library
+ */
+
+/* musl doesn't declare _DYNAMIC in link.h, declare it manually. */
+extern ElfW(Dyn) _DYNAMIC[];
+
+namespace libcamera {
+
+namespace {
+
+/**
+ * \brief Check if libcamera is installed or not
+ *
+ * Utilise the build_rpath dynamic tag which is stripped out by meson at
+ * install time to determine at runtime if the library currently executing
+ * has been installed or not.
+ *
+ * \return True if libcamera is installed, false otherwise
+ */
+bool isLibcameraInstalled()
+{
+ /*
+ * DT_RUNPATH (DT_RPATH when the linker uses old dtags) is removed on
+ * install.
+ */
+ for (const ElfW(Dyn) *dyn = _DYNAMIC; dyn->d_tag != DT_NULL; ++dyn) {
+ if (dyn->d_tag == DT_RUNPATH || dyn->d_tag == DT_RPATH)
+ return false;
+ }
+
+ return true;
+}
+
+} /* namespace */
+
+namespace utils {
+
+/**
+ * \brief Retrieve the path to the build directory
+ *
+ * During development, it is useful to run libcamera binaries directly from the
+ * build directory without installing them. This function helps components that
+ * need to locate resources in the build tree, such as IPA modules or IPA proxy
+ * workers, by providing them with the path to the root of the build directory.
+ * Callers can then use it to complement or override searches in system-wide
+ * directories.
+ *
+ * If libcamera has been installed, the build directory path is not available
+ * and this function returns an empty string.
+ *
+ * \return The path to the build directory if running from a build, or an empty
+ * string otherwise
+ */
+std::string libcameraBuildPath()
+{
+ if (isLibcameraInstalled())
+ return std::string();
+
+ Dl_info info;
+
+ /* Look up our own symbol. */
+ int ret = dladdr(reinterpret_cast<void *>(libcameraBuildPath), &info);
+ if (ret == 0)
+ return std::string();
+
+ std::string path = dirname(info.dli_fname) + "/../../";
+
+ char *real = realpath(path.c_str(), nullptr);
+ if (!real)
+ return std::string();
+
+ path = real;
+ free(real);
+
+ return path + "/";
+}
+
+/**
+ * \brief Retrieve the path to the source directory
+ *
+ * During development, it is useful to run libcamera binaries directly from the
+ * build directory without installing them. This function helps components that
+ * need to locate resources in the source tree, such as IPA configuration
+ * files, by providing them with the path to the root of the source directory.
+ * Callers can then use it to complement or override searches in system-wide
+ * directories.
+ *
+ * If libcamera has been installed, the source directory path is not available
+ * and this function returns an empty string.
+ *
+ * \return The path to the source directory if running from a build directory,
+ * or an empty string otherwise
+ */
+std::string libcameraSourcePath()
+{
+ std::string path = libcameraBuildPath();
+ if (path.empty())
+ return std::string();
+
+ path += "source";
+
+ char *real = realpath(path.c_str(), nullptr);
+ if (!real)
+ return std::string();
+
+ path = real;
+ free(real);
+
+ struct stat statbuf;
+ int ret = stat(path.c_str(), &statbuf);
+ if (ret < 0 || (statbuf.st_mode & S_IFMT) != S_IFDIR)
+ return std::string();
+
+ return path + "/";
+}
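+
+/*
+ * An illustrative usage sketch (the resource path below is hypothetical):
+ * prefer in-tree resources when running from an uninstalled build.
+ *
+ *   std::string root = utils::libcameraSourcePath();
+ *   if (!root.empty())
+ *       tuningFile = root + "src/ipa/soft/data/uncalibrated.yaml";
+ */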
+
+} /* namespace utils */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index ef16aaa1..978d7275 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -2,21 +2,22 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.cpp - Video stream for a Camera
+ * Video stream for a Camera
*/
#include <libcamera/stream.h>
#include <algorithm>
#include <array>
-#include <iomanip>
#include <limits.h>
-#include <sstream>
+#include <ostream>
+#include <string>
+#include <vector>
-#include <libcamera/request.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
-#include "log.h"
-#include "utils.h"
+#include <libcamera/request.h>
/**
* \file stream.h
@@ -275,11 +276,12 @@ SizeRange StreamFormats::range(const PixelFormat &pixelformat) const
*/
/**
- * \todo This method is deprecated and should be removed once all pipeline
- * handlers provied StreamFormats.
+ * \todo This function is deprecated and should be removed once all pipeline
+ * handlers provide StreamFormats.
*/
StreamConfiguration::StreamConfiguration()
- : pixelFormat(0), stream_(nullptr)
+ : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+ stream_(nullptr)
{
}
@@ -287,7 +289,8 @@ StreamConfiguration::StreamConfiguration()
* \brief Construct a configuration with stream formats
*/
StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
- : pixelFormat(0), stream_(nullptr), formats_(formats)
+ : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+ stream_(nullptr), formats_(formats)
{
}
@@ -302,16 +305,57 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
/**
+ * \var StreamConfiguration::stride
+ * \brief Image stride for the stream, in bytes
+ *
+ * The stride value reports the number of bytes between the beginning of
+ * successive lines in an image buffer for this stream. The value is
+ * valid after successfully validating the configuration with a call to
+ * CameraConfiguration::validate(). For compressed formats (such as MJPEG),
+ * this value will be zero.
+ */
+
+/**
+ * \var StreamConfiguration::frameSize
+ * \brief Frame size for the stream, in bytes
+ *
+ * The frameSize value reports the number of bytes necessary to contain one
+ * frame of an image buffer for this stream. This total includes the bytes
+ * required for all image planes. The value is valid after successfully
+ * validating the configuration with a call to CameraConfiguration::validate().
+ */
+
+/**
* \var StreamConfiguration::bufferCount
* \brief Requested number of buffers to allocate for the stream
*/
/**
+ * \var StreamConfiguration::colorSpace
+ * \brief The ColorSpace for this stream
+ *
+ * This field allows a ColorSpace to be selected for this Stream.
+ *
+ * The field is optional and an application can choose to leave it unset.
+ * Platforms that support the use of color spaces may provide default
+ * values through the generateConfiguration() method. An application can
+ * override these when necessary.
+ *
+ * If a specific ColorSpace is requested but the Camera cannot deliver it,
+ * then the StreamConfiguration will be adjusted to a value that can be
+ * delivered. In this case the validate() method will indicate via its
+ * return value that the CameraConfiguration has been adjusted.
+ *
+ * Note that platforms will typically have different constraints on what
+ * color spaces can be supported and in what combinations.
+ */
+
+/**
* \fn StreamConfiguration::stream()
* \brief Retrieve the stream associated with the configuration
*
* When a camera is configured with Camera::configure() Stream instances are
- * associated with each stream configuration entry. This method retrieves the
+ * associated with each stream configuration entry. This function retrieves the
* associated Stream, which remains valid until the next call to
* Camera::configure() or Camera::release().
*
@@ -322,8 +366,8 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* \fn StreamConfiguration::setStream()
* \brief Associate a stream with a configuration
*
- * This method is meant for the PipelineHandler::configure() method and shall
- * not be called by applications.
+ * This function is meant for the PipelineHandler::configure() function and
+ * shall not be called by applications.
*
* \param[in] stream The stream
*/
@@ -332,10 +376,11 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
* \fn StreamConfiguration::formats()
* \brief Retrieve advisory stream format information
*
- * This method retrieves information about the pixel formats and sizes supported
- * by the stream configuration. The sizes are advisory and not all of them are
- * guaranteed to be supported by the stream. Users shall always inspect the size
- * in the stream configuration after calling CameraConfiguration::validate().
+ * This function retrieves information about the pixel formats and sizes
+ * supported by the stream configuration. The sizes are advisory and not all of
+ * them are guaranteed to be supported by the stream. Users shall always inspect
+ * the size in the stream configuration after calling
+ * CameraConfiguration::validate().
*
* \return Stream formats information
*/
@@ -347,7 +392,23 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
*/
std::string StreamConfiguration::toString() const
{
- return size.toString() + "-" + pixelFormat.toString();
+ std::stringstream ss;
+ ss << *this;
+
+ return ss.str();
+}
+
+/**
+ * \brief Insert a text representation of a StreamConfiguration into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] cfg The StreamConfiguration
+ * \return The output stream \a out
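+ *
+ * The printed representation is the same as the one returned by
+ * StreamConfiguration::toString(), for example:
+ * \code{.cpp}
+ * std::cout << cfg << std::endl; // e.g. "1920x1080-NV12"
+ * \endcode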
+ */
+std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg)
+{
+ out << cfg.size << "-" << cfg.pixelFormat;
+ return out;
}
/**
@@ -358,12 +419,11 @@ std::string StreamConfiguration::toString() const
* are specified by applications and passed to cameras, that then select the
* most appropriate streams and their default configurations.
*
+ * \var Raw
+ * The stream is intended to capture raw frames from the sensor.
* \var StillCapture
* The stream is intended to capture high-resolution, high-quality still images
* with low frame rate. The captured frames may be exposed with flash.
- * \var StillCaptureRaw
- * The stream is intended to capture high-resolution, raw still images with low
- * frame rate.
* \var VideoRecording
* The stream is intended to capture video for the purpose of recording or
* streaming. The video stream may produce a high frame rate and may be
@@ -375,9 +435,23 @@ std::string StreamConfiguration::toString() const
*/
/**
- * \typedef StreamRoles
- * \brief A vector of StreamRole
+ * \brief Insert a text representation of a StreamRole into an output stream
+ * \param[in] out The output stream
+ * \param[in] role The StreamRole
+ * \return The output stream \a out
*/
+std::ostream &operator<<(std::ostream &out, StreamRole role)
+{
+ static constexpr std::array<const char *, 4> names{
+ "Raw",
+ "StillCapture",
+ "VideoRecording",
+ "Viewfinder",
+ };
+
+ out << names[utils::to_underlying(role)];
+ return out;
+}
/**
* \class Stream
diff --git a/src/libcamera/sysfs.cpp b/src/libcamera/sysfs.cpp
new file mode 100644
index 00000000..3d9885b0
--- /dev/null
+++ b/src/libcamera/sysfs.cpp
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Miscellaneous utility functions to access sysfs
+ */
+
+#include "libcamera/internal/sysfs.h"
+
+#include <fstream>
+#include <sstream>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+/**
+ * \file sysfs.h
+ * \brief Miscellaneous utility functions to access sysfs
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(SysFs)
+
+namespace sysfs {
+
+/**
+ * \brief Retrieve the sysfs path for a character device
+ * \param[in] deviceNode Path to character device node
+ * \return The sysfs path on success or an empty string on failure
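+ *
+ * For example (illustrative device node and numbers):
+ * \code{.cpp}
+ * std::string path = sysfs::charDevPath("/dev/media0");
+ * // e.g. "/sys/dev/char/246:0"
+ * \endcode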
+ */
+std::string charDevPath(const std::string &deviceNode)
+{
+ struct stat st;
+ int ret = stat(deviceNode.c_str(), &st);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(SysFs, Error)
+ << "Unable to stat '" << deviceNode << "': "
+ << strerror(-ret);
+ return {};
+ }
+
+ std::ostringstream dev("/sys/dev/char/", std::ios_base::ate);
+ dev << major(st.st_rdev) << ":" << minor(st.st_rdev);
+
+ return dev.str();
+}
+
+/**
+ * \brief Retrieve the path of the firmware node for a device
+ * \param[in] device Path in sysfs to search
+ *
+ * Physical devices in a system are described by the system firmware. Depending
+ * on the type of platform, devices are identified using different naming
+ * schemes. The Linux kernel abstracts those differences with "firmware nodes".
+ * This function retrieves the firmware node path corresponding to the
+ * \a device.
+ *
+ * For DT-based systems, the path is the full name of the DT node that
+ * represents the device. For ACPI-based systems, the path is the absolute
+ * namespace path to the ACPI object that represents the device. In both cases,
+ * the path is guaranteed to be unique and persistent as long as the system
+ * firmware is not modified.
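+ *
+ * Illustrative (hypothetical) return values: a DT-based system may yield a
+ * node path such as `/base/soc/i2c@1/camera@10`, while an ACPI-based system
+ * may yield a namespace path such as `\_SB_.PCI0.I2C2.CAM0`.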
+ *
+ * \return The firmware node path on success or an empty string on failure
+ */
+std::string firmwareNodePath(const std::string &device)
+{
+ std::string fwPath, node;
+ struct stat st;
+
+ /* Lookup for DT-based systems */
+ node = device + "/of_node";
+ if (!stat(node.c_str(), &st)) {
+ char *ofPath = realpath(node.c_str(), nullptr);
+ if (!ofPath)
+ return {};
+
+ static const char prefix[] = "/sys/firmware/devicetree";
+ if (strncmp(ofPath, prefix, strlen(prefix)) == 0)
+ fwPath = ofPath + strlen(prefix);
+ else
+ fwPath = ofPath;
+
+ free(ofPath);
+
+ return fwPath;
+ }
+
+ /* Lookup for ACPI-based systems */
+ node = device + "/firmware_node/path";
+ if (File::exists(node)) {
+ std::ifstream file(node);
+ if (!file.is_open())
+ return {};
+
+ std::getline(file, fwPath);
+ file.close();
+
+ return fwPath;
+ }
+
+ return {};
+}
+
+} /* namespace sysfs */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/thread.cpp b/src/libcamera/thread.cpp
deleted file mode 100644
index 85293c18..00000000
--- a/src/libcamera/thread.cpp
+++ /dev/null
@@ -1,626 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * thread.cpp - Thread support
- */
-
-#include "thread.h"
-
-#include <atomic>
-#include <condition_variable>
-#include <list>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <libcamera/event_dispatcher.h>
-
-#include "event_dispatcher_poll.h"
-#include "log.h"
-#include "message.h"
-
-/**
- * \page thread Thread Support
- *
- * libcamera supports multi-threaded applications through a threading model that
- * sets precise rules to guarantee thread-safe usage of the API. Additionally,
- * libcamera makes internal use of threads, and offers APIs that simplify
- * interactions with application threads. Careful compliance with the threading
- * model ensures freedom from race conditions.
- *
- * \section thread-objects Threads and Objects
- *
- * Instances of the Object class and all its derived classes are thread-aware
- * and are bound to the thread they are created in. They are said to *live* in
- * a thread, and they interact with the event loop of their thread for the
- * purpose of message passing and signal delivery. Messages posted to the
- * object with Object::postMessage() will be delivered from the event loop of
- * the thread that the object lives in. Signals delivered to the object, unless
- * explicitly connected with ConnectionTypeDirect, will also be delivered from
- * the object thread's event loop.
- *
- * All Object instances created by libcamera are bound to an internal thread,
- * and applications don't need to provide an event loop to support them. Object
- * instances created by applications require an event loop. It is the
- * responsibility of applications to provide that event loop, either explicitly
- * through CameraManager::setEventDispatcher(), or by running the default event
- * loop provided by CameraManager::eventDispatcher() in their main thread. The
- * main thread of an application is the one that calls CameraManager::start().
- *
- * \section thread-signals Threads and Signals
- *
- * When sent to a receiver that does not inherit from the Object class, signals
- * are delivered synchronously in the thread of the sender. When the receiver
- * inherits from the Object class, delivery is by default asynchronous if the
- * sender and receiver live in different threads. In that case, the signal is
- * posted to the receiver's message queue and will be delivered from the
- * receiver's event loop, running in the receiver's thread. This mechanism can
- * be overridden by selecting a different connection type when calling
- * Signal::connect().
- *
- * Asynchronous signal delivery is used internally in libcamera, but is also
- * available to applications if desired. To use this feature, applications
- * shall create receiver classes that inherit from the Object class, and
- * provide an event loop to the CameraManager as explained above. Note that
- * Object instances created by the application are limited to living in the
- * application's main thread. Creating Object instances from another thread of
- * an application causes undefined behaviour.
- *
- * \section thread-reentrancy Reentrancy and Thread-Safety
- *
- * Throughout the documentation, several terms are used to define how classes and
- * their member functions can be used from multiple threads.
- *
- * - A **reentrant** function may be called simultaneously from multiple
- * threads if and only if each invocation uses a different instance of the
- * class. This is the default for all member functions not explicitly marked
- * otherwise.
- *
- * - \anchor thread-safe A **thread-safe** function may be called
- * simultaneously from multiple threads on the same instance of a class. A
- * thread-safe function is thus reentrant. Thread-safe functions may also be
- * called simultaneously with any other reentrant function of the same class
- * on the same instance.
- *
- * - \anchor thread-bound A **thread-bound** function may be called only from
- * the thread that the class instance lives in (see section \ref
- * thread-objects). For instances of classes that do not derive from the
- * Object class, this is the thread in which the instance was created. A
- * thread-bound function is not thread-safe, and may or may not be reentrant.
- *
- * Neither reentrancy nor thread-safety, in this context, means that a function
- * may be called simultaneously from the same thread, for instance from a
- * callback invoked by the function. This may deadlock and isn't allowed unless
- * separately documented.
- *
- * A class is defined as reentrant, thread-safe or thread-bound if all its
- * member functions are reentrant, thread-safe or thread-bound respectively.
- * Some member functions may additionally be documented as having more
- * specific thread-related attributes.
- *
- * Most classes are reentrant but not thread-safe, as making them fully
- * thread-safe would incur locking costs considered prohibitive for the
- * expected use cases.
- */
-
-/**
- * \file thread.h
- * \brief Thread support
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Thread)
-
-class ThreadMain;
-
-/**
- * \brief A queue of posted messages
- */
-class MessageQueue
-{
-public:
- /**
- * \brief List of queued Message instances
- */
- std::list<std::unique_ptr<Message>> list_;
- /**
- * \brief Protects the \ref list_
- */
- Mutex mutex_;
-};
-
-/**
- * \brief Thread-local internal data
- */
-class ThreadData
-{
-public:
- ThreadData()
- : thread_(nullptr), running_(false), dispatcher_(nullptr)
- {
- }
-
- static ThreadData *current();
-
-private:
- friend class Thread;
- friend class ThreadMain;
-
- Thread *thread_;
- bool running_;
- pid_t tid_;
-
- Mutex mutex_;
-
- std::atomic<EventDispatcher *> dispatcher_;
-
- std::condition_variable cv_;
- std::atomic<bool> exit_;
- int exitCode_;
-
- MessageQueue messages_;
-};
-
-/**
- * \brief Thread wrapper for the main thread
- */
-class ThreadMain : public Thread
-{
-public:
- ThreadMain()
- {
- data_->running_ = true;
- }
-
-protected:
- void run() override
- {
- LOG(Thread, Fatal) << "The main thread can't be restarted";
- }
-};
-
-static thread_local ThreadData *currentThreadData = nullptr;
-static ThreadMain mainThread;
-
-/**
- * \brief Retrieve thread-local internal data for the current thread
- * \return The thread-local internal data for the current thread
- */
-ThreadData *ThreadData::current()
-{
- if (currentThreadData)
- return currentThreadData;
-
- /*
- * The main thread doesn't receive thread-local data when it is
- * started; set it here.
- */
- ThreadData *data = mainThread.data_;
- data->tid_ = syscall(SYS_gettid);
- currentThreadData = data;
- return data;
-}
-
-/**
- * \typedef Mutex
- * \brief An alias for std::mutex
- */
-
-/**
- * \typedef MutexLocker
- * \brief An alias for std::unique_lock<std::mutex>
- */
-
-/**
- * \class Thread
- * \brief A thread of execution
- *
- * The Thread class is a wrapper around std::thread that handles integration
- * with the Object, Signal and EventDispatcher classes.
- *
- * Thread instances by default run an event loop until the exit() method is
- * called. A custom event dispatcher may be installed with
- * setEventDispatcher(), otherwise a poll-based event dispatcher is used. This
- * behaviour can be overridden by overloading the run() method.
- *
- * \context This class is \threadsafe.
- */
-
-/**
- * \brief Create a thread
- */
-Thread::Thread()
-{
- data_ = new ThreadData;
- data_->thread_ = this;
-}
-
-Thread::~Thread()
-{
- delete data_->dispatcher_.load(std::memory_order_relaxed);
- delete data_;
-}
-
-/**
- * \brief Start the thread
- */
-void Thread::start()
-{
- MutexLocker locker(data_->mutex_);
-
- if (data_->running_)
- return;
-
- data_->running_ = true;
- data_->exitCode_ = -1;
- data_->exit_.store(false, std::memory_order_relaxed);
-
- thread_ = std::thread(&Thread::startThread, this);
-}
-
-void Thread::startThread()
-{
- struct ThreadCleaner {
- ThreadCleaner(Thread *thread, void (Thread::*cleaner)())
- : thread_(thread), cleaner_(cleaner)
- {
- }
- ~ThreadCleaner()
- {
- (thread_->*cleaner_)();
- }
-
- Thread *thread_;
- void (Thread::*cleaner_)();
- };
-
- /*
- * Make sure the thread is cleaned up even if the run method exits
- * abnormally (for instance via a direct call to pthread_cancel()).
- */
- thread_local ThreadCleaner cleaner(this, &Thread::finishThread);
-
- data_->tid_ = syscall(SYS_gettid);
- currentThreadData = data_;
-
- run();
-}
-
-/**
- * \brief Enter the event loop
- *
- * This method enters an event loop based on the event dispatcher instance for
- * the thread, and blocks until the exit() method is called. It is meant to be
- * called within the thread from the run() method and shall not be called
- * outside of the thread.
- *
- * \return The exit code passed to the exit() method
- */
-int Thread::exec()
-{
- MutexLocker locker(data_->mutex_);
-
- EventDispatcher *dispatcher = eventDispatcher();
-
- locker.unlock();
-
- while (!data_->exit_.load(std::memory_order_acquire))
- dispatcher->processEvents();
-
- locker.lock();
-
- return data_->exitCode_;
-}
-
-/**
- * \brief Main method of the thread
- *
- * When the thread is started with start(), it calls this method in the context
- * of the new thread. The run() method can be overloaded to perform custom
- * work. When this method returns the thread execution is stopped, and the \ref
- * finished signal is emitted.
- *
- * The base implementation just calls exec().
- */
-void Thread::run()
-{
- exec();
-}
-
-void Thread::finishThread()
-{
- data_->mutex_.lock();
- data_->running_ = false;
- data_->mutex_.unlock();
-
- finished.emit(this);
- data_->cv_.notify_all();
-}
-
-/**
- * \brief Stop the thread's event loop
- * \param[in] code The exit code
- *
- * This method interrupts the event loop started by the exec() method, causing
- * exec() to return \a code.
- *
- * Calling exit() on a thread that reimplements the run() method and doesn't
- * call exec() will likely have no effect.
- */
-void Thread::exit(int code)
-{
- data_->exitCode_ = code;
- data_->exit_.store(true, std::memory_order_release);
-
- EventDispatcher *dispatcher = data_->dispatcher_.load(std::memory_order_relaxed);
- if (!dispatcher)
- return;
-
- dispatcher->interrupt();
-}
-
-/**
- * \brief Wait for the thread to finish
- * \param[in] duration Maximum wait duration
- *
- * This function waits until the thread finishes or the \a duration has
- * elapsed, whichever happens first. If \a duration is equal to
- * utils::duration::max(), the wait never times out. If the thread is not
- * running the function returns immediately.
- *
- * \return True if the thread has finished, or false if the wait timed out
- */
-bool Thread::wait(utils::duration duration)
-{
- bool finished = true;
-
- {
- MutexLocker locker(data_->mutex_);
-
- if (duration == utils::duration::max())
- data_->cv_.wait(locker, [&]() { return !data_->running_; });
- else
- finished = data_->cv_.wait_for(locker, duration,
- [&]() { return !data_->running_; });
- }
-
- if (thread_.joinable())
- thread_.join();
-
- return finished;
-}
-
-/**
- * \brief Check if the thread is running
- *
- * A Thread instance is considered as running once the underlying thread has
- * started. This method guarantees that it returns true after the start()
- * method returns, and false after the wait() method returns.
- *
- * \return True if the thread is running, false otherwise
- */
-bool Thread::isRunning()
-{
- MutexLocker locker(data_->mutex_);
- return data_->running_;
-}
-
-/**
- * \var Thread::finished
- * \brief Signal the end of thread execution
- */
-
-/**
- * \brief Retrieve the Thread instance for the current thread
- * \return The Thread instance for the current thread
- */
-Thread *Thread::current()
-{
- ThreadData *data = ThreadData::current();
- return data->thread_;
-}
-
-/**
- * \brief Retrieve the ID of the current thread
- *
- * The thread ID corresponds to the Linux thread ID (TID) as returned by the
- * gettid system call.
- *
- * \return The ID of the current thread
- */
-pid_t Thread::currentId()
-{
- ThreadData *data = ThreadData::current();
- return data->tid_;
-}
-
-/**
- * \brief Set the event dispatcher
- * \param[in] dispatcher Pointer to the event dispatcher
- *
- * Threads that run an event loop require an event dispatcher to integrate
- * event notification and timers with the loop. Users that want to provide
- * their own event dispatcher shall call this method once and only once before
- * the thread is started with start(). If no event dispatcher is provided, a
- * default poll-based implementation will be used.
- *
- * The Thread takes ownership of the event dispatcher and will delete it when
- * the thread is destroyed.
- */
-void Thread::setEventDispatcher(std::unique_ptr<EventDispatcher> dispatcher)
-{
- if (data_->dispatcher_.load(std::memory_order_relaxed)) {
- LOG(Thread, Warning) << "Event dispatcher is already set";
- return;
- }
-
- data_->dispatcher_.store(dispatcher.release(),
- std::memory_order_relaxed);
-}
-
-/**
- * \brief Retrieve the event dispatcher
- *
- * This method retrieves the event dispatcher set with setEventDispatcher().
- * If no dispatcher has been set, a default poll-based implementation is created
- * and returned, and no custom event dispatcher may be installed anymore.
- *
- * The returned event dispatcher is valid until the thread is destroyed.
- *
- * \return Pointer to the event dispatcher
- */
-EventDispatcher *Thread::eventDispatcher()
-{
- if (!data_->dispatcher_.load(std::memory_order_relaxed))
- data_->dispatcher_.store(new EventDispatcherPoll(),
- std::memory_order_release);
-
- return data_->dispatcher_.load(std::memory_order_relaxed);
-}
-
-/**
- * \brief Post a message to the thread for the \a receiver
- * \param[in] msg The message
- * \param[in] receiver The receiver
- *
- * This method stores the message \a msg in the message queue of the thread for
- * the \a receiver and wakes up the thread's event loop. Message ownership is
- * passed to the thread, and the message will be deleted after being delivered.
- *
- * Messages are delivered through the thread's event loop. If the thread is not
- * running its event loop the message will not be delivered until the event
- * loop gets started.
- *
- * If the \a receiver is not bound to this thread the behaviour is undefined.
- *
- * \sa exec()
- */
-void Thread::postMessage(std::unique_ptr<Message> msg, Object *receiver)
-{
- msg->receiver_ = receiver;
-
- ASSERT(data_ == receiver->thread()->data_);
-
- MutexLocker locker(data_->messages_.mutex_);
- data_->messages_.list_.push_back(std::move(msg));
- receiver->pendingMessages_++;
- locker.unlock();
-
- EventDispatcher *dispatcher =
- data_->dispatcher_.load(std::memory_order_acquire);
- if (dispatcher)
- dispatcher->interrupt();
-}
-
-/**
- * \brief Remove all posted messages for the \a receiver
- * \param[in] receiver The receiver
- *
- * If the \a receiver is not bound to this thread the behaviour is undefined.
- */
-void Thread::removeMessages(Object *receiver)
-{
- ASSERT(data_ == receiver->thread()->data_);
-
- MutexLocker locker(data_->messages_.mutex_);
- if (!receiver->pendingMessages_)
- return;
-
- std::vector<std::unique_ptr<Message>> toDelete;
- for (std::unique_ptr<Message> &msg : data_->messages_.list_) {
- if (!msg)
- continue;
- if (msg->receiver_ != receiver)
- continue;
-
- /*
- * Move the message to the pending deletion list to delete it
- * after releasing the lock. The messages list element will
- * contain a null pointer, and will be removed when dispatching
- * messages.
- */
- toDelete.push_back(std::move(msg));
- receiver->pendingMessages_--;
- }
-
- ASSERT(!receiver->pendingMessages_);
- locker.unlock();
-
- toDelete.clear();
-}
-
-/**
- * \brief Dispatch all posted messages for this thread
- */
-void Thread::dispatchMessages()
-{
- MutexLocker locker(data_->messages_.mutex_);
-
- while (!data_->messages_.list_.empty()) {
- std::unique_ptr<Message> msg = std::move(data_->messages_.list_.front());
- data_->messages_.list_.pop_front();
- if (!msg)
- continue;
-
- Object *receiver = msg->receiver_;
- ASSERT(data_ == receiver->thread()->data_);
-
- receiver->pendingMessages_--;
-
- locker.unlock();
- receiver->message(msg.get());
- locker.lock();
- }
-}
-
-/**
- * \brief Move an \a object and all its children to the thread
- * \param[in] object The object
- */
-void Thread::moveObject(Object *object)
-{
- ThreadData *currentData = object->thread_->data_;
- ThreadData *targetData = data_;
-
- MutexLocker lockerFrom(currentData->messages_.mutex_, std::defer_lock);
- MutexLocker lockerTo(targetData->messages_.mutex_, std::defer_lock);
- std::lock(lockerFrom, lockerTo);
-
- moveObject(object, currentData, targetData);
-}
-
-void Thread::moveObject(Object *object, ThreadData *currentData,
- ThreadData *targetData)
-{
- /* Move pending messages to the message queue of the new thread. */
- if (object->pendingMessages_) {
- unsigned int movedMessages = 0;
-
- for (std::unique_ptr<Message> &msg : currentData->messages_.list_) {
- if (!msg)
- continue;
- if (msg->receiver_ != object)
- continue;
-
- targetData->messages_.list_.push_back(std::move(msg));
- movedMessages++;
- }
-
- if (movedMessages) {
- EventDispatcher *dispatcher =
- targetData->dispatcher_.load(std::memory_order_acquire);
- if (dispatcher)
- dispatcher->interrupt();
- }
- }
-
- object->thread_ = this;
-
- /* Move all children. */
- for (auto child : object->children_)
- moveObject(child, currentData, targetData);
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/timer.cpp b/src/libcamera/timer.cpp
deleted file mode 100644
index 24da5152..00000000
--- a/src/libcamera/timer.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * timer.cpp - Generic timer
- */
-
-#include <libcamera/timer.h>
-
-#include <chrono>
-
-#include <libcamera/camera_manager.h>
-#include <libcamera/event_dispatcher.h>
-
-#include "log.h"
-#include "message.h"
-#include "thread.h"
-#include "utils.h"
-
-/**
- * \file timer.h
- * \brief Generic timer
- */
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(Timer)
-
-/**
- * \class Timer
- * \brief Single-shot timer interface
- *
- * The Timer class models a single-shot timer that is started with start() and
- * emits the \ref timeout signal when it times out.
- *
- * Once started the timer will run until it times out. It can be stopped with
- * stop(), and once it times out or is stopped, can be started again with
- * start().
- *
- * The timer deadline is specified as either a duration in milliseconds or an
- * absolute time point. If the deadline is set to the current time or to the
- * past, the timer will time out immediately when execution returns to the
- * event loop of the timer's thread.
- *
- * Timers run in the thread they belong to, and thus emit the \ref timeout
- * signal from that thread. To avoid race conditions they must not be started
- * or stopped from a different thread; attempts to do so will be rejected and
- * logged, and may cause undefined behaviour.
- */
-
-/**
- * \brief Construct a timer
- * \param[in] parent The parent Object
- */
-Timer::Timer(Object *parent)
- : Object(parent), running_(false)
-{
-}
-
-Timer::~Timer()
-{
- stop();
-}
-
-/**
- * \fn Timer::start(unsigned int msec)
- * \brief Start or restart the timer with a timeout of \a msec
- * \param[in] msec The timer duration in milliseconds
- *
- * If the timer is already running it will be stopped and restarted.
- *
- * \context This function is \threadbound.
- */
-
-/**
- * \brief Start or restart the timer with a timeout of \a duration
- * \param[in] duration The timer duration in milliseconds
- *
- * If the timer is already running it will be stopped and restarted.
- *
- * \context This function is \threadbound.
- */
-void Timer::start(std::chrono::milliseconds duration)
-{
- start(utils::clock::now() + duration);
-}
-
-/**
- * \brief Start or restart the timer with a \a deadline
- * \param[in] deadline The timer deadline
- *
- * If the timer is already running it will be stopped and restarted.
- *
- * \context This function is \threadbound.
- */
-void Timer::start(std::chrono::steady_clock::time_point deadline)
-{
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer can't be started from another thread";
- return;
- }
-
- deadline_ = deadline;
-
- LOG(Timer, Debug)
- << "Starting timer " << this << ": deadline "
- << utils::time_point_to_string(deadline_);
-
- if (isRunning())
- unregisterTimer();
-
- registerTimer();
-}
-
-/**
- * \brief Stop the timer
- *
- * After this function returns the timer is guaranteed not to emit the
- * \ref timeout signal.
- *
- * If the timer is not running this function performs no operation.
- *
- * \context This function is \threadbound.
- */
-void Timer::stop()
-{
- if (!isRunning())
- return;
-
- if (Thread::current() != thread()) {
- LOG(Timer, Error) << "Timer can't be stopped from another thread";
- return;
- }
-
- unregisterTimer();
-}
-
-void Timer::registerTimer()
-{
- thread()->eventDispatcher()->registerTimer(this);
- running_ = true;
-}
-
-void Timer::unregisterTimer()
-{
- running_ = false;
- thread()->eventDispatcher()->unregisterTimer(this);
-}
-
-/**
- * \brief Check if the timer is running
- * \return True if the timer is running, false otherwise
- */
-bool Timer::isRunning() const
-{
- return running_;
-}
-
-/**
- * \fn Timer::deadline()
- * \brief Retrieve the timer deadline
- * \return The timer deadline
- */
-
-/**
- * \var Timer::timeout
- * \brief Signal emitted when the timer times out
- *
- * The timer pointer is passed as a parameter.
- */
-
-void Timer::message(Message *msg)
-{
- if (msg->type() == Message::ThreadMoveMessage) {
- if (isRunning()) {
- unregisterTimer();
- invokeMethod(&Timer::registerTimer,
- ConnectionTypeQueued);
- }
- }
-
- Object::message(msg);
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/tracepoints.cpp b/src/libcamera/tracepoints.cpp
new file mode 100644
index 00000000..90662d12
--- /dev/null
+++ b/src/libcamera/tracepoints.cpp
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Tracepoints with lttng
+ */
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+
+#include "libcamera/internal/tracepoints.h"
diff --git a/src/libcamera/transform.cpp b/src/libcamera/transform.cpp
new file mode 100644
index 00000000..9fe8b562
--- /dev/null
+++ b/src/libcamera/transform.cpp
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * 2D plane transforms.
+ */
+
+#include <libcamera/transform.h>
+
+#include <libcamera/orientation.h>
+
+/**
+ * \file transform.h
+ * \brief Enum to represent and manipulate 2D plane transforms
+ */
+
+namespace libcamera {
+
+/**
+ * \enum Transform
+ * \brief Enum to represent a 2D plane transform
+ *
+ * The Transform can take 8 distinct values, representing the usual 2D plane
+ * transforms listed below. Each of these transforms can be constructed
+ * out of 3 basic operations, namely a horizontal flip (mirror), a vertical
+ * flip, and a transposition (about the main diagonal). The transforms are
+ * encoded such that a single bit indicates the presence of each of the 3
+ * basic operations:
+ *
+ * - bit 0 - presence of a horizontal flip
+ * - bit 1 - presence of a vertical flip
+ * - bit 2 - presence of a transposition.
+ *
+ * We regard these 3 basic operations as being applied in a specific order:
+ * first the two flip operations (actually they commute, so the order between
+ * them is unimportant) and finally any transpose operation.
+ *
+ * Functions are provided to manipulate directly the bits within the transform
+ * encoding, but there are also higher-level functions to invert and compose
+ * transforms. Transforms are composed such that the left transform is
+ * applied first and the right transform second: `t0 * t1` first applies
+ * `t0` and then `t1`, matching the behaviour of operator*().
+ *
+ * Finally, we have a total of 8 distinct transformations, as follows (a
+ * couple of them have additional synonyms for convenience). We illustrate each
+ * with its nominal effect on a rectangle with vertices labelled A, B, C and D.
+ *
+ * \sa https://en.wikipedia.org/wiki/Examples_of_groups#dihedral_group_of_order_8
+ *
+ * The set of 2D plane transforms is also known as the symmetry group of a
+ * square, described in the link. Note that the group can be generated by
+ * only 2 elements (the horizontal flip and a 90 degree rotation, for
+ * example), however, the encoding used here makes the presence of the vertical
+ * flip explicit.
+ *
+ * \var Transform::Identity
+ *
+ * Identity transform.
+~~~
+ A-B A-B
+Input image | | goes to output image | |
+ C-D C-D
+~~~
+ * Numeric value: 0 (no bits set).
+ *
+ * \var Transform::Rot0
+ *
+ * Synonym for Transform::Identity (zero degree rotation).
+ *
+ * \var Transform::HFlip
+ *
+ * Horizontal flip.
+~~~
+ A-B B-A
+Input image | | goes to output image | |
+ C-D D-C
+~~~
+ * Numeric value: 1 (horizontal flip bit set only).
+ *
+ * \var Transform::VFlip
+ *
+ * Vertical flip.
+~~~
+ A-B C-D
+Input image | | goes to output image | |
+ C-D A-B
+~~~
+ * Numeric value: 2 (vertical flip bit set only).
+ *
+ * \var Transform::HVFlip
+ *
+ * Horizontal and vertical flip (identical to a 180 degree rotation).
+~~~
+ A-B D-C
+Input image | | goes to output image | |
+ C-D B-A
+~~~
+ * Numeric value: 3 (horizontal and vertical flip bits set).
+ *
+ * \var Transform::Rot180
+ *
+ * Synonym for `HVFlip` (180 degree rotation).
+ *
+ * \var Transform::Transpose
+ *
+ * Transpose (about the main diagonal).
+~~~
+ A-B A-C
+Input image | | goes to output image | |
+ C-D B-D
+~~~
+ * Numeric value: 4 (transpose bit set only).
+ *
+ * \var Transform::Rot270
+ *
+ * Rotation by 270 degrees clockwise (90 degrees anticlockwise).
+~~~
+ A-B B-D
+Input image | | goes to output image | |
+ C-D A-C
+~~~
+ * Numeric value: 5 (transpose and horizontal flip bits set).
+ *
+ * \var Transform::Rot90
+ *
+ * Rotation by 90 degrees clockwise (270 degrees anticlockwise).
+~~~
+ A-B C-A
+Input image | | goes to output image | |
+ C-D D-B
+~~~
+ * Numeric value: 6 (transpose and vertical flip bits set).
+ *
+ * \var Transform::Rot180Transpose
+ *
+ * Rotation by 180 degrees followed by transpose (alternatively, transposition
+ * about the "opposite diagonal").
+~~~
+ A-B D-B
+Input image | | goes to output image | |
+ C-D C-A
+~~~
+ * Numeric value: 7 (all bits set).
+ */
+
+/**
+ * \fn operator &(Transform t0, Transform t1)
+ * \brief Apply bitwise AND operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator |(Transform t0, Transform t1)
+ * \brief Apply bitwise OR operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator ^(Transform t0, Transform t1)
+ * \brief Apply bitwise XOR operator between the bits in the two transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator &=(Transform &t0, Transform t1)
+ * \brief Apply bitwise AND-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator |=(Transform &t0, Transform t1)
+ * \brief Apply bitwise OR-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \fn operator ^=(Transform &t0, Transform t1)
+ * \brief Apply bitwise XOR-assignment operator between the bits in the two
+ * transforms
+ * \param[in] t0 The first transform
+ * \param[in] t1 The second transform
+ */
+
+/**
+ * \brief Compose two transforms by applying \a t0 first then \a t1
+ * \param[in] t0 The first transform to apply
+ * \param[in] t1 The second transform to apply
+ *
+ * Compose two transforms into a transform that is equivalent to first applying
+ * \a t0 and then applying \a t1. For example, `HFlip * Transpose` performs
+ * `HFlip` first and then the `Transpose` yielding `Rot270`, as shown below.
+~~~
+ A-B B-A B-D
+Input image | | -> HFlip -> | | -> Transpose -> | | = Rot270
+ C-D D-C A-C
+~~~
+ * Note that composition is generally non-commutative for Transforms, and not
+ * the same as XOR-ing the underlying bit representations.
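+ *
+ * A minimal illustrative check:
+ * \code{.cpp}
+ * Transform t = Transform::HFlip * Transform::Transpose;
+ * // t == Transform::Rot270
+ * \endcode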
+ *
+ * \return A Transform equivalent to applying \a t0 and then \a t1
+ */
+Transform operator*(Transform t0, Transform t1)
+{
+ /*
+ * Reorder the operations so that we imagine doing t0's transpose
+ * (if any) after t1's flips. The effect is to swap t1's hflips for
+ * vflips and vice versa, after which we can just xor all the bits.
+ */
+ Transform reordered = t1;
+ if (!!(t0 & Transform::Transpose)) {
+ reordered = t1 & Transform::Transpose;
+ if (!!(t1 & Transform::HFlip))
+ reordered |= Transform::VFlip;
+ if (!!(t1 & Transform::VFlip))
+ reordered |= Transform::HFlip;
+ }
+
+ return reordered ^ t0;
+}
+
+/**
+ * \brief Invert a transform
+ * \param[in] t The transform to be inverted
+ *
+ * That is, we return the transform such that `t * (-t)` and `(-t) * t` both
+ * yield the identity transform.
+ */
+Transform operator-(Transform t)
+{
+ /* All are self-inverses, except for Rot270 and Rot90. */
+ static const Transform inverses[] = {
+ Transform::Identity,
+ Transform::HFlip,
+ Transform::VFlip,
+ Transform::HVFlip,
+ Transform::Transpose,
+ Transform::Rot90,
+ Transform::Rot270,
+ Transform::Rot180Transpose
+ };
+
+ return inverses[static_cast<int>(t)];
+}
+
+/**
+ * \fn operator!(Transform t)
+ * \brief Return `true` if the transform is the `Identity`, otherwise `false`
+ * \param[in] t The transform to be tested
+ */
+
+/**
+ * \fn operator~(Transform t)
+ * \brief Return the transform with all the bits inverted individually
+ * \param[in] t The transform of which the bits will be inverted
+ *
+ * This inverts the bits that encode the transform in a bitwise manner. Note
+ * that this is not the proper inverse of transform \a t (for which use \a
+ * operator-).
+ */
+
+/**
+ * \brief Return the transform representing a rotation of the given angle
+ * clockwise
+ * \param[in] angle The angle of rotation in a clockwise sense. Negative values
+ * can be used to represent anticlockwise rotations
+ * \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
+ * otherwise `false`
+ * \return The transform corresponding to the rotation if \a success was set to
+ * `true`, otherwise the `Identity` transform
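+ *
+ * For example (illustrative):
+ * \code{.cpp}
+ * bool ok;
+ * Transform t = transformFromRotation(-90, &ok);
+ * // ok == true, t == Transform::Rot270
+ * \endcode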
+ */
+Transform transformFromRotation(int angle, bool *success)
+{
+ angle = angle % 360;
+ if (angle < 0)
+ angle += 360;
+
+ if (success != nullptr)
+ *success = true;
+
+ switch (angle) {
+ case 0:
+ return Transform::Identity;
+ case 90:
+ return Transform::Rot90;
+ case 180:
+ return Transform::Rot180;
+ case 270:
+ return Transform::Rot270;
+ }
+
+ if (success != nullptr)
+ *success = false;
+
+ return Transform::Identity;
+}
+
+namespace {
+
+/**
+ * \brief Return the transform representing \a orientation
+ * \param[in] orientation The orientation to convert
+ * \return The transform corresponding to \a orientation
+ */
+Transform transformFromOrientation(const Orientation &orientation)
+{
+ switch (orientation) {
+ case Orientation::Rotate0:
+ return Transform::Identity;
+ case Orientation::Rotate0Mirror:
+ return Transform::HFlip;
+ case Orientation::Rotate180:
+ return Transform::Rot180;
+ case Orientation::Rotate180Mirror:
+ return Transform::VFlip;
+ case Orientation::Rotate90Mirror:
+ return Transform::Transpose;
+ case Orientation::Rotate90:
+ return Transform::Rot90;
+ case Orientation::Rotate270Mirror:
+ return Transform::Rot180Transpose;
+ case Orientation::Rotate270:
+ return Transform::Rot270;
+ }
+
+ return Transform::Identity;
+}
+
+} /* namespace */
+
+/**
+ * \brief Return the Transform that applied to \a o2 gives \a o1
+ * \param o1 The Orientation to obtain
+ * \param o2 The base Orientation
+ *
+ * This operation can be used to easily compute the Transform to apply to a
+ * base orientation \a o2 to get the desired orientation \a o1.
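+ *
+ * An illustrative example:
+ * \code{.cpp}
+ * Transform t = Orientation::Rotate90 / Orientation::Rotate0;
+ * // t == Transform::Rot90: applying Rot90 to Rotate0 yields Rotate90
+ * \endcode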
+ *
+ * \return A Transform that applied to \a o2 gives \a o1
+ */
+Transform operator/(const Orientation &o1, const Orientation &o2)
+{
+ Transform t1 = transformFromOrientation(o1);
+ Transform t2 = transformFromOrientation(o2);
+
+ return -t2 * t1;
+}
+
+/**
+ * \brief Apply the Transform \a t on the orientation \a o
+ * \param o The orientation
+ * \param t The transform to apply on \a o
+ * \return The Orientation resulting from applying \a t on \a o
+ */
+Orientation operator*(const Orientation &o, const Transform &t)
+{
+ /*
+ * Apply a Transform corresponding to the orientation first and
+ * then apply \a t to it.
+ */
+ switch (transformFromOrientation(o) * t) {
+ case Transform::Identity:
+ return Orientation::Rotate0;
+ case Transform::HFlip:
+ return Orientation::Rotate0Mirror;
+ case Transform::VFlip:
+ return Orientation::Rotate180Mirror;
+ case Transform::Rot180:
+ return Orientation::Rotate180;
+ case Transform::Transpose:
+ return Orientation::Rotate90Mirror;
+ case Transform::Rot270:
+ return Orientation::Rotate270;
+ case Transform::Rot90:
+ return Orientation::Rotate90;
+ case Transform::Rot180Transpose:
+ return Orientation::Rotate270Mirror;
+ }
+
+ return Orientation::Rotate0;
+}
+
+/**
+ * \brief Return a character string describing the transform
+ * \param[in] t The transform to be described.
+ */
+const char *transformToString(Transform t)
+{
+ static const char *strings[] = {
+ "identity",
+ "hflip",
+ "vflip",
+ "hvflip",
+ "transpose",
+ "rot270",
+ "rot90",
+ "rot180transpose"
+ };
+
+ return strings[static_cast<int>(t)];
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/utils.cpp b/src/libcamera/utils.cpp
deleted file mode 100644
index 58ee7cc1..00000000
--- a/src/libcamera/utils.cpp
+++ /dev/null
@@ -1,374 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * utils.cpp - Miscellaneous utility functions
- */
-
-#include "utils.h"
-
-#include <dlfcn.h>
-#include <elf.h>
-#include <iomanip>
-#include <link.h>
-#include <sstream>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-/**
- * \file utils.h
- * \brief Miscellaneous utility functions
- */
-
-/* musl doesn't declare _DYNAMIC in link.h, declare it manually. */
-extern ElfW(Dyn) _DYNAMIC[];
-
-namespace libcamera {
-
-namespace utils {
-
-/**
- * \def ARRAY_SIZE(array)
- * \brief Determine the number of elements in the static array.
- */
-
-/**
- * \brief Strip the directory prefix from the path
- * \param[in] path The path to process
- *
- * basename is implemented differently across different C libraries. This
- * implementation matches the one provided by the GNU libc, and does not
- * modify its input parameter.
- *
- * \return A pointer within the given path without any leading directory
- * components.
- */
-const char *basename(const char *path)
-{
- const char *base = strrchr(path, '/');
- return base ? base + 1 : path;
-}
-
-/**
- * \brief Get an environment variable
- * \param[in] name The name of the variable to return
- *
- * The environment list is searched to find the variable 'name', and the
- * corresponding string is returned.
- *
- * If 'secure execution' is required then this function always returns NULL to
- * avoid vulnerabilities that could occur if set-user-ID or set-group-ID
- * programs accidentally trust the environment.
- *
- * \return A pointer to the value in the environment or NULL if the requested
- * environment variable doesn't exist or if secure execution is required.
- */
-char *secure_getenv(const char *name)
-{
-#if HAVE_SECURE_GETENV
- return ::secure_getenv(name);
-#else
- if (issetugid())
- return NULL;
-
- return getenv(name);
-#endif
-}
-
-/**
- * \brief Identify the dirname portion of a path
- * \param[in] path The full path to parse
- *
- * This function conforms with the behaviour of the %dirname() function as
- * defined by POSIX.
- *
- * \return A string of the directory component of the path
- */
-std::string dirname(const std::string &path)
-{
- if (path.empty())
- return ".";
-
- /*
- * Skip all trailing slashes. If the path is only made of slashes,
- * return "/".
- */
- size_t pos = path.size() - 1;
- while (path[pos] == '/') {
- if (!pos)
- return "/";
- pos--;
- }
-
- /*
- * Find the previous slash. If the path contains no non-trailing slash,
- * return ".".
- */
- while (path[pos] != '/') {
- if (!pos)
- return ".";
- pos--;
- }
-
- /*
- * Return the directory name up to (but not including) any trailing
- * slash. If this would result in an empty string, return "/".
- */
- while (path[pos] == '/') {
- if (!pos)
- return "/";
- pos--;
- }
-
- return path.substr(0, pos + 1);
-}
-
-/**
- * \fn libcamera::utils::set_overlap(InputIt1 first1, InputIt1 last1,
- * InputIt2 first2, InputIt2 last2)
- * \brief Count the number of elements in the intersection of two ranges
- *
- * Count the number of elements in the intersection of the sorted ranges [\a
- * first1, \a last1) and [\a first2, \a last2). Elements are compared using
- * operator< and the ranges must be sorted with respect to the same.
- *
- * \return The number of elements in the intersection of the two ranges
- */
-
-/**
- * \fn libcamera::utils::clamp(const T& v, const T& lo, const T& hi)
- * \param[in] v The value to clamp
- * \param[in] lo The lower boundary to clamp v to
- * \param[in] hi The higher boundary to clamp v to
- * \return lo if v is less than lo, hi if v is greater than hi, otherwise v
- */
-
-/**
- * \typedef clock
- * \brief The libcamera clock (monotonic)
- */
-
-/**
- * \typedef duration
- * \brief The libcamera duration related to libcamera::utils::clock
- */
-
-/**
- * \typedef time_point
- * \brief The libcamera time point related to libcamera::utils::clock
- */
-
-/**
- * \brief Convert a duration to a timespec
- * \param[in] value The duration
- * \return A timespec expressing the duration
- */
-struct timespec duration_to_timespec(const duration &value)
-{
- uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(value).count();
- struct timespec ts;
- ts.tv_sec = nsecs / 1000000000ULL;
- ts.tv_nsec = nsecs % 1000000000ULL;
- return ts;
-}
-
-/**
- * \brief Convert a time point to a string representation
- * \param[in] time The time point
- * \return A string representing the time point in hh:mm:ss.nanoseconds format
- */
-std::string time_point_to_string(const time_point &time)
-{
- uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch()).count();
- unsigned int secs = nsecs / 1000000000ULL;
-
- std::ostringstream ossTimestamp;
- ossTimestamp.fill('0');
- ossTimestamp << secs / (60 * 60) << ":"
- << std::setw(2) << (secs / 60) % 60 << ":"
- << std::setw(2) << secs % 60 << "."
- << std::setw(9) << nsecs % 1000000000ULL;
- return ossTimestamp.str();
-}
-
-std::basic_ostream<char, std::char_traits<char>> &
-operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h)
-{
- stream << "0x";
-
- std::ostream::fmtflags flags = stream.setf(std::ios_base::hex,
- std::ios_base::basefield);
- std::streamsize width = stream.width(h.w);
- char fill = stream.fill('0');
-
- stream << h.v;
-
- stream.flags(flags);
- stream.width(width);
- stream.fill(fill);
-
- return stream;
-}
-
-/**
- * \fn hex(T value, unsigned int width)
- * \brief Write a hexadecimal value to an output string
- * \param value The value
- * \param width The width
- *
- * Return an object of unspecified type such that, if \a os is the name of an
- * output stream of type std::ostream, and T is an integer type, then the
- * expression
- *
- * \code{.cpp}
- * os << utils::hex(value)
- * \endcode
- *
- * will output the \a value to the stream in hexadecimal form with the base
- * prefix and the filling character set to '0'. The field width is set to \a
- * width if specified to a non-zero value, or to the native width of type T
- * otherwise. The \a os stream configuration is not modified.
- */
-
-/**
- * \brief Copy a string with a size limit
- * \param[in] dst The destination string
- * \param[in] src The source string
- * \param[in] size The size of the destination string
- *
- * This function copies the null-terminated string \a src to \a dst with a limit
- * of \a size - 1 characters, and null-terminates the result if \a size is
- * larger than 0. If \a src is larger than \a size - 1, \a dst is truncated.
- *
- * \return The size of \a src
- */
-size_t strlcpy(char *dst, const char *src, size_t size)
-{
- if (size) {
- strncpy(dst, src, size);
- dst[size - 1] = '\0';
- }
-
- return strlen(src);
-}
-
-details::StringSplitter::StringSplitter(const std::string &str, const std::string &delim)
- : str_(str), delim_(delim)
-{
-}
-
-details::StringSplitter::iterator::iterator(const details::StringSplitter *ss, std::string::size_type pos)
- : ss_(ss), pos_(pos)
-{
- next_ = ss_->str_.find(ss_->delim_, pos_);
-}
-
-details::StringSplitter::iterator &details::StringSplitter::iterator::operator++()
-{
- pos_ = next_;
- if (pos_ != std::string::npos) {
- pos_ += ss_->delim_.length();
- next_ = ss_->str_.find(ss_->delim_, pos_);
- }
-
- return *this;
-}
-
-std::string details::StringSplitter::iterator::operator*() const
-{
- std::string::size_type count;
- count = next_ != std::string::npos ? next_ - pos_ : next_;
- return ss_->str_.substr(pos_, count);
-}
-
-bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const
-{
- return pos_ != other.pos_;
-}
-
-details::StringSplitter::iterator details::StringSplitter::begin() const
-{
- return iterator(this, 0);
-}
-
-details::StringSplitter::iterator details::StringSplitter::end() const
-{
- return iterator(this, std::string::npos);
-}
-
-/**
- * \fn split(const std::string &str, const std::string &delim)
- * \brief Split a string based on a delimiter
- * \param[in] str The string to split
- * \param[in] delim The delimiter string
- *
- * This function splits the string \a str into substrings based on the
- * delimiter \a delim. It returns an object of unspecified type that can be
- * used in a range-based for loop and yields the substrings in sequence.
- *
- * \return An object that can be used in a range-based for loop to iterate over
- * the substrings
- */
-details::StringSplitter split(const std::string &str, const std::string &delim)
-{
- /** \todo Try to avoid copies of str and delim */
- return details::StringSplitter(str, delim);
-}
-
-/**
- * \brief Check if libcamera is installed or not
- *
- * Utilise the build_rpath dynamic tag which is stripped out by meson at
- * install time to determine at runtime if the library currently executing
- * has been installed or not.
- *
- * \return True if libcamera is installed, false otherwise
- */
-bool isLibcameraInstalled()
-{
- /*
- * DT_RUNPATH (DT_RPATH when the linker uses old dtags) is removed on
- * install.
- */
- for (const ElfW(Dyn) *dyn = _DYNAMIC; dyn->d_tag != DT_NULL; ++dyn) {
- if (dyn->d_tag == DT_RUNPATH || dyn->d_tag == DT_RPATH)
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Retrieve the path to the build directory
- *
- * During development, it is useful to run libcamera binaries directly from the
- * build directory without installing them. This function helps components that
- * need to locate resources, such as IPA modules or IPA proxy workers, by
- * providing them with the path to the root of the build directory. Callers can
- * then use it to complement or override searches in system-wide directories.
- *
- * If libcamera has been installed, the build directory path is not available
- * and this function returns an empty string.
- *
- * \return The path to the build directory if running from a build, or an empty
- * string otherwise
- */
-std::string libcameraBuildPath()
-{
- if (isLibcameraInstalled())
- return std::string();
-
- Dl_info info;
-
- /* Look up our own symbol. */
- int ret = dladdr(reinterpret_cast<void *>(libcameraBuildPath), &info);
- if (ret == 0)
- return std::string();
-
- return dirname(info.dli_fname) + "/../../";
-}
-
-} /* namespace utils */
-
-} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_controls.cpp b/src/libcamera/v4l2_controls.cpp
deleted file mode 100644
index 8e2415f2..00000000
--- a/src/libcamera/v4l2_controls.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * v4l2_controls.cpp - V4L2 Controls Support
- */
-
-#include "v4l2_controls.h"
-
-#include <string.h>
-
-/**
- * \file v4l2_controls.h
- * \brief Support for V4L2 Controls using the V4L2 Extended Controls APIs
- *
- * The V4L2 Control API allows applications to inspect and modify sets of
- * configurable parameters on a video device or subdevice. The nature of the
- * parameters an application can modify using the control framework depends on
- * what the driver implements support for, and on the characteristics of the
- * underlying hardware platform. Generally controls are used to modify user
- * visible settings, such as the image brightness and exposure time, or
- * non-standard parameters which cannot be controlled through the V4L2 format
- * negotiation API.
- *
- * Controls are identified by a numerical ID, defined by the V4L2 kernel headers
- * and have an associated type. Each control has a value, which is the data that
- * can be modified with V4L2Device::setControls() or retrieved with
- * V4L2Device::getControls().
- *
- * The control's type along with the control's flags define the type of the
- * control's value content. Controls can transport a single data value stored in
- * a variable inside the control, or they can deal with more complex data
- * types, such as arrays of matrices, stored in contiguous memory locations
- * associated with the control and called 'the payload'. Such controls
- * are called 'compound controls' and are currently not supported by the
- * libcamera V4L2 control framework.
- *
- * libcamera implements support for controls using the V4L2 Extended Control
- * API, which allows future handling of controls with payloads of arbitrary
- * sizes.
- *
- * The libcamera V4L2 Controls framework operates on lists of controls, wrapped
- * by the ControlList class, to match the V4L2 extended controls API. The
- * interface to set and get control is implemented by the V4L2Device class, and
- * this file only provides the data type definitions.
- *
- * \todo Add support for compound controls
- */
-
-namespace libcamera {
-
-namespace {
-
-std::string v4l2_ctrl_name(const struct v4l2_query_ext_ctrl &ctrl)
-{
- size_t len = strnlen(ctrl.name, sizeof(ctrl.name));
- return std::string(static_cast<const char *>(ctrl.name), len);
-}
-
-ControlType v4l2_ctrl_type(const struct v4l2_query_ext_ctrl &ctrl)
-{
- switch (ctrl.type) {
- case V4L2_CTRL_TYPE_U8:
- return ControlTypeByte;
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- return ControlTypeBool;
-
- case V4L2_CTRL_TYPE_INTEGER:
- return ControlTypeInteger32;
-
- case V4L2_CTRL_TYPE_INTEGER64:
- return ControlTypeInteger64;
-
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_BUTTON:
- case V4L2_CTRL_TYPE_BITMASK:
- case V4L2_CTRL_TYPE_INTEGER_MENU:
- /*
- * More precise types may be needed, for now use a 32-bit
- * integer type.
- */
- return ControlTypeInteger32;
-
- default:
- return ControlTypeNone;
- }
-}
-
-} /* namespace */
-
-/**
- * \class V4L2ControlId
- * \brief V4L2 control static metadata
- *
- * The V4L2ControlId class is a specialisation of the ControlId for V4L2
- * controls.
- */
-
-/**
- * \brief Construct a V4L2ControlId from a struct v4l2_query_ext_ctrl
- * \param[in] ctrl The struct v4l2_query_ext_ctrl as returned by the kernel
- */
-V4L2ControlId::V4L2ControlId(const struct v4l2_query_ext_ctrl &ctrl)
- : ControlId(ctrl.id, v4l2_ctrl_name(ctrl), v4l2_ctrl_type(ctrl))
-{
-}
-
-/**
- * \class V4L2ControlInfo
- * \brief Convenience specialisation of ControlInfo for V4L2 controls
- *
- * The V4L2ControlInfo class is a specialisation of the ControlInfo for V4L2
- * controls. It offers a convenience constructor from a struct
- * v4l2_query_ext_ctrl, and is otherwise equivalent to the ControlInfo class.
- */
-
-/**
- * \brief Construct a V4L2ControlInfo from a struct v4l2_query_ext_ctrl
- * \param[in] ctrl The struct v4l2_query_ext_ctrl as returned by the kernel
- */
-V4L2ControlInfo::V4L2ControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
-{
- switch (ctrl.type) {
- case V4L2_CTRL_TYPE_U8:
- ControlInfo::operator=(ControlInfo(static_cast<uint8_t>(ctrl.minimum),
- static_cast<uint8_t>(ctrl.maximum),
- static_cast<uint8_t>(ctrl.default_value)));
- break;
-
- case V4L2_CTRL_TYPE_BOOLEAN:
- ControlInfo::operator=(ControlInfo(static_cast<bool>(ctrl.minimum),
- static_cast<bool>(ctrl.maximum),
- static_cast<bool>(ctrl.default_value)));
- break;
-
- case V4L2_CTRL_TYPE_INTEGER64:
- ControlInfo::operator=(ControlInfo(static_cast<int64_t>(ctrl.minimum),
- static_cast<int64_t>(ctrl.maximum),
- static_cast<int64_t>(ctrl.default_value)));
- break;
-
- default:
- ControlInfo::operator=(ControlInfo(static_cast<int32_t>(ctrl.minimum),
- static_cast<int32_t>(ctrl.maximum),
- static_cast<int32_t>(ctrl.default_value)));
- break;
- }
-}
-
-} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp
index 03e30516..2f65a43a 100644
--- a/src/libcamera/v4l2_device.cpp
+++ b/src/libcamera/v4l2_device.cpp
@@ -2,21 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.cpp - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
-#include "v4l2_device.h"
+#include "libcamera/internal/v4l2_device.h"
#include <fcntl.h>
-#include <iomanip>
+#include <map>
+#include <stdint.h>
+#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
+#include <vector>
-#include "log.h"
-#include "utils.h"
-#include "v4l2_controls.h"
+#include <linux/v4l2-mediabus.h>
+
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/sysfs.h"
/**
* \file v4l2_device.h
@@ -31,9 +39,9 @@ LOG_DEFINE_CATEGORY(V4L2)
* \class V4L2Device
* \brief Base class for V4L2VideoDevice and V4L2Subdevice
*
- * The V4L2Device class groups together the methods and fields common to
+ * The V4L2Device class groups together the functions and fields common to
* both the V4L2VideoDevice and V4L2Subdevice classes, and provides a base
- * class with methods to open and close the device node associated with the
+ * class with functions to open and close the device node associated with the
* device and to perform IOCTL system calls on it.
*
* The V4L2Device class cannot be instantiated directly, as its constructor
@@ -49,7 +57,8 @@ LOG_DEFINE_CATEGORY(V4L2)
* at open() time, and the \a logTag to prefix log messages with.
*/
V4L2Device::V4L2Device(const std::string &deviceNode)
- : deviceNode_(deviceNode), fd_(-1)
+ : deviceNode_(deviceNode), fdEventNotifier_(nullptr),
+ frameStartEnabled_(false)
{
}
@@ -76,17 +85,17 @@ int V4L2Device::open(unsigned int flags)
return -EBUSY;
}
- int ret = syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), flags);
- if (ret < 0) {
- ret = -errno;
- LOG(V4L2, Error) << "Failed to open V4L2 device: "
+ UniqueFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ flags | O_CLOEXEC));
+ if (!fd.isValid()) {
+ int ret = -errno;
+ LOG(V4L2, Error) << "Failed to open V4L2 device '"
+ << deviceNode_ << "': "
<< strerror(-ret);
return ret;
}
- fd_ = ret;
-
- listControls();
+ setFd(std::move(fd));
return 0;
}
@@ -95,24 +104,30 @@ int V4L2Device::open(unsigned int flags)
* \brief Set the file descriptor of a V4L2 device
* \param[in] fd The file descriptor handle
*
- * This method allows a device to provide an already opened file descriptor
+ * This function allows a device to provide an already opened file descriptor
* referring to the V4L2 device node, instead of opening it with open(). This
* can be used for V4L2 M2M devices where a single video device node is used for
* both the output and capture devices, or when receiving an open file
* descriptor in a context that doesn't have permission to open the device node
* itself.
*
- * This method and the open() method are mutually exclusive, only one of the two
- * shall be used for a V4L2Device instance.
+ * This function and the open() function are mutually exclusive; only one of the
+ * two shall be used for a V4L2Device instance.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Device::setFd(int fd)
+int V4L2Device::setFd(UniqueFD fd)
{
if (isOpen())
return -EBUSY;
- fd_ = fd;
+ fd_ = std::move(fd);
+
+ fdEventNotifier_ = new EventNotifier(fd_.get(), EventNotifier::Exception);
+ fdEventNotifier_->activated.connect(this, &V4L2Device::eventAvailable);
+ fdEventNotifier_->setEnabled(false);
+
+ listControls();
return 0;
}
@@ -127,10 +142,9 @@ void V4L2Device::close()
if (!isOpen())
return;
- if (::close(fd_) < 0)
- LOG(V4L2, Error) << "Failed to close V4L2 device: "
- << strerror(errno);
- fd_ = -1;
+ delete fdEventNotifier_;
+
+ fd_.reset();
}
/**
@@ -147,114 +161,127 @@ void V4L2Device::close()
/**
* \brief Read controls from the device
- * \param[inout] ctrls The list of controls to read
- *
- * This method reads the value of all controls contained in \a ctrls, and stores
- * their values in the corresponding \a ctrls entry.
+ * \param[in] ids The list of controls to read, specified by their ID
*
- * If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is a compound control, or if any
- * other error occurs during validation of the requested controls, no control is
- * read and this method returns -EINVAL.
+ * This function reads the value of all controls contained in \a ids, and
+ * returns their values as a ControlList.
*
- * If an error occurs while reading the controls, the index of the first control
- * that couldn't be read is returned. The value of all controls below that index
- * are updated in \a ctrls, while the value of all the other controls are not
- * changed.
+ * If any control in \a ids is not supported by the device, is disabled (i.e.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
+ * during validation of the requested controls, no control is read and this
+ * function returns an empty control list.
*
- * \return 0 on success or an error code otherwise
- * \retval -EINVAL One of the control is not supported or not accessible
- * \retval i The index of the control that failed
+ * \return The control values in a ControlList on success, or an empty list on
+ * error
*/
-int V4L2Device::getControls(ControlList *ctrls)
+ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids)
{
- unsigned int count = ctrls->size();
- if (count == 0)
- return 0;
+ if (ids.empty())
+ return {};
- struct v4l2_ext_control v4l2Ctrls[count];
- memset(v4l2Ctrls, 0, sizeof(v4l2Ctrls));
+ ControlList ctrls{ controls_ };
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- unsigned int id = ctrl.first;
+ for (uint32_t id : ids) {
const auto iter = controls_.find(id);
if (iter == controls_.end()) {
LOG(V4L2, Error)
<< "Control " << utils::hex(id) << " not found";
- return -EINVAL;
+ return {};
}
+ ctrls.set(id, {});
+ }
+
+ std::vector<v4l2_ext_control> v4l2Ctrls(ids.size());
+ memset(v4l2Ctrls.data(), 0, sizeof(v4l2_ext_control) * ctrls.size());
+
+ unsigned int i = 0;
+ for (auto &ctrl : ctrls) {
+ unsigned int id = ctrl.first;
const struct v4l2_query_ext_ctrl &info = controlInfo_[id];
- ControlValue &value = ctrl.second;
+
+ v4l2_ext_control &v4l2Ctrl = v4l2Ctrls[i++];
+ v4l2Ctrl.id = id;
if (info.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) {
ControlType type;
+ ControlValue &value = ctrl.second;
+ Span<uint8_t> data;
switch (info.type) {
case V4L2_CTRL_TYPE_U8:
type = ControlTypeByte;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u8 = data.data();
+ break;
+
+ case V4L2_CTRL_TYPE_U16:
+ type = ControlTypeUnsigned16;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ break;
+
+ case V4L2_CTRL_TYPE_U32:
+ type = ControlTypeUnsigned32;
+ value.reserve(type, true, info.elems);
+ data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
break;
default:
LOG(V4L2, Error)
<< "Unsupported payload control type "
<< info.type;
- return -EINVAL;
+ return {};
}
- value.reserve(type, true, info.elems);
- Span<uint8_t> data = value.data();
-
- v4l2Ctrls[i].p_u8 = data.data();
- v4l2Ctrls[i].size = data.size();
+ v4l2Ctrl.size = data.size();
}
-
- v4l2Ctrls[i].id = id;
- i++;
}
struct v4l2_ext_controls v4l2ExtCtrls = {};
v4l2ExtCtrls.which = V4L2_CTRL_WHICH_CUR_VAL;
- v4l2ExtCtrls.controls = v4l2Ctrls;
- v4l2ExtCtrls.count = count;
+ v4l2ExtCtrls.controls = v4l2Ctrls.data();
+ v4l2ExtCtrls.count = v4l2Ctrls.size();
int ret = ioctl(VIDIOC_G_EXT_CTRLS, &v4l2ExtCtrls);
if (ret) {
unsigned int errorIdx = v4l2ExtCtrls.error_idx;
/* Generic validation error. */
- if (errorIdx == 0 || errorIdx >= count) {
+ if (errorIdx == 0 || errorIdx >= v4l2Ctrls.size()) {
LOG(V4L2, Error) << "Unable to read controls: "
<< strerror(-ret);
- return -EINVAL;
+ return {};
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to read control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to read control " << utils::hex(id)
<< ": " << strerror(-ret);
- count = errorIdx - 1;
- ret = errorIdx;
+
+ v4l2Ctrls.resize(errorIdx);
}
- updateControls(ctrls, v4l2Ctrls, count);
+ updateControls(&ctrls, v4l2Ctrls);
- return ret;
+ return ctrls;
}
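
As a usage sketch (not part of the diff; the device pointer and control IDs are illustrative, and the calling code is assumed to run in a pipeline handler), the new ID-based API reads several controls in one VIDIOC_G_EXT_CTRLS call:

    /* Read exposure and gain together; an empty list indicates failure. */
    ControlList ctrls = device->getControls({ V4L2_CID_EXPOSURE,
                                              V4L2_CID_ANALOGUE_GAIN });
    if (ctrls.empty())
        return -EINVAL;

    int32_t exposure = ctrls.get(V4L2_CID_EXPOSURE).get<int32_t>();
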
/**
* \brief Write controls to the device
* \param[in] ctrls The list of controls to write
*
- * This method writes the value of all controls contained in \a ctrls, and
+ * This function writes the value of all controls contained in \a ctrls, and
* stores the values actually applied to the device in the corresponding
* \a ctrls entry.
*
* If any control in \a ctrls is not supported by the device, is disabled (i.e.
- * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, is a
- * compound control, or if any other error occurs during validation of
- * the requested controls, no control is written and this method returns
- * -EINVAL.
+ * has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
+ * error occurs during validation of the requested controls, no control is
+ * written and this function returns -EINVAL.
*
* If an error occurs while writing the controls, the index of the first
* control that couldn't be written is returned. All controls below that index
@@ -267,30 +294,64 @@ int V4L2Device::getControls(ControlList *ctrls)
*/
int V4L2Device::setControls(ControlList *ctrls)
{
- unsigned int count = ctrls->size();
- if (count == 0)
+ if (ctrls->empty())
return 0;
- struct v4l2_ext_control v4l2Ctrls[count];
- memset(v4l2Ctrls, 0, sizeof(v4l2Ctrls));
+ std::vector<v4l2_ext_control> v4l2Ctrls(ctrls->size());
+ memset(v4l2Ctrls.data(), 0, sizeof(v4l2_ext_control) * ctrls->size());
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- unsigned int id = ctrl.first;
+ for (auto [ctrl, i] = std::pair(ctrls->begin(), 0u); i < ctrls->size(); ctrl++, i++) {
+ const unsigned int id = ctrl->first;
const auto iter = controls_.find(id);
if (iter == controls_.end()) {
LOG(V4L2, Error)
<< "Control " << utils::hex(id) << " not found";
return -EINVAL;
}
-
- v4l2Ctrls[i].id = id;
+ v4l2_ext_control &v4l2Ctrl = v4l2Ctrls[i];
+ v4l2Ctrl.id = id;
/* Set the v4l2_ext_control value for the write operation. */
- ControlValue &value = ctrl.second;
+ ControlValue &value = ctrl->second;
switch (iter->first->type()) {
+ case ControlTypeUnsigned16: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint16_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeUnsigned32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<uint32_t>();
+ }
+
+ break;
+ }
+
+ case ControlTypeInteger32: {
+ if (value.isArray()) {
+ Span<uint8_t> data = value.data();
+ v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data());
+ v4l2Ctrl.size = data.size();
+ } else {
+ v4l2Ctrl.value = value.get<int32_t>();
+ }
+
+ break;
+ }
+
case ControlTypeInteger64:
- v4l2Ctrls[i].value64 = value.get<int64_t>();
+ v4l2Ctrl.value64 = value.get<int64_t>();
break;
case ControlTypeByte: {
@@ -302,50 +363,127 @@ int V4L2Device::setControls(ControlList *ctrls)
}
Span<uint8_t> data = value.data();
- v4l2Ctrls[i].p_u8 = data.data();
- v4l2Ctrls[i].size = data.size();
+ v4l2Ctrl.p_u8 = data.data();
+ v4l2Ctrl.size = data.size();
break;
}
default:
/* \todo To be changed to support strings. */
- v4l2Ctrls[i].value = value.get<int32_t>();
+ v4l2Ctrl.value = value.get<int32_t>();
break;
}
-
- i++;
}
struct v4l2_ext_controls v4l2ExtCtrls = {};
v4l2ExtCtrls.which = V4L2_CTRL_WHICH_CUR_VAL;
- v4l2ExtCtrls.controls = v4l2Ctrls;
- v4l2ExtCtrls.count = count;
+ v4l2ExtCtrls.controls = v4l2Ctrls.data();
+ v4l2ExtCtrls.count = v4l2Ctrls.size();
int ret = ioctl(VIDIOC_S_EXT_CTRLS, &v4l2ExtCtrls);
if (ret) {
unsigned int errorIdx = v4l2ExtCtrls.error_idx;
/* Generic validation error. */
- if (errorIdx == 0 || errorIdx >= count) {
+ if (errorIdx == 0 || errorIdx >= v4l2Ctrls.size()) {
LOG(V4L2, Error) << "Unable to set controls: "
<< strerror(-ret);
return -EINVAL;
}
/* A specific control failed. */
- LOG(V4L2, Error) << "Unable to set control " << errorIdx
+ const unsigned int id = v4l2Ctrls[errorIdx].id;
+ LOG(V4L2, Error) << "Unable to set control " << utils::hex(id)
<< ": " << strerror(-ret);
- count = errorIdx - 1;
+
+ v4l2Ctrls.resize(errorIdx);
ret = errorIdx;
}
- updateControls(ctrls, v4l2Ctrls, count);
+ updateControls(ctrls, v4l2Ctrls);
return ret;
}
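
The write-side counterpart, sketched under the same assumptions as the read example above, builds the ControlList against the device's ControlInfoMap before applying it:

    ControlList ctrls(device->controls());
    ctrls.set(V4L2_CID_EXPOSURE, 1000);
    ctrls.set(V4L2_CID_ANALOGUE_GAIN, 256);

    int ret = device->setControls(&ctrls);
    if (ret)
        /* ret > 0 is the index of the first control that failed. */
        return ret < 0 ? ret : -EINVAL;
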
/**
+ * \brief Retrieve the v4l2_query_ext_ctrl information for the given control
+ * \param[in] id The V4L2 control id
+ * \return A pointer to the v4l2_query_ext_ctrl structure for the given
+ * control, or a null pointer if not found
+ */
+const struct v4l2_query_ext_ctrl *V4L2Device::controlInfo(uint32_t id) const
+{
+ const auto it = controlInfo_.find(id);
+ if (it == controlInfo_.end())
+ return nullptr;
+
+ return &it->second;
+}
+
+/**
+ * \brief Retrieve the device path in sysfs
+ *
+ * This function returns the sysfs path to the physical device backing the V4L2
+ * device. The path is guaranteed to be an absolute path, without any symbolic
+ * link.
+ *
+ * It includes the sysfs mount point prefix.
+ *
+ * \return The device path in sysfs
+ */
+std::string V4L2Device::devicePath() const
+{
+ std::string devicePath = sysfs::charDevPath(deviceNode_) + "/device";
+
+ char *realPath = realpath(devicePath.c_str(), nullptr);
+ if (!realPath) {
+ LOG(V4L2, Fatal)
+ << "Can not resolve device path for " << devicePath;
+ return {};
+ }
+
+ std::string path{ realPath };
+ free(realPath);
+
+ return path;
+}
+
+/**
+ * \brief Enable or disable frame start event notification
+ * \param[in] enable True to enable frame start events, false to disable them
+ *
+ * This function enables or disables generation of frame start events. Once
+ * enabled, the events are signalled through the frameStart signal.
+ *
+ * \return 0 on success, a negative error code otherwise
+ */
+int V4L2Device::setFrameStartEnabled(bool enable)
+{
+ if (frameStartEnabled_ == enable)
+ return 0;
+
+ struct v4l2_event_subscription event{};
+ event.type = V4L2_EVENT_FRAME_SYNC;
+
+ unsigned long request = enable ? VIDIOC_SUBSCRIBE_EVENT
+ : VIDIOC_UNSUBSCRIBE_EVENT;
+ int ret = ioctl(request, &event);
+ if (enable && ret)
+ return ret;
+
+ fdEventNotifier_->setEnabled(enable);
+ frameStartEnabled_ = enable;
+
+ return ret;
+}
+
+/**
+ * \var V4L2Device::frameStart
+ * \brief A Signal emitted when capture of a frame has started
+ */
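
A hedged sketch of the intended usage pattern follows; MyPipelineHandler and its slot are hypothetical names, and error handling is elided:

    /* At stream start, subscribe to V4L2_EVENT_FRAME_SYNC notifications. */
    video->frameStart.connect(this, &MyPipelineHandler::frameStarted);
    video->setFrameStartEnabled(true);

    /* Slot invoked from eventAvailable() with the frame sequence number. */
    void MyPipelineHandler::frameStarted(uint32_t sequence)
    {
        /* e.g. queue per-frame sensor controls for frame 'sequence'. */
    }
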
+
+/**
* \brief Perform an IOCTL system call on the device node
* \param[in] request The IOCTL request code
* \param[in] argp A pointer to the IOCTL argument
@@ -357,7 +495,7 @@ int V4L2Device::ioctl(unsigned long request, void *argp)
* Printing out an error message is usually better performed
* in the caller, which can provide more context.
*/
- if (::ioctl(fd_, request, argp) < 0)
+ if (::ioctl(fd_.get(), request, argp) < 0)
return -errno;
return 0;
@@ -375,6 +513,150 @@ int V4L2Device::ioctl(unsigned long request, void *argp)
* \return The V4L2 device file descriptor, -1 if the device node is not open
*/
+/**
+ * \brief Retrieve the libcamera control type associated with the V4L2 control
+ * \param[in] ctrlType The V4L2 control type
+ * \return The ControlType associated to \a ctrlType
+ */
+ControlType V4L2Device::v4l2CtrlType(uint32_t ctrlType)
+{
+ switch (ctrlType) {
+ case V4L2_CTRL_TYPE_U8:
+ return ControlTypeByte;
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ return ControlTypeBool;
+
+ case V4L2_CTRL_TYPE_U16:
+ return ControlTypeUnsigned16;
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlTypeUnsigned32;
+
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ControlTypeInteger32;
+
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ControlTypeInteger64;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ /*
+ * More precise types may be needed, for now use a 32-bit
+ * integer type.
+ */
+ return ControlTypeInteger32;
+
+ default:
+ return ControlTypeNone;
+ }
+}
+
+/**
+ * \brief Create a ControlId for a V4L2 control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
+ * \return A ControlId associated to \a ctrl
+ */
+std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl &ctrl)
+{
+ const size_t len = strnlen(ctrl.name, sizeof(ctrl.name));
+ const std::string name(static_cast<const char *>(ctrl.name), len);
+ const ControlType type = v4l2CtrlType(ctrl.type);
+
+ ControlId::DirectionFlags flags;
+ if (ctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)
+ flags = ControlId::Direction::Out;
+ else if (ctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ flags = ControlId::Direction::In;
+ else
+ flags = ControlId::Direction::In | ControlId::Direction::Out;
+
+ return std::make_unique<ControlId>(ctrl.id, name, "v4l2", type, flags);
+}
+
+/**
+ * \brief Create a ControlInfo for a V4L2 control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 control
+ * \return A ControlInfo that represents \a ctrl
+ */
+std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl)
+{
+ switch (ctrl.type) {
+ case V4L2_CTRL_TYPE_U8:
+ return ControlInfo(static_cast<uint8_t>(ctrl.minimum),
+ static_cast<uint8_t>(ctrl.maximum),
+ static_cast<uint8_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U16:
+ return ControlInfo(static_cast<uint16_t>(ctrl.minimum),
+ static_cast<uint16_t>(ctrl.maximum),
+ static_cast<uint16_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_U32:
+ return ControlInfo(static_cast<uint32_t>(ctrl.minimum),
+ static_cast<uint32_t>(ctrl.maximum),
+ static_cast<uint32_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ return ControlInfo(static_cast<bool>(ctrl.minimum),
+ static_cast<bool>(ctrl.maximum),
+ static_cast<bool>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_INTEGER64:
+ return ControlInfo(static_cast<int64_t>(ctrl.minimum),
+ static_cast<int64_t>(ctrl.maximum),
+ static_cast<int64_t>(ctrl.default_value));
+
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ return v4l2MenuControlInfo(ctrl);
+
+ default:
+ return ControlInfo(static_cast<int32_t>(ctrl.minimum),
+ static_cast<int32_t>(ctrl.maximum),
+ static_cast<int32_t>(ctrl.default_value));
+ }
+}
+
+/**
+ * \brief Create ControlInfo for a V4L2 menu control
+ * \param[in] ctrl The v4l2_query_ext_ctrl that represents a V4L2 menu control
+ *
+ * The created ControlInfo contains indices acquired by VIDIOC_QUERYMENU.
+ *
+ * \return A ControlInfo that represents \a ctrl
+ */
+std::optional<ControlInfo> V4L2Device::v4l2MenuControlInfo(const struct v4l2_query_ext_ctrl &ctrl)
+{
+ std::vector<ControlValue> indices;
+ struct v4l2_querymenu menu = {};
+ menu.id = ctrl.id;
+
+ if (ctrl.minimum < 0)
+ return std::nullopt;
+
+ for (int32_t index = ctrl.minimum; index <= ctrl.maximum; ++index) {
+ menu.index = index;
+ if (ioctl(VIDIOC_QUERYMENU, &menu) != 0)
+ continue;
+
+ indices.push_back(index);
+ }
+
+ /*
+ * Some faulty UVC devices are known to return an empty menu control.
+ * Controls without a menu option cannot be set or read, so they are
+ * not exposed.
+ */
+ if (indices.size() == 0)
+ return std::nullopt;
+
+ return ControlInfo(indices,
+ ControlValue(static_cast<int32_t>(ctrl.default_value)));
+}
+
/*
* \brief List and store information about all controls supported by the
* V4L2 device
@@ -384,7 +666,6 @@ void V4L2Device::listControls()
ControlInfoMap::Map ctrls;
struct v4l2_query_ext_ctrl ctrl = {};
- /* \todo Add support for menu and compound controls. */
while (1) {
ctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL |
V4L2_CTRL_FLAG_NEXT_COMPOUND;
@@ -404,6 +685,8 @@ void V4L2Device::listControls()
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
break;
/* \todo Support other control types. */
default:
@@ -413,13 +696,64 @@ void V4L2Device::listControls()
continue;
}
- controlIds_.emplace_back(std::make_unique<V4L2ControlId>(ctrl));
+ LOG(V4L2, Debug) << "Control: " << ctrl.name
+ << " (" << utils::hex(ctrl.id) << ")";
+
+ controlIds_.emplace_back(v4l2ControlId(ctrl));
+ controlIdMap_[ctrl.id] = controlIds_.back().get();
controlInfo_.emplace(ctrl.id, ctrl);
- ctrls.emplace(controlIds_.back().get(), V4L2ControlInfo(ctrl));
+ std::optional<ControlInfo> info = v4l2ControlInfo(ctrl);
+
+ if (!info) {
+ LOG(V4L2, Error)
+ << "Control " << ctrl.name
+ << " cannot be registered";
+
+ continue;
+ }
+
+ ctrls.emplace(controlIds_.back().get(), *info);
}
- controls_ = std::move(ctrls);
+ controls_ = ControlInfoMap(std::move(ctrls), controlIdMap_);
+}
+
+/**
+ * \brief Update the information for all device controls
+ *
+ * The V4L2Device class caches information about all controls supported by the
+ * device and exposes it through the controls() and controlInfo() functions.
+ * Control information may change at runtime, for instance when formats on a
+ * subdev are modified. When this occurs, this function can be used to refresh
+ * control information. The information is refreshed in-place, all pointers to
+ * v4l2_query_ext_ctrl instances previously returned by controlInfo() and
+ * iterators to the ControlInfoMap returned by controls() remain valid.
+ *
+ * Note that control information isn't refreshed automatically as it may be an
+ * expensive operation. The V4L2Device users are responsible for calling this
+ * function when required, based on their usage pattern of the class.
+ */
+void V4L2Device::updateControlInfo()
+{
+ for (auto &[controlId, info] : controls_) {
+ unsigned int id = controlId->id();
+
+ /*
+ * Assume controlInfo_ has a corresponding entry, as it has been
+ * generated by listControls().
+ */
+ struct v4l2_query_ext_ctrl &ctrl = controlInfo_[id];
+
+ if (ioctl(VIDIOC_QUERY_EXT_CTRL, &ctrl)) {
+ LOG(V4L2, Debug)
+ << "Could not refresh control "
+ << utils::hex(id);
+ continue;
+ }
+
+ info = *v4l2ControlInfo(ctrl);
+ }
}
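
As an illustrative call pattern (the blanking control is just an example of a limit that commonly changes with the configured format):

    /* The new subdevice format may change control limits. */
    subdev->updateControlInfo();

    const ControlInfo &hblank = subdev->controls().at(V4L2_CID_HBLANK);
    int32_t minHBlank = hblank.min().get<int32_t>();
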
/*
@@ -427,45 +761,303 @@ void V4L2Device::listControls()
* values in \a v4l2Ctrls
* \param[inout] ctrls List of V4L2 controls to update
* \param[in] v4l2Ctrls List of V4L2 extended controls as returned by the driver
- * \param[in] count The number of controls to update
*/
void V4L2Device::updateControls(ControlList *ctrls,
- const struct v4l2_ext_control *v4l2Ctrls,
- unsigned int count)
+ Span<const v4l2_ext_control> v4l2Ctrls)
{
- unsigned int i = 0;
- for (auto &ctrl : *ctrls) {
- if (i == count)
- break;
+ for (const v4l2_ext_control &v4l2Ctrl : v4l2Ctrls) {
+ const unsigned int id = v4l2Ctrl.id;
- const struct v4l2_ext_control *v4l2Ctrl = &v4l2Ctrls[i];
- unsigned int id = ctrl.first;
- ControlValue &value = ctrl.second;
+ ControlValue value = ctrls->get(id);
+ if (value.isArray()) {
+ /*
+ * No action required, the VIDIOC_[GS]_EXT_CTRLS ioctl
+ * accessed the ControlValue storage directly for array
+ * controls.
+ */
+ continue;
+ }
const auto iter = controls_.find(id);
+ ASSERT(iter != controls_.end());
+
switch (iter->first->type()) {
case ControlTypeInteger64:
- value.set<int64_t>(v4l2Ctrl->value64);
- break;
-
- case ControlTypeByte:
- /*
- * No action required, the VIDIOC_[GS]_EXT_CTRLS ioctl
- * accessed the ControlValue storage directly.
- */
+ value.set<int64_t>(v4l2Ctrl.value64);
break;
default:
/*
- * \todo To be changed when support for string and
- * compound controls will be added.
+ * Note: this catches the ControlTypeInteger32 case.
+ *
+ * \todo To be changed when support for string controls
+ * is added.
*/
- value.set<int32_t>(v4l2Ctrl->value);
+ value.set<int32_t>(v4l2Ctrl.value);
break;
}
- i++;
+ ctrls->set(id, value);
}
}
+/**
+ * \brief Slot to handle V4L2 events from the V4L2 device
+ *
+ * When this slot is called, a V4L2 event is available to be dequeued from the
+ * device.
+ */
+void V4L2Device::eventAvailable()
+{
+ struct v4l2_event event{};
+ int ret = ioctl(VIDIOC_DQEVENT, &event);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Failed to dequeue event, disabling event notifier";
+ fdEventNotifier_->setEnabled(false);
+ return;
+ }
+
+ if (event.type != V4L2_EVENT_FRAME_SYNC) {
+ LOG(V4L2, Error)
+ << "Spurious event (" << event.type
+ << "), disabling event notifier";
+ fdEventNotifier_->setEnabled(false);
+ return;
+ }
+
+ frameStart.emit(event.u.frame_sync.frame_sequence);
+}
+
+static const std::map<uint32_t, ColorSpace> v4l2ToColorSpace = {
+ { V4L2_COLORSPACE_RAW, ColorSpace::Raw },
+ { V4L2_COLORSPACE_SRGB, {
+ ColorSpace::Primaries::Rec709,
+ ColorSpace::TransferFunction::Srgb,
+ ColorSpace::YcbcrEncoding::Rec601,
+ ColorSpace::Range::Limited } },
+ { V4L2_COLORSPACE_JPEG, ColorSpace::Sycc },
+ { V4L2_COLORSPACE_SMPTE170M, ColorSpace::Smpte170m },
+ { V4L2_COLORSPACE_REC709, ColorSpace::Rec709 },
+ { V4L2_COLORSPACE_BT2020, ColorSpace::Rec2020 },
+};
+
+static const std::map<uint32_t, ColorSpace::TransferFunction> v4l2ToTransferFunction = {
+ { V4L2_XFER_FUNC_NONE, ColorSpace::TransferFunction::Linear },
+ { V4L2_XFER_FUNC_SRGB, ColorSpace::TransferFunction::Srgb },
+ { V4L2_XFER_FUNC_709, ColorSpace::TransferFunction::Rec709 },
+};
+
+static const std::map<uint32_t, ColorSpace::YcbcrEncoding> v4l2ToYcbcrEncoding = {
+ { V4L2_YCBCR_ENC_601, ColorSpace::YcbcrEncoding::Rec601 },
+ { V4L2_YCBCR_ENC_709, ColorSpace::YcbcrEncoding::Rec709 },
+ { V4L2_YCBCR_ENC_BT2020, ColorSpace::YcbcrEncoding::Rec2020 },
+};
+
+static const std::map<uint32_t, ColorSpace::Range> v4l2ToRange = {
+ { V4L2_QUANTIZATION_FULL_RANGE, ColorSpace::Range::Full },
+ { V4L2_QUANTIZATION_LIM_RANGE, ColorSpace::Range::Limited },
+};
+
+static const std::vector<std::pair<ColorSpace, v4l2_colorspace>> colorSpaceToV4l2 = {
+ { ColorSpace::Raw, V4L2_COLORSPACE_RAW },
+ { ColorSpace::Sycc, V4L2_COLORSPACE_JPEG },
+ { ColorSpace::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
+ { ColorSpace::Rec709, V4L2_COLORSPACE_REC709 },
+ { ColorSpace::Rec2020, V4L2_COLORSPACE_BT2020 },
+};
+
+static const std::map<ColorSpace::Primaries, v4l2_colorspace> primariesToV4l2 = {
+ { ColorSpace::Primaries::Raw, V4L2_COLORSPACE_RAW },
+ { ColorSpace::Primaries::Smpte170m, V4L2_COLORSPACE_SMPTE170M },
+ { ColorSpace::Primaries::Rec709, V4L2_COLORSPACE_REC709 },
+ { ColorSpace::Primaries::Rec2020, V4L2_COLORSPACE_BT2020 },
+};
+
+static const std::map<ColorSpace::TransferFunction, v4l2_xfer_func> transferFunctionToV4l2 = {
+ { ColorSpace::TransferFunction::Linear, V4L2_XFER_FUNC_NONE },
+ { ColorSpace::TransferFunction::Srgb, V4L2_XFER_FUNC_SRGB },
+ { ColorSpace::TransferFunction::Rec709, V4L2_XFER_FUNC_709 },
+};
+
+static const std::map<ColorSpace::YcbcrEncoding, v4l2_ycbcr_encoding> ycbcrEncodingToV4l2 = {
+ /* V4L2 has no "none" encoding. */
+ { ColorSpace::YcbcrEncoding::None, V4L2_YCBCR_ENC_DEFAULT },
+ { ColorSpace::YcbcrEncoding::Rec601, V4L2_YCBCR_ENC_601 },
+ { ColorSpace::YcbcrEncoding::Rec709, V4L2_YCBCR_ENC_709 },
+ { ColorSpace::YcbcrEncoding::Rec2020, V4L2_YCBCR_ENC_BT2020 },
+};
+
+static const std::map<ColorSpace::Range, v4l2_quantization> rangeToV4l2 = {
+ { ColorSpace::Range::Full, V4L2_QUANTIZATION_FULL_RANGE },
+ { ColorSpace::Range::Limited, V4L2_QUANTIZATION_LIM_RANGE },
+};
+
+/**
+ * \brief Convert the color space fields in a V4L2 format to a ColorSpace
+ * \param[in] v4l2Format A V4L2 format containing color space information
+ * \param[in] colourEncoding Type of colour encoding
+ *
+ * The colorspace, ycbcr_enc, xfer_func and quantization fields within a
+ * V4L2 format structure are converted to a corresponding ColorSpace.
+ *
+ * If any V4L2 fields are not recognised then we return an "unset"
+ * color space.
+ *
+ * \return The ColorSpace corresponding to the input V4L2 format
+ * \retval std::nullopt One or more V4L2 color space fields were not recognised
+ */
+template<typename T>
+std::optional<ColorSpace> V4L2Device::toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding)
+{
+ auto itColor = v4l2ToColorSpace.find(v4l2Format.colorspace);
+ if (itColor == v4l2ToColorSpace.end())
+ return std::nullopt;
+
+ /* This sets all the color space fields to the correct "default" values. */
+ ColorSpace colorSpace = itColor->second;
+
+ if (v4l2Format.xfer_func != V4L2_XFER_FUNC_DEFAULT) {
+ auto itTransfer = v4l2ToTransferFunction.find(v4l2Format.xfer_func);
+ if (itTransfer == v4l2ToTransferFunction.end())
+ return std::nullopt;
+
+ colorSpace.transferFunction = itTransfer->second;
+ }
+
+ if (v4l2Format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT) {
+ auto itYcbcrEncoding = v4l2ToYcbcrEncoding.find(v4l2Format.ycbcr_enc);
+ if (itYcbcrEncoding == v4l2ToYcbcrEncoding.end())
+ return std::nullopt;
+
+ colorSpace.ycbcrEncoding = itYcbcrEncoding->second;
+
+ /*
+ * V4L2 has no "none" encoding, override the value returned by
+ * the kernel for non-YUV formats as YCbCr encoding isn't
+ * applicable in that case.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+ }
+
+ if (v4l2Format.quantization != V4L2_QUANTIZATION_DEFAULT) {
+ auto itRange = v4l2ToRange.find(v4l2Format.quantization);
+ if (itRange == v4l2ToRange.end())
+ return std::nullopt;
+
+ colorSpace.range = itRange->second;
+
+ /*
+ * "Limited" quantization range is only meant for YUV formats.
+ * Override the range to "Full" for all other formats.
+ */
+ if (colourEncoding != PixelFormatInfo::ColourEncodingYUV)
+ colorSpace.range = ColorSpace::Range::Full;
+ }
+
+ return colorSpace;
+}
+
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_pix_format_mplane &,
+ PixelFormatInfo::ColourEncoding);
+template std::optional<ColorSpace> V4L2Device::toColorSpace(const struct v4l2_mbus_framefmt &,
+ PixelFormatInfo::ColourEncoding);
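+
Derived classes use this helper when reading back a negotiated format. A minimal sketch in the spirit of V4L2VideoDevice::getFormat(), where pixelFormat and format are assumed from the surrounding (elided) format handling:

    struct v4l2_format v4l2Fmt = {};
    v4l2Fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    int ret = ioctl(VIDIOC_G_FMT, &v4l2Fmt);
    if (ret)
        return ret;

    /* YCbCr encoding and limited range only apply to YUV formats. */
    const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
    format->colorSpace = toColorSpace(v4l2Fmt.fmt.pix, info.colourEncoding);
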
+
+/**
+ * \brief Fill in the color space fields of a V4L2 format from a ColorSpace
+ * \param[in] colorSpace The ColorSpace to be converted
+ * \param[out] v4l2Format A V4L2 format containing color space information
+ *
+ * The colorspace, ycbcr_enc, xfer_func and quantization fields within a
+ * V4L2 format structure are filled in from a corresponding ColorSpace.
+ *
+ * An error is returned if any of the V4L2 fields do not support the
+ * value given in the ColorSpace. Such fields are set to the V4L2
+ * "default" values, but all other fields are still filled in where
+ * possible.
+ *
+ * If the color space is completely unset, "default" V4L2 values are used
+ * everywhere, so a driver would then choose its preferred color space.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The ColorSpace does not have a representation using V4L2 enums
+ */
+template<typename T>
+int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &colorSpace, T &v4l2Format)
+{
+ v4l2Format.colorspace = V4L2_COLORSPACE_DEFAULT;
+ v4l2Format.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ v4l2Format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ v4l2Format.quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (!colorSpace)
+ return 0;
+
+ auto itColor = std::find_if(colorSpaceToV4l2.begin(), colorSpaceToV4l2.end(),
+ [&colorSpace](const auto &item) {
+ return colorSpace == item.first;
+ });
+ if (itColor != colorSpaceToV4l2.end()) {
+ v4l2Format.colorspace = itColor->second;
+ /* Leaving all the other fields as "default" should be fine. */
+ return 0;
+ }
+
+ /*
+ * If the colorSpace doesn't precisely match a standard color space,
+ * then we must choose a V4L2 colorspace with matching primaries.
+ */
+ int ret = 0;
+
+ auto itPrimaries = primariesToV4l2.find(colorSpace->primaries);
+ if (itPrimaries != primariesToV4l2.end()) {
+ v4l2Format.colorspace = itPrimaries->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised primaries in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itTransfer = transferFunctionToV4l2.find(colorSpace->transferFunction);
+ if (itTransfer != transferFunctionToV4l2.end()) {
+ v4l2Format.xfer_func = itTransfer->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised transfer function in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itYcbcrEncoding = ycbcrEncodingToV4l2.find(colorSpace->ycbcrEncoding);
+ if (itYcbcrEncoding != ycbcrEncodingToV4l2.end()) {
+ v4l2Format.ycbcr_enc = itYcbcrEncoding->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised YCbCr encoding in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ auto itRange = rangeToV4l2.find(colorSpace->range);
+ if (itRange != rangeToV4l2.end()) {
+ v4l2Format.quantization = itRange->second;
+ } else {
+ libcamera::LOG(V4L2, Warning)
+ << "Unrecognised quantization in "
+ << ColorSpace::toString(colorSpace);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_pix_format &);
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_pix_format_mplane &);
+template int V4L2Device::fromColorSpace(const std::optional<ColorSpace> &, struct v4l2_mbus_framefmt &);
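+
The reverse direction, sketched under the same assumptions, fills the V4L2 fields before VIDIOC_S_FMT and treats -EINVAL as a soft failure, since unrepresentable fields fall back to the V4L2 "default" values:

    struct v4l2_format v4l2Fmt = {};
    v4l2Fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    if (fromColorSpace(format->colorSpace, v4l2Fmt.fmt.pix) < 0)
        LOG(V4L2, Warning) << "Color space not fully supported by V4L2";

    int ret = ioctl(VIDIOC_S_FMT, &v4l2Fmt);
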
+
} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
new file mode 100644
index 00000000..e8b3eb9c
--- /dev/null
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019, Google Inc.
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * V4L2 Pixel Format
+ */
+
+#include "libcamera/internal/v4l2_pixelformat.h"
+
+#include <ctype.h>
+#include <map>
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+
+/**
+ * \file v4l2_pixelformat.h
+ * \brief V4L2 Pixel Format
+ */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(V4L2)
+
+/**
+ * \class V4L2PixelFormat
+ * \brief V4L2 pixel format FourCC wrapper
+ *
+ * The V4L2PixelFormat class describes the pixel format of a V4L2 buffer. It
+ * wraps the V4L2 numerical FourCC, and shall be used in all APIs that deal with
+ * V4L2 pixel formats. Its purpose is to prevent unintentional confusion of
+ * V4L2 and DRM FourCCs in code by catching implicit conversion attempts at
+ * compile time.
+ *
+ * To achieve this goal, construction of a V4L2PixelFormat from an integer value
+ * is explicit. To retrieve the integer value of a V4L2PixelFormat, both the
+ * explicit fourcc() and implicit uint32_t conversion operators may be used.
+ */
+
+namespace {
+
+const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{
+ /* RGB formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB565),
+ { formats::RGB565, "16-bit RGB 5-6-5" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB565X),
+ { formats::RGB565_BE, "16-bit RGB 5-6-5 BE" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB24),
+ { formats::BGR888, "24-bit RGB 8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR24),
+ { formats::RGB888, "24-bit BGR 8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_XBGR32),
+ { formats::XRGB8888, "32-bit BGRX 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_XRGB32),
+ { formats::BGRX8888, "32-bit XRGB 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGBX32),
+ { formats::XBGR8888, "32-bit RGBX 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGRX32),
+ { formats::RGBX8888, "32-bit XBGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGBA32),
+ { formats::ABGR8888, "32-bit RGBA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_ABGR32),
+ { formats::ARGB8888, "32-bit BGRA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_ARGB32),
+ { formats::BGRA8888, "32-bit ARGB 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGRA32),
+ { formats::RGBA8888, "32-bit ABGR 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_RGB48),
+ { formats::BGR161616, "48-bit RGB 16-16-16" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_BGR48),
+ { formats::RGB161616, "48-bit BGR 16-16-16" } },
+
+ /* YUV packed formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUYV),
+ { formats::YUYV, "YUYV 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVYU),
+ { formats::YVYU, "YVYU 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_UYVY),
+ { formats::UYVY, "UYVY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_VYUY),
+ { formats::VYUY, "VYUY 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVA32),
+ { formats::AVUY8888, "32-bit YUVA 8-8-8-8" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUVX32),
+ { formats::XVUY8888, "32-bit YUVX 8-8-8-8" } },
+
+ /* YUV planar formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV16),
+ { formats::NV16, "Y/CbCr 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV16M),
+ { formats::NV16, "Y/CbCr 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV61),
+ { formats::NV61, "Y/CrCb 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV61M),
+ { formats::NV61, "Y/CrCb 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV12),
+ { formats::NV12, "Y/CbCr 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV12M),
+ { formats::NV12, "Y/CbCr 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV21),
+ { formats::NV21, "Y/CrCb 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV21M),
+ { formats::NV21, "Y/CrCb 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV24),
+ { formats::NV24, "Y/CbCr 4:4:4" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_NV42),
+ { formats::NV42, "Y/CrCb 4:4:4" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV420),
+ { formats::YUV420, "Planar YUV 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV420M),
+ { formats::YUV420, "Planar YUV 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU420),
+ { formats::YVU420, "Planar YVU 4:2:0" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU420M),
+ { formats::YVU420, "Planar YVU 4:2:0 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV422P),
+ { formats::YUV422, "Planar YUV 4:2:2" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV422M),
+ { formats::YUV422, "Planar YUV 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU422M),
+ { formats::YVU422, "Planar YVU 4:2:2 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YUV444M),
+ { formats::YUV444, "Planar YUV 4:4:4 (N-C)" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_YVU444M),
+ { formats::YVU444, "Planar YVU 4:4:4 (N-C)" } },
+
+ /* Greyscale formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_GREY),
+ { formats::R8, "8-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10),
+ { formats::R10, "10-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y10P),
+ { formats::R10_CSI2P, "10-bit Greyscale Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12),
+ { formats::R12, "12-bit Greyscale" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y12P),
+ { formats::R12_CSI2P, "12-bit Greyscale Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_Y16),
+ { formats::R16, "16-bit Greyscale" } },
+
+ /* Bayer formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR8),
+ { formats::SBGGR8, "8-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG8),
+ { formats::SGBRG8, "8-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8),
+ { formats::SGRBG8, "8-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB8),
+ { formats::SRGGB8, "8-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10),
+ { formats::SBGGR10, "10-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10),
+ { formats::SGBRG10, "10-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10),
+ { formats::SGRBG10, "10-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10),
+ { formats::SRGGB10, "10-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P),
+ { formats::SBGGR10_CSI2P, "10-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P),
+ { formats::SGBRG10_CSI2P, "10-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P),
+ { formats::SGRBG10_CSI2P, "10-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P),
+ { formats::SRGGB10_CSI2P, "10-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12),
+ { formats::SBGGR12, "12-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12),
+ { formats::SGBRG12, "12-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12),
+ { formats::SGRBG12, "12-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12),
+ { formats::SRGGB12, "12-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR12P),
+ { formats::SBGGR12_CSI2P, "12-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG12P),
+ { formats::SGBRG12_CSI2P, "12-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG12P),
+ { formats::SGRBG12_CSI2P, "12-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB12P),
+ { formats::SRGGB12_CSI2P, "12-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14),
+ { formats::SBGGR14, "14-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14),
+ { formats::SGBRG14, "14-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14),
+ { formats::SGRBG14, "14-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14),
+ { formats::SRGGB14, "14-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR14P),
+ { formats::SBGGR14_CSI2P, "14-bit Bayer BGBG/GRGR Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG14P),
+ { formats::SGBRG14_CSI2P, "14-bit Bayer GBGB/RGRG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG14P),
+ { formats::SGRBG14_CSI2P, "14-bit Bayer GRGR/BGBG Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB14P),
+ { formats::SRGGB14_CSI2P, "14-bit Bayer RGRG/GBGB Packed" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SBGGR16),
+ { formats::SBGGR16, "16-bit Bayer BGBG/GRGR" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGBRG16),
+ { formats::SGBRG16, "16-bit Bayer GBGB/RGRG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SGRBG16),
+ { formats::SGRBG16, "16-bit Bayer GRGR/BGBG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_SRGGB16),
+ { formats::SRGGB16, "16-bit Bayer RGRG/GBGB" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_BGGR),
+ { formats::BGGR_PISP_COMP1, "16-bit Bayer BGBG/GRGR PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GBRG),
+ { formats::GBRG_PISP_COMP1, "16-bit Bayer GBGB/RGRG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_GRBG),
+ { formats::GRBG_PISP_COMP1, "16-bit Bayer GRGR/BGBG PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_RGGB),
+ { formats::RGGB_PISP_COMP1, "16-bit Bayer RGRG/GBGB PiSP Compress Mode 1" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_PISP_COMP1_MONO),
+ { formats::MONO_PISP_COMP1, "16-bit Mono PiSP Compress Mode 1" } },
+
+ /* Compressed formats. */
+ { V4L2PixelFormat(V4L2_PIX_FMT_MJPEG),
+ { formats::MJPEG, "Motion-JPEG" } },
+ { V4L2PixelFormat(V4L2_PIX_FMT_JPEG),
+ { formats::MJPEG, "JPEG JFIF" } },
+};
+
+} /* namespace */
+
+/**
+ * \struct V4L2PixelFormat::Info
+ * \brief Information about a V4L2 pixel format
+ *
+ * \var V4L2PixelFormat::Info::format
+ * \brief The corresponding libcamera PixelFormat
+ *
+ * \sa PixelFormat
+ *
+ * \var V4L2PixelFormat::Info::description
+ * \brief The human-readable description of the V4L2 pixel format
+ */
+
+/**
+ * \fn V4L2PixelFormat::V4L2PixelFormat()
+ * \brief Construct a V4L2PixelFormat with an invalid format
+ *
+ * V4L2PixelFormat instances constructed with the default constructor are
+ * invalid, calling the isValid() function returns false.
+ */
+
+/**
+ * \fn V4L2PixelFormat::V4L2PixelFormat(uint32_t fourcc)
+ * \brief Construct a V4L2PixelFormat from a FourCC value
+ * \param[in] fourcc The pixel format FourCC numerical value
+ */
+
+/**
+ * \fn bool V4L2PixelFormat::isValid() const
+ * \brief Check if the pixel format is valid
+ *
+ * V4L2PixelFormat instances constructed with the default constructor are
+ * invalid. Instances constructed with a FourCC defined in the V4L2 API are
+ * valid. The behaviour is undefined otherwise.
+ *
+ * \return True if the pixel format is valid, false otherwise
+ */
+
+/**
+ * \fn uint32_t V4L2PixelFormat::fourcc() const
+ * \brief Retrieve the pixel format FourCC numerical value
+ * \return The pixel format FourCC numerical value
+ */
+
+/**
+ * \fn V4L2PixelFormat::operator uint32_t() const
+ * \brief Convert to the pixel format FourCC numerical value
+ * \return The pixel format FourCC numerical value
+ */
+
+/**
+ * \brief Assemble and return a string describing the pixel format
+ * \return A string describing the pixel format
+ */
+std::string V4L2PixelFormat::toString() const
+{
+ if (fourcc_ == 0)
+ return "<INVALID>";
+
+ char ss[8] = { static_cast<char>(fourcc_ & 0x7f),
+ static_cast<char>((fourcc_ >> 8) & 0x7f),
+ static_cast<char>((fourcc_ >> 16) & 0x7f),
+ static_cast<char>((fourcc_ >> 24) & 0x7f) };
+
+ for (unsigned int i = 0; i < 4; i++) {
+ if (!isprint(ss[i]))
+ ss[i] = '.';
+ }
+
+ if (fourcc_ & (1 << 31))
+ strcat(ss, "-BE");
+
+ return ss;
+}
+
+/**
+ * \brief Retrieve the V4L2 description for the format
+ *
+ * The description matches the value used by the kernel, as would be reported
+ * by the VIDIOC_ENUM_FMT ioctl.
+ *
+ * \return The V4L2 description corresponding to the V4L2 format, or a
+ * placeholder description if not found
+ */
+const char *V4L2PixelFormat::description() const
+{
+ const auto iter = vpf2pf.find(*this);
+ if (iter == vpf2pf.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported V4L2 pixel format "
+ << toString();
+ return "Unsupported format";
+ }
+
+ return iter->second.description;
+}
+
+/**
+ * \brief Convert the V4L2 pixel format to the corresponding PixelFormat
+ * \param[in] warn When true, log a warning message if the V4L2 pixel format
+ * isn't known
+ *
+ * Users of this function might try to convert a V4L2PixelFormat to a
+ * PixelFormat just to check if the format is supported or not. In that case,
+ * they can suppress the warning message by setting the \a warn argument to
+ * false to not pollute the log with unnecessary messages.
+ *
+ * \return The PixelFormat corresponding to the V4L2 pixel format
+ */
+PixelFormat V4L2PixelFormat::toPixelFormat(bool warn) const
+{
+ const auto iter = vpf2pf.find(*this);
+ if (iter == vpf2pf.end()) {
+ if (warn)
+ LOG(V4L2, Warning) << "Unsupported V4L2 pixel format "
+ << toString();
+ return PixelFormat();
+ }
+
+ return iter->second.format;
+}
+
+/**
+ * \brief Retrieve the list of V4L2PixelFormat associated with \a pixelFormat
+ * \param[in] pixelFormat The PixelFormat to convert
+ *
+ * Multiple V4L2 formats may exist for one PixelFormat as V4L2 defines separate
+ * 4CCs for contiguous and non-contiguous versions of the same image format.
+ *
+ * \return The list of V4L2PixelFormat corresponding to \a pixelFormat
+ */
+const std::vector<V4L2PixelFormat> &
+V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
+{
+ static const std::vector<V4L2PixelFormat> empty;
+
+ const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
+ if (!info.isValid())
+ return empty;
+
+ return info.v4l2Formats;
+}
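+
A short round trip illustrates the explicit conversions (NV12 chosen arbitrarily):

    const std::vector<V4L2PixelFormat> &v4l2Formats =
        V4L2PixelFormat::fromPixelFormat(formats::NV12);
    if (v4l2Formats.empty())
        return -EINVAL;

    /* Pick the first variant and convert it back to a libcamera format. */
    PixelFormat pixelFormat = v4l2Formats[0].toPixelFormat();
    ASSERT(pixelFormat == formats::NV12);
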
+
+/**
+ * \brief Test if a V4L2PixelFormat is one of the line based generic metadata
+ * formats
+ *
+ * A limited number of metadata formats, the ones that represent generic
+ * line-based metadata buffers, need to have their width, height and
+ * bytesperline set by userspace.
+ *
+ * This function tests if the current V4L2PixelFormat is one of those.
+ *
+ * Note: It would have been nicer to store this information in a
+ * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to
+ * applications, there are no PixelFormat and DRM FourCC codes associated with
+ * them.
+ *
+ * \return True if the V4L2PixelFormat is a generic line-based format, false
+ * otherwise
+ */
+bool V4L2PixelFormat::isGenericLineBasedMetadata() const
+{
+ switch (fourcc_) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * \brief Insert a text representation of a V4L2PixelFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2PixelFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2PixelFormat &f)
+{
+ out << f.toString();
+ return out;
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 8b9da81e..7a064d87 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -2,26 +2,29 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.cpp - V4L2 Subdevice
+ * V4L2 Subdevice
*/
-#include "v4l2_subdevice.h"
+#include "libcamera/internal/v4l2_subdevice.h"
#include <fcntl.h>
-#include <iomanip>
+#include <regex>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
+#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
#include <libcamera/geometry.h>
-#include "log.h"
-#include "media_device.h"
-#include "media_object.h"
-#include "utils.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
/**
* \file v4l2_subdevice.h
@@ -33,6 +36,831 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
/**
+ * \class MediaBusFormatInfo
+ * \brief Information about media bus formats
+ *
+ * The MediaBusFormatInfo class groups together information describing a media
+ * bus format. It facilitates handling of media bus formats by providing data
+ * commonly used in pipeline handlers.
+ *
+ * \var MediaBusFormatInfo::name
+ * \brief The format name as a human-readable string, used as the text
+ * representation of the format
+ *
+ * \var MediaBusFormatInfo::code
+ * \brief The media bus format code described by this instance (MEDIA_BUS_FMT_*)
+ *
+ * \var MediaBusFormatInfo::type
+ * \brief The media bus format type
+ *
+ * \var MediaBusFormatInfo::bitsPerPixel
+ * \brief The average number of bits per pixel
+ *
+ * The number of bits per pixel averages the total number of bits for all
+ * colour components over the whole image, excluding any padding bits or
+ * padding pixels.
+ *
+ * For formats that transmit multiple or fractional pixels per sample, the
+ * value will differ from the bus width.
+ *
+ * Formats that don't have a fixed number of bits per pixel, such as compressed
+ * formats, or device-specific embedded data formats, report 0 in this field.
+ *
+ * \var MediaBusFormatInfo::colourEncoding
+ * \brief The colour encoding type
+ *
+ * This field is valid for Type::Image formats only.
+ */
+
+/**
+ * \enum MediaBusFormatInfo::Type
+ * \brief The format type
+ *
+ * \var MediaBusFormatInfo::Type::Image
+ * \brief The format describes image data
+ *
+ * \var MediaBusFormatInfo::Type::Metadata
+ * \brief The format describes generic metadata
+ *
+ * \var MediaBusFormatInfo::Type::EmbeddedData
+ * \brief The format describes sensor embedded data
+ */
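+
A lookup sketch (assuming the MediaBusFormatInfo::info() accessor declared alongside this class in v4l2_subdevice.h; the Bayer code is illustrative):

    const MediaBusFormatInfo &info =
        MediaBusFormatInfo::info(MEDIA_BUS_FMT_SBGGR10_1X10);
    if (!info.isValid())
        return -EINVAL;

    unsigned int bpp = info.bitsPerPixel; /* 10 for this Bayer mbus code */
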
+
+namespace {
+
+const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
+ /* This table is sorted to match the order in linux/media-bus-format.h */
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, {
+ .name = "RGB444_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE, {
+ .name = "RGB444_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, {
+ .name = "RGB555_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, {
+ .name = "RGB555_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_1X16, {
+ .name = "RGB565_1X16",
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_BE, {
+ .name = "BGR565_2X8_BE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR565_2X8_LE, {
+ .name = "BGR565_2X8_LE",
+ .code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_BE, {
+ .name = "RGB565_2X8_BE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB565_2X8_LE, {
+ .name = "RGB565_2X8_LE",
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB666_1X18, {
+ .name = "RGB666_1X18",
+ .code = MEDIA_BUS_FMT_RGB666_1X18,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 18,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_BGR888_1X24, {
+ .name = "BGR888_1X24",
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_1X24, {
+ .name = "RGB888_1X24",
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_BE, {
+ .name = "RGB888_2X12_BE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB888_2X12_LE, {
+ .name = "RGB888_2X12_LE",
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB121212_1X36, {
+ .name = "RGB121212_1X36",
+ .code = MEDIA_BUS_FMT_RGB121212_1X36,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 36,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_RGB202020_1X60, {
+ .name = "RGB202020_1X60",
+ .code = MEDIA_BUS_FMT_RGB202020_1X60,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 60,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_ARGB8888_1X32, {
+ .name = "ARGB8888_1X32",
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_Y8_1X8, {
+ .name = "Y8_1X8",
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UV8_1X8, {
+ .name = "UV8_1X8",
+ .code = MEDIA_BUS_FMT_UV8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1_5X8, {
+ .name = "UYVY8_1_5X8",
+ .code = MEDIA_BUS_FMT_UYVY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1_5X8, {
+ .name = "VYUY8_1_5X8",
+ .code = MEDIA_BUS_FMT_VYUY8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, {
+ .name = "YUYV8_1_5X8",
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1_5X8, {
+ .name = "YVYU8_1_5X8",
+ .code = MEDIA_BUS_FMT_YVYU8_1_5X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_2X8, {
+ .name = "UYVY8_2X8",
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_2X8, {
+ .name = "VYUY8_2X8",
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_2X8, {
+ .name = "YUYV8_2X8",
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_2X8, {
+ .name = "YVYU8_2X8",
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y10_1X10, {
+ .name = "Y10_1X10",
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_2X10, {
+ .name = "UYVY10_2X10",
+ .code = MEDIA_BUS_FMT_UYVY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_2X10, {
+ .name = "VYUY10_2X10",
+ .code = MEDIA_BUS_FMT_VYUY10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_2X10, {
+ .name = "YUYV10_2X10",
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_2X10, {
+ .name = "YVYU10_2X10",
+ .code = MEDIA_BUS_FMT_YVYU10_2X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y12_1X12, {
+ .name = "Y12_1X12",
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_Y16_1X16, {
+ .name = "Y16_1X16",
+ .code = MEDIA_BUS_FMT_Y16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY8_1X16, {
+ .name = "UYVY8_1X16",
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY8_1X16, {
+ .name = "VYUY8_1X16",
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV8_1X16, {
+ .name = "YUYV8_1X16",
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU8_1X16, {
+ .name = "YVYU8_1X16",
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YDYUYDYV8_1X16, {
+ .name = "YDYUYDYV8_1X16",
+ .code = MEDIA_BUS_FMT_YDYUYDYV8_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY10_1X20, {
+ .name = "UYVY10_1X20",
+ .code = MEDIA_BUS_FMT_UYVY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY10_1X20, {
+ .name = "VYUY10_1X20",
+ .code = MEDIA_BUS_FMT_VYUY10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV10_1X20, {
+ .name = "YUYV10_1X20",
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU10_1X20, {
+ .name = "YVYU10_1X20",
+ .code = MEDIA_BUS_FMT_YVYU10_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV8_1X24, {
+ .name = "YUV8_1X24",
+ .code = MEDIA_BUS_FMT_YUV8_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUV10_1X30, {
+ .name = "YUV10_1X30",
+ .code = MEDIA_BUS_FMT_YUV10_1X30,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 30,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_AYUV8_1X32, {
+ .name = "AYUV8_1X32",
+ .code = MEDIA_BUS_FMT_AYUV8_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_2X12, {
+ .name = "UYVY12_2X12",
+ .code = MEDIA_BUS_FMT_UYVY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_2X12, {
+ .name = "VYUY12_2X12",
+ .code = MEDIA_BUS_FMT_VYUY12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_2X12, {
+ .name = "YUYV12_2X12",
+ .code = MEDIA_BUS_FMT_YUYV12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_2X12, {
+ .name = "YVYU12_2X12",
+ .code = MEDIA_BUS_FMT_YVYU12_2X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_UYVY12_1X24, {
+ .name = "UYVY12_1X24",
+ .code = MEDIA_BUS_FMT_UYVY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_VYUY12_1X24, {
+ .name = "VYUY12_1X24",
+ .code = MEDIA_BUS_FMT_VYUY12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YUYV12_1X24, {
+ .name = "YUYV12_1X24",
+ .code = MEDIA_BUS_FMT_YUYV12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_YVYU12_1X24, {
+ .name = "YVYU12_1X24",
+ .code = MEDIA_BUS_FMT_YVYU12_1X24,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_SBGGR8_1X8, {
+ .name = "SBGGR8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, {
+ .name = "SGBRG8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, {
+ .name = "SGRBG8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, {
+ .name = "SRGGB8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8, {
+ .name = "SBGGR10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8, {
+ .name = "SGBRG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8, {
+ .name = "SGRBG10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8, {
+ .name = "SRGGB10_ALAW8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, {
+ .name = "SBGGR10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, {
+ .name = "SGBRG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, {
+ .name = "SGRBG10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, {
+ .name = "SRGGB10_DPCM8_1X8",
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, {
+ .name = "SBGGR10_2X8_PADHI_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, {
+ .name = "SBGGR10_2X8_PADHI_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, {
+ .name = "SBGGR10_2X8_PADLO_BE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, {
+ .name = "SBGGR10_2X8_PADLO_LE",
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, {
+ .name = "SBGGR10_1X10",
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, {
+ .name = "SGBRG10_1X10",
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, {
+ .name = "SGRBG10_1X10",
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, {
+ .name = "SRGGB10_1X10",
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, {
+ .name = "SBGGR12_1X12",
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, {
+ .name = "SGBRG12_1X12",
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, {
+ .name = "SGRBG12_1X12",
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, {
+ .name = "SRGGB12_1X12",
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, {
+ .name = "SBGGR14_1X14",
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, {
+ .name = "SGBRG14_1X14",
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, {
+ .name = "SGRBG14_1X14",
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, {
+ .name = "SRGGB14_1X14",
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, {
+ .name = "SBGGR16_1X16",
+ .code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, {
+ .name = "SGBRG16_1X16",
+ .code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG16_1X16, {
+ .name = "SGRBG16_1X16",
+ .code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, {
+ .name = "SRGGB16_1X16",
+ .code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SBGGR20_1X20, {
+ .name = "SBGGR20_1X20",
+ .code = MEDIA_BUS_FMT_SBGGR20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGBRG20_1X20, {
+ .name = "SGBRG20_1X20",
+ .code = MEDIA_BUS_FMT_SGBRG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SGRBG20_1X20, {
+ .name = "SGRBG20_1X20",
+ .code = MEDIA_BUS_FMT_SGRBG20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ { MEDIA_BUS_FMT_SRGGB20_1X20, {
+ .name = "SRGGB20_1X20",
+ .code = MEDIA_BUS_FMT_SRGGB20_1X20,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW
+ } },
+ /* \todo Clarify colour encoding for HSV formats */
+ { MEDIA_BUS_FMT_AHSV8888_1X32, {
+ .name = "AHSV8888_1X32",
+ .code = MEDIA_BUS_FMT_AHSV8888_1X32,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 32,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRGB,
+ } },
+ { MEDIA_BUS_FMT_JPEG_1X8, {
+ .name = "JPEG_1X8",
+ .code = MEDIA_BUS_FMT_JPEG_1X8,
+ .type = MediaBusFormatInfo::Type::Image,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingYUV,
+ } },
+ { MEDIA_BUS_FMT_METADATA_FIXED, {
+ .name = "METADATA_FIXED",
+ .code = MEDIA_BUS_FMT_METADATA_FIXED,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_8, {
+ .name = "META_8",
+ .code = MEDIA_BUS_FMT_META_8,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_10, {
+ .name = "META_10",
+ .code = MEDIA_BUS_FMT_META_10,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_12, {
+ .name = "META_12",
+ .code = MEDIA_BUS_FMT_META_12,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_14, {
+ .name = "META_14",
+ .code = MEDIA_BUS_FMT_META_14,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_16, {
+ .name = "META_16",
+ .code = MEDIA_BUS_FMT_META_16,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_20, {
+ .name = "META_20",
+ .code = MEDIA_BUS_FMT_META_20,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_24, {
+ .name = "META_24",
+ .code = MEDIA_BUS_FMT_META_24,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_CCS_EMBEDDED, {
+ .name = "CCS_EMBEDDED",
+ .code = MEDIA_BUS_FMT_CCS_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_OV2740_EMBEDDED, {
+ .name = "OV2740_EMBEDDED",
+		.code = MEDIA_BUS_FMT_OV2740_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+};
+
+} /* namespace */
+
+/**
+ * \fn bool MediaBusFormatInfo::isValid() const
+ * \brief Check if the media bus format info is valid
+ * \return True if the media bus format info is valid, false otherwise
+ */
+
+/**
+ * \brief Retrieve information about a media bus format
+ * \param[in] code The media bus format code
+ * \return The MediaBusFormatInfo describing the \a code if known, or an invalid
+ * MediaBusFormatInfo otherwise
+ */
+const MediaBusFormatInfo &MediaBusFormatInfo::info(uint32_t code)
+{
+ static const MediaBusFormatInfo invalid{};
+
+ const auto it = mediaBusFormatInfo.find(code);
+ if (it == mediaBusFormatInfo.end()) {
+ LOG(V4L2, Warning)
+ << "Unsupported media bus format "
+ << utils::hex(code, 4);
+ return invalid;
+ }
+
+ return it->second;
+}
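
A minimal sketch of a typical lookup follows; the helper name is illustrative and the include path assumes libcamera's internal headers:

#include <stdint.h>

#include "libcamera/internal/v4l2_subdevice.h"

using namespace libcamera;

/* Return the bus width of a media bus code, or 0 if the code is unknown. */
static unsigned int busWidth(uint32_t code)
{
	const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);

	return info.isValid() ? info.bitsPerPixel : 0;
}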
+
+/**
+ * \struct V4L2SubdeviceCapability
+ * \brief struct v4l2_subdev_capability object wrapper and helpers
+ *
+ * The V4L2SubdeviceCapability structure manages the information returned by the
+ * VIDIOC_SUBDEV_QUERYCAP ioctl.
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::isReadOnly()
+ * \brief Check if a subdevice is registered as read-only
+ *
+ * A V4L2 subdevice is registered as read-only if V4L2_SUBDEV_CAP_RO_SUBDEV
+ * is listed as part of its capabilities.
+ *
+ * \return True if the subdevice is registered as read-only, false otherwise
+ */
+
+/**
+ * \fn V4L2SubdeviceCapability::hasStreams()
+ * \brief Check if a subdevice supports the V4L2 streams API
+ * \return True if the subdevice supports the streams API, false otherwise
+ */
+
+/**
* \struct V4L2SubdeviceFormat
* \brief The V4L2 sub-device image format and sizes
*
@@ -61,7 +889,7 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
- * \var V4L2SubdeviceFormat::mbus_code
+ * \var V4L2SubdeviceFormat::code
* \brief The image format bus code
*/
@@ -71,17 +899,54 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
+ * \var V4L2SubdeviceFormat::colorSpace
+ * \brief The color space of the pixels
+ *
+ * The color space of the image. When setting the format this may be
+ * unset, in which case the driver gets to use its default color space.
+ * After being set, this value should contain the color space that
+ * was actually used. If this value is unset, then the color space chosen
+ * by the driver could not be represented by the ColorSpace class (and
+ * support for it should probably be added).
+ *
+ * It is up to the pipeline handler or application to check if the
+ * resulting color space is acceptable.
+ */
+
+/**
* \brief Assemble and return a string describing the format
* \return A string describing the V4L2SubdeviceFormat
*/
const std::string V4L2SubdeviceFormat::toString() const
{
std::stringstream ss;
- ss << size.toString() << "-" << utils::hex(mbus_code, 4);
+ ss << *this;
+
return ss.str();
}
/**
+ * \brief Insert a text representation of a V4L2SubdeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2SubdeviceFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f)
+{
+ out << f.size << "-";
+
+ const auto it = mediaBusFormatInfo.find(f.code);
+
+ if (it == mediaBusFormatInfo.end())
+ out << utils::hex(f.code, 4);
+ else
+ out << it->second.name;
+
+ return out;
+}
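
As a rough illustration, a 1920x1080 format with a known code prints its format name, while an unknown code falls back to the hexadecimal value (assuming <iostream> and the media bus format definitions are in scope):

V4L2SubdeviceFormat fmt{};
fmt.size = { 1920, 1080 };
fmt.code = MEDIA_BUS_FMT_SGRBG10_1X10;
std::cout << fmt << std::endl;	/* "1920x1080-SGRBG10_1X10" */

fmt.code = 0xabcd;
std::cout << fmt << std::endl;	/* "1920x1080-0xabcd" */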
+
+/**
* \class V4L2Subdevice
* \brief A V4L2 subdevice as exposed by the Linux kernel
*
@@ -96,6 +961,148 @@ const std::string V4L2SubdeviceFormat::toString() const
*/
/**
+ * \typedef V4L2Subdevice::Formats
+ * \brief A map of supported media bus formats to frame sizes
+ */
+
+/**
+ * \enum V4L2Subdevice::Whence
+ * \brief Specify the type of format for getFormat() and setFormat() operations
+ * \var V4L2Subdevice::ActiveFormat
+ * \brief The format operation applies to ACTIVE formats
+ * \var V4L2Subdevice::TryFormat
+ * \brief The format operation applies to TRY formats
+ */
+
+/**
+ * \class V4L2Subdevice::Stream
+ * \brief V4L2 subdevice stream
+ *
+ * This class identifies a subdev stream, by bundling the pad number with the
+ * stream number. It is used in all stream-aware functions of the V4L2Subdevice
+ * class to identify the stream the functions operate on.
+ *
+ * \var V4L2Subdevice::Stream::pad
+ * \brief The 0-indexed pad number
+ *
+ * \var V4L2Subdevice::Stream::stream
+ * \brief The stream number
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream()
+ * \brief Construct a Stream with pad and stream set to 0
+ */
+
+/**
+ * \fn V4L2Subdevice::Stream::Stream(unsigned int pad, unsigned int stream)
+ * \brief Construct a Stream with a given \a pad and \a stream number
+ * \param[in] pad The 0-indexed pad number
+ * \param[in] stream The stream number
+ */
+
+/**
+ * \brief Compare streams for equality
+ * \return True if the two streams are equal, false otherwise
+ */
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+{
+ return lhs.pad == rhs.pad && lhs.stream == rhs.stream;
+}
+
+/**
+ * \fn bool operator!=(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs)
+ * \brief Compare streams for inequality
+ * \return True if the two streams are not equal, false otherwise
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Stream into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] stream The V4L2Subdevice::Stream
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream)
+{
+ out << stream.pad << "/" << stream.stream;
+
+ return out;
+}
+
+/**
+ * \class V4L2Subdevice::Route
+ * \brief V4L2 subdevice routing table entry
+ *
+ * This class models a route in the subdevice routing table. It is similar to
+ * the v4l2_subdev_route structure, but uses the V4L2Subdevice::Stream class
+ * for easier usage with the V4L2Subdevice stream-aware functions.
+ *
+ * \var V4L2Subdevice::Route::sink
+ * \brief The sink stream of the route
+ *
+ * \var V4L2Subdevice::Route::source
+ * \brief The source stream of the route
+ *
+ * \var V4L2Subdevice::Route::flags
+ * \brief The route flags (V4L2_SUBDEV_ROUTE_FL_*)
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route()
+ * \brief Construct a Route with default streams
+ */
+
+/**
+ * \fn V4L2Subdevice::Route::Route(const Stream &sink, const Stream &source,
+ * uint32_t flags)
+ * \brief Construct a Route from \a sink to \a source
+ * \param[in] sink The sink stream
+ * \param[in] source The source stream
+ * \param[in] flags The route flags
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Route into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] route The V4L2Subdevice::Route
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route)
+{
+ out << route.sink << " -> " << route.source
+ << " (" << utils::hex(route.flags) << ")";
+
+ return out;
+}
+
+/**
+ * \typedef V4L2Subdevice::Routing
+ * \brief V4L2 subdevice routing table
+ *
+ * This type stores a subdevice routing table as a vector of routes.
+ */
+
+/**
+ * \brief Insert a text representation of a V4L2Subdevice::Routing into an
+ * output stream
+ * \param[in] out The output stream
+ * \param[in] routing The V4L2Subdevice::Routing
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing)
+{
+ for (const auto &[i, route] : utils::enumerate(routing)) {
+ out << "[" << i << "] " << route;
+ if (i != routing.size() - 1)
+ out << ", ";
+ }
+
+ return out;
+}
+
+/**
* \brief Create a V4L2 subdevice from a MediaEntity using its device node
* path
*/
@@ -115,7 +1122,40 @@ V4L2Subdevice::~V4L2Subdevice()
*/
int V4L2Subdevice::open()
{
- return V4L2Device::open(O_RDWR);
+ int ret = V4L2Device::open(O_RDWR);
+ if (ret)
+ return ret;
+
+ /*
+ * Try to query the subdev capabilities. The VIDIOC_SUBDEV_QUERYCAP API
+	 * was introduced in kernel v5.8; ENOTTY errors must be ignored to
+ * support older kernels.
+ */
+ caps_ = {};
+ ret = ioctl(VIDIOC_SUBDEV_QUERYCAP, &caps_);
+ if (ret < 0 && errno != ENOTTY) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to query capabilities: " << strerror(-ret);
+ return ret;
+ }
+
+ /* If the subdev supports streams, enable the streams API. */
+ if (caps_.hasStreams()) {
+ struct v4l2_subdev_client_capability clientCaps{};
+ clientCaps.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ ret = ioctl(VIDIOC_SUBDEV_S_CLIENT_CAP, &clientCaps);
+ if (ret < 0) {
+ ret = -errno;
+ LOG(V4L2, Error)
+ << "Unable to set client capabilities: "
+ << strerror(-ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
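
After a successful open(), callers can branch on the negotiated capabilities; a hedged sketch, with subdev assumed to point to a constructed V4L2Subdevice:

if (!subdev->open() && subdev->caps().hasStreams()) {
	/*
	 * The kernel accepted V4L2_SUBDEV_CLIENT_CAP_STREAMS, so formats,
	 * selection rectangles and routing tables can be addressed
	 * per-stream rather than per-pad only.
	 */
}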
/**
@@ -125,54 +1165,136 @@ int V4L2Subdevice::open()
*/
/**
- * \brief Set a crop rectangle on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
- * \param[inout] rect The rectangle describing crop target area
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] stream The stream the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setCrop(unsigned int pad, Rectangle *rect)
+int V4L2Subdevice::getSelection(const Stream &stream, unsigned int target,
+ Rectangle *rect)
{
- return setSelection(pad, V4L2_SEL_TGT_CROP, rect);
+ struct v4l2_subdev_selection sel = {};
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Unable to get rectangle " << target << " on pad "
+ << stream << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
/**
- * \brief Set a compose rectangle on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
- * \param[inout] rect The rectangle describing the compose target area
+ * \fn V4L2Subdevice::getSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Get selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is retrieved from
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The retrieved selection rectangle
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setCompose(unsigned int pad, Rectangle *rect)
+
+/**
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] stream The stream the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setSelection(const Stream &stream, unsigned int target,
+ Rectangle *rect)
{
- return setSelection(pad, V4L2_SEL_TGT_COMPOSE, rect);
+ struct v4l2_subdev_selection sel = {};
+
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = stream.pad;
+ sel.stream = stream.stream;
+ sel.target = target;
+ sel.flags = 0;
+
+ sel.r.left = rect->x;
+ sel.r.top = rect->y;
+ sel.r.width = rect->width;
+ sel.r.height = rect->height;
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error)
+ << "Unable to set rectangle " << target << " on pad "
+ << stream << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
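
A usage sketch, assuming subdev is an open V4L2Subdevice and the crop window is arbitrary:

int configureCrop(V4L2Subdevice *subdev)
{
	V4L2Subdevice::Stream stream{ 0, 0 };
	Rectangle crop{ 0, 0, 1920, 1080 };

	/* On success 'crop' is updated to the rectangle the driver granted. */
	return subdev->setSelection(stream, V4L2_SEL_TGT_CROP, &crop);
}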
/**
- * \brief Enumerate all media bus codes and frame sizes on a \a pad
- * \param[in] pad The 0-indexed pad number to enumerate formats on
+ * \fn V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
+ * Rectangle *rect)
+ * \brief Set selection rectangle \a rect for \a target
+ * \param[in] pad The 0-indexed pad number the rectangle is to be applied to
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
+ * \brief Enumerate all media bus codes and frame sizes on a \a stream
+ * \param[in] stream The stream to enumerate formats for
*
* Enumerate all media bus codes and frame sizes supported by the subdevice on
- * a \a pad.
+ * a \a stream.
*
* \return A list of the supported device formats
*/
-ImageFormats V4L2Subdevice::formats(unsigned int pad)
+V4L2Subdevice::Formats V4L2Subdevice::formats(const Stream &stream)
{
- ImageFormats formats;
+ Formats formats;
- if (pad >= entity_->pads().size()) {
- LOG(V4L2, Error) << "Invalid pad: " << pad;
+ if (stream.pad >= entity_->pads().size()) {
+ LOG(V4L2, Error) << "Invalid pad: " << stream.pad;
return {};
}
- for (unsigned int code : enumPadCodes(pad)) {
- std::vector<SizeRange> sizes = enumPadSizes(pad, code);
+ for (unsigned int code : enumPadCodes(stream)) {
+ std::vector<SizeRange> sizes = enumPadSizes(stream, code);
if (sizes.empty())
return {};
- if (formats.addFormat(code, sizes)) {
+ const auto inserted = formats.insert({ code, sizes });
+ if (!inserted.second) {
LOG(V4L2, Error)
<< "Could not add sizes for media bus code "
- << code << " on pad " << pad;
+ << code << " on pad " << stream.pad;
return {};
}
}
@@ -181,86 +1303,461 @@ ImageFormats V4L2Subdevice::formats(unsigned int pad)
}
/**
- * \brief Retrieve the image format set on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \fn V4L2Subdevice::formats(unsigned int pad)
+ * \brief Enumerate all media bus codes and frame sizes on a \a pad
+ * \param[in] pad The 0-indexed pad number to enumerate formats on
+ *
+ * Enumerate all media bus codes and frame sizes supported by the subdevice on
+ * a \a pad
+ *
+ * \return A list of the supported device formats
+ */
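
A short sketch of consuming the returned map, assuming subdev is an open V4L2Subdevice; the printing is illustrative:

V4L2Subdevice::Formats formats = subdev->formats({ 0, 0 });

for (const auto &[code, sizes] : formats) {
	for (const SizeRange &range : sizes)
		std::cout << MediaBusFormatInfo::info(code).name
			  << ": " << range << std::endl;
}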
+
+std::optional<ColorSpace> V4L2Subdevice::toColorSpace(const v4l2_mbus_framefmt &format) const
+{
+ /*
+ * Only image formats have a color space, for other formats (such as
+ * metadata formats) the color space concept isn't applicable. V4L2
+ * subdev drivers return a colorspace set to V4L2_COLORSPACE_DEFAULT in
+ * that case (as well as for image formats when the driver hasn't
+ * bothered implementing color space support). Check the colorspace
+ * field here and return std::nullopt directly to avoid logging a
+ * warning.
+ */
+ if (format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return std::nullopt;
+
+ PixelFormatInfo::ColourEncoding colourEncoding;
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(format.code);
+ if (info.isValid()) {
+ colourEncoding = info.colourEncoding;
+ } else {
+ LOG(V4L2, Warning)
+ << "Unknown subdev format "
+ << utils::hex(format.code, 4)
+ << ", defaulting to RGB encoding";
+
+ colourEncoding = PixelFormatInfo::ColourEncodingRGB;
+ }
+
+ return V4L2Device::toColorSpace(format, colourEncoding);
+}
+
+/**
+ * \brief Retrieve the image format set on one of the V4L2 subdevice streams
+ * \param[in] stream The stream the format is to be retrieved from
* \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format)
+int V4L2Subdevice::getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
+ Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
int ret = ioctl(VIDIOC_SUBDEV_G_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to get format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to get format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
+ format->colorSpace = toColorSpace(subdevFmt.format);
return 0;
}
/**
+ * \fn V4L2Subdevice::getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Retrieve the image format set on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be retrieved from
+ * \param[out] format The image bus format
+ * \param[in] whence The format to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ * \return 0 on success or a negative error code otherwise
+ */
+
+/**
* \brief Set an image format on one of the V4L2 subdevice pads
- * \param[in] pad The 0-indexed pad number the format is to be applied to
- * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] stream The stream the format is to be applied to
+ * \param[inout] format The image bus format to apply to the stream
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
*
- * Apply the requested image format to the desired media pad and return the
- * actually applied format parameters, as \ref V4L2Subdevice::getFormat would
- * do.
+ * Apply the requested image format to the desired stream and return the
+ * actually applied format parameters, as getFormat() would do.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format)
+int V4L2Subdevice::setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
+ Whence whence)
{
struct v4l2_subdev_format subdevFmt = {};
- subdevFmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- subdevFmt.pad = pad;
+ subdevFmt.which = whence;
+ subdevFmt.pad = stream.pad;
+ subdevFmt.stream = stream.stream;
subdevFmt.format.width = format->size.width;
subdevFmt.format.height = format->size.height;
- subdevFmt.format.code = format->mbus_code;
+ subdevFmt.format.code = format->code;
+ subdevFmt.format.field = V4L2_FIELD_NONE;
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, subdevFmt.format);
+
+ /* The CSC flag is only applicable to source pads. */
+ if (entity_->pads()[stream.pad]->flags() & MEDIA_PAD_FL_SOURCE)
+ subdevFmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
+ }
int ret = ioctl(VIDIOC_SUBDEV_S_FMT, &subdevFmt);
if (ret) {
LOG(V4L2, Error)
- << "Unable to set format on pad " << pad
- << ": " << strerror(-ret);
+ << "Unable to set format on pad " << stream << ": "
+ << strerror(-ret);
return ret;
}
format->size.width = subdevFmt.format.width;
format->size.height = subdevFmt.format.height;
- format->mbus_code = subdevFmt.format.code;
+ format->code = subdevFmt.format.code;
+ format->colorSpace = toColorSpace(subdevFmt.format);
+
+ return 0;
+}
+
+/**
+ * \fn V4L2Subdevice::setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ * Whence whence)
+ * \brief Set an image format on one of the V4L2 subdevice pads
+ * \param[in] pad The 0-indexed pad number the format is to be applied to
+ * \param[inout] format The image bus format to apply to the subdevice's pad
+ * \param[in] whence The format to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply the requested image format to the desired media pad and return the
+ * actually applied format parameters, as getFormat() would do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
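
A hedged sketch of the try-then-commit pattern this enables, with subdev assumed to be an open V4L2Subdevice:

V4L2Subdevice::Stream stream{ 0, 0 };
V4L2SubdeviceFormat format{};
format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.size = { 1920, 1080 };

/* Probe with TryFormat first; commit only if the size was accepted. */
int ret = subdev->setFormat(stream, &format, V4L2Subdevice::TryFormat);
if (!ret && format.size == Size(1920, 1080))
	ret = subdev->setFormat(stream, &format, V4L2Subdevice::ActiveFormat);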
+
+namespace {
+
+void routeFromKernel(V4L2Subdevice::Route &route,
+ const struct v4l2_subdev_route &kroute)
+{
+ route.sink.pad = kroute.sink_pad;
+ route.sink.stream = kroute.sink_stream;
+ route.source.pad = kroute.source_pad;
+ route.source.stream = kroute.source_stream;
+ route.flags = kroute.flags;
+}
+
+void routeToKernel(const V4L2Subdevice::Route &route,
+ struct v4l2_subdev_route &kroute)
+{
+ kroute.sink_pad = route.sink.pad;
+ kroute.sink_stream = route.sink.stream;
+ kroute.source_pad = route.source.pad;
+ kroute.source_stream = route.source.stream;
+ kroute.flags = route.flags;
+}
+
+/*
+ * Legacy routing support for pre-v6.10-rc1 kernels. Drop when v6.12-rc1 gets
+ * released.
+ */
+struct v4l2_subdev_routing_legacy {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
+#define VIDIOC_SUBDEV_G_ROUTING_LEGACY _IOWR('V', 38, struct v4l2_subdev_routing_legacy)
+#define VIDIOC_SUBDEV_S_ROUTING_LEGACY _IOWR('V', 39, struct v4l2_subdev_routing_legacy)
+
+} /* namespace */
+
+int V4L2Subdevice::getRoutingLegacy(Routing *routing, Whence whence)
+{
+ struct v4l2_subdev_routing_legacy rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret == 0 || ret == -ENOTTY)
+ return ret;
+
+ if (ret != -ENOSPC) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+/**
+ * \brief Retrieve the subdevice's internal routing table
+ * \param[out] routing The routing table
+ * \param[in] whence The routing table to get, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::getRouting(Routing *routing, Whence whence)
+{
+ routing->clear();
+
+ if (!caps_.hasStreams())
+ return 0;
+
+ struct v4l2_subdev_routing rt = {};
+
+ rt.which = whence;
+
+ int ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return V4L2Subdevice::getRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve number of routes: "
+ << strerror(-ret);
+ return ret;
+ }
+
+ if (!rt.num_routes)
+ return 0;
+
+ std::vector<struct v4l2_subdev_route> routes{ rt.num_routes };
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
+
+int V4L2Subdevice::setRoutingLegacy(Routing *routing, Whence whence)
+{
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing_legacy rt = {};
+ rt.which = whence;
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING_LEGACY, &rt);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ routes.resize(rt.num_routes);
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
return 0;
}
/**
+ * \brief Set a routing table on the V4L2 subdevice
+ * \param[inout] routing The routing table
+ * \param[in] whence The routing table to set, \ref V4L2Subdevice::ActiveFormat
+ * "ActiveFormat" or \ref V4L2Subdevice::TryFormat "TryFormat"
+ *
+ * Apply to the V4L2 subdevice the routing table \a routing and update its
+ * content to reflect the actually applied routing table as getRouting() would
+ * do.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2Subdevice::setRouting(Routing *routing, Whence whence)
+{
+ if (!caps_.hasStreams()) {
+ routing->clear();
+ return 0;
+ }
+
+ std::vector<struct v4l2_subdev_route> routes{ routing->size() };
+
+ for (const auto &[i, route] : utils::enumerate(*routing))
+ routeToKernel(route, routes[i]);
+
+ struct v4l2_subdev_routing rt = {};
+ rt.which = whence;
+ rt.len_routes = routes.size();
+ rt.num_routes = routes.size();
+ rt.routes = reinterpret_cast<uintptr_t>(routes.data());
+
+ int ret = ioctl(VIDIOC_SUBDEV_S_ROUTING, &rt);
+ if (ret == -ENOTTY)
+ return setRoutingLegacy(routing, whence);
+
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to set routes: " << strerror(-ret);
+ return ret;
+ }
+
+ /*
+ * The kernel may want to return more routes than we have space for. In
+ * that event, we must issue a VIDIOC_SUBDEV_G_ROUTING call to retrieve
+ * the additional routes.
+ */
+ if (rt.num_routes > routes.size()) {
+ routes.resize(rt.num_routes);
+
+ rt.len_routes = rt.num_routes;
+ rt.num_routes = 0;
+
+ ret = ioctl(VIDIOC_SUBDEV_G_ROUTING, &rt);
+ if (ret) {
+ LOG(V4L2, Error)
+ << "Failed to retrieve routes: " << strerror(-ret);
+ return ret;
+ }
+ }
+
+ if (rt.num_routes != routes.size()) {
+ LOG(V4L2, Error) << "Invalid number of routes";
+ return -EINVAL;
+ }
+
+ routing->resize(rt.num_routes);
+
+ for (const auto &[i, route] : utils::enumerate(routes))
+ routeFromKernel((*routing)[i], route);
+
+ return 0;
+}
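
A sketch of a caller building and applying a table, assuming subdev is an open, stream-capable V4L2Subdevice; pads, streams and flags are illustrative:

/* Route two sink streams of pad 0 to two streams on source pad 1. */
V4L2Subdevice::Routing routing = {
	{ { 0, 0 }, { 1, 0 }, V4L2_SUBDEV_ROUTE_FL_ACTIVE },
	{ { 0, 1 }, { 1, 1 }, V4L2_SUBDEV_ROUTE_FL_ACTIVE },
};

int ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
/* On success 'routing' reflects the table the kernel actually applied. */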
+
+/**
+ * \brief Retrieve the model name of the device
+ *
+ * The model name allows identification of the specific device model. This can
+ * be used to infer device characteristics, for instance to determine the
+ * analogue gain model of a camera sensor based on the sensor model name.
+ *
+ * Neither the V4L2 API nor the Media Controller API expose an explicit model
+ * name. This function implements a heuristics to extract the model name from
+ * the subdevice's entity name. This should produce accurate results for
+ * I2C-based devices. If the heuristics can't match a known naming pattern,
+ * the function returns the full entity name.
+ *
+ * \return The model name of the device
+ */
+const std::string &V4L2Subdevice::model()
+{
+ if (!model_.empty())
+ return model_;
+
+ /*
+ * Extract model name from the media entity name.
+ *
+ * There is no standardized naming scheme for sensor or other entities
+ * in the Linux kernel at the moment.
+ *
+ * - The most common rule, used by I2C sensors, associates the model
+ * name with the I2C bus number and address (e.g. 'imx219 0-0010').
+ *
+ * - When the sensor exposes multiple subdevs, the model name is
+ * usually followed by a function name, as in the smiapp driver (e.g.
+ * 'jt8ew9 pixel_array 0-0010').
+ *
+ * - The vimc driver names its sensors 'Sensor A' and 'Sensor B'.
+ *
+	 * Other schemes probably exist. As a best-effort heuristic, use the
+ * part of the entity name before the first space if the name contains
+ * an I2C address, and use the full entity name otherwise.
+ */
+ std::string entityName = entity_->name();
+ std::regex i2cRegex{ " [0-9]+-[0-9a-f]{4}" };
+ std::smatch match;
+
+ if (std::regex_search(entityName, match, i2cRegex))
+ model_ = entityName.substr(0, entityName.find(' '));
+ else
+ model_ = entityName;
+
+ return model_;
+}
+
+/**
+ * \fn V4L2Subdevice::caps()
+ * \brief Retrieve the subdevice V4L2 capabilities
+ * \return The subdevice V4L2 capabilities
+ */
+
+/**
* \brief Create a new video subdevice instance from \a entity in media device
* \a media
* \param[in] media The media device where the entity is registered
* \param[in] entity The media entity name
*
- * Releasing memory of the newly created instance is responsibility of the
- * caller of this function.
- *
* \return A newly created V4L2Subdevice on success, nullptr otherwise
*/
-V4L2Subdevice *V4L2Subdevice::fromEntityName(const MediaDevice *media,
- const std::string &entity)
+std::unique_ptr<V4L2Subdevice>
+V4L2Subdevice::fromEntityName(const MediaDevice *media,
+ const std::string &entity)
{
MediaEntity *mediaEntity = media->getEntityByName(entity);
if (!mediaEntity)
return nullptr;
- return new V4L2Subdevice(mediaEntity);
+ return std::make_unique<V4L2Subdevice>(mediaEntity);
}
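
A brief usage sketch, assuming media points to a populated MediaDevice; the entity name is only an example:

std::unique_ptr<V4L2Subdevice> subdev =
	V4L2Subdevice::fromEntityName(media, "imx219 0-0010");
if (subdev && subdev->open() == 0) {
	/* Ready for use; the instance is released automatically. */
}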
std::string V4L2Subdevice::logPrefix() const
@@ -268,14 +1765,15 @@ std::string V4L2Subdevice::logPrefix() const
return "'" + entity_->name() + "'";
}
-std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
+std::vector<unsigned int> V4L2Subdevice::enumPadCodes(const Stream &stream)
{
std::vector<unsigned int> codes;
int ret;
for (unsigned int index = 0; ; index++) {
struct v4l2_subdev_mbus_code_enum mbusEnum = {};
- mbusEnum.pad = pad;
+ mbusEnum.pad = stream.pad;
+ mbusEnum.stream = stream.stream;
mbusEnum.index = index;
mbusEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -288,7 +1786,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
if (ret < 0 && ret != -EINVAL) {
LOG(V4L2, Error)
- << "Unable to enumerate formats on pad " << pad
+ << "Unable to enumerate formats on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -296,7 +1794,7 @@ std::vector<unsigned int> V4L2Subdevice::enumPadCodes(unsigned int pad)
return codes;
}
-std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
+std::vector<SizeRange> V4L2Subdevice::enumPadSizes(const Stream &stream,
unsigned int code)
{
std::vector<SizeRange> sizes;
@@ -305,7 +1803,8 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
for (unsigned int index = 0;; index++) {
struct v4l2_subdev_frame_size_enum sizeEnum = {};
sizeEnum.index = index;
- sizeEnum.pad = pad;
+ sizeEnum.pad = stream.pad;
+ sizeEnum.stream = stream.stream;
sizeEnum.code = code;
sizeEnum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -319,7 +1818,7 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
if (ret < 0 && ret != -EINVAL && ret != -ENOTTY) {
LOG(V4L2, Error)
- << "Unable to enumerate sizes on pad " << pad
+ << "Unable to enumerate sizes on pad " << stream
<< ": " << strerror(-ret);
return {};
}
@@ -327,35 +1826,4 @@ std::vector<SizeRange> V4L2Subdevice::enumPadSizes(unsigned int pad,
return sizes;
}
-int V4L2Subdevice::setSelection(unsigned int pad, unsigned int target,
- Rectangle *rect)
-{
- struct v4l2_subdev_selection sel = {};
-
- sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- sel.pad = pad;
- sel.target = target;
- sel.flags = 0;
-
- sel.r.left = rect->x;
- sel.r.top = rect->y;
- sel.r.width = rect->w;
- sel.r.height = rect->h;
-
- int ret = ioctl(VIDIOC_SUBDEV_S_SELECTION, &sel);
- if (ret < 0) {
- LOG(V4L2, Error)
- << "Unable to set rectangle " << target << " on pad "
- << pad << ": " << strerror(-ret);
- return ret;
- }
-
- rect->x = sel.r.left;
- rect->y = sel.r.top;
- rect->w = sel.r.width;
- rect->h = sel.r.height;
-
- return 0;
-}
-
} /* namespace libcamera */
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index eb33a68e..e241eb47 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -2,37 +2,40 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.cpp - V4L2 Video Device
+ * V4L2 Video Device
*/
-#include "v4l2_videodevice.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include <algorithm>
+#include <array>
#include <fcntl.h>
-#include <iomanip>
#include <sstream>
#include <string.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#include <vector>
-#include <linux/drm_fourcc.h>
#include <linux/version.h>
-#include <libcamera/event_notifier.h>
-#include <libcamera/file_descriptor.h>
+#include <libcamera/base/event_notifier.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/unique_fd.h>
+#include <libcamera/base/utils.h>
-#include "log.h"
-#include "media_device.h"
-#include "media_object.h"
-#include "utils.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
/**
* \file v4l2_videodevice.h
* \brief V4L2 Video Device
*/
+
namespace libcamera {
LOG_DECLARE_CATEGORY(V4L2)
@@ -137,6 +140,12 @@ LOG_DECLARE_CATEGORY(V4L2)
*/
/**
+ * \fn V4L2Capability::hasMediaController()
+ * \brief Determine if the video device uses Media Controller to configure I/O
+ * \return True if the video device is controlled by a Media Controller device
+ */
+
+/**
* \class V4L2BufferCache
* \brief Hot cache of associations between V4L2 buffer indexes and FrameBuffer
*
@@ -182,7 +191,7 @@ V4L2BufferCache::V4L2BufferCache(const std::vector<std::unique_ptr<FrameBuffer>>
for (const std::unique_ptr<FrameBuffer> &buffer : buffers)
cache_.emplace_back(true,
lastUsedCounter_.fetch_add(1, std::memory_order_acq_rel),
- buffer->planes());
+ *buffer.get());
}
V4L2BufferCache::~V4L2BufferCache()
@@ -192,6 +201,19 @@ V4L2BufferCache::~V4L2BufferCache()
}
/**
+ * \brief Check if all the entries in the cache are unused
+ * \return True if all cache entries are unused, false otherwise
+ */
+bool V4L2BufferCache::isEmpty() const
+{
+ for (auto const &entry : cache_) {
+ if (!entry.free_)
+ return false;
+ }
+
+ return true;
+}
+
+/**
* \brief Find the best V4L2 buffer for a FrameBuffer
* \param[in] buffer The FrameBuffer
*
@@ -213,7 +235,7 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
for (unsigned int index = 0; index < cache_.size(); index++) {
const Entry &entry = cache_[index];
- if (!entry.free)
+ if (!entry.free_)
continue;
/* Try to find a cache hit by comparing the planes. */
@@ -223,9 +245,9 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
break;
}
- if (entry.lastUsed < oldest) {
+ if (entry.lastUsed_ < oldest) {
use = index;
- oldest = entry.lastUsed;
+ oldest = entry.lastUsed_;
}
}
@@ -249,16 +271,16 @@ int V4L2BufferCache::get(const FrameBuffer &buffer)
void V4L2BufferCache::put(unsigned int index)
{
ASSERT(index < cache_.size());
- cache_[index].free = true;
+ cache_[index].free_ = true;
}
V4L2BufferCache::Entry::Entry()
- : free(true), lastUsed(0)
+ : free_(true), lastUsed_(0)
{
}
V4L2BufferCache::Entry::Entry(bool free, uint64_t lastUsed, const FrameBuffer &buffer)
- : free(free), lastUsed(lastUsed)
+ : free_(free), lastUsed_(lastUsed)
{
for (const FrameBuffer::Plane &plane : buffer.planes())
planes_.emplace_back(plane);
@@ -272,90 +294,13 @@ bool V4L2BufferCache::Entry::operator==(const FrameBuffer &buffer) const
return false;
for (unsigned int i = 0; i < planes.size(); i++)
- if (planes_[i].fd != planes[i].fd.fd() ||
+ if (planes_[i].fd != planes[i].fd.get() ||
planes_[i].length != planes[i].length)
return false;
return true;
}
/**
- * \class V4L2PixelFormat
- * \brief V4L2 pixel format FourCC wrapper
- *
- * The V4L2PixelFormat class describes the pixel format of a V4L2 buffer. It
- * wraps the V4L2 numerical FourCC, and shall be used in all APIs that deal with
- * V4L2 pixel formats. Its purpose is to prevent unintentional confusion of
- * V4L2 and DRM FourCCs in code by catching implicit conversion attempts at
- * compile time.
- *
- * To achieve this goal, construction of a V4L2PixelFormat from an integer value
- * is explicit. To retrieve the integer value of a V4L2PixelFormat, both the
- * explicit value() and implicit uint32_t conversion operators may be used.
- */
-
-/**
- * \fn V4L2PixelFormat::V4L2PixelFormat()
- * \brief Construct a V4L2PixelFormat with an invalid format
- *
- * V4L2PixelFormat instances constructed with the default constructor are
- * invalid, calling the isValid() function returns false.
- */
-
-/**
- * \fn V4L2PixelFormat::V4L2PixelFormat(uint32_t fourcc)
- * \brief Construct a V4L2PixelFormat from a FourCC value
- * \param[in] fourcc The pixel format FourCC numerical value
- */
-
-/**
- * \fn bool V4L2PixelFormat::isValid() const
- * \brief Check if the pixel format is valid
- *
- * V4L2PixelFormat instances constructed with the default constructor are
- * invalid. Instances constructed with a FourCC defined in the V4L2 API are
- * valid. The behaviour is undefined otherwise.
- *
- * \return True if the pixel format is valid, false otherwise
- */
-
-/**
- * \fn uint32_t V4L2PixelFormat::fourcc() const
- * \brief Retrieve the pixel format FourCC numerical value
- * \return The pixel format FourCC numerical value
- */
-
-/**
- * \fn V4L2PixelFormat::operator uint32_t() const
- * \brief Convert to the pixel format FourCC numerical value
- * \return The pixel format FourCC numerical value
- */
-
-/**
- * \brief Assemble and return a string describing the pixel format
- * \return A string describing the pixel format
- */
-std::string V4L2PixelFormat::toString() const
-{
- if (fourcc_ == 0)
- return "<INVALID>";
-
- char ss[8] = { static_cast<char>(fourcc_ & 0x7f),
- static_cast<char>((fourcc_ >> 8) & 0x7f),
- static_cast<char>((fourcc_ >> 16) & 0x7f),
- static_cast<char>((fourcc_ >> 24) & 0x7f) };
-
- for (unsigned int i = 0; i < 4; i++) {
- if (!isprint(ss[i]))
- ss[i] = '.';
- }
-
- if (fourcc_ & (1 << 31))
- strcat(ss, "-BE");
-
- return ss;
-}
-
-/**
* \class V4L2DeviceFormat
* \brief The V4L2 video device image format and sizes
*
@@ -429,6 +374,15 @@ std::string V4L2PixelFormat::toString() const
*/
/**
+ * \struct V4L2DeviceFormat::Plane
+ * \brief Per-plane memory size information
+ * \var V4L2DeviceFormat::Plane::size
+ * \brief The plane total memory size (in bytes)
+ * \var V4L2DeviceFormat::Plane::bpl
+ * \brief The plane line stride (in bytes)
+ */
+
+/**
* \var V4L2DeviceFormat::size
* \brief The image size in pixels
*/
@@ -442,6 +396,21 @@ std::string V4L2PixelFormat::toString() const
*/
/**
+ * \var V4L2DeviceFormat::colorSpace
+ * \brief The color space of the pixels
+ *
+ * The color space of the image. When setting the format this may be
+ * unset, in which case the driver gets to use its default color space.
+ * After being set, this value should contain the color space that
+ * was actually used. If this value is unset, then the color space chosen
+ * by the driver could not be represented by the ColorSpace class (and
+ * support for it should probably be added).
+ *
+ * It is up to the pipeline handler or application to check if the
+ * resulting color space is acceptable.
+ */
+
+/**
* \var V4L2DeviceFormat::planes
* \brief The per-plane memory size information
*
@@ -463,11 +432,25 @@ std::string V4L2PixelFormat::toString() const
const std::string V4L2DeviceFormat::toString() const
{
std::stringstream ss;
- ss << size.toString() << "-" << fourcc.toString();
+ ss << *this;
+
return ss.str();
}
/**
+ * \brief Insert a text representation of a V4L2DeviceFormat into an output
+ * stream
+ * \param[in] out The output stream
+ * \param[in] f The V4L2DeviceFormat
+ * \return The output stream \a out
+ */
+std::ostream &operator<<(std::ostream &out, const V4L2DeviceFormat &f)
+{
+ out << f.size << "-" << f.fourcc;
+ return out;
+}
+
+/**
* \class V4L2VideoDevice
* \brief V4L2VideoDevice object and API
*
@@ -540,11 +523,18 @@ const std::string V4L2DeviceFormat::toString() const
*/
/**
+ * \typedef V4L2VideoDevice::Formats
+ * \brief A map of supported V4L2 pixel formats to frame sizes
+ */
+
+/**
* \brief Construct a V4L2VideoDevice
* \param[in] deviceNode The file-system path to the video device node
*/
V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
- : V4L2Device(deviceNode), cache_(nullptr), fdEvent_(nullptr)
+ : V4L2Device(deviceNode), formatInfo_(nullptr), cache_(nullptr),
+ fdBufferNotifier_(nullptr), state_(State::Stopped),
+ watchdogDuration_(0.0)
{
/*
* We default to an MMAP based CAPTURE video device, however this will
@@ -563,6 +553,7 @@ V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode)
V4L2VideoDevice::V4L2VideoDevice(const MediaEntity *entity)
: V4L2VideoDevice(entity->deviceNode())
{
+ watchdog_.timeout.connect(this, &V4L2VideoDevice::watchdogExpired);
}
V4L2VideoDevice::~V4L2VideoDevice()
@@ -610,34 +601,41 @@ int V4L2VideoDevice::open()
* devices (POLLIN), and write notifications for OUTPUT video devices
* (POLLOUT).
*/
+ EventNotifier::Type notifierType;
+
if (caps_.isVideoCapture()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
: V4L2_BUF_TYPE_VIDEO_CAPTURE;
} else if (caps_.isVideoOutput()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
: V4L2_BUF_TYPE_VIDEO_OUTPUT;
} else if (caps_.isMetaCapture()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = V4L2_BUF_TYPE_META_CAPTURE;
} else if (caps_.isMetaOutput()) {
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = V4L2_BUF_TYPE_META_OUTPUT;
} else {
LOG(V4L2, Error) << "Device is not a supported type";
return -EINVAL;
}
- fdEvent_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_ = new EventNotifier(fd(), notifierType);
+ fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
+ fdBufferNotifier_->setEnabled(false);
LOG(V4L2, Debug)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
+ ret = initFormats();
+ if (ret)
+ return ret;
+
return 0;
}
@@ -647,37 +645,33 @@ int V4L2VideoDevice::open()
* \param[in] handle The file descriptor to set
* \param[in] type The device type to operate on
*
- * This methods opens a video device from the existing file descriptor \a
- * handle. Like open(), this method queries the capabilities of the device, but
- * handles it according to the given device \a type instead of determining its
- * type from the capabilities. This can be used to force a given device type for
- * memory-to-memory devices.
+ * This function opens a video device from the existing file descriptor \a
+ * handle. Like open(), this function queries the capabilities of the device,
+ * but handles it according to the given device \a type instead of determining
+ * its type from the capabilities. This can be used to force a given device type
+ * for memory-to-memory devices.
*
- * The file descriptor \a handle is duplicated, and the caller is responsible
- * for closing the \a handle when it has no further use for it. The close()
- * method will close the duplicated file descriptor, leaving \a handle
- * untouched.
+ * The file descriptor \a handle is duplicated, no reference to the original
+ * handle is kept.
*
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
+int V4L2VideoDevice::open(SharedFD handle, enum v4l2_buf_type type)
{
int ret;
- int newFd;
- newFd = dup(handle);
- if (newFd < 0) {
+ UniqueFD newFd = handle.dup();
+ if (!newFd.isValid()) {
ret = -errno;
LOG(V4L2, Error) << "Failed to duplicate file handle: "
<< strerror(-ret);
return ret;
}
- ret = V4L2Device::setFd(newFd);
+ ret = V4L2Device::setFd(std::move(newFd));
if (ret < 0) {
LOG(V4L2, Error) << "Failed to set file handle: "
<< strerror(-ret);
- ::close(newFd);
return ret;
}
@@ -699,15 +693,17 @@ int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
* devices (POLLIN), and write notifications for OUTPUT video devices
* (POLLOUT).
*/
+ EventNotifier::Type notifierType;
+
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Write);
+ notifierType = EventNotifier::Write;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
: V4L2_BUF_TYPE_VIDEO_OUTPUT;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- fdEvent_ = new EventNotifier(fd(), EventNotifier::Read);
+ notifierType = EventNotifier::Read;
bufferType_ = caps_.isMultiplanar()
? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
: V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -717,13 +713,39 @@ int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type)
return -EINVAL;
}
- fdEvent_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_ = new EventNotifier(fd(), notifierType);
+ fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable);
+ fdBufferNotifier_->setEnabled(false);
LOG(V4L2, Debug)
<< "Opened device " << caps_.bus_info() << ": "
<< caps_.driver() << ": " << caps_.card();
+ ret = initFormats();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int V4L2VideoDevice::initFormats()
+{
+ const std::vector<V4L2PixelFormat> &deviceFormats = enumPixelformats(0);
+ if (deviceFormats.empty()) {
+ LOG(V4L2, Error) << "Failed to initialize device formats";
+ return -EINVAL;
+ }
+
+ pixelFormats_ = { deviceFormats.begin(), deviceFormats.end() };
+
+ int ret = getFormat(&format_);
+ if (ret) {
+ LOG(V4L2, Error) << "Failed to get format";
+ return ret;
+ }
+
+ formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
+
return 0;
}
@@ -736,7 +758,9 @@ void V4L2VideoDevice::close()
return;
releaseBuffers();
- delete fdEvent_;
+ delete fdBufferNotifier_;
+
+ formatInfo_ = nullptr;
V4L2Device::close();
}
@@ -759,9 +783,16 @@ void V4L2VideoDevice::close()
* \return The string containing the device location
*/
+/**
+ * \fn V4L2VideoDevice::caps()
+ * \brief Retrieve the device V4L2 capabilities
+ * \return The device V4L2 capabilities
+ */
+
std::string V4L2VideoDevice::logPrefix() const
{
- return deviceNode() + (V4L2_TYPE_IS_OUTPUT(bufferType_) ? "[out]" : "[cap]");
+ return deviceNode() + "[" + std::to_string(fd()) +
+ (V4L2_TYPE_IS_OUTPUT(bufferType_) ? ":out]" : ":cap]");
}
/**
@@ -771,12 +802,46 @@ std::string V4L2VideoDevice::logPrefix() const
*/
int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return getFormatMeta(format);
- else if (caps_.isMultiplanar())
- return getFormatMultiplane(format);
- else
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return getFormatSingleplane(format);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return getFormatMultiplane(format);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return getFormatMeta(format);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * \brief Try an image format on the V4L2 video device
+ * \param[inout] format The image format to test against the video device
+ *
+ * Try the supplied \a format on the video device without applying it. On
+ * return, \a format contains the format that the device would apply. This is
+ * equivalent to setFormat(), except that the device configuration is not
+ * changed.
+ *
+ * \return 0 on success or a negative error code otherwise
+ */
+int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format)
+{
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return trySetFormatSingleplane(format, false);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return trySetFormatMultiplane(format, false);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return trySetFormatMeta(format, false);
+ default:
+ return -EINVAL;
+ }
}
/**
@@ -790,18 +855,40 @@ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format)
*/
int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
{
- if (caps_.isMeta())
- return setFormatMeta(format);
- else if (caps_.isMultiplanar())
- return setFormatMultiplane(format);
- else
- return setFormatSingleplane(format);
+ int ret;
+
+ switch (bufferType_) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ ret = trySetFormatSingleplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ ret = trySetFormatMultiplane(format, true);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ ret = trySetFormatMeta(format, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Cache the set format on success. */
+ if (ret)
+ return ret;
+
+ format_ = *format;
+ formatInfo_ = &PixelFormatInfo::info(format_.fourcc);
+
+ return 0;
}
int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
@@ -811,28 +898,47 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
return ret;
}
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
+ format->planes[0].size = meta->buffersize;
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
+
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
-int V4L2VideoDevice::setFormatMeta(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
{
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
- pix->dataformat = format->fourcc;
- pix->buffersize = format->planes[0].size;
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ meta->dataformat = format->fourcc;
+ meta->buffersize = format->planes[0].size;
+ if (genericLineBased) {
+ meta->width = format->size.width;
+ meta->height = format->size.height;
+ meta->bytesperline = format->planes[0].bpl;
+ }
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -840,16 +946,29 @@ int V4L2VideoDevice::setFormatMeta(V4L2DeviceFormat *format)
* Return to caller the format actually applied on the video device,
* which might differ from the requested one.
*/
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = format->fourcc;
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+ format->planes[0].size = meta->buffersize;
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
+template<typename T>
+std::optional<ColorSpace> V4L2VideoDevice::toColorSpace(const T &v4l2Format)
+{
+ V4L2PixelFormat fourcc{ v4l2Format.pixelformat };
+ return V4L2Device::toColorSpace(v4l2Format, PixelFormatInfo::info(fourcc).colourEncoding);
+}
+
int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
@@ -867,6 +986,7 @@ int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
format->size.height = pix->height;
format->fourcc = V4L2PixelFormat(pix->pixelformat);
format->planesCount = pix->num_planes;
+ format->colorSpace = toColorSpace(*pix);
for (unsigned int i = 0; i < format->planesCount; ++i) {
format->planes[i].bpl = pix->plane_fmt[i].bytesperline;
@@ -876,7 +996,7 @@ int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format)
return 0;
}
-int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set)
{
struct v4l2_format v4l2Format = {};
struct v4l2_pix_format_mplane *pix = &v4l2Format.fmt.pix_mp;
@@ -888,15 +1008,25 @@ int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
pix->pixelformat = format->fourcc;
pix->num_planes = format->planesCount;
pix->field = V4L2_FIELD_NONE;
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
+
+ ASSERT(pix->num_planes <= std::size(pix->plane_fmt));
for (unsigned int i = 0; i < pix->num_planes; ++i) {
pix->plane_fmt[i].bytesperline = format->planes[i].bpl;
pix->plane_fmt[i].sizeimage = format->planes[i].size;
}
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -912,6 +1042,7 @@ int V4L2VideoDevice::setFormatMultiplane(V4L2DeviceFormat *format)
format->planes[i].bpl = pix->plane_fmt[i].bytesperline;
format->planes[i].size = pix->plane_fmt[i].sizeimage;
}
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
@@ -935,11 +1066,12 @@ int V4L2VideoDevice::getFormatSingleplane(V4L2DeviceFormat *format)
format->planesCount = 1;
format->planes[0].bpl = pix->bytesperline;
format->planes[0].size = pix->sizeimage;
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
-int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
+int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set)
{
struct v4l2_format v4l2Format = {};
struct v4l2_pix_format *pix = &v4l2Format.fmt.pix;
@@ -951,9 +1083,18 @@ int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
pix->pixelformat = format->fourcc;
pix->bytesperline = format->planes[0].bpl;
pix->field = V4L2_FIELD_NONE;
- ret = ioctl(VIDIOC_S_FMT, &v4l2Format);
+ if (format->colorSpace) {
+ fromColorSpace(format->colorSpace, *pix);
+
+ if (caps_.isVideoCapture())
+ pix->flags |= V4L2_PIX_FMT_FLAG_SET_CSC;
+ }
+
+ ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
- LOG(V4L2, Error) << "Unable to set format: " << strerror(-ret);
+ LOG(V4L2, Error)
+ << "Unable to " << (set ? "set" : "try")
+ << " format: " << strerror(-ret);
return ret;
}
@@ -967,22 +1108,26 @@ int V4L2VideoDevice::setFormatSingleplane(V4L2DeviceFormat *format)
format->planesCount = 1;
format->planes[0].bpl = pix->bytesperline;
format->planes[0].size = pix->sizeimage;
+ format->colorSpace = toColorSpace(*pix);
return 0;
}
/**
* \brief Enumerate all pixel formats and frame sizes
+ * \param[in] code The media bus code to restrict formats to, or 0 for all formats
*
* Enumerate all pixel formats and frame sizes supported by the video device.
+ * If the \a code argument is not zero, only formats compatible with that media
+ * bus code will be enumerated.
*
* \return A list of the supported video device formats
*/
-std::map<V4L2PixelFormat, std::vector<SizeRange>> V4L2VideoDevice::formats()
+V4L2VideoDevice::Formats V4L2VideoDevice::formats(uint32_t code)
{
- std::map<V4L2PixelFormat, std::vector<SizeRange>> formats;
+ Formats formats;
- for (V4L2PixelFormat pixelFormat : enumPixelformats()) {
+ for (V4L2PixelFormat pixelFormat : enumPixelformats(code)) {
std::vector<SizeRange> sizes = enumSizes(pixelFormat);
if (sizes.empty())
return {};
@@ -1000,15 +1145,22 @@ std::map<V4L2PixelFormat, std::vector<SizeRange>> V4L2VideoDevice::formats()
return formats;
}
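
For illustration (assuming an opened device dev), the returned map can be walked to log every supported combination:

	for (const auto &[pixelFormat, sizes] : dev->formats()) {
		for (const SizeRange &range : sizes)
			LOG(V4L2, Debug) << pixelFormat.toString()
					 << ": " << range.toString();
	}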
-std::vector<V4L2PixelFormat> V4L2VideoDevice::enumPixelformats()
+std::vector<V4L2PixelFormat> V4L2VideoDevice::enumPixelformats(uint32_t code)
{
std::vector<V4L2PixelFormat> formats;
int ret;
+ if (code && !caps_.hasMediaController()) {
+ LOG(V4L2, Error)
+ << "Media bus code filtering not supported by the device";
+ return {};
+ }
+
for (unsigned int index = 0; ; index++) {
struct v4l2_fmtdesc pixelformatEnum = {};
pixelformatEnum.index = index;
pixelformatEnum.type = bufferType_;
+ pixelformatEnum.mbus_code = code;
ret = ioctl(VIDIOC_ENUM_FMT, &pixelformatEnum);
if (ret)
@@ -1086,25 +1238,46 @@ std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat)
}
/**
- * \brief Set a crop rectangle on the V4L2 video device node
- * \param[inout] rect The rectangle describing the crop target area
+ * \brief Get the selection rectangle for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[out] rect The selection rectangle to retrieve
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::setCrop(Rectangle *rect)
+int V4L2VideoDevice::getSelection(unsigned int target, Rectangle *rect)
{
- return setSelection(V4L2_SEL_TGT_CROP, rect);
+ struct v4l2_selection sel = {};
+
+ sel.type = bufferType_;
+ sel.target = target;
+ sel.flags = 0;
+
+ int ret = ioctl(VIDIOC_G_SELECTION, &sel);
+ if (ret < 0) {
+ LOG(V4L2, Error) << "Unable to get rectangle " << target
+ << ": " << strerror(-ret);
+ return ret;
+ }
+
+ rect->x = sel.r.left;
+ rect->y = sel.r.top;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
+
+ return 0;
}
/**
- * \brief Set a compose rectangle on the V4L2 video device node
- * \param[inout] rect The rectangle describing the compose target area
+ * \brief Set a selection rectangle \a rect for \a target
+ * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags
+ * \param[inout] rect The selection rectangle to be applied
+ *
+ * \todo Define a V4L2SelectionTarget enum for the selection target
+ *
* \return 0 on success or a negative error code otherwise
*/
-int V4L2VideoDevice::setCompose(Rectangle *rect)
-{
- return setSelection(V4L2_SEL_TGT_COMPOSE, rect);
-}
-
int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
{
struct v4l2_selection sel = {};
@@ -1115,8 +1288,8 @@ int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
sel.r.left = rect->x;
sel.r.top = rect->y;
- sel.r.width = rect->w;
- sel.r.height = rect->h;
+ sel.r.width = rect->width;
+ sel.r.height = rect->height;
int ret = ioctl(VIDIOC_S_SELECTION, &sel);
if (ret < 0) {
@@ -1127,8 +1300,8 @@ int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect)
rect->x = sel.r.left;
rect->y = sel.r.top;
- rect->w = sel.r.width;
- rect->h = sel.r.height;
+ rect->width = sel.r.width;
+ rect->height = sel.r.height;
return 0;
}
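
A sketch of how a pipeline handler might use the two selection functions together (the target choice and sizes are illustrative):

	Rectangle bounds;
	if (dev->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &bounds) == 0) {
		/* Centre a half-size crop within the supported bounds. */
		Rectangle crop(bounds.width / 4, bounds.height / 4,
			       bounds.width / 2, bounds.height / 2);
		dev->setSelection(V4L2_SEL_TGT_CROP, &crop);
		/* crop now holds the rectangle the driver actually applied. */
	}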
@@ -1174,8 +1347,13 @@ int V4L2VideoDevice::requestBuffers(unsigned int count,
* successful return the driver's internal buffer management is initialized in
* MMAP mode, and the video device is ready to accept queueBuffer() calls.
*
- * The number of planes and the plane sizes for the allocation are determined
- * by the currently active format on the device as set by setFormat().
+ * The number of planes and their offsets and sizes are determined by the
+ * currently active format on the device as set by setFormat(). They do not map
+ * to the V4L2 buffer planes, but to colour planes of the pixel format. For
+ * instance, if the active format is formats::NV12, the allocated FrameBuffer
+ * instances will have two planes, for the luma and chroma components,
+ * regardless of whether the device uses V4L2_PIX_FMT_NV12 or
+ * V4L2_PIX_FMT_NV12M.
*
* Buffers allocated with this function shall later be free with
* releaseBuffers(). If buffers have already been allocated with
@@ -1212,8 +1390,13 @@ int V4L2VideoDevice::allocateBuffers(unsigned int count,
* usable with any V4L2 video device in DMABUF mode, or with other dmabuf
* importers.
*
- * The number of planes and the plane sizes for the allocation are determined
- * by the currently active format on the device as set by setFormat().
+ * The number of planes and their offsets and sizes are determined by the
+ * currently active format on the device as set by setFormat(). They do not map
+ * to the V4L2 buffer planes, but to colour planes of the pixel format. For
+ * instance, if the active format is formats::NV12, the allocated FrameBuffer
+ * instances will have two planes, for the luma and chroma components,
+ * regardless of whether the device uses V4L2_PIX_FMT_NV12 or
+ * V4L2_PIX_FMT_NV12M.
*
* Multiple independent sets of buffers can be allocated with multiple calls to
* this function. Device-specific limitations may apply regarding the minimum
@@ -1278,8 +1461,7 @@ std::unique_ptr<FrameBuffer> V4L2VideoDevice::createBuffer(unsigned int index)
buf.index = index;
buf.type = bufferType_;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.length = ARRAY_SIZE(v4l2Planes);
+ buf.length = std::size(v4l2Planes);
buf.m.planes = v4l2Planes;
int ret = ioctl(VIDIOC_QUERYBUF, &buf);
@@ -1300,23 +1482,68 @@ std::unique_ptr<FrameBuffer> V4L2VideoDevice::createBuffer(unsigned int index)
std::vector<FrameBuffer::Plane> planes;
for (unsigned int nplane = 0; nplane < numPlanes; nplane++) {
- FileDescriptor fd = exportDmabufFd(buf.index, nplane);
+ UniqueFD fd = exportDmabufFd(buf.index, nplane);
if (!fd.isValid())
return nullptr;
FrameBuffer::Plane plane;
- plane.fd = std::move(fd);
- plane.length = multiPlanar ?
- buf.m.planes[nplane].length : buf.length;
+ plane.fd = SharedFD(std::move(fd));
+ /*
+		 * The V4L2 API doesn't provide the dmabuf offset of a plane,
+		 * so use 0 as a placeholder.
+		 * \todo Set the right offset once the V4L2 API provides a way.
+ */
+ plane.offset = 0;
+ plane.length = multiPlanar ? buf.m.planes[nplane].length : buf.length;
planes.push_back(std::move(plane));
}
- return std::make_unique<FrameBuffer>(std::move(planes));
+ /*
+ * If we have a multi-planar format with a V4L2 single-planar buffer,
+ * split the single V4L2 plane into multiple FrameBuffer planes by
+ * computing the offsets manually.
+ *
+ * The format info is not guaranteed to be valid, as there are no
+ * PixelFormatInfo for metadata formats, so check it first.
+ */
+ if (formatInfo_->isValid() && formatInfo_->numPlanes() != numPlanes) {
+ /*
+ * There's no valid situation where the number of colour planes
+ * differs from the number of V4L2 planes and the V4L2 buffer
+ * has more than one plane.
+ */
+ ASSERT(numPlanes == 1u);
+
+ planes.resize(formatInfo_->numPlanes());
+ const SharedFD &fd = planes[0].fd;
+ size_t offset = 0;
+
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ /*
+ * The stride is reported by V4L2 for the first plane
+ * only. Compute the stride of the other planes by
+ * taking the horizontal subsampling factor into
+ * account, which is equal to the bytesPerGroup ratio of
+ * the planes.
+ */
+ unsigned int stride = format_.planes[0].bpl
+ * formatInfo_->planes[i].bytesPerGroup
+ / formatInfo_->planes[0].bytesPerGroup;
+
+ plane.fd = fd;
+ plane.offset = offset;
+ plane.length = formatInfo_->planeSize(format_.size.height,
+ i, stride);
+ offset += plane.length;
+ }
+ }
+
+ return std::make_unique<FrameBuffer>(planes);
}
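
As a worked example of the plane-splitting logic above (the sizes are illustrative): for a 1920x1080 formats::NV12 buffer where V4L2 reports a single plane with a 1920-byte stride, the luma plane gets offset 0 and length 1920 x 1080 = 2073600 bytes. NV12's chroma plane has the same bytesPerGroup as the luma plane, so its stride is also 1920 bytes, and with 2x vertical subsampling its length is 1920 x 540 = 1036800 bytes, at offset 2073600.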
-FileDescriptor V4L2VideoDevice::exportDmabufFd(unsigned int index,
- unsigned int plane)
+UniqueFD V4L2VideoDevice::exportDmabufFd(unsigned int index,
+ unsigned int plane)
{
struct v4l2_exportbuffer expbuf = {};
int ret;
@@ -1324,16 +1551,16 @@ FileDescriptor V4L2VideoDevice::exportDmabufFd(unsigned int index,
expbuf.type = bufferType_;
expbuf.index = index;
expbuf.plane = plane;
- expbuf.flags = O_RDWR;
+ expbuf.flags = O_CLOEXEC | O_RDWR;
ret = ioctl(VIDIOC_EXPBUF, &expbuf);
if (ret < 0) {
LOG(V4L2, Error)
<< "Failed to export buffer: " << strerror(-ret);
- return FileDescriptor();
+ return {};
}
- return FileDescriptor(expbuf.fd);
+ return UniqueFD(expbuf.fd);
}
/**
@@ -1386,6 +1613,9 @@ int V4L2VideoDevice::importBuffers(unsigned int count)
*/
int V4L2VideoDevice::releaseBuffers()
{
+ if (!cache_)
+ return 0;
+
LOG(V4L2, Debug) << "Releasing buffers";
delete cache_;
@@ -1406,6 +1636,9 @@ int V4L2VideoDevice::releaseBuffers()
* The best available V4L2 buffer is picked for \a buffer using the V4L2 buffer
* cache.
*
+ * Note that queueBuffer() will fail if the device is in the process of being
+ * stopped from a streaming state through streamOff().
+ *
* \return 0 on success or a negative error code otherwise
*/
int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
@@ -1414,6 +1647,21 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
struct v4l2_buffer buf = {};
int ret;
+ if (state_ == State::Stopping) {
+ LOG(V4L2, Error) << "Device is in a stopping state.";
+ return -ESHUTDOWN;
+ }
+
+ /*
+ * Pipeline handlers should not requeue buffers after releasing the
+	 * buffers on the device. Any occurrence of this error should be fixed
+ * in the pipeline handler directly.
+ */
+ if (!cache_) {
+ LOG(V4L2, Fatal) << "No BufferCache available to queue.";
+ return -ENOENT;
+ }
+
ret = cache_->get(*buffer);
if (ret < 0)
return ret;
@@ -1425,37 +1673,99 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
bool multiPlanar = V4L2_TYPE_IS_MULTIPLANAR(buf.type);
const std::vector<FrameBuffer::Plane> &planes = buffer->planes();
+ const unsigned int numV4l2Planes = format_.planesCount;
+
+ /*
+ * Ensure that the frame buffer has enough planes, and that they're
+ * contiguous if the V4L2 format requires them to be.
+ */
+ if (planes.size() < numV4l2Planes) {
+ LOG(V4L2, Error) << "Frame buffer has too few planes";
+ return -EINVAL;
+ }
+
+ if (planes.size() != numV4l2Planes && !buffer->_d()->isContiguous()) {
+ LOG(V4L2, Error) << "Device format requires contiguous buffer";
+ return -EINVAL;
+ }
if (buf.memory == V4L2_MEMORY_DMABUF) {
if (multiPlanar) {
- for (unsigned int p = 0; p < planes.size(); ++p)
- v4l2Planes[p].m.fd = planes[p].fd.fd();
+ for (unsigned int p = 0; p < numV4l2Planes; ++p)
+ v4l2Planes[p].m.fd = planes[p].fd.get();
} else {
- buf.m.fd = planes[0].fd.fd();
+ buf.m.fd = planes[0].fd.get();
}
}
if (multiPlanar) {
- buf.length = planes.size();
+ buf.length = numV4l2Planes;
buf.m.planes = v4l2Planes;
}
if (V4L2_TYPE_IS_OUTPUT(buf.type)) {
const FrameMetadata &metadata = buffer->metadata();
- if (multiPlanar) {
- unsigned int nplane = 0;
- for (const FrameMetadata::Plane &plane : metadata.planes) {
- v4l2Planes[nplane].bytesused = plane.bytesused;
- v4l2Planes[nplane].length = buffer->planes()[nplane].length;
- nplane++;
+ for (const auto &plane : metadata.planes()) {
+ if (!plane.bytesused)
+				LOG(V4L2, Warning) << "bytesused == 0 is deprecated";
+ }
+
+ if (numV4l2Planes != planes.size()) {
+ /*
+ * If we have a multi-planar buffer with a V4L2
+ * single-planar format, coalesce all planes. The length
+ * and number of bytes used may only differ in the last
+ * plane as any other situation can't be represented.
+ */
+ unsigned int bytesused = 0;
+ unsigned int length = 0;
+
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ bytesused += metadata.planes()[i].bytesused;
+ length += plane.length;
+
+ if (i != planes.size() - 1 && bytesused != length) {
+ LOG(V4L2, Error)
+ << "Holes in multi-planar buffer not supported";
+ return -EINVAL;
+ }
+ }
+
+ if (multiPlanar) {
+ v4l2Planes[0].bytesused = bytesused;
+ v4l2Planes[0].length = length;
+ } else {
+ buf.bytesused = bytesused;
+ buf.length = length;
+ }
+ } else if (multiPlanar) {
+ /*
+ * If we use the multi-planar API, fill in the planes.
+ * The number of planes in the frame buffer and in the
+ * V4L2 buffer is guaranteed to be equal at this point.
+ */
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ v4l2Planes[i].bytesused = metadata.planes()[i].bytesused;
+ v4l2Planes[i].length = plane.length;
}
} else {
- if (metadata.planes.size())
- buf.bytesused = metadata.planes[0].bytesused;
+ /*
+ * Single-planar API with a single plane in the buffer
+ * is trivial to handle.
+ */
+ buf.bytesused = metadata.planes()[0].bytesused;
+ buf.length = planes[0].length;
}
- buf.sequence = metadata.sequence;
+ /*
+ * Timestamps are to be supplied if the device is a mem-to-mem
+ * device. The drivers will have V4L2_BUF_FLAG_TIMESTAMP_COPY
+		 * set, hence these timestamps will be copied from the output
+		 * buffers to capture buffers. If the device is not mem-to-mem,
+		 * there is no harm in setting the timestamps as they will be
+		 * ignored (and overwritten).
+ */
buf.timestamp.tv_sec = metadata.timestamp / 1000000000;
buf.timestamp.tv_usec = (metadata.timestamp / 1000) % 1000000;
}
@@ -1470,8 +1780,11 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
return ret;
}
- if (queuedBuffers_.empty())
- fdEvent_->setEnabled(true);
+ if (queuedBuffers_.empty()) {
+ fdBufferNotifier_->setEnabled(true);
+ if (watchdogDuration_)
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
queuedBuffers_[buf.index] = buffer;
@@ -1480,7 +1793,6 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
/**
* \brief Slot to handle completed buffer events from the V4L2 video device
- * \param[in] notifier The event notifier
*
* When this slot is called, a Buffer has become available from the device, and
* will be emitted through the bufferReady Signal.
@@ -1488,7 +1800,7 @@ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer)
* For Capture video devices the FrameBuffer will contain valid data.
* For Output video devices the FrameBuffer can be considered empty.
*/
-void V4L2VideoDevice::bufferAvailable(EventNotifier *notifier)
+void V4L2VideoDevice::bufferAvailable()
{
FrameBuffer *buffer = dequeueBuffer();
if (!buffer)
@@ -1501,8 +1813,8 @@ void V4L2VideoDevice::bufferAvailable(EventNotifier *notifier)
/**
* \brief Dequeue the next available buffer from the video device
*
- * This method dequeues the next available buffer from the device. If no buffer
- * is available to be dequeued it will return nullptr immediately.
+ * This function dequeues the next available buffer from the device. If no
+ * buffer is available to be dequeued it will return nullptr immediately.
*
* \return A pointer to the dequeued buffer on success, or nullptr otherwise
*/
@@ -1531,28 +1843,124 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer()
LOG(V4L2, Debug) << "Dequeuing buffer " << buf.index;
+ /*
+ * If the video node fails to stream-on successfully (which can occur
+	 * when queuing a buffer), a vb2 kernel bug can cause the buffer that
+	 * failed to queue to be mistakenly kept in the kernel. The kernel then
+	 * notifies us that a buffer is available for dequeueing which we never
+	 * recorded as queued, and thus we will not find it in the
+	 * queuedBuffers_ list.
+	 *
+	 * Whilst this kernel bug has been fixed in mainline, ensure that
+	 * unexpected buffers are safely ignored to prevent crashes on older
+	 * kernels.
+ */
+ auto it = queuedBuffers_.find(buf.index);
+ if (it == queuedBuffers_.end()) {
+ LOG(V4L2, Error)
+ << "Dequeued unexpected buffer index " << buf.index;
+
+ return nullptr;
+ }
+
cache_->put(buf.index);
- auto it = queuedBuffers_.find(buf.index);
FrameBuffer *buffer = it->second;
queuedBuffers_.erase(it);
- if (queuedBuffers_.empty())
- fdEvent_->setEnabled(false);
+ if (queuedBuffers_.empty()) {
+ fdBufferNotifier_->setEnabled(false);
+ watchdog_.stop();
+ } else if (watchdogDuration_) {
+ /*
+ * Restart the watchdog timer if there are buffers still queued
+ * in the device.
+ */
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+ }
- buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR
- ? FrameMetadata::FrameError
- : FrameMetadata::FrameSuccess;
- buffer->metadata_.sequence = buf.sequence;
- buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL
- + buf.timestamp.tv_usec * 1000ULL;
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.planes.clear();
- if (multiPlanar) {
- for (unsigned int nplane = 0; nplane < buf.length; nplane++)
- buffer->metadata_.planes.push_back({ planes[nplane].bytesused });
+ metadata.status = buf.flags & V4L2_BUF_FLAG_ERROR
+ ? FrameMetadata::FrameError
+ : FrameMetadata::FrameSuccess;
+ metadata.sequence = buf.sequence;
+ metadata.timestamp = buf.timestamp.tv_sec * 1000000000ULL
+ + buf.timestamp.tv_usec * 1000ULL;
+
+ if (V4L2_TYPE_IS_OUTPUT(buf.type))
+ return buffer;
+
+ /*
+ * Detect kernel drivers which do not reset the sequence number to zero
+ * on stream start.
+ */
+ if (!firstFrame_.has_value()) {
+ if (buf.sequence)
+ LOG(V4L2, Info)
+ << "Zero sequence expected for first frame (got "
+ << buf.sequence << ")";
+ firstFrame_ = buf.sequence;
+ }
+ metadata.sequence -= firstFrame_.value();
+
+ unsigned int numV4l2Planes = multiPlanar ? buf.length : 1;
+
+ if (numV4l2Planes != buffer->planes().size()) {
+ /*
+ * If we have a multi-planar buffer with a V4L2
+ * single-planar format, split the V4L2 buffer across
+		 * the buffer planes. Only the last plane may have fewer
+		 * bytes used than its length.
+ */
+ if (numV4l2Planes != 1) {
+ LOG(V4L2, Error)
+ << "Invalid number of planes (" << numV4l2Planes
+ << " != " << buffer->planes().size() << ")";
+
+ metadata.status = FrameMetadata::FrameError;
+ return buffer;
+ }
+
+ /*
+ * With a V4L2 single-planar format, all the data is stored in
+ * a single memory plane. The number of bytes used is conveyed
+ * through that plane when using the V4L2 multi-planar API, or
+ * set directly in the buffer when using the V4L2 single-planar
+ * API.
+ */
+ unsigned int bytesused = multiPlanar ? planes[0].bytesused
+ : buf.bytesused;
+ unsigned int remaining = bytesused;
+
+ for (auto [i, plane] : utils::enumerate(buffer->planes())) {
+ if (!remaining) {
+ LOG(V4L2, Error)
+ << "Dequeued buffer (" << bytesused
+ << " bytes) too small for plane lengths "
+ << utils::join(buffer->planes(), "/",
+ [](const FrameBuffer::Plane &p) {
+ return p.length;
+ });
+
+ metadata.status = FrameMetadata::FrameError;
+ return buffer;
+ }
+
+ metadata.planes()[i].bytesused =
+ std::min(plane.length, remaining);
+ remaining -= metadata.planes()[i].bytesused;
+ }
+ } else if (multiPlanar) {
+ /*
+ * If we use the multi-planar API, fill in the planes.
+ * The number of planes in the frame buffer and in the
+ * V4L2 buffer is guaranteed to be equal at this point.
+ */
+ for (unsigned int i = 0; i < numV4l2Planes; ++i)
+ metadata.planes()[i].bytesused = planes[i].bytesused;
} else {
- buffer->metadata_.planes.push_back({ buf.bytesused });
+ metadata.planes()[0].bytesused = buf.bytesused;
}
return buffer;
@@ -1571,6 +1979,8 @@ int V4L2VideoDevice::streamOn()
{
int ret;
+ firstFrame_.reset();
+
ret = ioctl(VIDIOC_STREAMON, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1578,6 +1988,10 @@ int V4L2VideoDevice::streamOn()
return ret;
}
+ state_ = State::Streaming;
+ if (watchdogDuration_ && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(watchdogDuration_));
+
return 0;
}
@@ -1589,12 +2003,21 @@ int V4L2VideoDevice::streamOn()
* and the bufferReady signal is emitted for them. The order in which those
* buffers are dequeued is not specified.
*
+ * Calling this function when the stream is not started and no buffers are
+ * queued is a no-op.
+ *
* \return 0 on success or a negative error code otherwise
*/
int V4L2VideoDevice::streamOff()
{
int ret;
+ if (state_ != State::Streaming && queuedBuffers_.empty())
+ return 0;
+
+ if (watchdogDuration_.count())
+ watchdog_.stop();
+
ret = ioctl(VIDIOC_STREAMOFF, &bufferType_);
if (ret < 0) {
LOG(V4L2, Error)
@@ -1602,189 +2025,121 @@ int V4L2VideoDevice::streamOff()
return ret;
}
+ state_ = State::Stopping;
+
/* Send back all queued buffers. */
for (auto it : queuedBuffers_) {
FrameBuffer *buffer = it.second;
+ FrameMetadata &metadata = buffer->_d()->metadata();
- buffer->metadata_.status = FrameMetadata::FrameCancelled;
+ cache_->put(it.first);
+ metadata.status = FrameMetadata::FrameCancelled;
bufferReady.emit(buffer);
}
+ ASSERT(cache_->isEmpty());
+
queuedBuffers_.clear();
- fdEvent_->setEnabled(false);
+ fdBufferNotifier_->setEnabled(false);
+ state_ = State::Stopped;
return 0;
}
/**
- * \brief Create a new video device instance from \a entity in media device
- * \a media
- * \param[in] media The media device where the entity is registered
- * \param[in] entity The media entity name
+ * \brief Set the dequeue timeout value
+ * \param[in] timeout The timeout value to be used
*
- * Releasing memory of the newly created instance is responsibility of the
- * caller of this function.
+ * Sets a timeout value, given by \a timeout, that will be used by a watchdog
+ * timer to ensure buffer dequeue events are periodically occurring when the
+ * device is streaming. The watchdog timer is only active when the device is
+ * streaming, so it is not necessary to disable it when the device stops
+ * streaming. The timeout value can be safely updated at any time.
*
- * \return A newly created V4L2VideoDevice on success, nullptr otherwise
+ * If the timer expires, the \ref V4L2VideoDevice::dequeueTimeout signal is
+ * emitted. This can typically be used by pipeline handlers to be notified of
+ * stalled devices.
+ *
+ * Set \a timeout to 0 to disable the watchdog timer.
*/
-V4L2VideoDevice *V4L2VideoDevice::fromEntityName(const MediaDevice *media,
- const std::string &entity)
+void V4L2VideoDevice::setDequeueTimeout(utils::Duration timeout)
{
- MediaEntity *mediaEntity = media->getEntityByName(entity);
- if (!mediaEntity)
- return nullptr;
+ watchdogDuration_ = timeout;
- return new V4L2VideoDevice(mediaEntity);
+ watchdog_.stop();
+ if (watchdogDuration_ && state_ == State::Streaming && !queuedBuffers_.empty())
+ watchdog_.start(std::chrono::duration_cast<std::chrono::milliseconds>(timeout));
}
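
A short sketch of how a pipeline handler could arm the watchdog (handleStall is a hypothetical slot and the 500ms value is arbitrary):

	using namespace std::chrono_literals;

	video_->dequeueTimeout.connect(this, &MyPipelineHandler::handleStall);
	video_->setDequeueTimeout(500ms);

	/* A zero duration disables the watchdog again. */
	video_->setDequeueTimeout(0s);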
/**
- * \brief Convert a \a v4l2Fourcc to the corresponding PixelFormat
- * \param[in] v4l2Fourcc The V4L2 pixel format (V4L2_PIX_FORMAT_*)
- * \return The PixelFormat corresponding to \a v4l2Fourcc
+ * \var V4L2VideoDevice::dequeueTimeout
+ * \brief A Signal emitted when the dequeue watchdog timer expires
*/
-PixelFormat V4L2VideoDevice::toPixelFormat(V4L2PixelFormat v4l2Fourcc)
+
+/**
+ * \brief Slot to handle an expired dequeue timer
+ *
+ * When this slot is called, the time between successive dequeue events has
+ * exceeded the configured timeout. Emit the
+ * \ref V4L2VideoDevice::dequeueTimeout signal.
+ */
+void V4L2VideoDevice::watchdogExpired()
{
- switch (v4l2Fourcc) {
- /* RGB formats. */
- case V4L2_PIX_FMT_RGB24:
- return PixelFormat(DRM_FORMAT_BGR888);
- case V4L2_PIX_FMT_BGR24:
- return PixelFormat(DRM_FORMAT_RGB888);
- case V4L2_PIX_FMT_RGBA32:
- return PixelFormat(DRM_FORMAT_ABGR8888);
- case V4L2_PIX_FMT_ABGR32:
- return PixelFormat(DRM_FORMAT_ARGB8888);
- case V4L2_PIX_FMT_ARGB32:
- return PixelFormat(DRM_FORMAT_BGRA8888);
- case V4L2_PIX_FMT_BGRA32:
- return PixelFormat(DRM_FORMAT_RGBA8888);
-
- /* YUV packed formats. */
- case V4L2_PIX_FMT_YUYV:
- return PixelFormat(DRM_FORMAT_YUYV);
- case V4L2_PIX_FMT_YVYU:
- return PixelFormat(DRM_FORMAT_YVYU);
- case V4L2_PIX_FMT_UYVY:
- return PixelFormat(DRM_FORMAT_UYVY);
- case V4L2_PIX_FMT_VYUY:
- return PixelFormat(DRM_FORMAT_VYUY);
-
- /* YUY planar formats. */
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV16M:
- return PixelFormat(DRM_FORMAT_NV16);
- case V4L2_PIX_FMT_NV61:
- case V4L2_PIX_FMT_NV61M:
- return PixelFormat(DRM_FORMAT_NV61);
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV12M:
- return PixelFormat(DRM_FORMAT_NV12);
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV21M:
- return PixelFormat(DRM_FORMAT_NV21);
-
- /* Compressed formats. */
- case V4L2_PIX_FMT_MJPEG:
- return PixelFormat(DRM_FORMAT_MJPEG);
-
- /* V4L2 formats not yet supported by DRM. */
- case V4L2_PIX_FMT_GREY:
- default:
- /*
- * \todo We can't use LOG() in a static method of a Loggable
- * class. Until we fix the logger, work around it.
- */
- libcamera::_log(__FILE__, __LINE__, _LOG_CATEGORY(V4L2)(),
- LogError).stream()
- << "Unsupported V4L2 pixel format "
- << v4l2Fourcc.toString();
- return PixelFormat();
- }
+ LOG(V4L2, Warning)
+ << "Dequeue timer of " << watchdogDuration_ << " has expired!";
+
+ dequeueTimeout.emit();
}
/**
- * \brief Convert \a PixelFormat to its corresponding V4L2 FourCC
- * \param[in] pixelFormat The PixelFormat to convert
- *
- * For multiplanar formats, the V4L2 format variant (contiguous or
- * non-contiguous planes) is selected automatically based on the capabilities
- * of the video device. If the video device supports the V4L2 multiplanar API,
- * non-contiguous formats are preferred.
+ * \brief Create a new video device instance from \a entity in media device
+ * \a media
+ * \param[in] media The media device where the entity is registered
+ * \param[in] entity The media entity name
*
- * \return The V4L2_PIX_FMT_* pixel format code corresponding to \a pixelFormat
+ * \return A newly created V4L2VideoDevice on success, nullptr otherwise
*/
-V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat)
+std::unique_ptr<V4L2VideoDevice>
+V4L2VideoDevice::fromEntityName(const MediaDevice *media,
+ const std::string &entity)
{
- return toV4L2PixelFormat(pixelFormat, caps_.isMultiplanar());
+ MediaEntity *mediaEntity = media->getEntityByName(entity);
+ if (!mediaEntity)
+ return nullptr;
+
+ return std::make_unique<V4L2VideoDevice>(mediaEntity);
}
/**
- * \brief Convert \a pixelFormat to its corresponding V4L2 FourCC
+ * \brief Convert \a PixelFormat to a V4L2PixelFormat supported by the device
* \param[in] pixelFormat The PixelFormat to convert
- * \param[in] multiplanar V4L2 Multiplanar API support flag
*
- * Multiple V4L2 formats may exist for one PixelFormat when the format uses
- * multiple planes, as V4L2 defines separate 4CCs for contiguous and separate
- * planes formats. Set the \a multiplanar parameter to false to select a format
- * with contiguous planes, or to true to select a format with non-contiguous
- * planes.
+ * Convert \a pixelFormat to a V4L2 FourCC that is known to be supported by
+ * the video device.
+ *
+ * A V4L2VideoDevice may support different V4L2 pixel formats that map the same
+ * PixelFormat. This is the case of the contiguous and non-contiguous variants
+ * of multiplanar formats, and with the V4L2 MJPEG and JPEG pixel formats.
+ * Converting a PixelFormat to a V4L2PixelFormat may thus have multiple answers.
*
- * \return The V4L2_PIX_FMT_* pixel format code corresponding to \a pixelFormat
+ * This function converts the \a pixelFormat using the list of V4L2 pixel
+ * formats that the V4L2VideoDevice supports. This guarantees that the returned
+ * V4L2PixelFormat will be valid for the device. If multiple matches are still
+ * possible, contiguous variants are preferred. If the \a pixelFormat is not
+ * supported by the device, the function returns an invalid V4L2PixelFormat.
+ *
+ * \return The V4L2PixelFormat corresponding to \a pixelFormat if supported by
+ * the device, or an invalid V4L2PixelFormat otherwise
*/
-V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar)
+V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) const
{
- switch (pixelFormat) {
- /* RGB formats. */
- case DRM_FORMAT_BGR888:
- return V4L2PixelFormat(V4L2_PIX_FMT_RGB24);
- case DRM_FORMAT_RGB888:
- return V4L2PixelFormat(V4L2_PIX_FMT_BGR24);
- case DRM_FORMAT_ABGR8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_RGBA32);
- case DRM_FORMAT_ARGB8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_ABGR32);
- case DRM_FORMAT_BGRA8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_ARGB32);
- case DRM_FORMAT_RGBA8888:
- return V4L2PixelFormat(V4L2_PIX_FMT_BGRA32);
-
- /* YUV packed formats. */
- case DRM_FORMAT_YUYV:
- return V4L2PixelFormat(V4L2_PIX_FMT_YUYV);
- case DRM_FORMAT_YVYU:
- return V4L2PixelFormat(V4L2_PIX_FMT_YVYU);
- case DRM_FORMAT_UYVY:
- return V4L2PixelFormat(V4L2_PIX_FMT_UYVY);
- case DRM_FORMAT_VYUY:
- return V4L2PixelFormat(V4L2_PIX_FMT_VYUY);
-
- /*
- * YUY planar formats.
- * \todo Add support for non-contiguous memory planes
- * \todo Select the format variant not only based on \a multiplanar but
- * also take into account the formats supported by the device.
- */
- case DRM_FORMAT_NV16:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV16);
- case DRM_FORMAT_NV61:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV61);
- case DRM_FORMAT_NV12:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV12);
- case DRM_FORMAT_NV21:
- return V4L2PixelFormat(V4L2_PIX_FMT_NV21);
+ const std::vector<V4L2PixelFormat> &v4l2PixelFormats =
+ V4L2PixelFormat::fromPixelFormat(pixelFormat);
- /* Compressed formats. */
- case DRM_FORMAT_MJPEG:
- return V4L2PixelFormat(V4L2_PIX_FMT_MJPEG);
+ for (const V4L2PixelFormat &v4l2Format : v4l2PixelFormats) {
+ if (pixelFormats_.count(v4l2Format))
+ return v4l2Format;
}
- /*
- * \todo We can't use LOG() in a static method of a Loggable
- * class. Until we fix the logger, work around it.
- */
- libcamera::_log(__FILE__, __LINE__, _LOG_CATEGORY(V4L2)(), LogError).stream()
- << "Unsupported V4L2 pixel format " << pixelFormat.toString();
return {};
}
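
For illustration (dev is an assumed opened device), a caller can check whether a PixelFormat is usable before configuring the device:

	V4L2PixelFormat v4l2Format = dev->toV4L2PixelFormat(formats::NV12);
	if (!v4l2Format.isValid()) {
		/* The device supports no V4L2 variant of NV12. */
		return -EINVAL;
	}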
@@ -1792,15 +2147,24 @@ V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelForma
* \class V4L2M2MDevice
* \brief Memory-to-Memory video device
*
+ * Memory-to-memory devices in the kernel using the V4L2 M2M API can
+ * operate with multiple contexts for parallel operations on a single
+ * device. Each instance of a V4L2M2MDevice represents a single context.
+ *
* The V4L2M2MDevice manages two V4L2VideoDevice instances on the same
* deviceNode which operate together using two queues to implement the V4L2
* Memory to Memory API.
*
- * The two devices should be opened by calling open() on the V4L2M2MDevice, and
- * can be closed by calling close on the V4L2M2MDevice.
+ * Users of this class should create a new instance of the V4L2M2MDevice for
+ * each desired execution context, open it by calling open(), and close it by
+ * calling close().
*
* Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture
* or output V4L2VideoDevice is not permitted.
+ *
+ * Once the M2M device is open, users can operate on the output and capture
+ * queues represented by the V4L2VideoDevice returned by the output() and
+ * capture() functions.
*/
/**
@@ -1842,21 +2206,18 @@ V4L2M2MDevice::~V4L2M2MDevice()
*/
int V4L2M2MDevice::open()
{
- int fd;
int ret;
/*
* The output and capture V4L2VideoDevice instances use the same file
- * handle for the same device node. The local file handle can be closed
- * as the V4L2VideoDevice::open() retains a handle by duplicating the
- * fd passed in.
+ * handle for the same device node.
*/
- fd = syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
- O_RDWR | O_NONBLOCK);
- if (fd < 0) {
+ SharedFD fd(syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(),
+ O_RDWR | O_NONBLOCK));
+ if (!fd.isValid()) {
ret = -errno;
- LOG(V4L2, Error)
- << "Failed to open V4L2 M2M device: " << strerror(-ret);
+ LOG(V4L2, Error) << "Failed to open V4L2 M2M device: "
+ << strerror(-ret);
return ret;
}
@@ -1868,13 +2229,10 @@ int V4L2M2MDevice::open()
if (ret)
goto err;
- ::close(fd);
-
return 0;
err:
close();
- ::close(fd);
return ret;
}
diff --git a/src/libcamera/version.cpp.in b/src/libcamera/version.cpp.in
index 5aec08a1..bf5a2c30 100644
--- a/src/libcamera/version.cpp.in
+++ b/src/libcamera/version.cpp.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.cpp - libcamera version
+ * libcamera version
*
* This file is auto-generated. Do not edit.
*/
diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp
new file mode 100644
index 00000000..a5e42461
--- /dev/null
+++ b/src/libcamera/yaml_parser.cpp
@@ -0,0 +1,784 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * libcamera YAML parsing helper
+ */
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include <charconv>
+#include <errno.h>
+#include <functional>
+#include <limits>
+#include <stdlib.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <yaml.h>
+
+/**
+ * \file yaml_parser.h
+ * \brief A YAML parser helper
+ */
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(YamlParser)
+
+namespace {
+
+/* Empty static YamlObject as a safe result for invalid operations */
+static const YamlObject empty;
+
+} /* namespace */
+
+/**
+ * \class YamlObject
+ * \brief A class representing the tree structure of the YAML content
+ *
+ * The YamlObject class represents the tree structure of YAML content. A
+ * YamlObject can be empty, a dictionary or list of YamlObjects, or a value
+ * when it is a tree leaf.
+ */
+
+YamlObject::YamlObject()
+ : type_(Type::Empty)
+{
+}
+
+YamlObject::~YamlObject() = default;
+
+/**
+ * \fn YamlObject::isValue()
+ * \brief Return whether the YamlObject is a value
+ *
+ * \return True if the YamlObject is a value, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isList()
+ * \brief Return whether the YamlObject is a list
+ *
+ * \return True if the YamlObject is a list, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isDictionary()
+ * \brief Return whether the YamlObject is a dictionary
+ *
+ * \return True if the YamlObject is a dictionary, false otherwise
+ */
+
+/**
+ * \fn YamlObject::isEmpty()
+ * \brief Return whether the YamlObject is empty
+ *
+ * \return True if the YamlObject is empty, false otherwise
+ */
+
+/**
+ * \fn YamlObject::operator bool()
+ * \brief Return whether the YamlObject is non-empty
+ *
+ * \return False if the YamlObject is empty, true otherwise
+ */
+
+/**
+ * \fn YamlObject::size()
+ * \brief Retrieve the number of elements in a dictionary or list YamlObject
+ *
+ * This function retrieves the size of the YamlObject, defined as the number of
+ * child elements it contains. Only YamlObject instances of Dictionary or List
+ * types have a size; calling this function on other types of instances is
+ * invalid and results in undefined behaviour.
+ *
+ * \return The size of the YamlObject
+ */
+std::size_t YamlObject::size() const
+{
+ switch (type_) {
+ case Type::Dictionary:
+ case Type::List:
+ return list_.size();
+ default:
+ return 0;
+ }
+}
+
+/**
+ * \fn template<typename T> YamlObject::get<T>() const
+ * \brief Parse the YamlObject as a \a T value
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), std::nullopt is returned.
+ *
+ * \return The YamlObject value, or std::nullopt if parsing failed
+ */
+
+/**
+ * \fn template<typename T, typename U> YamlObject::get<T>(U &&defaultValue) const
+ * \brief Parse the YamlObject as a \a T value
+ * \param[in] defaultValue The default value when failing to parse
+ *
+ * This function parses the value of the YamlObject as a \a T object, and
+ * returns the value. If parsing fails (usually because the YamlObject doesn't
+ * store a \a T value), the \a defaultValue is returned.
+ *
+ * \return The YamlObject value, or \a defaultValue if parsing failed
+ */
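+
A sketch of both accessors (root is a hypothetical YamlObject obtained from a parsed configuration file):

	/* std::nullopt if "exposure" is missing or not an unsigned integer. */
	std::optional<uint32_t> exposure = root["exposure"].get<uint32_t>();

	/* Falls back to 1.0 when "gain" is missing or can't be parsed. */
	double gain = root["gain"].get<double>(1.0);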
+
+#ifndef __DOXYGEN__
+
+template<>
+std::optional<bool>
+YamlObject::Getter<bool>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_ == "true")
+ return true;
+ else if (obj.value_ == "false")
+ return false;
+
+ return std::nullopt;
+}
+
+template<typename T>
+struct YamlObject::Getter<T, std::enable_if_t<
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T>>>
+{
+ std::optional<T> get(const YamlObject &obj) const
+ {
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ const std::string &str = obj.value_;
+ T value;
+
+ auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
+ value);
+ if (ptr != str.data() + str.size() || ec != std::errc())
+ return std::nullopt;
+
+ return value;
+ }
+};
+
+template struct YamlObject::Getter<int8_t>;
+template struct YamlObject::Getter<uint8_t>;
+template struct YamlObject::Getter<int16_t>;
+template struct YamlObject::Getter<uint16_t>;
+template struct YamlObject::Getter<int32_t>;
+template struct YamlObject::Getter<uint32_t>;
+
+template<>
+std::optional<float>
+YamlObject::Getter<float>::get(const YamlObject &obj) const
+{
+ return obj.get<double>();
+}
+
+template<>
+std::optional<double>
+YamlObject::Getter<double>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ if (obj.value_.empty())
+ return std::nullopt;
+
+ char *end;
+
+ errno = 0;
+ double value = utils::strtod(obj.value_.c_str(), &end);
+
+ if ('\0' != *end || errno == ERANGE)
+ return std::nullopt;
+
+ return value;
+}
+
+template<>
+std::optional<std::string>
+YamlObject::Getter<std::string>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::Value)
+ return std::nullopt;
+
+ return obj.value_;
+}
+
+template<>
+std::optional<Size>
+YamlObject::Getter<Size>::get(const YamlObject &obj) const
+{
+ if (obj.type_ != Type::List)
+ return std::nullopt;
+
+ if (obj.list_.size() != 2)
+ return std::nullopt;
+
+ auto width = obj.list_[0].value->get<uint32_t>();
+ if (!width)
+ return std::nullopt;
+
+ auto height = obj.list_[1].value->get<uint32_t>();
+ if (!height)
+ return std::nullopt;
+
+ return Size(*width, *height);
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn template<typename T> YamlObject::getList<T>() const
+ * \brief Parse the YamlObject as a list of \a T
+ *
+ * This function parses the value of the YamlObject as a list of \a T objects,
+ * and returns the value as a \a std::vector<T>. If parsing fails, std::nullopt
+ * is returned.
+ *
+ * \return The YamlObject value as a std::vector<T>, or std::nullopt if parsing
+ * failed
+ */
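+
For example (again with a hypothetical root object), a whole list can be pulled in one call, which fails as a whole if any entry fails to parse:

	std::optional<std::vector<Size>> sizes =
		root["resolutions"].getList<Size>();
	if (!sizes)
		return -EINVAL; /* Not a list, or an entry wasn't [width, height]. */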
+
+#ifndef __DOXYGEN__
+
+template<typename T,
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<float, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> *>
+std::optional<std::vector<T>> YamlObject::getList() const
+{
+ if (type_ != Type::List)
+ return std::nullopt;
+
+ std::vector<T> values;
+ values.reserve(list_.size());
+
+ for (const YamlObject &entry : asList()) {
+ const auto value = entry.get<T>();
+ if (!value)
+ return std::nullopt;
+ values.emplace_back(*value);
+ }
+
+ return values;
+}
+
+template std::optional<std::vector<bool>> YamlObject::getList<bool>() const;
+template std::optional<std::vector<float>> YamlObject::getList<float>() const;
+template std::optional<std::vector<double>> YamlObject::getList<double>() const;
+template std::optional<std::vector<int8_t>> YamlObject::getList<int8_t>() const;
+template std::optional<std::vector<uint8_t>> YamlObject::getList<uint8_t>() const;
+template std::optional<std::vector<int16_t>> YamlObject::getList<int16_t>() const;
+template std::optional<std::vector<uint16_t>> YamlObject::getList<uint16_t>() const;
+template std::optional<std::vector<int32_t>> YamlObject::getList<int32_t>() const;
+template std::optional<std::vector<uint32_t>> YamlObject::getList<uint32_t>() const;
+template std::optional<std::vector<std::string>> YamlObject::getList<std::string>() const;
+template std::optional<std::vector<Size>> YamlObject::getList<Size>() const;
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \fn YamlObject::asDict() const
+ * \brief Wrap a dictionary YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of Dictionary type into an adapter that exposes
+ * iterators, as well as begin() and end() functions, allowing usage of
+ * range-based for loops with YamlObject. As YAML mappings are not ordered, the
+ * iteration order is not specified.
+ *
+ * The iterator's value_type is a
+ * <em>std::pair<const std::string &, const \ref YamlObject &></em>.
+ *
+ * If the YamlObject is not of Dictionary type, the returned adapter operates
+ * as an empty container.
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
+
+/**
+ * \fn YamlObject::asList() const
+ * \brief Wrap a list YamlObject in an adapter that exposes iterators
+ *
+ * The YamlObject class doesn't directly implement iterators, as the iterator
+ * type depends on whether the object is a Dictionary or List. This function
+ * wraps a YamlObject of List type into an adapter that exposes iterators, as
+ * well as begin() and end() functions, allowing usage of range-based for loops
+ * with YamlObject. As YAML lists are ordered, the iteration order is identical
+ * to the list order in the YAML data.
+ *
+ * The iterator's value_type is a <em>const YamlObject &</em>.
+ *
+ * If the YamlObject is not of List type, the returned adapter operates as an
+ * empty container.
+ *
+ * \return An adapter of unspecified type compatible with range-based for loops
+ */
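+
A sketch of the two adapters in range-based for loops (root is hypothetical):

	for (const auto &[key, value] : root.asDict())
		LOG(YamlParser, Debug) << "dictionary key: " << key;

	for (const YamlObject &entry : root["streams"].asList())
		LOG(YamlParser, Debug) << entry.get<std::string>("");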
+
+/**
+ * \fn YamlObject::operator[](std::size_t index) const
+ * \brief Retrieve the element from list YamlObject by index
+ *
+ * This function retrieves an element of the YamlObject. Only YamlObject
+ * instances of List type associate elements with indices; calling this function
+ * on other types of instances or with an invalid index results in an empty
+ * object.
+ *
+ * \return The YamlObject as an element of the list
+ */
+const YamlObject &YamlObject::operator[](std::size_t index) const
+{
+ if (type_ != Type::List || index >= size())
+ return empty;
+
+ return *list_[index].value;
+}
+
+/**
+ * \fn YamlObject::contains()
+ * \brief Check if an element of a dictionary exists
+ *
+ * This function checks if the YamlObject contains an element. Only YamlObject
+ * instances of Dictionary type associate elements with names; calling this
+ * function on other types of instances is invalid and results in undefined
+ * behaviour.
+ *
+ * \return True if an element exists, false otherwise
+ */
+bool YamlObject::contains(std::string_view key) const
+{
+ return dictionary_.find(key) != dictionary_.end();
+}
+
+/**
+ * \fn YamlObject::operator[](std::string_view key) const
+ * \brief Retrieve a member by name from the dictionary
+ *
+ * This function retrieves a member of a YamlObject by name. Only YamlObject
+ * instances of Dictionary type associate elements with names; calling this
+ * function on other types of instances or with a nonexistent key results in an
+ * empty object.
+ *
+ * \return The YamlObject corresponding to the \a key member
+ */
+const YamlObject &YamlObject::operator[](std::string_view key) const
+{
+ if (type_ != Type::Dictionary)
+ return empty;
+
+ auto iter = dictionary_.find(key);
+ if (iter == dictionary_.end())
+ return empty;
+
+ return *iter->second;
+}
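+
+/*
+ * Both lookup operators return the empty object on failure, so lookups can
+ * be chained safely without intermediate checks (keys here are illustrative):
+ *
+ *	int32_t width = (*root)["resolution"]["width"].get<int32_t>(640);
+ */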
+
+#ifndef __DOXYGEN__
+
+class YamlParserContext
+{
+public:
+ YamlParserContext();
+ ~YamlParserContext();
+
+ int init(File &file);
+ int parseContent(YamlObject &yamlObject);
+
+private:
+ struct EventDeleter {
+ void operator()(yaml_event_t *event) const
+ {
+ yaml_event_delete(event);
+ delete event;
+ }
+ };
+ using EventPtr = std::unique_ptr<yaml_event_t, EventDeleter>;
+
+ static int yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead);
+
+ EventPtr nextEvent();
+
+ void readValue(std::string &value, EventPtr event);
+ int parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem);
+ int parseNextYamlObject(YamlObject &yamlObject, EventPtr event);
+
+ bool parserValid_;
+ yaml_parser_t parser_;
+};
+
+/**
+ * \class YamlParserContext
+ * \brief Class for YamlParser parsing and context data
+ *
+ * The YamlParserContext class stores the internal yaml_parser_t and provides
+ * helper functions to do event-based parsing for YAML files.
+ */
+YamlParserContext::YamlParserContext()
+ : parserValid_(false)
+{
+}
+
+/**
+ * \brief Destructor of YamlParserContext
+ */
+YamlParserContext::~YamlParserContext()
+{
+ if (parserValid_) {
+ yaml_parser_delete(&parser_);
+ parserValid_ = false;
+ }
+}
+
+/**
+ * \fn YamlParserContext::init()
+ * \brief Initialize a parser with an opened file for parsing
+ * \param[in] file The YAML file to parse
+ *
+ * Prior to parsing the YAML content, the YamlParserContext must be initialized
+ * with a file to create an internal parser. The file needs to stay valid until
+ * parsing completes.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser has failed to initialize
+ */
+int YamlParserContext::init(File &file)
+{
+	/* yaml_parser_initialize returns 1 when it succeeds */
+ if (!yaml_parser_initialize(&parser_)) {
+ LOG(YamlParser, Error) << "Failed to initialize YAML parser";
+ return -EINVAL;
+ }
+ parserValid_ = true;
+ yaml_parser_set_input(&parser_, &YamlParserContext::yamlRead, &file);
+
+ return 0;
+}
+
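+/*
+ * Read handler registered with yaml_parser_set_input(): libyaml calls it to
+ * pull raw bytes from the File. Returning 1 reports success (with *sizeRead
+ * bytes read, possibly 0 at end of file), returning 0 reports a read error.
+ */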
+int YamlParserContext::yamlRead(void *data, unsigned char *buffer, size_t size,
+ size_t *sizeRead)
+{
+ File *file = static_cast<File *>(data);
+
+ Span<unsigned char> buf{ buffer, size };
+ ssize_t ret = file->read(buf);
+ if (ret < 0)
+ return 0;
+
+ *sizeRead = ret;
+ return 1;
+}
+
+/**
+ * \fn YamlParserContext::nextEvent()
+ * \brief Get the next event
+ *
+ * Get the next event in the current YAML event stream, and return nullptr
+ * when there are no more events.
+ *
+ * \return The next event on success or nullptr otherwise
+ */
+YamlParserContext::EventPtr YamlParserContext::nextEvent()
+{
+ EventPtr event(new yaml_event_t);
+
+ /* yaml_parser_parse returns 1 when it succeeds */
+ if (!yaml_parser_parse(&parser_, event.get())) {
+ File *file = static_cast<File *>(parser_.read_handler_data);
+
+ LOG(YamlParser, Error) << file->fileName() << ":"
+ << parser_.problem_mark.line << ":"
+ << parser_.problem_mark.column << " "
+ << parser_.problem << " "
+ << parser_.context;
+
+ return nullptr;
+ }
+
+ return event;
+}
+
+/**
+ * \fn YamlParserContext::parseContent()
+ * \brief Parse the content of a YAML document
+ * \param[out] yamlObject The YamlObject to store the parsed content
+ *
+ * Check YAML start and end events of a YAML document, and parse the root object
+ * of the YAML document into a YamlObject.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL The parser failed to parse or validate the YAML document
+ */
+int YamlParserContext::parseContent(YamlObject &yamlObject)
+{
+ /* Check start of the YAML file. */
+ EventPtr event = nextEvent();
+ if (!event || event->type != YAML_STREAM_START_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_START_EVENT)
+ return -EINVAL;
+
+ /* Parse the root object. */
+ event = nextEvent();
+ if (parseNextYamlObject(yamlObject, std::move(event)))
+ return -EINVAL;
+
+ /* Check end of the YAML file. */
+ event = nextEvent();
+ if (!event || event->type != YAML_DOCUMENT_END_EVENT)
+ return -EINVAL;
+
+ event = nextEvent();
+ if (!event || event->type != YAML_STREAM_END_EVENT)
+ return -EINVAL;
+
+ return 0;
+}
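+
+/*
+ * For example, for a document containing just "key: value", the sequence of
+ * events consumed above is STREAM-START, DOCUMENT-START, MAPPING-START,
+ * SCALAR(key), SCALAR(value), MAPPING-END, DOCUMENT-END and STREAM-END.
+ */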
+
+/**
+ * \fn YamlParserContext::readValue()
+ * \brief Parse event scalar and fill its content into a string
+ * \param[out] value The string to fill with the scalar value
+ * \param[in] event The scalar event to read the value from
+ *
+ * A helper function to parse a scalar event as a string. The caller needs to
+ * guarantee the event is of scalar type.
+ */
+void YamlParserContext::readValue(std::string &value, EventPtr event)
+{
+ value.assign(reinterpret_cast<char *>(event->data.scalar.value),
+ event->data.scalar.length);
+}
+
+/**
+ * \fn YamlParserContext::parseDictionaryOrList()
+ * \brief A helper function to abstract the common part of parsing dictionary or list
+ *
+ * \param[in] type YamlObject::Type::Dictionary to parse a dictionary, or
+ * YamlObject::Type::List to parse a list
+ * \param[in] parseItem The callback to handle an item
+ *
+ * A helper function to abstract parsing an item from a dictionary or a list.
+ * The differences between the two in a YAML event stream are:
+ *
+ * 1. The start and end event types are different
+ * 2. There is a leading scalar string as key in the items of a dictionary
+ *
+ * The caller should handle the leading key string in its callback parseItem
+ * when it's a dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the dictionary or list
+ */
+int YamlParserContext::parseDictionaryOrList(YamlObject::Type type,
+ const std::function<int(EventPtr event)> &parseItem)
+{
+ yaml_event_type_t endEventType = YAML_SEQUENCE_END_EVENT;
+ if (type == YamlObject::Type::Dictionary)
+ endEventType = YAML_MAPPING_END_EVENT;
+
+ /*
+ * Add a safety counter to make sure we don't loop indefinitely in case
+ * the YAML file is malformed.
+ */
+ for (unsigned int sentinel = 2000; sentinel; sentinel--) {
+ auto evt = nextEvent();
+ if (!evt)
+ return -EINVAL;
+
+ if (evt->type == endEventType)
+ return 0;
+
+ int ret = parseItem(std::move(evt));
+ if (ret)
+ return ret;
+ }
+
+	LOG(YamlParser, Error) << "The YAML file contains a List or Dictionary"
+				  " whose size exceeds the parser's limit (2000)";
+
+ return -EINVAL;
+}
+
+/**
+ * \fn YamlParserContext::parseNextYamlObject()
+ * \brief Parse the next YAML event and read it as a YamlObject
+ * \param[out] yamlObject The YamlObject to store the parsed content
+ * \param[in] event The leading event of the object
+ *
+ * Parse the next YAML object as a value, list or dictionary.
+ *
+ * \return 0 on success or a negative error code otherwise
+ * \retval -EINVAL Failed to parse the YAML file
+ */
+int YamlParserContext::parseNextYamlObject(YamlObject &yamlObject, EventPtr event)
+{
+ if (!event)
+ return -EINVAL;
+
+ switch (event->type) {
+ case YAML_SCALAR_EVENT:
+ yamlObject.type_ = YamlObject::Type::Value;
+ readValue(yamlObject.value_, std::move(event));
+ return 0;
+
+ case YAML_SEQUENCE_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::List;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evt) {
+ list.emplace_back(std::string{}, std::make_unique<YamlObject>());
+ return parseNextYamlObject(*list.back().value, std::move(evt));
+ };
+ return parseDictionaryOrList(YamlObject::Type::List, handler);
+ }
+
+ case YAML_MAPPING_START_EVENT: {
+ yamlObject.type_ = YamlObject::Type::Dictionary;
+ auto &list = yamlObject.list_;
+ auto handler = [this, &list](EventPtr evtKey) {
+ /* Parse key */
+ if (evtKey->type != YAML_SCALAR_EVENT) {
+ LOG(YamlParser, Error) << "Expect key at line: "
+ << evtKey->start_mark.line
+ << " column: "
+ << evtKey->start_mark.column;
+ return -EINVAL;
+ }
+
+ std::string key;
+ readValue(key, std::move(evtKey));
+
+ /* Parse value */
+ EventPtr evtValue = nextEvent();
+ if (!evtValue)
+ return -EINVAL;
+
+ auto &elem = list.emplace_back(std::move(key),
+ std::make_unique<YamlObject>());
+ return parseNextYamlObject(*elem.value, std::move(evtValue));
+ };
+ int ret = parseDictionaryOrList(YamlObject::Type::Dictionary, handler);
+ if (ret)
+ return ret;
+
+ auto &dictionary = yamlObject.dictionary_;
+ for (const auto &elem : list)
+ dictionary.emplace(elem.key, elem.value.get());
+
+ return 0;
+ }
+
+ default:
+ LOG(YamlParser, Error) << "Invalid YAML file";
+ return -EINVAL;
+ }
+}
+
+#endif /* __DOXYGEN__ */
+
+/**
+ * \class YamlParser
+ * \brief A helper class for parsing a YAML file
+ *
+ * The YamlParser class provides an easy interface to parse the contents of a
+ * YAML file into a tree of YamlObject instances.
+ *
+ * Example usage:
+ *
+ * \code{.unparsed}
+ *
+ * name:
+ * "John"
+ * numbers:
+ * - 1
+ * - 2
+ *
+ * \endcode
+ *
+ * The following code illustrates how to parse the above YAML file:
+ *
+ * \code{.cpp}
+ *
+ * std::unique_ptr<YamlObject> root = YamlParser::parse(fh);
+ * if (!root)
+ * return;
+ *
+ * if (!root->isDictionary())
+ * return;
+ *
+ * const YamlObject &name = (*root)["name"];
+ * std::cout << name.get<std::string>("") << std::endl;
+ *
+ * const YamlObject &numbers = (*root)["numbers"];
+ * if (!numbers.isList())
+ * return;
+ *
+ * for (std::size_t i = 0; i < numbers.size(); i++)
+ * std::cout << numbers[i].get<int32_t>(0) << std::endl;
+ *
+ * \endcode
+ *
+ * The YamlParser::parse() function takes an open File, parses its contents,
+ * and returns a pointer to a YamlObject corresponding to the root node of the
+ * YAML document.
+ *
+ * The parser preserves the order of items in the YAML file, for both lists and
+ * dictionaries.
+ */
+
+/**
+ * \brief Parse a YAML file as a YamlObject
+ * \param[in] file The YAML file to parse
+ *
+ * The YamlParser::parse() function takes a file, parses its contents, and
+ * returns a pointer to a YamlObject corresponding to the root node of the YAML
+ * document.
+ *
+ * \return Pointer to result YamlObject on success or nullptr otherwise
+ */
+std::unique_ptr<YamlObject> YamlParser::parse(File &file)
+{
+ YamlParserContext context;
+
+ if (context.init(file))
+ return nullptr;
+
+ std::unique_ptr<YamlObject> root(new YamlObject());
+
+ if (context.parseContent(*root)) {
+ LOG(YamlParser, Error)
+ << "Failed to parse YAML content from "
+ << file.fileName();
+ return nullptr;
+ }
+
+ return root;
+}
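+
+/*
+ * Usage sketch, with error handling elided and a hypothetical path:
+ *
+ *	File file("/etc/libcamera/config.yaml");
+ *	file.open(File::OpenModeFlag::ReadOnly);
+ *	std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ */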
+
+} /* namespace libcamera */
diff --git a/src/meson.build b/src/meson.build
index d818d8b8..76198e95 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -1,14 +1,61 @@
-if get_option('android')
- subdir('android')
+# SPDX-License-Identifier: CC0-1.0
+
+# Cache system paths
+libcamera_datadir = get_option('datadir') / 'libcamera'
+libcamera_libdir = get_option('libdir') / 'libcamera'
+libcamera_libexecdir = get_option('libexecdir') / 'libcamera'
+libcamera_sysconfdir = get_option('sysconfdir') / 'libcamera'
+
+config_h.set('LIBCAMERA_DATA_DIR', '"' + get_option('prefix') / libcamera_datadir + '"')
+config_h.set('LIBCAMERA_SYSCONF_DIR', '"' + get_option('prefix') / libcamera_sysconfdir + '"')
+
+summary({
+ 'LIBCAMERA_DATA_DIR' : config_h.get('LIBCAMERA_DATA_DIR'),
+ 'LIBCAMERA_SYSCONF_DIR' : config_h.get('LIBCAMERA_SYSCONF_DIR'),
+ }, section : 'Paths')
+
+# Module Signing
+openssl = find_program('openssl', required : false)
+if openssl.found()
+ ipa_priv_key = custom_target('ipa-priv-key',
+ output : ['ipa-priv-key.pem'],
+ command : [gen_ipa_priv_key, '@OUTPUT@'])
+ config_h.set('HAVE_IPA_PUBKEY', 1)
+ ipa_sign_module = true
+else
+ warning('openssl not found, all IPA modules will be isolated')
+ ipa_sign_module = false
+endif
+
+# libyuv, used by the Android adaptation layer and the virtual pipeline handler.
+# Fall back to a subproject if libyuv isn't found, as it's typically not provided
+# by distributions.
+libyuv_dep = dependency('libyuv', required : false)
+
+if (pipelines.contains('virtual') or get_option('android').allowed()) and \
+ not libyuv_dep.found()
+ cmake = import('cmake')
+
+ libyuv_vars = cmake.subproject_options()
+ libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+ libyuv_vars.set_override_option('cpp_std', 'c++17')
+ libyuv_vars.append_compile_args('cpp',
+ '-Wno-sign-compare',
+ '-Wno-unused-variable',
+ '-Wno-unused-parameter')
+ libyuv_vars.append_link_args('-ljpeg')
+ libyuv = cmake.subproject('libyuv', options : libyuv_vars)
+ libyuv_dep = libyuv.dependency('yuv')
endif
+# libcamera must be built first as a dependency to the other components.
subdir('libcamera')
+
+subdir('android')
subdir('ipa')
-subdir('cam')
-subdir('qcam')
-if get_option('v4l2')
- subdir('v4l2')
-endif
+subdir('apps')
subdir('gstreamer')
+subdir('py')
+subdir('v4l2')
diff --git a/src/py/cam/cam.py b/src/py/cam/cam.py
new file mode 100755
index 00000000..ff4b7f66
--- /dev/null
+++ b/src/py/cam/cam.py
@@ -0,0 +1,472 @@
+#!/usr/bin/env python3
+
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from typing import Any
+import argparse
+import binascii
+import libcamera as libcam
+import libcamera.utils
+import sys
+import traceback
+
+
+class CameraContext:
+ camera: libcam.Camera
+ id: str
+ idx: int
+
+ opt_stream: str
+ opt_strict_formats: bool
+ opt_crc: bool
+ opt_metadata: bool
+ opt_save_frames: bool
+ opt_capture: int
+ opt_orientation: str
+
+ stream_names: dict[libcam.Stream, str]
+ streams: list[libcam.Stream]
+ allocator: libcam.FrameBufferAllocator
+ requests: list[libcam.Request]
+ reqs_queued: int
+ reqs_completed: int
+ last: int = 0
+ fps: float
+
+ def __init__(self, camera, idx):
+ self.camera = camera
+ self.idx = idx
+ self.id = 'cam' + str(idx)
+ self.reqs_queued = 0
+ self.reqs_completed = 0
+
+ def do_cmd_list_props(self):
+ print('Properties for', self.id)
+
+ for cid, val in self.camera.properties.items():
+ print('\t{}: {}'.format(cid, val))
+
+ def do_cmd_list_controls(self):
+ print('Controls for', self.id)
+
+ for cid, info in self.camera.controls.items():
+ print('\t{}: {}'.format(cid, info))
+
+ def do_cmd_info(self):
+ print('Stream info for', self.id)
+
+ roles = [libcam.StreamRole.Viewfinder]
+
+ camconfig = self.camera.generate_configuration(roles)
+ if camconfig is None:
+ raise Exception('Generating config failed')
+
+ for i, stream_config in enumerate(camconfig):
+ print('\t{}: {}'.format(i, stream_config))
+
+ formats = stream_config.formats
+ for fmt in formats.pixel_formats:
+ print('\t * Pixelformat:', fmt, formats.range(fmt))
+
+ for size in formats.sizes(fmt):
+ print('\t -', size)
+
+ def acquire(self):
+ self.camera.acquire()
+
+ def release(self):
+ self.camera.release()
+
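+    # Parse the --stream descriptions for this camera. As an illustration, a
+    # description such as 'role=video,width=1920,height=1080' yields
+    # {'role': libcam.StreamRole.VideoRecording, 'width': 1920, 'height': 1080}.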
+ def __parse_streams(self):
+ streams = []
+
+ for stream_desc in self.opt_stream:
+ stream_opts: dict[str, Any]
+ stream_opts = {'role': libcam.StreamRole.Viewfinder}
+
+ for stream_opt in stream_desc.split(','):
+                if stream_opt == '':
+                    continue
+
+ arr = stream_opt.split('=')
+ if len(arr) != 2:
+ print('Bad stream option', stream_opt)
+ sys.exit(-1)
+
+ key = arr[0]
+ value = arr[1]
+
+ if key in ['width', 'height']:
+ value = int(value)
+ elif key == 'role':
+ rolemap = {
+ 'still': libcam.StreamRole.StillCapture,
+ 'raw': libcam.StreamRole.Raw,
+ 'video': libcam.StreamRole.VideoRecording,
+ 'viewfinder': libcam.StreamRole.Viewfinder,
+ }
+
+ role = rolemap.get(value.lower(), None)
+
+ if role is None:
+ print('Bad stream role', value)
+ sys.exit(-1)
+
+ value = role
+ elif key == 'pixelformat':
+ pass
+ else:
+ print('Bad stream option key', key)
+ sys.exit(-1)
+
+ stream_opts[key] = value
+
+ streams.append(stream_opts)
+
+ return streams
+
+ def configure(self):
+ streams = self.__parse_streams()
+
+ roles = [opts['role'] for opts in streams]
+
+ camconfig = self.camera.generate_configuration(roles)
+ if camconfig is None:
+ raise Exception('Generating config failed')
+
+ for idx, stream_opts in enumerate(streams):
+ stream_config = camconfig.at(idx)
+
+ if 'width' in stream_opts:
+ stream_config.size.width = stream_opts['width']
+
+ if 'height' in stream_opts:
+ stream_config.size.height = stream_opts['height']
+
+ if 'pixelformat' in stream_opts:
+ stream_config.pixel_format = libcam.PixelFormat(stream_opts['pixelformat'])
+
+ if self.opt_orientation is not None:
+ orientation_map = {
+ 'rot0': libcam.Orientation.Rotate0,
+ 'rot180': libcam.Orientation.Rotate180,
+ 'mirror': libcam.Orientation.Rotate0Mirror,
+ 'flip': libcam.Orientation.Rotate180Mirror,
+ }
+
+ orient = orientation_map.get(self.opt_orientation, None)
+ if orient is None:
+ print('Bad orientation: ', self.opt_orientation)
+ sys.exit(-1)
+
+ camconfig.orientation = orient
+
+ stat = camconfig.validate()
+
+ if stat == libcam.CameraConfiguration.Status.Invalid:
+ print('Camera configuration invalid')
+ exit(-1)
+ elif stat == libcam.CameraConfiguration.Status.Adjusted:
+ if self.opt_strict_formats:
+ print('Adjusting camera configuration disallowed by --strict-formats argument')
+ exit(-1)
+
+ print('Camera configuration adjusted')
+
+ self.camera.configure(camconfig)
+
+ self.stream_names = {}
+ self.streams = []
+
+ for idx, stream_config in enumerate(camconfig):
+ stream = stream_config.stream
+ self.streams.append(stream)
+ self.stream_names[stream] = 'stream' + str(idx)
+ print('{}-{}: stream config {}'.format(self.id, self.stream_names[stream], stream.configuration))
+
+ def alloc_buffers(self):
+ allocator = libcam.FrameBufferAllocator(self.camera)
+
+ for stream in self.streams:
+ allocated = allocator.allocate(stream)
+
+ print('{}-{}: Allocated {} buffers'.format(self.id, self.stream_names[stream], allocated))
+
+ self.allocator = allocator
+
+ def create_requests(self):
+ self.requests = []
+
+ # Identify the stream with the least number of buffers
+ num_bufs = min([len(self.allocator.buffers(stream)) for stream in self.streams])
+
+ requests = []
+
+ for buf_num in range(num_bufs):
+ request = self.camera.create_request(self.idx)
+
+ if request is None:
+                print('Cannot create request')
+ exit(-1)
+
+ for stream in self.streams:
+ buffers = self.allocator.buffers(stream)
+ buffer = buffers[buf_num]
+
+ request.add_buffer(stream, buffer)
+
+ requests.append(request)
+
+ self.requests = requests
+
+ def start(self):
+ self.camera.start()
+
+ def stop(self):
+ self.camera.stop()
+
+ def queue_requests(self):
+ for request in self.requests:
+ self.camera.queue_request(request)
+ self.reqs_queued += 1
+
+ del self.requests
+
+
+class CaptureState:
+ cm: libcam.CameraManager
+ contexts: list[CameraContext]
+ renderer: Any
+
+ def __init__(self, cm, contexts):
+ self.cm = cm
+ self.contexts = contexts
+
+ # Called from renderer when there is a libcamera event
+ def event_handler(self):
+ try:
+ reqs = self.cm.get_ready_requests()
+
+ for req in reqs:
+ ctx = next(ctx for ctx in self.contexts if ctx.idx == req.cookie)
+ self.__request_handler(ctx, req)
+
+ running = any(ctx.reqs_completed < ctx.opt_capture for ctx in self.contexts)
+ return running
+ except Exception:
+ traceback.print_exc()
+ return False
+
+ def __request_handler(self, ctx, req):
+ if req.status != libcam.Request.Status.Complete:
+ raise Exception('{}: Request failed: {}'.format(ctx.id, req.status))
+
+ buffers = req.buffers
+
+ # Compute the frame rate. The timestamp is arbitrarily retrieved from
+ # the first buffer, as all buffers should have matching timestamps.
+ ts = buffers[next(iter(buffers))].metadata.timestamp
+ last = ctx.last
+ fps = 1000000000.0 / (ts - last) if (last != 0 and (ts - last) != 0) else 0
+ ctx.last = ts
+ ctx.fps = fps
+
+ if ctx.opt_metadata:
+ reqmeta = req.metadata
+ for ctrl, val in reqmeta.items():
+ print(f'\t{ctrl} = {val}')
+
+ for stream, fb in buffers.items():
+ stream_name = ctx.stream_names[stream]
+
+ crcs = []
+ if ctx.opt_crc:
+ with libcamera.utils.MappedFrameBuffer(fb) as mfb:
+ plane_crcs = [binascii.crc32(p) for p in mfb.planes]
+ crcs.append(plane_crcs)
+
+ meta = fb.metadata
+
+ print('{:.6f} ({:.2f} fps) {}-{}: seq {}, bytes {}, CRCs {}'
+ .format(ts / 1000000000, fps,
+ ctx.id, stream_name,
+ meta.sequence,
+ '/'.join([str(p.bytes_used) for p in meta.planes]),
+ crcs))
+
+ if ctx.opt_save_frames:
+ with libcamera.utils.MappedFrameBuffer(fb) as mfb:
+ filename = 'frame-{}-{}-{}.data'.format(ctx.id, stream_name, ctx.reqs_completed)
+ with open(filename, 'wb') as f:
+ for p in mfb.planes:
+ f.write(p)
+
+ self.renderer.request_handler(ctx, req)
+
+ ctx.reqs_completed += 1
+
+ # Called from renderer when it has finished with a request
+ def request_processed(self, ctx, req):
+ if ctx.reqs_queued < ctx.opt_capture:
+ req.reuse()
+ ctx.camera.queue_request(req)
+ ctx.reqs_queued += 1
+
+ def __capture_init(self):
+ for ctx in self.contexts:
+ ctx.acquire()
+
+ for ctx in self.contexts:
+ ctx.configure()
+
+ for ctx in self.contexts:
+ ctx.alloc_buffers()
+
+ for ctx in self.contexts:
+ ctx.create_requests()
+
+ def __capture_start(self):
+ for ctx in self.contexts:
+ ctx.start()
+
+ for ctx in self.contexts:
+ ctx.queue_requests()
+
+ def __capture_deinit(self):
+ for ctx in self.contexts:
+ ctx.stop()
+
+ for ctx in self.contexts:
+ ctx.release()
+
+ def do_cmd_capture(self):
+ self.__capture_init()
+
+ self.renderer.setup()
+
+ self.__capture_start()
+
+ self.renderer.run()
+
+ self.__capture_deinit()
+
+
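+# argparse action for per-camera options: each value is stored in a dict
+# keyed by the most recent --camera index, so that options apply to the
+# camera context they follow on the command line.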
+class CustomAction(argparse.Action):
+ def __init__(self, option_strings, dest, **kwargs):
+ super().__init__(option_strings, dest, default={}, **kwargs)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ if len(namespace.camera) == 0:
+ print(f'Option {option_string} requires a --camera context')
+ sys.exit(-1)
+
+ if self.type == bool:
+ values = True
+
+ current = namespace.camera[-1]
+
+ data = getattr(namespace, self.dest)
+
+ if self.nargs == '+':
+ if current not in data:
+ data[current] = []
+
+ data[current] += values
+ else:
+ data[current] = values
+
+
+def do_cmd_list(cm):
+ print('Available cameras:')
+
+ for idx, c in enumerate(cm.cameras):
+ print(f'{idx + 1}: {c.id}')
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ # global options
+ parser.add_argument('-l', '--list', action='store_true', help='List all cameras')
+ parser.add_argument('-c', '--camera', type=int, action='extend', nargs=1, default=[], help='Specify which camera to operate on, by index')
+    parser.add_argument('-p', '--list-properties', action='store_true', help='List camera properties')
+    parser.add_argument('--list-controls', action='store_true', help='List camera controls')
+ parser.add_argument('-I', '--info', action='store_true', help='Display information about stream(s)')
+ parser.add_argument('-R', '--renderer', default='null', help='Renderer (null, kms, qt, qtgl)')
+
+ # per camera options
+ parser.add_argument('-C', '--capture', nargs='?', type=int, const=1000000, action=CustomAction, help='Capture until interrupted by user or until CAPTURE frames captured')
+ parser.add_argument('--crc', nargs=0, type=bool, action=CustomAction, help='Print CRC32 for captured frames')
+ parser.add_argument('--save-frames', nargs=0, type=bool, action=CustomAction, help='Save captured frames to files')
+ parser.add_argument('--metadata', nargs=0, type=bool, action=CustomAction, help='Print the metadata for completed requests')
+ parser.add_argument('--strict-formats', type=bool, nargs=0, action=CustomAction, help='Do not allow requested stream format(s) to be adjusted')
+ parser.add_argument('-s', '--stream', nargs='+', action=CustomAction)
+ parser.add_argument('-o', '--orientation', help='Desired image orientation (rot0, rot180, mirror, flip)')
+ args = parser.parse_args()
+
+ cm = libcam.CameraManager.singleton()
+
+ if args.list:
+ do_cmd_list(cm)
+
+ contexts = []
+
+ for cam_idx in args.camera:
+ camera = next((c for i, c in enumerate(cm.cameras) if i + 1 == cam_idx), None)
+
+ if camera is None:
+ print('Unable to find camera', cam_idx)
+ return -1
+
+ ctx = CameraContext(camera, cam_idx)
+ ctx.opt_capture = args.capture.get(cam_idx, 0)
+ ctx.opt_crc = args.crc.get(cam_idx, False)
+ ctx.opt_save_frames = args.save_frames.get(cam_idx, False)
+ ctx.opt_metadata = args.metadata.get(cam_idx, False)
+ ctx.opt_strict_formats = args.strict_formats.get(cam_idx, False)
+ ctx.opt_stream = args.stream.get(cam_idx, ['role=viewfinder'])
+ ctx.opt_orientation = args.orientation
+ contexts.append(ctx)
+
+ for ctx in contexts:
+ print('Using camera {} as {}'.format(ctx.camera.id, ctx.id))
+
+ for ctx in contexts:
+ if args.list_properties:
+ ctx.do_cmd_list_props()
+ if args.list_controls:
+ ctx.do_cmd_list_controls()
+ if args.info:
+ ctx.do_cmd_info()
+
+ # Filter out capture contexts which are not marked for capture
+ contexts = [ctx for ctx in contexts if ctx.opt_capture > 0]
+
+ if contexts:
+ state = CaptureState(cm, contexts)
+
+ if args.renderer == 'null':
+ import cam_null
+ renderer = cam_null.NullRenderer(state)
+ elif args.renderer == 'kms':
+ import cam_kms
+ renderer = cam_kms.KMSRenderer(state)
+ elif args.renderer == 'qt':
+ import cam_qt
+ renderer = cam_qt.QtRenderer(state)
+ elif args.renderer == 'qtgl':
+ import cam_qtgl
+ renderer = cam_qtgl.QtRenderer(state)
+ else:
+ print('Bad renderer', args.renderer)
+ return -1
+
+ state.renderer = renderer
+
+ state.do_cmd_capture()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/py/cam/cam_kms.py b/src/py/cam/cam_kms.py
new file mode 100644
index 00000000..38fc382d
--- /dev/null
+++ b/src/py/cam/cam_kms.py
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+import pykms
+import selectors
+import sys
+
+
+class KMSRenderer:
+ def __init__(self, state):
+ self.state = state
+
+ self.cm = state.cm
+ self.contexts = state.contexts
+ self.running = False
+
+ card = pykms.Card()
+
+ res = pykms.ResourceManager(card)
+ conn = res.reserve_connector()
+ crtc = res.reserve_crtc(conn)
+ mode = conn.get_default_mode()
+ modeb = mode.to_blob(card)
+
+ req = pykms.AtomicReq(card)
+ req.add_connector(conn, crtc)
+ req.add_crtc(crtc, modeb)
+ r = req.commit_sync(allow_modeset=True)
+ assert(r == 0)
+
+ self.card = card
+ self.resman = res
+ self.crtc = crtc
+ self.mode = mode
+
+ self.bufqueue = []
+ self.current = None
+ self.next = None
+ self.cam_2_drm = {}
+
+ # KMS
+
+ def close(self):
+ req = pykms.AtomicReq(self.card)
+ for s in self.streams:
+ req.add_plane(s['plane'], None, None, dst=(0, 0, 0, 0))
+ req.commit()
+
+ def add_plane(self, req, stream, fb):
+ s = next(s for s in self.streams if s['stream'] == stream)
+ idx = s['idx']
+ plane = s['plane']
+
+ if idx % 2 == 0:
+ x = 0
+ else:
+ x = self.mode.hdisplay - fb.width
+
+ if idx // 2 == 0:
+ y = 0
+ else:
+ y = self.mode.vdisplay - fb.height
+
+ req.add_plane(plane, fb, self.crtc, dst=(x, y, fb.width, fb.height))
+
+ def apply_request(self, drmreq):
+
+ buffers = drmreq['camreq'].buffers
+
+ req = pykms.AtomicReq(self.card)
+
+ for stream, fb in buffers.items():
+ drmfb = self.cam_2_drm.get(fb, None)
+ self.add_plane(req, stream, drmfb)
+
+ req.commit()
+
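+    # Handle a DRM page-flip completion: the previously "next" framebuffer is
+    # now scanned out. Return the displaced request to libcamera for reuse and
+    # apply the next queued request, if any.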
+ def handle_page_flip(self, frame, time):
+ old = self.current
+ self.current = self.next
+
+ if len(self.bufqueue) > 0:
+ self.next = self.bufqueue.pop(0)
+ else:
+ self.next = None
+
+ if self.next:
+ drmreq = self.next
+
+ self.apply_request(drmreq)
+
+ if old:
+ req = old['camreq']
+ ctx = old['camctx']
+ self.state.request_processed(ctx, req)
+
+ def queue(self, drmreq):
+ if not self.next:
+ self.next = drmreq
+ self.apply_request(drmreq)
+ else:
+ self.bufqueue.append(drmreq)
+
+ # libcamera
+
+ def setup(self):
+ self.streams = []
+
+ idx = 0
+ for ctx in self.contexts:
+ for stream in ctx.streams:
+
+ cfg = stream.configuration
+ fmt = cfg.pixel_format
+ fmt = pykms.PixelFormat(fmt.fourcc)
+
+ plane = self.resman.reserve_generic_plane(self.crtc, fmt)
+ assert(plane is not None)
+
+ self.streams.append({
+ 'idx': idx,
+ 'stream': stream,
+ 'plane': plane,
+ 'fmt': fmt,
+ 'size': cfg.size,
+ })
+
+ for fb in ctx.allocator.buffers(stream):
+ w = cfg.size.width
+ h = cfg.size.height
+ fds = []
+ strides = []
+ offsets = []
+ for plane in fb.planes:
+ fds.append(plane.fd)
+ strides.append(cfg.stride)
+ offsets.append(plane.offset)
+
+ drmfb = pykms.DmabufFramebuffer(self.card, w, h, fmt,
+ fds, strides, offsets)
+ self.cam_2_drm[fb] = drmfb
+
+ idx += 1
+
+ def readdrm(self, fileobj):
+ for ev in self.card.read_events():
+ if ev.type == pykms.DrmEventType.FLIP_COMPLETE:
+ self.handle_page_flip(ev.seq, ev.time)
+
+ def readcam(self, fd):
+ self.running = self.state.event_handler()
+
+ def readkey(self, fileobj):
+ sys.stdin.readline()
+ self.running = False
+
+ def run(self):
+ print('Capturing...')
+
+ self.running = True
+
+ sel = selectors.DefaultSelector()
+ sel.register(self.card.fd, selectors.EVENT_READ, self.readdrm)
+ sel.register(self.cm.event_fd, selectors.EVENT_READ, self.readcam)
+ sel.register(sys.stdin, selectors.EVENT_READ, self.readkey)
+
+ print('Press enter to exit')
+
+ while self.running:
+ events = sel.select()
+ for key, mask in events:
+ callback = key.data
+ callback(key.fileobj)
+
+ print('Exiting...')
+
+ def request_handler(self, ctx, req):
+
+ drmreq = {
+ 'camctx': ctx,
+ 'camreq': req,
+ }
+
+ self.queue(drmreq)
diff --git a/src/py/cam/cam_null.py b/src/py/cam/cam_null.py
new file mode 100644
index 00000000..40dbd266
--- /dev/null
+++ b/src/py/cam/cam_null.py
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+import selectors
+import sys
+
+
+class NullRenderer:
+ def __init__(self, state):
+ self.state = state
+
+ self.cm = state.cm
+ self.contexts = state.contexts
+
+ self.running = False
+
+ def setup(self):
+ pass
+
+ def run(self):
+ print('Capturing...')
+
+ self.running = True
+
+ sel = selectors.DefaultSelector()
+ sel.register(self.cm.event_fd, selectors.EVENT_READ, self.readcam)
+ sel.register(sys.stdin, selectors.EVENT_READ, self.readkey)
+
+ print('Press enter to exit')
+
+ while self.running:
+ events = sel.select()
+ for key, mask in events:
+ callback = key.data
+ callback(key.fileobj)
+
+ print('Exiting...')
+
+ def readcam(self, fd):
+ self.running = self.state.event_handler()
+
+ def readkey(self, fileobj):
+ sys.stdin.readline()
+ self.running = False
+
+ def request_handler(self, ctx, req):
+ self.state.request_processed(ctx, req)
diff --git a/src/py/cam/cam_qt.py b/src/py/cam/cam_qt.py
new file mode 100644
index 00000000..22d8c4da
--- /dev/null
+++ b/src/py/cam/cam_qt.py
@@ -0,0 +1,182 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from helpers import mfb_to_rgb
+from PyQt6 import QtCore, QtGui, QtWidgets
+import libcamera as libcam
+import libcamera.utils
+import sys
+
+
+# Loading MJPEG to a QPixmap produces corrupt JPEG data warnings. Ignore these.
+def qt_message_handler(msg_type, msg_log_context, msg_string):
+ if msg_string.startswith("Corrupt JPEG data"):
+ return
+
+ # For some reason qInstallMessageHandler returns None, so we won't
+ # call the old handler
+ if old_msg_handler is not None:
+ old_msg_handler(msg_type, msg_log_context, msg_string)
+ else:
+ print(msg_string)
+
+
+old_msg_handler = QtCore.qInstallMessageHandler(qt_message_handler)
+
+
+def rgb_to_pix(rgb):
+ w = rgb.shape[1]
+ h = rgb.shape[0]
+ qim = QtGui.QImage(rgb, w, h, QtGui.QImage.Format.Format_RGB888)
+ pix = QtGui.QPixmap.fromImage(qim)
+ return pix
+
+
+class QtRenderer:
+ def __init__(self, state):
+ self.state = state
+
+ self.cm = state.cm
+ self.contexts = state.contexts
+
+ def setup(self):
+ self.app = QtWidgets.QApplication([])
+
+ windows = []
+
+ for ctx in self.contexts:
+ for stream in ctx.streams:
+ window = MainWindow(ctx, stream)
+ window.show()
+ windows.append(window)
+
+ self.windows = windows
+
+ buf_mmap_map = {}
+
+ for ctx in self.contexts:
+ for stream in ctx.streams:
+ for buf in ctx.allocator.buffers(stream):
+ mfb = libcamera.utils.MappedFrameBuffer(buf).mmap()
+ buf_mmap_map[buf] = mfb
+
+ self.buf_mmap_map = buf_mmap_map
+
+ def run(self):
+ camnotif = QtCore.QSocketNotifier(self.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
+ camnotif.activated.connect(lambda _: self.readcam())
+
+ keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
+ keynotif.activated.connect(lambda _: self.readkey())
+
+ print('Capturing...')
+
+ self.app.exec()
+
+ print('Exiting...')
+
+ def readcam(self):
+ running = self.state.event_handler()
+
+ if not running:
+ self.app.quit()
+
+ def readkey(self):
+ sys.stdin.readline()
+ self.app.quit()
+
+ def request_handler(self, ctx, req):
+ buffers = req.buffers
+
+ for stream, fb in buffers.items():
+ wnd = next(wnd for wnd in self.windows if wnd.stream == stream)
+
+ mfb = self.buf_mmap_map[fb]
+
+ wnd.handle_request(stream, mfb)
+
+ self.state.request_processed(ctx, req)
+
+ def cleanup(self):
+ for w in self.windows:
+ w.close()
+
+
+class MainWindow(QtWidgets.QWidget):
+ def __init__(self, ctx, stream):
+ super().__init__()
+
+ self.ctx = ctx
+ self.stream = stream
+
+ self.label = QtWidgets.QLabel()
+
+ windowLayout = QtWidgets.QHBoxLayout()
+ self.setLayout(windowLayout)
+
+ windowLayout.addWidget(self.label)
+
+ controlsLayout = QtWidgets.QVBoxLayout()
+ windowLayout.addLayout(controlsLayout)
+
+ windowLayout.addStretch()
+
+ group = QtWidgets.QGroupBox('Info')
+ groupLayout = QtWidgets.QVBoxLayout()
+ group.setLayout(groupLayout)
+ controlsLayout.addWidget(group)
+
+ lab = QtWidgets.QLabel(ctx.id)
+ groupLayout.addWidget(lab)
+
+ self.frameLabel = QtWidgets.QLabel()
+ groupLayout.addWidget(self.frameLabel)
+
+ group = QtWidgets.QGroupBox('Properties')
+ groupLayout = QtWidgets.QVBoxLayout()
+ group.setLayout(groupLayout)
+ controlsLayout.addWidget(group)
+
+ camera = ctx.camera
+
+ for cid, cv in camera.properties.items():
+ lab = QtWidgets.QLabel()
+ lab.setText('{} = {}'.format(cid, cv))
+ groupLayout.addWidget(lab)
+
+ group = QtWidgets.QGroupBox('Controls')
+ groupLayout = QtWidgets.QVBoxLayout()
+ group.setLayout(groupLayout)
+ controlsLayout.addWidget(group)
+
+ for cid, cinfo in camera.controls.items():
+ lab = QtWidgets.QLabel()
+ lab.setText('{} = {}/{}/{}'
+ .format(cid, cinfo.min, cinfo.max, cinfo.default))
+ groupLayout.addWidget(lab)
+
+ controlsLayout.addStretch()
+
+ def buf_to_qpixmap(self, stream, mfb):
+ cfg = stream.configuration
+
+ if cfg.pixel_format == libcam.formats.MJPEG:
+ pix = QtGui.QPixmap(cfg.size.width, cfg.size.height)
+ pix.loadFromData(mfb.planes[0])
+ else:
+ rgb = mfb_to_rgb(mfb, cfg)
+ if rgb is None:
+ raise Exception('Format not supported: ' + cfg.pixel_format)
+
+ pix = rgb_to_pix(rgb)
+
+ return pix
+
+ def handle_request(self, stream, mfb):
+ ctx = self.ctx
+
+ pix = self.buf_to_qpixmap(stream, mfb)
+ self.label.setPixmap(pix)
+
+ self.frameLabel.setText('Queued: {}\nDone: {}\nFps: {:.2f}'
+ .format(ctx.reqs_queued, ctx.reqs_completed, ctx.fps))
diff --git a/src/py/cam/cam_qtgl.py b/src/py/cam/cam_qtgl.py
new file mode 100644
index 00000000..35b4b06b
--- /dev/null
+++ b/src/py/cam/cam_qtgl.py
@@ -0,0 +1,363 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from PyQt6 import QtCore, QtWidgets
+from PyQt6.QtCore import Qt
+
+import math
+import os
+import sys
+
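+# Select the EGL platform before the PyOpenGL imports below, as PyOpenGL
+# reads the environment variable at import time.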
+os.environ['PYOPENGL_PLATFORM'] = 'egl'
+
+from OpenGL.EGL.EXT.image_dma_buf_import import *
+from OpenGL.EGL.KHR.image import *
+from OpenGL.EGL.VERSION.EGL_1_0 import *
+from OpenGL.EGL.VERSION.EGL_1_2 import *
+from OpenGL.EGL.VERSION.EGL_1_3 import *
+
+from OpenGL.GLES2.OES.EGL_image import *
+from OpenGL.GLES2.OES.EGL_image_external import *
+from OpenGL.GLES2.VERSION.GLES2_2_0 import *
+from OpenGL.GLES3.VERSION.GLES3_3_0 import *
+
+from OpenGL.GL import shaders
+
+from gl_helpers import *
+
+
+class EglState:
+ def __init__(self):
+ self.create_display()
+ self.choose_config()
+ self.create_context()
+ self.check_extensions()
+
+ def create_display(self):
+ xdpy = getEGLNativeDisplay()
+ dpy = eglGetDisplay(xdpy)
+ self.display = dpy
+
+ def choose_config(self):
+ dpy = self.display
+
+ major, minor = EGLint(), EGLint()
+
+ b = eglInitialize(dpy, major, minor)
+ assert(b)
+
+ print('EGL {} {}'.format(
+ eglQueryString(dpy, EGL_VENDOR).decode(),
+ eglQueryString(dpy, EGL_VERSION).decode()))
+
+ check_egl_extensions(dpy, ['EGL_EXT_image_dma_buf_import'])
+
+ b = eglBindAPI(EGL_OPENGL_ES_API)
+ assert(b)
+
+ def print_config(dpy, cfg):
+
+ def getconf(a):
+ value = ctypes.c_long()
+ eglGetConfigAttrib(dpy, cfg, a, value)
+ return value.value
+
+ print('EGL Config {}: color buf {}/{}/{}/{} = {}, depth {}, stencil {}, native visualid {}, native visualtype {}'.format(
+ getconf(EGL_CONFIG_ID),
+ getconf(EGL_ALPHA_SIZE),
+ getconf(EGL_RED_SIZE),
+ getconf(EGL_GREEN_SIZE),
+ getconf(EGL_BLUE_SIZE),
+ getconf(EGL_BUFFER_SIZE),
+ getconf(EGL_DEPTH_SIZE),
+ getconf(EGL_STENCIL_SIZE),
+ getconf(EGL_NATIVE_VISUAL_ID),
+ getconf(EGL_NATIVE_VISUAL_TYPE)))
+
+ if False:
+ num_configs = ctypes.c_long()
+ eglGetConfigs(dpy, None, 0, num_configs)
+ print('{} configs'.format(num_configs.value))
+
+ configs = (EGLConfig * num_configs.value)()
+ eglGetConfigs(dpy, configs, num_configs.value, num_configs)
+ for config_id in configs:
+ print_config(dpy, config_id)
+
+ config_attribs = [
+ EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_RED_SIZE, 8,
+ EGL_GREEN_SIZE, 8,
+ EGL_BLUE_SIZE, 8,
+ EGL_ALPHA_SIZE, 0,
+ EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
+ EGL_NONE,
+ ]
+
+ n = EGLint()
+ configs = (EGLConfig * 1)()
+ b = eglChooseConfig(dpy, config_attribs, configs, 1, n)
+ assert(b and n.value == 1)
+ config = configs[0]
+
+ print('Chosen Config:')
+ print_config(dpy, config)
+
+ self.config = config
+
+ def create_context(self):
+ dpy = self.display
+
+ context_attribs = [
+ EGL_CONTEXT_CLIENT_VERSION, 2,
+ EGL_NONE,
+ ]
+
+ context = eglCreateContext(dpy, self.config, EGL_NO_CONTEXT, context_attribs)
+ assert(context)
+
+ b = eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, context)
+ assert(b)
+
+ self.context = context
+
+ def check_extensions(self):
+ check_gl_extensions(['GL_OES_EGL_image'])
+
+ assert(eglCreateImageKHR)
+ assert(eglDestroyImageKHR)
+ assert(glEGLImageTargetTexture2DOES)
+
+
+class QtRenderer:
+ def __init__(self, state):
+ self.state = state
+
+ def setup(self):
+ self.app = QtWidgets.QApplication([])
+
+ window = MainWindow(self.state)
+ window.show()
+
+ self.window = window
+
+ def run(self):
+ camnotif = QtCore.QSocketNotifier(self.state.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
+ camnotif.activated.connect(lambda _: self.readcam())
+
+ keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
+ keynotif.activated.connect(lambda _: self.readkey())
+
+ print('Capturing...')
+
+ self.app.exec()
+
+ print('Exiting...')
+
+ def readcam(self):
+ running = self.state.event_handler()
+
+ if not running:
+ self.app.quit()
+
+ def readkey(self):
+ sys.stdin.readline()
+ self.app.quit()
+
+ def request_handler(self, ctx, req):
+ self.window.handle_request(ctx, req)
+
+ def cleanup(self):
+ self.window.close()
+
+
+class MainWindow(QtWidgets.QWidget):
+ def __init__(self, state):
+ super().__init__()
+
+ self.setAttribute(Qt.WidgetAttribute.WA_PaintOnScreen)
+ self.setAttribute(Qt.WidgetAttribute.WA_NativeWindow)
+
+ self.state = state
+
+ self.textures = {}
+ self.reqqueue = {}
+ self.current = {}
+
+ for ctx in self.state.contexts:
+
+ self.reqqueue[ctx.idx] = []
+ self.current[ctx.idx] = []
+
+ for stream in ctx.streams:
+ self.textures[stream] = None
+
+ num_tiles = len(self.textures)
+ self.num_columns = math.ceil(math.sqrt(num_tiles))
+ self.num_rows = math.ceil(num_tiles / self.num_columns)
+
+ self.egl = EglState()
+
+ self.surface = None
+
+ def paintEngine(self):
+ return None
+
+ def create_surface(self):
+ native_surface = c_void_p(self.winId().__int__())
+ surface = eglCreateWindowSurface(self.egl.display, self.egl.config,
+ native_surface, None)
+
+        # Make the newly created surface current; self.surface is still unset
+        # at this point.
+        b = eglMakeCurrent(self.egl.display, surface, surface, self.egl.context)
+        assert(b)
+
+        self.surface = surface
+
+ def init_gl(self):
+ self.create_surface()
+
+ vertShaderSrc = '''
+ attribute vec2 aPosition;
+ varying vec2 texcoord;
+
+ void main()
+ {
+ gl_Position = vec4(aPosition * 2.0 - 1.0, 0.0, 1.0);
+ texcoord.x = aPosition.x;
+ texcoord.y = 1.0 - aPosition.y;
+ }
+ '''
+ fragShaderSrc = '''
+ #extension GL_OES_EGL_image_external : enable
+ precision mediump float;
+ varying vec2 texcoord;
+ uniform samplerExternalOES texture;
+
+ void main()
+ {
+ gl_FragColor = texture2D(texture, texcoord);
+ }
+ '''
+
+ program = shaders.compileProgram(
+ shaders.compileShader(vertShaderSrc, GL_VERTEX_SHADER),
+ shaders.compileShader(fragShaderSrc, GL_FRAGMENT_SHADER)
+ )
+
+ glUseProgram(program)
+
+ glClearColor(0.5, 0.8, 0.7, 1.0)
+
+ vertPositions = [
+ 0.0, 0.0,
+ 1.0, 0.0,
+ 1.0, 1.0,
+ 0.0, 1.0
+ ]
+
+ inputAttrib = glGetAttribLocation(program, 'aPosition')
+ glVertexAttribPointer(inputAttrib, 2, GL_FLOAT, GL_FALSE, 0, vertPositions)
+ glEnableVertexAttribArray(inputAttrib)
+
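+    # Import a dmabuf-backed frame buffer as an EGLImage and bind it to a
+    # GL_TEXTURE_EXTERNAL_OES texture. Only single-plane formats are handled
+    # here, as only plane 0 is passed to eglCreateImageKHR().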
+ def create_texture(self, stream, fb):
+ cfg = stream.configuration
+ fmt = cfg.pixel_format.fourcc
+ w = cfg.size.width
+ h = cfg.size.height
+
+ attribs = [
+ EGL_WIDTH, w,
+ EGL_HEIGHT, h,
+ EGL_LINUX_DRM_FOURCC_EXT, fmt,
+ EGL_DMA_BUF_PLANE0_FD_EXT, fb.planes[0].fd,
+ EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
+ EGL_DMA_BUF_PLANE0_PITCH_EXT, cfg.stride,
+ EGL_NONE,
+ ]
+
+ image = eglCreateImageKHR(self.egl.display,
+ EGL_NO_CONTEXT,
+ EGL_LINUX_DMA_BUF_EXT,
+ None,
+ attribs)
+ assert(image)
+
+ textures = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures)
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image)
+
+ return textures
+
+ def resizeEvent(self, event):
+ size = event.size()
+
+ print('Resize', size)
+
+ super().resizeEvent(event)
+
+ if self.surface is None:
+ return
+
+ glViewport(0, 0, size.width() // 2, size.height())
+
+ def paintEvent(self, event):
+ if self.surface is None:
+ self.init_gl()
+
+ for ctx_idx, queue in self.reqqueue.items():
+ if len(queue) == 0:
+ continue
+
+ ctx = next(ctx for ctx in self.state.contexts if ctx.idx == ctx_idx)
+
+ if self.current[ctx_idx]:
+ old = self.current[ctx_idx]
+ self.current[ctx_idx] = None
+ self.state.request_processed(ctx, old)
+
+ next_req = queue.pop(0)
+ self.current[ctx_idx] = next_req
+
+ stream, fb = next(iter(next_req.buffers.items()))
+
+ self.textures[stream] = self.create_texture(stream, fb)
+
+ self.paint_gl()
+
+ def paint_gl(self):
+ b = eglMakeCurrent(self.egl.display, self.surface, self.surface, self.egl.context)
+ assert(b)
+
+ glClear(GL_COLOR_BUFFER_BIT)
+
+ size = self.size()
+
+ for idx, ctx in enumerate(self.state.contexts):
+ for stream in ctx.streams:
+ if self.textures[stream] is None:
+ continue
+
+ w = size.width() // self.num_columns
+ h = size.height() // self.num_rows
+
+ x = idx % self.num_columns
+ y = idx // self.num_columns
+
+ x *= w
+ y *= h
+
+ glViewport(x, y, w, h)
+
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, self.textures[stream])
+ glDrawArrays(GL_TRIANGLE_FAN, 0, 4)
+
+ b = eglSwapBuffers(self.egl.display, self.surface)
+ assert(b)
+
+ def handle_request(self, ctx, req):
+ self.reqqueue[ctx.idx].append(req)
+ self.update()
diff --git a/src/py/cam/gl_helpers.py b/src/py/cam/gl_helpers.py
new file mode 100644
index 00000000..53b3e9df
--- /dev/null
+++ b/src/py/cam/gl_helpers.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from OpenGL.EGL.VERSION.EGL_1_0 import EGLNativeDisplayType, eglGetProcAddress, eglQueryString, EGL_EXTENSIONS
+
+from OpenGL.raw.GLES2 import _types as _cs
+from OpenGL.GLES2.VERSION.GLES2_2_0 import *
+from OpenGL.GLES3.VERSION.GLES3_3_0 import *
+from OpenGL import GL as gl
+
+from ctypes import c_int, c_char_p, c_void_p, cdll, POINTER, util, \
+ pointer, CFUNCTYPE, c_bool
+
+
+def getEGLNativeDisplay():
+ _x11lib = cdll.LoadLibrary(util.find_library('X11'))
+ XOpenDisplay = _x11lib.XOpenDisplay
+ XOpenDisplay.argtypes = [c_char_p]
+ XOpenDisplay.restype = POINTER(EGLNativeDisplayType)
+
+ return XOpenDisplay(None)
+
+
+# Hack. PyOpenGL doesn't seem to manage to find glEGLImageTargetTexture2DOES.
+def getglEGLImageTargetTexture2DOES():
+ funcptr = eglGetProcAddress('glEGLImageTargetTexture2DOES')
+ prototype = CFUNCTYPE(None, _cs.GLenum, _cs.GLeglImageOES)
+ return prototype(funcptr)
+
+
+glEGLImageTargetTexture2DOES = getglEGLImageTargetTexture2DOES()
+
+
+def get_gl_extensions():
+ n = GLint()
+ glGetIntegerv(GL_NUM_EXTENSIONS, n)
+ gl_extensions = []
+ for i in range(n.value):
+ gl_extensions.append(gl.glGetStringi(GL_EXTENSIONS, i).decode())
+ return gl_extensions
+
+
+def check_gl_extensions(required_extensions):
+ extensions = get_gl_extensions()
+
+ if False:
+ print('GL EXTENSIONS: ', ' '.join(extensions))
+
+ for ext in required_extensions:
+ if ext not in extensions:
+ raise Exception(ext + ' missing')
+
+
+def get_egl_extensions(egl_display):
+ return eglQueryString(egl_display, EGL_EXTENSIONS).decode().split(' ')
+
+
+def check_egl_extensions(egl_display, required_extensions):
+ extensions = get_egl_extensions(egl_display)
+
+ if False:
+ print('EGL EXTENSIONS: ', ' '.join(extensions))
+
+ for ext in required_extensions:
+ if ext not in extensions:
+ raise Exception(ext + ' missing')
diff --git a/src/py/cam/helpers.py b/src/py/cam/helpers.py
new file mode 100644
index 00000000..2d906667
--- /dev/null
+++ b/src/py/cam/helpers.py
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+#
+# Debayering code from PiCamera documentation
+
+from numpy.lib.stride_tricks import as_strided
+import libcamera as libcam
+import libcamera.utils
+import numpy as np
+
+
+def demosaic(data, r0, g0, g1, b0):
+ # Separate the components from the Bayer data to RGB planes
+
+ rgb = np.zeros(data.shape + (3,), dtype=data.dtype)
+ rgb[r0[1]::2, r0[0]::2, 0] = data[r0[1]::2, r0[0]::2] # Red
+ rgb[g0[1]::2, g0[0]::2, 1] = data[g0[1]::2, g0[0]::2] # Green
+ rgb[g1[1]::2, g1[0]::2, 1] = data[g1[1]::2, g1[0]::2] # Green
+ rgb[b0[1]::2, b0[0]::2, 2] = data[b0[1]::2, b0[0]::2] # Blue
+
+ # Below we present a fairly naive de-mosaic method that simply
+ # calculates the weighted average of a pixel based on the pixels
+ # surrounding it. The weighting is provided by a byte representation of
+ # the Bayer filter which we construct first:
+
+ bayer = np.zeros(rgb.shape, dtype=np.uint8)
+ bayer[r0[1]::2, r0[0]::2, 0] = 1 # Red
+ bayer[g0[1]::2, g0[0]::2, 1] = 1 # Green
+ bayer[g1[1]::2, g1[0]::2, 1] = 1 # Green
+ bayer[b0[1]::2, b0[0]::2, 2] = 1 # Blue
+
+ # Allocate an array to hold our output with the same shape as the input
+ # data. After this we define the size of window that will be used to
+ # calculate each weighted average (3x3). Then we pad out the rgb and
+ # bayer arrays, adding blank pixels at their edges to compensate for the
+ # size of the window when calculating averages for edge pixels.
+
+ output = np.empty(rgb.shape, dtype=rgb.dtype)
+ window = (3, 3)
+ borders = (window[0] - 1, window[1] - 1)
+ border = (borders[0] // 2, borders[1] // 2)
+
+ rgb = np.pad(rgb, [
+ (border[0], border[0]),
+ (border[1], border[1]),
+ (0, 0),
+ ], 'constant')
+ bayer = np.pad(bayer, [
+ (border[0], border[0]),
+ (border[1], border[1]),
+ (0, 0),
+ ], 'constant')
+
+ # For each plane in the RGB data, we use a nifty numpy trick
+ # (as_strided) to construct a view over the plane of 3x3 matrices. We do
+ # the same for the bayer array, then use Einstein summation on each
+ # (np.sum is simpler, but copies the data so it's slower), and divide
+ # the results to get our weighted average:
+
+ for plane in range(3):
+ p = rgb[..., plane]
+ b = bayer[..., plane]
+ pview = as_strided(p, shape=(
+ p.shape[0] - borders[0],
+ p.shape[1] - borders[1]) + window, strides=p.strides * 2)
+ bview = as_strided(b, shape=(
+ b.shape[0] - borders[0],
+ b.shape[1] - borders[1]) + window, strides=b.strides * 2)
+ psum = np.einsum('ijkl->ij', pview)
+ bsum = np.einsum('ijkl->ij', bview)
+ output[..., plane] = psum // bsum
+
+ return output
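+
+# Usage sketch with hypothetical values: demosaic a BGGR frame already
+# unpacked to uint16, then scale 10-bit samples down to 8 bits:
+#
+#     rgb = demosaic(raw, r0=(1, 1), g0=(1, 0), g1=(0, 1), b0=(0, 0))
+#     rgb8 = (rgb >> 2).astype(np.uint8)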
+
+
+def to_rgb(fmt, size, data):
+ w = size.width
+ h = size.height
+
+ if fmt == libcam.formats.YUYV:
+ # YUV422
+ yuyv = data.reshape((h, w // 2 * 4))
+
+ # YUV444
+ yuv = np.empty((h, w, 3), dtype=np.uint8)
+ yuv[:, :, 0] = yuyv[:, 0::2] # Y
+ yuv[:, :, 1] = yuyv[:, 1::4].repeat(2, axis=1) # U
+ yuv[:, :, 2] = yuyv[:, 3::4].repeat(2, axis=1) # V
+
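+        # Approximately the BT.601 full-range YUV to RGB coefficients,
+        # applied as rgb = yuv . m; the constant offsets below fold in
+        # the -128 bias of the U and V components.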
+ m = np.array([
+ [1.0, 1.0, 1.0],
+ [-0.000007154783816076815, -0.3441331386566162, 1.7720025777816772],
+ [1.4019975662231445, -0.7141380310058594, 0.00001542569043522235]
+ ])
+
+ rgb = np.dot(yuv, m)
+ rgb[:, :, 0] -= 179.45477266423404
+ rgb[:, :, 1] += 135.45870971679688
+ rgb[:, :, 2] -= 226.8183044444304
+ rgb = rgb.astype(np.uint8)
+
+ elif fmt == libcam.formats.RGB888:
+ rgb = data.reshape((h, w, 3))
+ rgb[:, :, [0, 1, 2]] = rgb[:, :, [2, 1, 0]]
+
+ elif fmt == libcam.formats.BGR888:
+ rgb = data.reshape((h, w, 3))
+
+ elif fmt in [libcam.formats.ARGB8888, libcam.formats.XRGB8888]:
+ rgb = data.reshape((h, w, 4))
+ rgb = np.flip(rgb, axis=2)
+ # drop alpha component
+ rgb = np.delete(rgb, np.s_[0::4], axis=2)
+
+ elif str(fmt).startswith('S'):
+ fmt = str(fmt)
+ bayer_pattern = fmt[1:5]
+ bitspp = int(fmt[5:])
+
+ if bitspp == 8:
+ data = data.reshape((h, w))
+ data = data.astype(np.uint16)
+ elif bitspp in [10, 12]:
+ data = data.view(np.uint16)
+ data = data.reshape((h, w))
+ else:
+            raise Exception('Bad bitspp: ' + str(bitspp))
+
+ idx = bayer_pattern.find('R')
+ assert(idx != -1)
+ r0 = (idx % 2, idx // 2)
+
+ idx = bayer_pattern.find('G')
+ assert(idx != -1)
+ g0 = (idx % 2, idx // 2)
+
+ idx = bayer_pattern.find('G', idx + 1)
+ assert(idx != -1)
+ g1 = (idx % 2, idx // 2)
+
+ idx = bayer_pattern.find('B')
+ assert(idx != -1)
+ b0 = (idx % 2, idx // 2)
+
+ rgb = demosaic(data, r0, g0, g1, b0)
+ rgb = (rgb >> (bitspp - 8)).astype(np.uint8)
+
+ else:
+ rgb = None
+
+ return rgb
+
+
+# A naive format conversion to 24-bit RGB
+def mfb_to_rgb(mfb: libcamera.utils.MappedFrameBuffer, cfg: libcam.StreamConfiguration):
+ data = np.array(mfb.planes[0], dtype=np.uint8)
+ rgb = to_rgb(cfg.pixel_format, cfg.size, data)
+ return rgb
diff --git a/src/py/examples/simple-cam.py b/src/py/examples/simple-cam.py
new file mode 100755
index 00000000..1cd1019d
--- /dev/null
+++ b/src/py/examples/simple-cam.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python3
+
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+# A simple libcamera capture example
+#
+# This is a python version of simple-cam from:
+# https://git.libcamera.org/libcamera/simple-cam.git
+#
+# \todo Move to simple-cam repository when the Python API has stabilized more
+
+import libcamera as libcam
+import selectors
+import sys
+import time
+
+TIMEOUT_SEC = 3
+
+
+def handle_camera_event(cm):
+ # cm.get_ready_requests() returns the ready requests, which in our case
+ # should almost always return a single Request, but in some cases there
+ # could be multiple or none.
+
+ reqs = cm.get_ready_requests()
+
+ # Process the captured frames
+
+ for req in reqs:
+ process_request(req)
+
+
+def process_request(request):
+ global camera
+
+ print()
+
+ print(f'Request completed: {request}')
+
+ # When a request has completed, it is populated with a metadata control
+ # list that allows an application to determine various properties of
+ # the completed request. This can include the timestamp of the Sensor
+ # capture, or its gain and exposure values, or properties from the IPA
+ # such as the state of the 3A algorithms.
+ #
+ # To examine each request, print all the metadata for inspection. A custom
+ # application can parse each of these items and process them according to
+ # its needs.
+
+ requestMetadata = request.metadata
+ for id, value in requestMetadata.items():
+ print(f'\t{id.name} = {value}')
+
+ # Each buffer has its own FrameMetadata to describe its state, or the
+ # usage of each buffer. While in our simple capture we only provide one
+ # buffer per request, a request can have a buffer for each stream that
+ # is established when configuring the camera.
+ #
+ # This allows a viewfinder and a still image to be processed at the
+ # same time, or to allow obtaining the RAW capture buffer from the
+ # sensor along with the image as processed by the ISP.
+
+ buffers = request.buffers
+ for _, buffer in buffers.items():
+ metadata = buffer.metadata
+
+ # Print some information about the buffer which has completed.
+ print(f' seq: {metadata.sequence:06} timestamp: {metadata.timestamp} bytesused: ' +
+ '/'.join([str(p.bytes_used) for p in metadata.planes]))
+
+ # Image data can be accessed here, but the FrameBuffer
+ # must be mapped by the application
+
+ # Re-queue the Request to the camera.
+ request.reuse()
+ camera.queue_request(request)
+
+
+# ----------------------------------------------------------------------------
+# Camera Naming.
+#
+# Applications are responsible for deciding how to name cameras, and present
+# that information to the users. Every camera has a unique identifier, though
+# this string is not designed to be friendly for a human reader.
+#
+# To support human consumable names, libcamera provides camera properties
+# that allow an application to determine a naming scheme based on its needs.
+#
+# In this example, we focus on the location property, but also detail the
+# model string for external cameras, as this is more likely to be visible
+# information to the user of an externally connected device.
+#
+# The unique camera ID is appended for informative purposes.
+#
+def camera_name(camera):
+ props = camera.properties
+ location = props.get(libcam.properties.Location, None)
+
+ if location == libcam.properties.LocationEnum.Front:
+ name = 'Internal front camera'
+ elif location == libcam.properties.LocationEnum.Back:
+ name = 'Internal back camera'
+ elif location == libcam.properties.LocationEnum.External:
+ name = 'External camera'
+ if libcam.properties.Model in props:
+ name += f' "{props[libcam.properties.Model]}"'
+ else:
+ name = 'Undefined location'
+
+ name += f' ({camera.id})'
+
+ return name
+
+
+def main():
+ global camera
+
+ # --------------------------------------------------------------------
+ # Get the Camera Manager.
+ #
+    # The Camera Manager is responsible for enumerating all the Cameras
+    # in the system, by associating Pipeline Handlers with media entities
+ # registered in the system.
+ #
+ # The CameraManager provides a list of available Cameras that
+ # applications can operate on.
+ #
+ # There can only be a single CameraManager within any process space.
+
+ cm = libcam.CameraManager.singleton()
+
+ # Just as a test, generate names of the Cameras registered in the
+ # system, and list them.
+
+ for camera in cm.cameras:
+ print(f' - {camera_name(camera)}')
+
+ # --------------------------------------------------------------------
+ # Camera
+ #
+ # Camera are entities created by pipeline handlers, inspecting the
+ # entities registered in the system and reported to applications
+ # by the CameraManager.
+ #
+ # In general terms, a Camera corresponds to a single image source
+ # available in the system, such as an image sensor.
+ #
+ # Applications lock usage of a Camera by 'acquiring' it. Once done
+ # with it, the application shall similarly 'release' the Camera.
+ #
+ # As an example, use the first available camera in the system after
+ # making sure that at least one camera is available.
+ #
+ # Cameras can be obtained by their ID or their index. To demonstrate
+ # this, the following code gets the ID of the first camera, then gets
+ # the camera associated with that ID (which is of course the same as
+ # cm.cameras[0]).
+
+ if not cm.cameras:
+ print('No cameras were identified on the system.')
+ return -1
+
+ camera_id = cm.cameras[0].id
+ camera = cm.get(camera_id)
+ camera.acquire()
+
+ # --------------------------------------------------------------------
+ # Stream
+ #
+ # Each Camera supports a variable number of Streams. A Stream is
+ # produced by processing the data produced by an image source,
+ # usually by an ISP.
+ #
+ # +-------------------------------------------------------+
+ # | Camera |
+ # | +-----------+ |
+ # | +--------+ | |------> [ Main output ] |
+ # | | Image | | | |
+ # | | |---->| ISP |------> [ Viewfinder ] |
+ # | | Source | | | |
+ # | +--------+ | |------> [ Still Capture ] |
+ # | +-----------+ |
+ # +-------------------------------------------------------+
+ #
+ # The number and capabilities of the Streams in a Camera are
+ # a platform-dependent property, and it is the responsibility of
+ # the pipeline handler implementation to report them correctly.
+
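+ # The Streams of a Camera can be listed through the 'streams'
+ # property. A sketch (each Stream reports its active configuration
+ # once the Camera has been configured below):
+ #
+ #     for stream in camera.streams:
+ #         print(stream.configuration)
+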
+ # --------------------------------------------------------------------
+ # Camera Configuration.
+ #
+ # Camera configuration is tricky! It boils down to assigning system
+ # resources (such as DMA engines, scalers, format converters) to
+ # the different image streams an application has requested.
+ #
+ # Depending on the system characteristics, some combinations of
+ # sizes, formats and stream usages might or might not be possible.
+ #
+ # A Camera produces a CameraConfiguration based on a set of intended
+ # roles for each Stream the application requires.
+
+ config = camera.generate_configuration([libcam.StreamRole.Viewfinder])
+
+ # The CameraConfiguration contains a StreamConfiguration instance
+ # for each StreamRole requested by the application, provided
+ # the Camera can support all of them.
+ #
+ # Each StreamConfiguration has a default size and format, assigned
+ # by the Camera depending on the Role the application has requested.
+
+ stream_config = config.at(0)
+ print(f'Default viewfinder configuration is: {stream_config}')
+
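+ # The formats supported by the stream can be inspected through the
+ # StreamFormats instance exposed by the StreamConfiguration. A sketch
+ # of enumerating them:
+ #
+ #     formats = stream_config.formats
+ #     for pixel_format in formats.pixel_formats:
+ #         print(pixel_format, formats.sizes(pixel_format))
+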
+ # Each StreamConfiguration parameter which is part of a
+ # CameraConfiguration can be independently modified by the
+ # application.
+ #
+ # In order to validate the modified parameters, the CameraConfiguration
+ # should be validated -before- it gets applied to the Camera.
+ #
+ # The validation process adjusts each StreamConfiguration to a valid
+ # configuration which is as close as possible to the one requested.
+
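+ # A sketch of checking the result of validation, using the status
+ # values defined in CameraConfiguration.Status:
+ #
+ #     status = config.validate()
+ #     if status == libcam.CameraConfiguration.Status.Invalid:
+ #         raise RuntimeError('Camera configuration is invalid')
+ #     elif status == libcam.CameraConfiguration.Status.Adjusted:
+ #         print('Camera configuration was adjusted')
+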
+ config.validate()
+ print(f'Validated viewfinder configuration is: {stream_config}')
+
+ # Once we have a validated configuration, we can apply it to the
+ # Camera.
+
+ camera.configure(config)
+
+ # --------------------------------------------------------------------
+ # Buffer Allocation
+ #
+ # Now that a camera has been configured, it knows all about its
+ # Streams' sizes and formats. The captured images need to be stored in
+ # framebuffers which can either be provided by the application to the
+ # library, or allocated in the Camera and exposed to the application
+ # by libcamera.
+ #
+ # An application may decide to allocate framebuffers from elsewhere,
+ # for example in memory allocated by the display driver that will
+ # render the captured frames. The application will provide them to
+ # libcamera by constructing FrameBuffer instances to capture images
+ # directly into.
+ #
+ # Alternatively, libcamera can help the application by exporting
+ # buffers allocated in the Camera, using a FrameBufferAllocator
+ # instance that references a configured Camera to determine the
+ # appropriate buffer size and types to create.
+
+ allocator = libcam.FrameBufferAllocator(camera)
+
+ for cfg in config:
+ allocated = allocator.allocate(cfg.stream)
+ print(f'Allocated {allocated} buffers for stream')
+
+ # --------------------------------------------------------------------
+ # Frame Capture
+ #
+ # The libcamera frame capture model is based on the 'Request' concept.
+ # For each frame a Request has to be queued to the Camera.
+ #
+ # A Request refers to at least one Stream, for which a Buffer that
+ # will be filled with image data shall be added to the Request.
+ #
+ # A Request is associated with a list of Controls, which are tunable
+ # parameters (similar to v4l2_controls) that have to be applied to
+ # the image.
+ #
+ # Once a request completes, all its buffers will contain image data
+ # that applications can access, and each of them will carry a list of
+ # metadata properties that report the capture parameters applied to
+ # the image.
+
+ stream = stream_config.stream
+ buffers = allocator.buffers(stream)
+ requests = []
+ for i in range(len(buffers)):
+ request = camera.create_request()
+
+ buffer = buffers[i]
+ request.add_buffer(stream, buffer)
+
+ # Controls can be added to a request on a per frame basis.
+ request.set_control(libcam.controls.Brightness, 0.5)
+
+ requests.append(request)
+
+ # --------------------------------------------------------------------
+ # Start Capture
+ #
+ # In order to capture frames the Camera has to be started and
+ # Requests queued to it. Enough Requests to fill the Camera pipeline
+ # depth have to be queued before the Camera starts delivering frames.
+ #
+ # When a Request has been completed, it will be added to a list in the
+ # CameraManager and an event will be raised using eventfd.
+ #
+ # The list of completed Requests can be retrieved with
+ # CameraManager.get_ready_requests(), which will also clear the list in the
+ # CameraManager.
+ #
+ # The eventfd can be retrieved from CameraManager.event_fd, and the fd can
+ # be waited upon using e.g. Python's selectors.
+
+ camera.start()
+ for request in requests:
+ camera.queue_request(request)
+
+ sel = selectors.DefaultSelector()
+ sel.register(cm.event_fd, selectors.EVENT_READ, lambda fd: handle_camera_event(cm))
+
+ start_time = time.time()
+
+ while time.time() - start_time < TIMEOUT_SEC:
+ events = sel.select()
+ for key, mask in events:
+ key.data(key.fileobj)
+
+ # --------------------------------------------------------------------
+ # Clean Up
+ #
+ # Stop the Camera and release the resources it used.
+ # libcamera has now released all the resources it owned.
+
+ camera.stop()
+ camera.release()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/py/examples/simple-capture.py b/src/py/examples/simple-capture.py
new file mode 100755
index 00000000..4b85408f
--- /dev/null
+++ b/src/py/examples/simple-capture.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+# A simple capture example showing:
+# - How to set up the camera
+# - How to capture a certain number of frames in a blocking manner
+# - How to stop the camera
+#
+# This simple example is, in many ways, too simple. The purpose of the example
+# is to introduce the concepts. A more realistic example is given in
+# simple-continuous-capture.py.
+
+import argparse
+import libcamera as libcam
+import selectors
+import sys
+
+# Number of frames to capture
+TOTAL_FRAMES = 30
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--camera', type=str, default='1',
+ help='Camera index number (starting from 1) or part of the name')
+ parser.add_argument('-f', '--format', type=str, help='Pixel format')
+ parser.add_argument('-s', '--size', type=str, help='Size ("WxH")')
+ args = parser.parse_args()
+
+ cm = libcam.CameraManager.singleton()
+
+ try:
+ if args.camera.isnumeric():
+ cam_idx = int(args.camera)
+ cam = next((cam for i, cam in enumerate(cm.cameras) if i + 1 == cam_idx))
+ else:
+ cam = next((cam for cam in cm.cameras if args.camera in cam.id))
+ except Exception:
+ print(f'Failed to find camera "{args.camera}"')
+ return -1
+
+ # Acquire the camera for our use
+
+ cam.acquire()
+
+ # Configure the camera
+
+ cam_config = cam.generate_configuration([libcam.StreamRole.Viewfinder])
+
+ stream_config = cam_config.at(0)
+
+ if args.format:
+ fmt = libcam.PixelFormat(args.format)
+ stream_config.pixel_format = fmt
+
+ if args.size:
+ w, h = [int(v) for v in args.size.split('x')]
+ stream_config.size = libcam.Size(w, h)
+
+ cam.configure(cam_config)
+
+ print(f'Capturing {TOTAL_FRAMES} frames with {stream_config}')
+
+ stream = stream_config.stream
+
+ # Allocate the buffers for capture
+
+ allocator = libcam.FrameBufferAllocator(cam)
+ ret = allocator.allocate(stream)
+ assert ret > 0
+
+ num_bufs = len(allocator.buffers(stream))
+
+ # Create the requests and assign a buffer for each request
+
+ reqs = []
+ for i in range(num_bufs):
+ # Use the buffer index as the cookie
+ req = cam.create_request(i)
+
+ buffer = allocator.buffers(stream)[i]
+ req.add_buffer(stream, buffer)
+
+ reqs.append(req)
+
+ # Start the camera
+
+ cam.start()
+
+ # frames_queued and frames_done track the number of frames queued and done
+
+ frames_queued = 0
+ frames_done = 0
+
+ # Queue the requests to the camera
+
+ for req in reqs:
+ cam.queue_request(req)
+ frames_queued += 1
+
+ # The main loop. Wait for the queued Requests to complete, process them,
+ # and re-queue them again.
+
+ sel = selectors.DefaultSelector()
+ sel.register(cm.event_fd, selectors.EVENT_READ)
+
+ while frames_done < TOTAL_FRAMES:
+ # cm.get_ready_requests() does not block, so we use a Selector to wait
+ # for a camera event. Here we should almost always get a single
+ # Request, but in some cases there could be multiple or none.
+
+ events = sel.select()
+ if not events:
+ continue
+
+ reqs = cm.get_ready_requests()
+
+ for req in reqs:
+ frames_done += 1
+
+ buffers = req.buffers
+
+ # A ready Request could contain multiple buffers if multiple streams
+ # were being used. Here we know we only have a single stream,
+ # and we use next(iter()) to get the first and only buffer.
+
+ assert len(buffers) == 1
+
+ stream, fb = next(iter(buffers.items()))
+
+ # Here we could process the received buffer. In this example we only
+ # print a few details below.
+
+ meta = fb.metadata
+
+ print("seq {:3}, bytes {}, frames queued/done {:3}/{:<3}"
+ .format(meta.sequence,
+ '/'.join([str(p.bytes_used) for p in meta.planes]),
+ frames_queued, frames_done))
+
+ # If we want to capture more frames we need to queue more Requests.
+ # We could create a totally new Request, but it is more efficient
+ # to reuse the existing one that we just received.
+ if frames_queued < TOTAL_FRAMES:
+ req.reuse()
+ cam.queue_request(req)
+ frames_queued += 1
+
+ # Stop the camera
+
+ cam.stop()
+
+ # Release the camera
+
+ cam.release()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/py/examples/simple-continuous-capture.py b/src/py/examples/simple-continuous-capture.py
new file mode 100755
index 00000000..e1cb931e
--- /dev/null
+++ b/src/py/examples/simple-continuous-capture.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+# A simple capture example extending the simple-capture.py example:
+# - Capturing frames from multiple cameras using events
+# - Listening to events from stdin to exit the application
+# - Memory mapping the frames and calculating CRCs
+
+import binascii
+import libcamera as libcam
+import libcamera.utils
+import selectors
+import sys
+
+
+# A container class for our state per camera
+class CameraCaptureContext:
+ idx: int
+ cam: libcam.Camera
+ reqs: list[libcam.Request]
+ mfbs: dict[libcam.FrameBuffer, libcamera.utils.MappedFrameBuffer]
+
+ def __init__(self, cam, idx):
+ self.idx = idx
+ self.cam = cam
+
+ # Acquire the camera for our use
+
+ cam.acquire()
+
+ # Configure the camera
+
+ cam_config = cam.generate_configuration([libcam.StreamRole.Viewfinder])
+
+ stream_config = cam_config.at(0)
+
+ cam.configure(cam_config)
+
+ stream = stream_config.stream
+
+ # Allocate the buffers for capture
+
+ allocator = libcam.FrameBufferAllocator(cam)
+ ret = allocator.allocate(stream)
+ assert ret > 0
+
+ num_bufs = len(allocator.buffers(stream))
+
+ print(f'cam{idx} ({cam.id}): capturing {num_bufs} buffers with {stream_config}')
+
+ # Create the requests and assign a buffer for each request
+
+ self.reqs = []
+ self.mfbs = {}
+
+ for i in range(num_bufs):
+ # Use the camera index as the "cookie", so that handle_request()
+ # can map a completed Request back to its CameraCaptureContext
+ req = cam.create_request(idx)
+
+ buffer = allocator.buffers(stream)[i]
+ req.add_buffer(stream, buffer)
+
+ self.reqs.append(req)
+
+ # Save the memory-mapped buffer so we can calculate the CRC later
+ self.mfbs[buffer] = libcamera.utils.MappedFrameBuffer(buffer).mmap()
+
+ def uninit_camera(self):
+ # Stop the camera
+
+ self.cam.stop()
+
+ # Release the camera
+
+ self.cam.release()
+
+
+# A container class for our state
+class CaptureContext:
+ cm: libcam.CameraManager
+ camera_contexts: list[CameraCaptureContext] = []
+
+ def handle_camera_event(self):
+ # cm.get_ready_requests() returns the ready requests, which in our
+ # case is almost always a single Request, but in some cases there
+ # could be multiple or none.
+
+ reqs = self.cm.get_ready_requests()
+
+ # Process the captured frames
+
+ for req in reqs:
+ self.handle_request(req)
+
+ return True
+
+ def handle_request(self, req: libcam.Request):
+ cam_ctx = self.camera_contexts[req.cookie]
+
+ buffers = req.buffers
+
+ assert len(buffers) == 1
+
+ # A ready Request could contain multiple buffers if multiple streams
+ # were being used. Here we know we only have a single stream,
+ # and we use next(iter()) to get the first and only buffer.
+
+ stream, fb = next(iter(buffers.items()))
+
+ # Use the MappedFrameBuffer to access the pixel data with the CPU.
+ # We calculate the CRC for each plane.
+
+ mfb = cam_ctx.mfbs[fb]
+ crcs = [binascii.crc32(p) for p in mfb.planes]
+
+ meta = fb.metadata
+
+ print('cam{:<6} seq {:<6} bytes {:10} CRCs {}'
+ .format(cam_ctx.idx,
+ meta.sequence,
+ '/'.join([str(p.bytes_used) for p in meta.planes]),
+ crcs))
+
+ # We want to re-queue the buffer we just handled. Instead of creating
+ # a new Request, we re-use the old one. We need to call req.reuse()
+ # to re-initialize the Request before queuing it.
+
+ req.reuse()
+ cam_ctx.cam.queue_request(req)
+
+ def handle_key_event(self):
+ sys.stdin.readline()
+ print('Exiting...')
+ return False
+
+ def capture(self):
+ # Queue the requests to the camera
+
+ for cam_ctx in self.camera_contexts:
+ for req in cam_ctx.reqs:
+ cam_ctx.cam.queue_request(req)
+
+ # Use a Selector to wait for events from the cameras and from the keyboard
+
+ sel = selectors.DefaultSelector()
+ sel.register(sys.stdin, selectors.EVENT_READ, self.handle_key_event)
+ sel.register(self.cm.event_fd, selectors.EVENT_READ, lambda: self.handle_camera_event())
+
+ running = True
+
+ while running:
+ events = sel.select()
+ for key, mask in events:
+ # If the handler returns False, we should exit
+ if not key.data():
+ running = False
+
+
+def main():
+ cm = libcam.CameraManager.singleton()
+
+ ctx = CaptureContext()
+ ctx.cm = cm
+
+ for idx, cam in enumerate(cm.cameras):
+ cam_ctx = CameraCaptureContext(cam, idx)
+ ctx.camera_contexts.append(cam_ctx)
+
+ # Start the cameras
+
+ for cam_ctx in ctx.camera_contexts:
+ cam_ctx.cam.start()
+
+ ctx.capture()
+
+ for cam_ctx in ctx.camera_contexts:
+ cam_ctx.uninit_camera()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/py/libcamera/__init__.py b/src/py/libcamera/__init__.py
new file mode 100644
index 00000000..e234a5e4
--- /dev/null
+++ b/src/py/libcamera/__init__.py
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from ._libcamera import *
diff --git a/src/py/libcamera/gen-py-controls.py b/src/py/libcamera/gen-py-controls.py
new file mode 100755
index 00000000..d43a7c1c
--- /dev/null
+++ b/src/py/libcamera/gen-py-controls.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Generate Python bindings for controls from YAML
+
+import argparse
+import jinja2
+import sys
+import yaml
+
+from controls import Control
+
+
+def find_common_prefix(strings):
+ prefix = strings[0]
+
+ for string in strings[1:]:
+ while string[:len(prefix)] != prefix and prefix:
+ prefix = prefix[:len(prefix) - 1]
+ if not prefix:
+ break
+
+ return prefix
+
+
+def extend_control(ctrl, mode):
+ if ctrl.vendor != 'libcamera':
+ ctrl.klass = ctrl.vendor
+ ctrl.namespace = f'{ctrl.vendor}::'
+ else:
+ ctrl.klass = mode
+ ctrl.namespace = ''
+
+ if not ctrl.is_enum:
+ return ctrl
+
+ if mode == 'controls':
+ # Adjustments for controls
+ if ctrl.name == 'LensShadingMapMode':
+ prefix = 'LensShadingMapMode'
+ else:
+ prefix = find_common_prefix([e.name for e in ctrl.enum_values])
+ else:
+ # Adjustments for properties
+ prefix = find_common_prefix([e.name for e in ctrl.enum_values])
+
+ for enum in ctrl.enum_values:
+ enum.py_name = enum.name[len(prefix):]
+
+ return ctrl
+
+
+def main(argv):
+ headers = {
+ 'controls': 'control_ids.h',
+ 'properties': 'property_ids.h',
+ }
+
+ # Parse command line arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--mode', '-m', type=str, required=True,
+ help='Mode is either "controls" or "properties"')
+ parser.add_argument('--output', '-o', metavar='file', type=str,
+ help='Output file name. Defaults to standard output if not specified.')
+ parser.add_argument('--template', '-t', type=str, required=True,
+ help='Template file name.')
+ parser.add_argument('input', type=str, nargs='+',
+ help='Input file name.')
+ args = parser.parse_args(argv[1:])
+
+ if not headers.get(args.mode):
+ print(f'Invalid mode option "{args.mode}"', file=sys.stderr)
+ return -1
+
+ controls = []
+ vendors = []
+
+ # Avoid shadowing the 'input' builtin, and close the file promptly
+ for filename in args.input:
+ with open(filename, 'rb') as f:
+ data = yaml.safe_load(f)
+
+ vendor = data['vendor']
+ if vendor != 'libcamera':
+ vendors.append(vendor)
+
+ for ctrl in data['controls']:
+ ctrl = Control(*ctrl.popitem(), vendor, args.mode)
+ controls.append(extend_control(ctrl, args.mode))
+
+ data = {
+ 'mode': args.mode,
+ 'header': headers[args.mode],
+ 'vendors': vendors,
+ 'controls': controls,
+ }
+
+ env = jinja2.Environment()
+ template = env.from_string(open(args.template, 'r', encoding='utf-8').read())
+ string = template.render(data)
+
+ if args.output:
+ output = open(args.output, 'w', encoding='utf-8')
+ output.write(string)
+ output.close()
+ else:
+ sys.stdout.write(string)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/src/py/libcamera/gen-py-formats.py b/src/py/libcamera/gen-py-formats.py
new file mode 100755
index 00000000..0ff1d12a
--- /dev/null
+++ b/src/py/libcamera/gen-py-formats.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Generate Python format definitions from YAML
+
+import argparse
+import string
+import sys
+import yaml
+
+
+def generate(formats):
+ fmts = []
+
+ for fmt in formats:
+ name, _ = fmt.popitem()
+ fmts.append(f'\t\t.def_readonly_static("{name}", &libcamera::formats::{name})')
+
+ return {'formats': '\n'.join(fmts)}
+
+
+def fill_template(template, data):
+ with open(template, encoding='utf-8') as f:
+ template = f.read()
+
+ template = string.Template(template)
+ return template.substitute(data)
+
+
+def main(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-o', dest='output', metavar='file', type=str,
+ help='Output file name. Defaults to standard output if not specified.')
+ parser.add_argument('input', type=str,
+ help='Input file name.')
+ parser.add_argument('template', type=str,
+ help='Template file name.')
+ args = parser.parse_args(argv[1:])
+
+ with open(args.input, encoding='utf-8') as f:
+ formats = yaml.safe_load(f)['formats']
+
+ data = generate(formats)
+ data = fill_template(args.template, data)
+
+ if args.output:
+ with open(args.output, 'w', encoding='utf-8') as f:
+ f.write(data)
+ else:
+ sys.stdout.write(data)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build
new file mode 100644
index 00000000..596a203c
--- /dev/null
+++ b/src/py/libcamera/meson.build
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: CC0-1.0
+
+py3_dep = dependency('python3', required : get_option('pycamera'))
+
+if not py3_dep.found()
+ pycamera_enabled = false
+ subdir_done()
+endif
+
+pybind11_dep = dependency('pybind11', required : get_option('pycamera'))
+
+if not pybind11_dep.found()
+ pycamera_enabled = false
+ subdir_done()
+endif
+
+pycamera_enabled = true
+
+pycamera_sources = files([
+ 'py_camera_manager.cpp',
+ 'py_color_space.cpp',
+ 'py_enums.cpp',
+ 'py_geometry.cpp',
+ 'py_helpers.cpp',
+ 'py_main.cpp',
+ 'py_transform.cpp',
+])
+
+# Generate controls and properties
+
+gen_py_controls_template = files('py_controls_generated.cpp.in')
+gen_py_controls = files('gen-py-controls.py')
+
+pycamera_sources += custom_target('py_gen_controls',
+ input : controls_files,
+ output : ['py_controls_generated.cpp'],
+ command : [gen_py_controls, '--mode', 'controls', '-o', '@OUTPUT@',
+ '-t', gen_py_controls_template, '@INPUT@'],
+ env : py_build_env)
+
+pycamera_sources += custom_target('py_gen_properties',
+ input : properties_files,
+ output : ['py_properties_generated.cpp'],
+ command : [gen_py_controls, '--mode', 'properties', '-o', '@OUTPUT@',
+ '-t', gen_py_controls_template, '@INPUT@'],
+ env : py_build_env)
+
+# Generate formats
+
+gen_py_formats_input_files = files([
+ '../../libcamera/formats.yaml',
+ 'py_formats_generated.cpp.in',
+])
+
+gen_py_formats = files('gen-py-formats.py')
+
+pycamera_sources += custom_target('py_gen_formats',
+ input : gen_py_formats_input_files,
+ output : ['py_formats_generated.cpp'],
+ command : [gen_py_formats, '-o', '@OUTPUT@', '@INPUT@'])
+
+pycamera_deps = [
+ libcamera_private,
+ py3_dep,
+ pybind11_dep,
+]
+
+pycamera_args = [
+ '-fvisibility=hidden',
+ '-Wno-shadow',
+ '-DPYBIND11_USE_SMART_HOLDER_AS_DEFAULT',
+]
+
+destdir = get_option('libdir') / ('python' + py3_dep.version()) / 'site-packages' / 'libcamera'
+
+pycamera = shared_module('_libcamera',
+ pycamera_sources,
+ install : true,
+ install_dir : destdir,
+ install_tag : 'python-runtime',
+ name_prefix : '',
+ dependencies : pycamera_deps,
+ cpp_args : pycamera_args)
+
+# Create symlinks from the build dir to the source dir so that we can use the
+# Python module directly from the build dir.
+
+run_command('ln', '-fsrT', files('__init__.py'),
+ meson.current_build_dir() / '__init__.py',
+ check : true)
+
+run_command('ln', '-fsrT', meson.current_source_dir() / 'utils',
+ meson.current_build_dir() / 'utils',
+ check : true)
+
+install_data(['__init__.py'],
+ install_dir : destdir,
+ install_tag : 'python-runtime')
+
+# \todo Generate stubs when building. See https://peps.python.org/pep-0484/#stub-files
+# Note: Depends on pybind11-stubgen. To generate pylibcamera stubs:
+# $ PYTHONPATH=build/src/py pybind11-stubgen --no-setup-py -o build/src/py libcamera
diff --git a/src/py/libcamera/py_camera_manager.cpp b/src/py/libcamera/py_camera_manager.cpp
new file mode 100644
index 00000000..9ccb7aad
--- /dev/null
+++ b/src/py/libcamera/py_camera_manager.cpp
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include "py_camera_manager.h"
+
+#include <errno.h>
+#include <memory>
+#include <sys/eventfd.h>
+#include <system_error>
+#include <unistd.h>
+#include <vector>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+PyCameraManager::PyCameraManager()
+{
+ LOG(Python, Debug) << "PyCameraManager()";
+
+ cameraManager_ = std::make_unique<CameraManager>();
+
+ int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (fd == -1)
+ throw std::system_error(errno, std::generic_category(),
+ "Failed to create eventfd");
+
+ eventFd_ = UniqueFD(fd);
+
+ int ret = cameraManager_->start();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to start CameraManager");
+}
+
+PyCameraManager::~PyCameraManager()
+{
+ LOG(Python, Debug) << "~PyCameraManager()";
+}
+
+py::list PyCameraManager::cameras()
+{
+ /*
+ * Create a list of Cameras, where each camera has a keep-alive to
+ * the CameraManager.
+ */
+ py::list l;
+
+ for (auto &camera : cameraManager_->cameras()) {
+ py::object py_cm = py::cast(this);
+ py::object py_cam = py::cast(camera);
+ py::detail::keep_alive_impl(py_cam, py_cm);
+ l.append(py_cam);
+ }
+
+ return l;
+}
+
+std::vector<py::object> PyCameraManager::getReadyRequests()
+{
+ int ret = readFd();
+
+ if (ret == -EAGAIN)
+ return std::vector<py::object>();
+
+ if (ret != 0)
+ throw std::system_error(-ret, std::generic_category());
+
+ std::vector<py::object> py_reqs;
+
+ for (Request *request : getCompletedRequests()) {
+ py::object o = py::cast(request);
+ /* Decrease the ref increased in Camera.queue_request() */
+ o.dec_ref();
+ py_reqs.push_back(o);
+ }
+
+ return py_reqs;
+}
+
+/* Note: Called from another thread */
+void PyCameraManager::handleRequestCompleted(Request *req)
+{
+ pushRequest(req);
+ writeFd();
+}
+
+void PyCameraManager::writeFd()
+{
+ uint64_t v = 1;
+
+ ssize_t s = write(eventFd_.get(), &v, 8);
+ /*
+ * This should never fail, and we have no simple means to manage the
+ * error, so let's log a fatal error.
+ */
+ if (s != 8)
+ LOG(Python, Fatal) << "Unable to write to eventfd";
+}
+
+int PyCameraManager::readFd()
+{
+ uint8_t buf[8];
+
+ ssize_t ret = read(eventFd_.get(), buf, 8);
+
+ if (ret == 8)
+ return 0;
+ else if (ret < 0)
+ return -errno;
+ else
+ return -EIO;
+}
+
+void PyCameraManager::pushRequest(Request *req)
+{
+ MutexLocker guard(completedRequestsMutex_);
+ completedRequests_.push_back(req);
+}
+
+std::vector<Request *> PyCameraManager::getCompletedRequests()
+{
+ std::vector<Request *> v;
+ MutexLocker guard(completedRequestsMutex_);
+ swap(v, completedRequests_);
+ return v;
+}
diff --git a/src/py/libcamera/py_camera_manager.h b/src/py/libcamera/py_camera_manager.h
new file mode 100644
index 00000000..3574db23
--- /dev/null
+++ b/src/py/libcamera/py_camera_manager.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/base/mutex.h>
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/pybind11.h>
+
+using namespace libcamera;
+
+class PyCameraManager
+{
+public:
+ PyCameraManager();
+ ~PyCameraManager();
+
+ pybind11::list cameras();
+ std::shared_ptr<Camera> get(const std::string &name) { return cameraManager_->get(name); }
+
+ static const std::string &version() { return CameraManager::version(); }
+
+ int eventFd() const { return eventFd_.get(); }
+
+ std::vector<pybind11::object> getReadyRequests();
+
+ void handleRequestCompleted(Request *req);
+
+private:
+ std::unique_ptr<CameraManager> cameraManager_;
+
+ UniqueFD eventFd_;
+ libcamera::Mutex completedRequestsMutex_;
+ std::vector<Request *> completedRequests_
+ LIBCAMERA_TSA_GUARDED_BY(completedRequestsMutex_);
+
+ void writeFd();
+ int readFd();
+ void pushRequest(Request *req);
+ std::vector<Request *> getCompletedRequests();
+};
diff --git a/src/py/libcamera/py_color_space.cpp b/src/py/libcamera/py_color_space.cpp
new file mode 100644
index 00000000..fd5a5dab
--- /dev/null
+++ b/src/py/libcamera/py_color_space.cpp
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Color Space classes
+ */
+
+#include <libcamera/color_space.h>
+#include <libcamera/libcamera.h>
+
+#include <pybind11/operators.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_color_space(py::module &m)
+{
+ auto pyColorSpace = py::class_<ColorSpace>(m, "ColorSpace");
+ auto pyColorSpacePrimaries = py::enum_<ColorSpace::Primaries>(pyColorSpace, "Primaries");
+ auto pyColorSpaceTransferFunction = py::enum_<ColorSpace::TransferFunction>(pyColorSpace, "TransferFunction");
+ auto pyColorSpaceYcbcrEncoding = py::enum_<ColorSpace::YcbcrEncoding>(pyColorSpace, "YcbcrEncoding");
+ auto pyColorSpaceRange = py::enum_<ColorSpace::Range>(pyColorSpace, "Range");
+
+ pyColorSpace
+ .def(py::init([](ColorSpace::Primaries primaries,
+ ColorSpace::TransferFunction transferFunction,
+ ColorSpace::YcbcrEncoding ycbcrEncoding,
+ ColorSpace::Range range) {
+ return ColorSpace(primaries, transferFunction, ycbcrEncoding, range);
+ }), py::arg("primaries"), py::arg("transferFunction"),
+ py::arg("ycbcrEncoding"), py::arg("range"))
+ .def(py::init([](ColorSpace &other) { return other; }))
+ .def("__str__", [](ColorSpace &self) {
+ return "<libcamera.ColorSpace '" + self.toString() + "'>";
+ })
+ .def_readwrite("primaries", &ColorSpace::primaries)
+ .def_readwrite("transferFunction", &ColorSpace::transferFunction)
+ .def_readwrite("ycbcrEncoding", &ColorSpace::ycbcrEncoding)
+ .def_readwrite("range", &ColorSpace::range)
+ .def_static("Raw", []() { return ColorSpace::Raw; })
+ .def_static("Srgb", []() { return ColorSpace::Srgb; })
+ .def_static("Sycc", []() { return ColorSpace::Sycc; })
+ .def_static("Smpte170m", []() { return ColorSpace::Smpte170m; })
+ .def_static("Rec709", []() { return ColorSpace::Rec709; })
+ .def_static("Rec2020", []() { return ColorSpace::Rec2020; });
+
+ pyColorSpacePrimaries
+ .value("Raw", ColorSpace::Primaries::Raw)
+ .value("Smpte170m", ColorSpace::Primaries::Smpte170m)
+ .value("Rec709", ColorSpace::Primaries::Rec709)
+ .value("Rec2020", ColorSpace::Primaries::Rec2020);
+
+ pyColorSpaceTransferFunction
+ .value("Linear", ColorSpace::TransferFunction::Linear)
+ .value("Srgb", ColorSpace::TransferFunction::Srgb)
+ .value("Rec709", ColorSpace::TransferFunction::Rec709);
+
+ pyColorSpaceYcbcrEncoding
+ .value("Null", ColorSpace::YcbcrEncoding::None)
+ .value("Rec601", ColorSpace::YcbcrEncoding::Rec601)
+ .value("Rec709", ColorSpace::YcbcrEncoding::Rec709)
+ .value("Rec2020", ColorSpace::YcbcrEncoding::Rec2020);
+
+ pyColorSpaceRange
+ .value("Full", ColorSpace::Range::Full)
+ .value("Limited", ColorSpace::Range::Limited);
+}
diff --git a/src/py/libcamera/py_controls_generated.cpp.in b/src/py/libcamera/py_controls_generated.cpp.in
new file mode 100644
index 00000000..22a132d1
--- /dev/null
+++ b/src/py/libcamera/py_controls_generated.cpp.in
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Auto-generated {{mode}}
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include <libcamera/{{header}}>
+
+#include <pybind11/pybind11.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+class Py{{mode|capitalize}}
+{
+};
+
+{% for vendor in vendors -%}
+class Py{{vendor|capitalize}}{{mode|capitalize}}
+{
+};
+
+{% endfor -%}
+
+void init_py_{{mode}}_generated(py::module& m)
+{
+ auto {{mode}} = py::class_<Py{{mode|capitalize}}>(m, "{{mode}}");
+{%- for vendor in vendors %}
+ auto {{vendor}} = py::class_<Py{{vendor|capitalize}}{{mode|capitalize}}>({{mode}}, "{{vendor}}");
+{%- endfor %}
+
+{% for ctrl in controls %}
+ {{ctrl.klass}}.def_readonly_static("{{ctrl.name}}", static_cast<const libcamera::ControlId *>(&libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}));
+{%- if ctrl.is_enum %}
+
+ py::enum_<libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}Enum>({{ctrl.klass}}, "{{ctrl.name}}Enum")
+{%- for enum in ctrl.enum_values %}
+ .value("{{enum.py_name}}", libcamera::{{mode}}::{{ctrl.namespace}}{{enum.name}})
+{%- endfor %}
+ ;
+{%- endif %}
+{% endfor -%}
+}
diff --git a/src/py/libcamera/py_enums.cpp b/src/py/libcamera/py_enums.cpp
new file mode 100644
index 00000000..9e75ec1a
--- /dev/null
+++ b/src/py/libcamera/py_enums.cpp
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Enumerations
+ */
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/pybind11.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_enums(py::module &m)
+{
+ py::enum_<StreamRole>(m, "StreamRole")
+ .value("StillCapture", StreamRole::StillCapture)
+ .value("Raw", StreamRole::Raw)
+ .value("VideoRecording", StreamRole::VideoRecording)
+ .value("Viewfinder", StreamRole::Viewfinder);
+
+ py::enum_<ControlType>(m, "ControlType")
+ .value("Null", ControlType::ControlTypeNone)
+ .value("Bool", ControlType::ControlTypeBool)
+ .value("Byte", ControlType::ControlTypeByte)
+ .value("Integer32", ControlType::ControlTypeInteger32)
+ .value("Integer64", ControlType::ControlTypeInteger64)
+ .value("Float", ControlType::ControlTypeFloat)
+ .value("String", ControlType::ControlTypeString)
+ .value("Rectangle", ControlType::ControlTypeRectangle)
+ .value("Size", ControlType::ControlTypeSize)
+ .value("Point", ControlType::ControlTypePoint);
+
+ py::enum_<Orientation>(m, "Orientation")
+ .value("Rotate0", Orientation::Rotate0)
+ .value("Rotate0Mirror", Orientation::Rotate0Mirror)
+ .value("Rotate180", Orientation::Rotate180)
+ .value("Rotate180Mirror", Orientation::Rotate180Mirror)
+ .value("Rotate90Mirror", Orientation::Rotate90Mirror)
+ .value("Rotate270", Orientation::Rotate270)
+ .value("Rotate270Mirror", Orientation::Rotate270Mirror)
+ .value("Rotate90", Orientation::Rotate90);
+}
diff --git a/src/py/libcamera/py_formats_generated.cpp.in b/src/py/libcamera/py_formats_generated.cpp.in
new file mode 100644
index 00000000..c5fb9063
--- /dev/null
+++ b/src/py/libcamera/py_formats_generated.cpp.in
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Auto-generated formats
+ *
+ * This file is auto-generated. Do not edit.
+ */
+
+#include <libcamera/formats.h>
+
+#include <pybind11/pybind11.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+class PyFormats
+{
+};
+
+void init_py_formats_generated(py::module& m)
+{
+ py::class_<PyFormats>(m, "formats")
+${formats}
+ ;
+}
diff --git a/src/py/libcamera/py_geometry.cpp b/src/py/libcamera/py_geometry.cpp
new file mode 100644
index 00000000..c7e30360
--- /dev/null
+++ b/src/py/libcamera/py_geometry.cpp
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Geometry classes
+ */
+
+#include <array>
+
+#include <libcamera/geometry.h>
+#include <libcamera/libcamera.h>
+
+#include <pybind11/operators.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_geometry(py::module &m)
+{
+ auto pyPoint = py::class_<Point>(m, "Point");
+ auto pySize = py::class_<Size>(m, "Size");
+ auto pySizeRange = py::class_<SizeRange>(m, "SizeRange");
+ auto pyRectangle = py::class_<Rectangle>(m, "Rectangle");
+
+ pyPoint
+ .def(py::init<>())
+ .def(py::init<int, int>())
+ .def_readwrite("x", &Point::x)
+ .def_readwrite("y", &Point::y)
+ .def(py::self == py::self)
+ .def(-py::self)
+ .def("__str__", &Point::toString)
+ .def("__repr__", [](const Point &self) {
+ return py::str("libcamera.Point({}, {})")
+ .format(self.x, self.y);
+ });
+
+ pySize
+ .def(py::init<>())
+ .def(py::init<unsigned int, unsigned int>())
+ .def_readwrite("width", &Size::width)
+ .def_readwrite("height", &Size::height)
+ .def_property_readonly("is_null", &Size::isNull)
+ .def("align_down_to", &Size::alignDownTo)
+ .def("align_up_to", &Size::alignUpTo)
+ .def("bound_to", &Size::boundTo)
+ .def("expand_to", &Size::expandTo)
+ .def("grow_by", &Size::growBy)
+ .def("shrink_by", &Size::shrinkBy)
+ .def("aligned_up_to", &Size::alignedUpTo)
+ .def("aligned_up_to", &Size::alignedUpTo)
+ .def("bounded_to", &Size::boundedTo)
+ .def("expanded_to", &Size::expandedTo)
+ .def("grown_by", &Size::grownBy)
+ .def("shrunk_by", &Size::shrunkBy)
+ .def("bounded_to_aspect_ratio", &Size::boundedToAspectRatio)
+ .def("expanded_to_aspect_ratio", &Size::expandedToAspectRatio)
+ .def("centered_to", &Size::centeredTo)
+ .def(py::self == py::self)
+ .def(py::self < py::self)
+ .def(py::self <= py::self)
+ .def(py::self * float())
+ .def(py::self / float())
+ .def(py::self *= float())
+ .def(py::self /= float())
+ .def("__str__", &Size::toString)
+ .def("__repr__", [](const Size &self) {
+ return py::str("libcamera.Size({}, {})")
+ .format(self.width, self.height);
+ });
+
+ pySizeRange
+ .def(py::init<>())
+ .def(py::init<Size>())
+ .def(py::init<Size, Size>())
+ .def(py::init<Size, Size, unsigned int, unsigned int>())
+ .def_readwrite("min", &SizeRange::min)
+ .def_readwrite("max", &SizeRange::max)
+ .def_readwrite("hStep", &SizeRange::hStep)
+ .def_readwrite("vStep", &SizeRange::vStep)
+ .def("contains", &SizeRange::contains)
+ .def(py::self == py::self)
+ .def("__str__", &SizeRange::toString)
+ .def("__repr__", [](const SizeRange &self) {
+ return py::str("libcamera.SizeRange(({}, {}), ({}, {}), {}, {})")
+ .format(self.min.width, self.min.height,
+ self.max.width, self.max.height,
+ self.hStep, self.vStep);
+ });
+
+ pyRectangle
+ .def(py::init<>())
+ .def(py::init<int, int, Size>())
+ .def(py::init<int, int, unsigned int, unsigned int>())
+ .def(py::init<Size>())
+ .def_readwrite("x", &Rectangle::x)
+ .def_readwrite("y", &Rectangle::y)
+ .def_readwrite("width", &Rectangle::width)
+ .def_readwrite("height", &Rectangle::height)
+ .def_property_readonly("is_null", &Rectangle::isNull)
+ .def_property_readonly("center", &Rectangle::center)
+ .def_property_readonly("size", &Rectangle::size)
+ .def_property_readonly("topLeft", &Rectangle::topLeft)
+ .def("scale_by", &Rectangle::scaleBy)
+ .def("translate_by", &Rectangle::translateBy)
+ .def("bounded_to", &Rectangle::boundedTo)
+ .def("enclosed_in", &Rectangle::enclosedIn)
+ .def("scaled_by", &Rectangle::scaledBy)
+ .def("translated_by", &Rectangle::translatedBy)
+ .def(py::self == py::self)
+ .def("__str__", &Rectangle::toString)
+ .def("__repr__", [](const Rectangle &self) {
+ return py::str("libcamera.Rectangle({}, {}, {}, {})")
+ .format(self.x, self.y, self.width, self.height);
+ });
+}
diff --git a/src/py/libcamera/py_helpers.cpp b/src/py/libcamera/py_helpers.cpp
new file mode 100644
index 00000000..1ad1d4c1
--- /dev/null
+++ b/src/py/libcamera/py_helpers.cpp
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include "py_helpers.h"
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/functional.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+template<typename T>
+static py::object valueOrTuple(const ControlValue &cv)
+{
+ if (cv.isArray()) {
+ const T *v = reinterpret_cast<const T *>(cv.data().data());
+ auto t = py::tuple(cv.numElements());
+
+ for (size_t i = 0; i < cv.numElements(); ++i)
+ t[i] = v[i];
+
+ return std::move(t);
+ }
+
+ return py::cast(cv.get<T>());
+}
+
+py::object controlValueToPy(const ControlValue &cv)
+{
+ switch (cv.type()) {
+ case ControlTypeNone:
+ return py::none();
+ case ControlTypeBool:
+ return valueOrTuple<bool>(cv);
+ case ControlTypeByte:
+ return valueOrTuple<uint8_t>(cv);
+ case ControlTypeInteger32:
+ return valueOrTuple<int32_t>(cv);
+ case ControlTypeInteger64:
+ return valueOrTuple<int64_t>(cv);
+ case ControlTypeFloat:
+ return valueOrTuple<float>(cv);
+ case ControlTypeString:
+ return py::cast(cv.get<std::string>());
+ case ControlTypeSize: {
+ const Size *v = reinterpret_cast<const Size *>(cv.data().data());
+ return py::cast(v);
+ }
+ case ControlTypeRectangle:
+ return valueOrTuple<Rectangle>(cv);
+ case ControlTypePoint:
+ return valueOrTuple<Point>(cv);
+ default:
+ throw std::runtime_error("Unsupported ControlValue type");
+ }
+}
+
+template<typename T>
+static ControlValue controlValueMaybeArray(const py::object &ob)
+{
+ if (py::isinstance<py::list>(ob) || py::isinstance<py::tuple>(ob)) {
+ std::vector<T> vec = ob.cast<std::vector<T>>();
+ return ControlValue(Span<const T>(vec));
+ }
+
+ return ControlValue(ob.cast<T>());
+}
+
+ControlValue pyToControlValue(const py::object &ob, ControlType type)
+{
+ switch (type) {
+ case ControlTypeNone:
+ return ControlValue();
+ case ControlTypeBool:
+ return ControlValue(ob.cast<bool>());
+ case ControlTypeByte:
+ return controlValueMaybeArray<uint8_t>(ob);
+ case ControlTypeInteger32:
+ return controlValueMaybeArray<int32_t>(ob);
+ case ControlTypeInteger64:
+ return controlValueMaybeArray<int64_t>(ob);
+ case ControlTypeFloat:
+ return controlValueMaybeArray<float>(ob);
+ case ControlTypeString:
+ return ControlValue(ob.cast<std::string>());
+ case ControlTypeRectangle:
+ return controlValueMaybeArray<Rectangle>(ob);
+ case ControlTypeSize:
+ return ControlValue(ob.cast<Size>());
+ case ControlTypePoint:
+ return controlValueMaybeArray<Point>(ob);
+ default:
+ throw std::runtime_error("Control type not implemented");
+ }
+}
diff --git a/src/py/libcamera/py_helpers.h b/src/py/libcamera/py_helpers.h
new file mode 100644
index 00000000..983969df
--- /dev/null
+++ b/src/py/libcamera/py_helpers.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/pybind11.h>
+
+pybind11::object controlValueToPy(const libcamera::ControlValue &cv);
+libcamera::ControlValue pyToControlValue(const pybind11::object &ob, libcamera::ControlType type);
diff --git a/src/py/libcamera/py_main.cpp b/src/py/libcamera/py_main.cpp
new file mode 100644
index 00000000..441a70ab
--- /dev/null
+++ b/src/py/libcamera/py_main.cpp
@@ -0,0 +1,523 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings
+ */
+
+#include "py_main.h"
+
+#include <limits>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/libcamera.h>
+
+#include <pybind11/functional.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+
+#include "py_camera_manager.h"
+#include "py_helpers.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+namespace libcamera {
+
+LOG_DEFINE_CATEGORY(Python)
+
+}
+
+/*
+ * This is a holder class used only for the Camera class, for the sole purpose
+ * of avoiding the compilation issue with Camera's private destructor.
+ *
+ * pybind11 requires a public destructor for classes held with shared_ptrs, even
+ * in cases where the public destructor is not strictly needed. The current
+ * understanding is that there are the following options to solve the problem:
+ *
+ * - Use pybind11 'smart_holder' branch. The downside is that 'smart_holder'
+ * is not the mainline branch, and not available in distributions.
+ * - https://github.com/pybind/pybind11/pull/2067
+ * - Make the Camera destructor public
+ * - Something like the PyCameraSmartPtr here, which adds a layer, hiding the
+ * issue.
+ */
+template<typename T>
+class PyCameraSmartPtr
+{
+public:
+ using element_type = T;
+
+ PyCameraSmartPtr()
+ {
+ }
+
+ explicit PyCameraSmartPtr(T *)
+ {
+ throw std::runtime_error("invalid SmartPtr constructor call");
+ }
+
+ explicit PyCameraSmartPtr(std::shared_ptr<T> p)
+ : ptr_(p)
+ {
+ }
+
+ T *get() const { return ptr_.get(); }
+
+ operator std::shared_ptr<T>() const { return ptr_; }
+
+private:
+ std::shared_ptr<T> ptr_;
+};
+
+PYBIND11_DECLARE_HOLDER_TYPE(T, PyCameraSmartPtr<T>)
+
+/*
+ * Note: global C++ destructors can be run on this before the py module is
+ * destructed.
+ */
+static std::weak_ptr<PyCameraManager> gCameraManager;
+
+PYBIND11_MODULE(_libcamera, m)
+{
+ init_py_enums(m);
+ init_py_controls_generated(m);
+ init_py_geometry(m);
+ init_py_properties_generated(m);
+ init_py_color_space(m);
+ init_py_transform(m);
+
+ /* Forward declarations */
+
+ /*
+ * We need to declare all the classes here so that Python docstrings
+ * can be generated correctly.
+ * https://pybind11.readthedocs.io/en/latest/advanced/misc.html#avoiding-c-types-in-docstrings
+ */
+
+ auto pyCameraManager = py::class_<PyCameraManager, std::shared_ptr<PyCameraManager>>(m, "CameraManager");
+ auto pyCamera = py::class_<Camera, PyCameraSmartPtr<Camera>>(m, "Camera");
+ auto pySensorConfiguration = py::class_<SensorConfiguration>(m, "SensorConfiguration");
+ auto pyCameraConfiguration = py::class_<CameraConfiguration>(m, "CameraConfiguration");
+ auto pyCameraConfigurationStatus = py::enum_<CameraConfiguration::Status>(pyCameraConfiguration, "Status");
+ auto pyStreamConfiguration = py::class_<StreamConfiguration>(m, "StreamConfiguration");
+ auto pyStreamFormats = py::class_<StreamFormats>(m, "StreamFormats");
+ auto pyFrameBufferAllocator = py::class_<FrameBufferAllocator>(m, "FrameBufferAllocator");
+ auto pyFrameBuffer = py::class_<FrameBuffer>(m, "FrameBuffer");
+ auto pyFrameBufferPlane = py::class_<FrameBuffer::Plane>(pyFrameBuffer, "Plane");
+ auto pyStream = py::class_<Stream>(m, "Stream");
+ auto pyControlId = py::class_<ControlId>(m, "ControlId");
+ auto pyControlInfo = py::class_<ControlInfo>(m, "ControlInfo");
+ auto pyRequest = py::class_<Request>(m, "Request");
+ auto pyRequestStatus = py::enum_<Request::Status>(pyRequest, "Status");
+ auto pyRequestReuse = py::enum_<Request::ReuseFlag>(pyRequest, "Reuse");
+ auto pyFrameMetadata = py::class_<FrameMetadata>(m, "FrameMetadata");
+ auto pyFrameMetadataStatus = py::enum_<FrameMetadata::Status>(pyFrameMetadata, "Status");
+ auto pyFrameMetadataPlane = py::class_<FrameMetadata::Plane>(pyFrameMetadata, "Plane");
+ auto pyPixelFormat = py::class_<PixelFormat>(m, "PixelFormat");
+
+ init_py_formats_generated(m);
+
+ /* Global functions */
+ m.def("log_set_level", &logSetLevel);
+
+ /* Classes */
+ pyCameraManager
+ .def_static("singleton", []() {
+ std::shared_ptr<PyCameraManager> cm = gCameraManager.lock();
+
+ if (!cm) {
+ cm = std::make_shared<PyCameraManager>();
+ gCameraManager = cm;
+ }
+
+ return cm;
+ })
+
+ .def_property_readonly_static("version", [](py::object /* self */) { return PyCameraManager::version(); })
+ .def("get", &PyCameraManager::get, py::keep_alive<0, 1>())
+ .def_property_readonly("cameras", &PyCameraManager::cameras)
+
+ .def_property_readonly("event_fd", &PyCameraManager::eventFd)
+ .def("get_ready_requests", &PyCameraManager::getReadyRequests);
+
+ pyCamera
+ .def_property_readonly("id", &Camera::id)
+ .def("acquire", [](Camera &self) {
+ int ret = self.acquire();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to acquire camera");
+ })
+ .def("release", [](Camera &self) {
+ int ret = self.release();
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to release camera");
+ })
+ .def("start", [](Camera &self,
+ const std::unordered_map<const ControlId *, py::object> &controls) {
+ /* \todo What happens if someone calls start() multiple times? */
+
+ auto cm = gCameraManager.lock();
+ ASSERT(cm);
+
+ self.requestCompleted.connect(cm.get(), &PyCameraManager::handleRequestCompleted);
+
+ ControlList controlList(self.controls());
+
+ for (const auto &[id, obj] : controls) {
+ auto val = pyToControlValue(obj, id->type());
+ controlList.set(id->id(), val);
+ }
+
+ int ret = self.start(&controlList);
+ if (ret) {
+ self.requestCompleted.disconnect();
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to start camera");
+ }
+ }, py::arg("controls") = std::unordered_map<const ControlId *, py::object>())
+
+ .def("stop", [](Camera &self) {
+ int ret = self.stop();
+
+ self.requestCompleted.disconnect();
+
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to stop camera");
+ })
+
+ .def("__str__", [](Camera &self) {
+ return "<libcamera.Camera '" + self.id() + "'>";
+ })
+
+ /* Keep the camera alive, as StreamConfiguration contains a Stream* */
+ .def("generate_configuration", [](Camera &self, const std::vector<StreamRole> &roles) {
+ return self.generateConfiguration(roles);
+ }, py::keep_alive<0, 1>())
+
+ .def("configure", [](Camera &self, CameraConfiguration *config) {
+ int ret = self.configure(config);
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to configure camera");
+ })
+
+ .def("create_request", [](Camera &self, uint64_t cookie) {
+ std::unique_ptr<Request> req = self.createRequest(cookie);
+ if (!req)
+ throw std::system_error(ENOMEM, std::generic_category(),
+ "Failed to create request");
+ return req;
+ }, py::arg("cookie") = 0)
+
+ .def("queue_request", [](Camera &self, Request *req) {
+ py::object py_req = py::cast(req);
+
+ /*
+ * Increase the reference count, will be dropped in
+ * CameraManager.get_ready_requests().
+ */
+
+ py_req.inc_ref();
+
+ int ret = self.queueRequest(req);
+ if (ret) {
+ py_req.dec_ref();
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to queue request");
+ }
+ })
+
+ .def_property_readonly("streams", [](Camera &self) {
+ py::set set;
+ for (auto &s : self.streams()) {
+ py::object py_self = py::cast(self);
+ py::object py_s = py::cast(s);
+ py::detail::keep_alive_impl(py_s, py_self);
+ set.add(py_s);
+ }
+ return set;
+ })
+
+ .def_property_readonly("controls", [](Camera &self) {
+ /* Convert ControlInfoMap to std container */
+
+ std::unordered_map<const ControlId *, ControlInfo> ret;
+
+ for (const auto &[k, cv] : self.controls())
+ ret[k] = cv;
+
+ return ret;
+ })
+
+ .def_property_readonly("properties", [](Camera &self) {
+ /* Convert ControlList to std container */
+
+ std::unordered_map<const ControlId *, py::object> ret;
+
+ for (const auto &[k, cv] : self.properties()) {
+ const ControlId *id = properties::properties.at(k);
+ py::object ob = controlValueToPy(cv);
+ ret[id] = ob;
+ }
+
+ return ret;
+ });
+
+ pySensorConfiguration
+ .def(py::init<>())
+ .def_readwrite("bit_depth", &SensorConfiguration::bitDepth)
+ .def_readwrite("analog_crop", &SensorConfiguration::analogCrop)
+ .def_property(
+ "binning",
+ [](SensorConfiguration &self) {
+ return py::make_tuple(self.binning.binX, self.binning.binY);
+ },
+ [](SensorConfiguration &self, py::object value) {
+ auto vec = value.cast<std::vector<unsigned int>>();
+ if (vec.size() != 2)
+ throw std::runtime_error("binning requires iterable of 2 values");
+ self.binning.binX = vec[0];
+ self.binning.binY = vec[1];
+ })
+ .def_property(
+ "skipping",
+ [](SensorConfiguration &self) {
+ return py::make_tuple(self.skipping.xOddInc, self.skipping.xEvenInc,
+ self.skipping.yOddInc, self.skipping.yEvenInc);
+ },
+ [](SensorConfiguration &self, py::object value) {
+ auto vec = value.cast<std::vector<unsigned int>>();
+ if (vec.size() != 4)
+ throw std::runtime_error("skipping requires iterable of 4 values");
+ self.skipping.xOddInc = vec[0];
+ self.skipping.xEvenInc = vec[1];
+ self.skipping.yOddInc = vec[2];
+ self.skipping.yEvenInc = vec[3];
+ })
+ .def_readwrite("output_size", &SensorConfiguration::outputSize)
+ .def("is_valid", &SensorConfiguration::isValid);
+
+ pyCameraConfiguration
+ .def("__iter__", [](CameraConfiguration &self) {
+ return py::make_iterator<py::return_value_policy::reference_internal>(self);
+ }, py::keep_alive<0, 1>())
+ .def("__len__", [](CameraConfiguration &self) {
+ return self.size();
+ })
+ .def("validate", &CameraConfiguration::validate)
+ .def("at", py::overload_cast<unsigned int>(&CameraConfiguration::at),
+ py::return_value_policy::reference_internal)
+ .def_property_readonly("size", &CameraConfiguration::size)
+ .def_property_readonly("empty", &CameraConfiguration::empty)
+ .def_readwrite("sensor_config", &CameraConfiguration::sensorConfig)
+ .def_readwrite("orientation", &CameraConfiguration::orientation);
+
+ pyCameraConfigurationStatus
+ .value("Valid", CameraConfiguration::Valid)
+ .value("Adjusted", CameraConfiguration::Adjusted)
+ .value("Invalid", CameraConfiguration::Invalid);
+
+ pyStreamConfiguration
+ .def("__str__", &StreamConfiguration::toString)
+ .def_property_readonly("stream", &StreamConfiguration::stream,
+ py::return_value_policy::reference_internal)
+ .def_readwrite("size", &StreamConfiguration::size)
+ .def_readwrite("pixel_format", &StreamConfiguration::pixelFormat)
+ .def_readwrite("stride", &StreamConfiguration::stride)
+ .def_readwrite("frame_size", &StreamConfiguration::frameSize)
+ .def_readwrite("buffer_count", &StreamConfiguration::bufferCount)
+ .def_property_readonly("formats", &StreamConfiguration::formats,
+ py::return_value_policy::reference_internal)
+ .def_readwrite("color_space", &StreamConfiguration::colorSpace);
+
+ pyStreamFormats
+ .def_property_readonly("pixel_formats", &StreamFormats::pixelformats)
+ .def("sizes", &StreamFormats::sizes)
+ .def("range", &StreamFormats::range);
+
+ pyFrameBufferAllocator
+ .def(py::init<PyCameraSmartPtr<Camera>>(), py::keep_alive<1, 2>())
+ .def("allocate", [](FrameBufferAllocator &self, Stream *stream) {
+ int ret = self.allocate(stream);
+ if (ret < 0)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to allocate buffers");
+ return ret;
+ })
+ .def_property_readonly("allocated", &FrameBufferAllocator::allocated)
+ /* Create a list of FrameBuffers, where each FrameBuffer has a keep-alive to FrameBufferAllocator */
+ .def("buffers", [](FrameBufferAllocator &self, Stream *stream) {
+ py::object py_self = py::cast(self);
+ py::list l;
+ for (auto &ub : self.buffers(stream)) {
+ py::object py_buf = py::cast(ub.get(), py::return_value_policy::reference_internal, py_self);
+ l.append(py_buf);
+ }
+ return l;
+ });
+
+ pyFrameBuffer
+ .def(py::init<std::vector<FrameBuffer::Plane>, unsigned int>(),
+ py::arg("planes"), py::arg("cookie") = 0)
+ .def_property_readonly("metadata", &FrameBuffer::metadata, py::return_value_policy::reference_internal)
+ .def_property_readonly("planes", &FrameBuffer::planes)
+ .def_property("cookie", &FrameBuffer::cookie, &FrameBuffer::setCookie);
+
+ pyFrameBufferPlane
+ .def(py::init())
+ .def(py::init([](int fd, unsigned int offset, unsigned int length) {
+ auto p = FrameBuffer::Plane();
+ p.fd = SharedFD(fd);
+ p.offset = offset;
+ p.length = length;
+ return p;
+ }), py::arg("fd"), py::arg("offset"), py::arg("length"))
+ .def_property("fd",
+ [](const FrameBuffer::Plane &self) {
+ return self.fd.get();
+ },
+ [](FrameBuffer::Plane &self, int fd) {
+ self.fd = SharedFD(fd);
+ })
+ .def_readwrite("offset", &FrameBuffer::Plane::offset)
+ .def_readwrite("length", &FrameBuffer::Plane::length);
+
+ pyStream
+ .def_property_readonly("configuration", &Stream::configuration);
+
+ pyControlId
+ .def_property_readonly("id", &ControlId::id)
+ .def_property_readonly("name", &ControlId::name)
+ .def_property_readonly("vendor", &ControlId::vendor)
+ .def_property_readonly("type", &ControlId::type)
+ .def_property_readonly("isArray", &ControlId::isArray)
+ .def_property_readonly("size", &ControlId::size)
+ .def("__str__", [](const ControlId &self) { return self.name(); })
+ .def("__repr__", [](const ControlId &self) {
+ std::string sizeStr = "";
+ if (self.isArray()) {
+ sizeStr = "[";
+ size_t size = self.size();
+ if (size == std::numeric_limits<size_t>::max())
+ sizeStr += "n";
+ else
+ sizeStr += std::to_string(size);
+ sizeStr += "]";
+ }
+ return py::str("libcamera.ControlId({}, {}.{}{}, {})")
+ .format(self.id(), self.vendor(), self.name(), sizeStr, self.type());
+ })
+ .def("enumerators", &ControlId::enumerators);
+
+ pyControlInfo
+ .def_property_readonly("min", [](const ControlInfo &self) {
+ return controlValueToPy(self.min());
+ })
+ .def_property_readonly("max", [](const ControlInfo &self) {
+ return controlValueToPy(self.max());
+ })
+ .def_property_readonly("default", [](const ControlInfo &self) {
+ return controlValueToPy(self.def());
+ })
+ .def_property_readonly("values", [](const ControlInfo &self) {
+ py::list l;
+ for (const auto &v : self.values())
+ l.append(controlValueToPy(v));
+ return l;
+ })
+ .def("__str__", &ControlInfo::toString)
+ .def("__repr__", [](const ControlInfo &self) {
+ return py::str("libcamera.ControlInfo({})")
+ .format(self.toString());
+ });
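
A short introspection sketch with the ControlId/ControlInfo bindings above; `camera.controls`, a ControlId -> ControlInfo map, is assumed from the Camera bindings earlier in the patch:

    for cid, info in camera.controls.items():
        array = '[]' if cid.isArray else ''
        print(f'{cid.vendor}.{cid.name}{array} ({cid.type}): '
              f'min={info.min} max={info.max} default={info.default}')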
+
+ pyRequest
+ /* \todo Fence is not supported, so we cannot expose addBuffer() directly */
+ .def("add_buffer", [](Request &self, const Stream *stream, FrameBuffer *buffer) {
+ int ret = self.addBuffer(stream, buffer);
+ if (ret)
+ throw std::system_error(-ret, std::generic_category(),
+ "Failed to add buffer");
+ }, py::keep_alive<1, 3>()) /* Request keeps Framebuffer alive */
+ .def_property_readonly("status", &Request::status)
+ .def_property_readonly("buffers", &Request::buffers)
+ .def_property_readonly("cookie", &Request::cookie)
+ .def_property_readonly("sequence", &Request::sequence)
+ .def_property_readonly("has_pending_buffers", &Request::hasPendingBuffers)
+ .def("set_control", [](Request &self, const ControlId &id, py::object value) {
+ self.controls().set(id.id(), pyToControlValue(value, id.type()));
+ })
+ .def_property_readonly("metadata", [](Request &self) {
+ /* Convert ControlList to std container */
+
+ std::unordered_map<const ControlId *, py::object> ret;
+
+ for (const auto &[key, cv] : self.metadata()) {
+ const ControlId *id = controls::controls.at(key);
+ py::object ob = controlValueToPy(cv);
+ ret[id] = ob;
+ }
+
+ return ret;
+ })
+ /*
+ * \todo As add_buffer() installs a keep_alive from the Request to the
+ * FrameBuffer, we can only allow reuse with ReuseBuffers.
+ */
+ .def("reuse", [](Request &self) { self.reuse(Request::ReuseFlag::ReuseBuffers); })
+ .def("__str__", &Request::toString);
+
+ pyRequestStatus
+ .value("Pending", Request::RequestPending)
+ .value("Complete", Request::RequestComplete)
+ .value("Cancelled", Request::RequestCancelled);
+
+ pyRequestReuse
+ .value("Default", Request::ReuseFlag::Default)
+ .value("ReuseBuffers", Request::ReuseFlag::ReuseBuffers);
+
+ pyFrameMetadata
+ .def_readonly("status", &FrameMetadata::status)
+ .def_readonly("sequence", &FrameMetadata::sequence)
+ .def_readonly("timestamp", &FrameMetadata::timestamp)
+ .def_property_readonly("planes", [](const FrameMetadata &self) {
+ /* Convert from Span<> to std::vector<> */
+ /* Note: this creates a copy */
+ std::vector<FrameMetadata::Plane> v(self.planes().begin(), self.planes().end());
+ return v;
+ });
+
+ pyFrameMetadataStatus
+ .value("Success", FrameMetadata::FrameSuccess)
+ .value("Error", FrameMetadata::FrameError)
+ .value("Cancelled", FrameMetadata::FrameCancelled);
+
+ pyFrameMetadataPlane
+ .def_readwrite("bytes_used", &FrameMetadata::Plane::bytesused);
+
+ pyPixelFormat
+ .def(py::init<>())
+ .def(py::init<uint32_t, uint64_t>())
+ .def(py::init<>([](const std::string &str) {
+ return PixelFormat::fromString(str);
+ }))
+ .def_property_readonly("fourcc", &PixelFormat::fourcc)
+ .def_property_readonly("modifier", &PixelFormat::modifier)
+ .def(py::self == py::self)
+ .def("__str__", &PixelFormat::toString)
+ .def("__repr__", [](const PixelFormat &self) {
+ return "libcamera.PixelFormat('" + self.toString() + "')";
+ });
+}
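
Finally, a tiny PixelFormat sanity check against the constructors bound above:

    import libcamera

    fmt = libcamera.PixelFormat('NV12')   # parsed via PixelFormat::fromString()
    assert fmt == libcamera.PixelFormat(fmt.fourcc, fmt.modifier)
    print(repr(fmt))                      # libcamera.PixelFormat('NV12')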
diff --git a/src/py/libcamera/py_main.h b/src/py/libcamera/py_main.h
new file mode 100644
index 00000000..4d594326
--- /dev/null
+++ b/src/py/libcamera/py_main.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#pragma once
+
+#include <libcamera/base/log.h>
+
+#include <pybind11/pybind11.h>
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Python)
+
+}
+
+void init_py_color_space(pybind11::module &m);
+void init_py_controls_generated(pybind11::module &m);
+void init_py_enums(pybind11::module &m);
+void init_py_formats_generated(pybind11::module &m);
+void init_py_geometry(pybind11::module &m);
+void init_py_properties_generated(pybind11::module &m);
+void init_py_transform(pybind11::module &m);
diff --git a/src/py/libcamera/py_transform.cpp b/src/py/libcamera/py_transform.cpp
new file mode 100644
index 00000000..768260ff
--- /dev/null
+++ b/src/py/libcamera/py_transform.cpp
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Python bindings - Transform class
+ */
+
+#include <libcamera/transform.h>
+#include <libcamera/libcamera.h>
+
+#include <pybind11/operators.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include "py_main.h"
+
+namespace py = pybind11;
+
+using namespace libcamera;
+
+void init_py_transform(py::module &m)
+{
+ auto pyTransform = py::class_<Transform>(m, "Transform");
+
+ pyTransform
+ .def(py::init([](int rotation, bool hflip, bool vflip, bool transpose) {
+ bool ok;
+
+ Transform t = transformFromRotation(rotation, &ok);
+ if (!ok)
+ throw std::invalid_argument("Invalid rotation");
+
+ if (hflip)
+ t ^= Transform::HFlip;
+ if (vflip)
+ t ^= Transform::VFlip;
+ if (transpose)
+ t ^= Transform::Transpose;
+ return t;
+ }), py::arg("rotation") = 0, py::arg("hflip") = false,
+ py::arg("vflip") = false, py::arg("transpose") = false)
+ .def(py::init([](Transform &other) { return other; }))
+ .def("__str__", [](Transform &self) {
+ return "<libcamera.Transform '" + std::string(transformToString(self)) + "'>";
+ })
+ .def_property("hflip",
+ [](Transform &self) {
+ return !!(self & Transform::HFlip);
+ },
+ [](Transform &self, bool hflip) {
+ if (hflip)
+ self |= Transform::HFlip;
+ else
+ self &= ~Transform::HFlip;
+ })
+ .def_property("vflip",
+ [](Transform &self) {
+ return !!(self & Transform::VFlip);
+ },
+ [](Transform &self, bool vflip) {
+ if (vflip)
+ self |= Transform::VFlip;
+ else
+ self &= ~Transform::VFlip;
+ })
+ .def_property("transpose",
+ [](Transform &self) {
+ return !!(self & Transform::Transpose);
+ },
+ [](Transform &self, bool transpose) {
+ if (transpose)
+ self |= Transform::Transpose;
+ else
+ self &= ~Transform::Transpose;
+ })
+ .def("inverse", [](Transform &self) { return -self; })
+ .def("invert", [](Transform &self) {
+ self = -self;
+ })
+ .def("compose", [](Transform &self, Transform &other) {
+ self = self * other;
+ });
+}
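
A usage sketch for the Transform bindings above; note that the flips are XORed on top of the base rotation:

    import libcamera

    t = libcamera.Transform(rotation=90, hflip=True)
    t.vflip = True       # toggle individual flags through the properties
    u = t.inverse()      # a new, inverted copy; t is unchanged
    t.compose(u)         # in place: t = t * u, the identity here
    print(t)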
diff --git a/src/py/libcamera/utils/MappedFrameBuffer.py b/src/py/libcamera/utils/MappedFrameBuffer.py
new file mode 100644
index 00000000..329e51fa
--- /dev/null
+++ b/src/py/libcamera/utils/MappedFrameBuffer.py
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+import libcamera
+from typing import Tuple
+
+class MappedFrameBuffer:
+ """
+ Provides memoryviews for the FrameBuffer's planes
+ """
+ def __init__(self, fb: libcamera.FrameBuffer):
+ self.__fb = fb
+ self.__planes = ()
+ self.__maps = ()
+
+ def __enter__(self):
+ return self.mmap()
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.munmap()
+
+ def mmap(self):
+ if self.__planes:
+ raise RuntimeError('MappedFrameBuffer already mmapped')
+
+ import os
+ import mmap
+
+ fb = self.__fb
+
+ # Collect information about the buffers
+
+ bufinfos = {}
+
+ for plane in fb.planes:
+ fd = plane.fd
+
+ if fd not in bufinfos:
+ buflen = os.lseek(fd, 0, os.SEEK_END)
+ bufinfos[fd] = {'maplen': 0, 'buflen': buflen}
+ else:
+ buflen = bufinfos[fd]['buflen']
+
+ if plane.offset > buflen or plane.offset + plane.length > buflen:
+ raise RuntimeError(f'plane is out of buffer: buffer length={buflen}, ' +
+ f'plane offset={plane.offset}, plane length={plane.length}')
+
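+ # Planes may share one dmabuf fd; map each fd once, sized to cover its furthest plane.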
+ bufinfos[fd]['maplen'] = max(bufinfos[fd]['maplen'], plane.offset + plane.length)
+
+ # mmap the buffers
+
+ maps = []
+
+ for fd, info in bufinfos.items():
+ map = mmap.mmap(fd, info['maplen'], mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE)
+ info['map'] = map
+ maps.append(map)
+
+ self.__maps = tuple(maps)
+
+ # Create memoryviews for the planes
+
+ planes = []
+
+ for plane in fb.planes:
+ fd = plane.fd
+ info = bufinfos[fd]
+
+ mv = memoryview(info['map'])
+
+ start = plane.offset
+ end = plane.offset + plane.length
+
+ mv = mv[start:end]
+
+ planes.append(mv)
+
+ self.__planes = tuple(planes)
+
+ return self
+
+ def munmap(self):
+ if not self.__planes:
+ raise RuntimeError('MappedFrameBuffer not mmapped')
+
+ for p in self.__planes:
+ p.release()
+
+ for mm in self.__maps:
+ mm.close()
+
+ self.__planes = ()
+ self.__maps = ()
+
+ @property
+ def planes(self) -> Tuple[memoryview, ...]:
+ """memoryviews for the planes"""
+ if not self.__planes:
+ raise RuntimeError('MappedFrameBuffer not mmapped')
+
+ return self.__planes
+
+ @property
+ def fb(self):
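+ """The underlying libcamera.FrameBuffer"""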
+ return self.__fb
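
For illustration, the intended use as a context manager, with `buf` a FrameBuffer taken from a completed request (hypothetical variable):

    import libcamera.utils

    with libcamera.utils.MappedFrameBuffer(buf) as mfb:
        for i, plane in enumerate(mfb.planes):
            print(f'plane {i}: {len(plane)} bytes')
    # Leaving the block releases the memoryviews and closes the mappings.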
diff --git a/src/py/libcamera/utils/__init__.py b/src/py/libcamera/utils/__init__.py
new file mode 100644
index 00000000..4a23ce36
--- /dev/null
+++ b/src/py/libcamera/utils/__init__.py
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+from .MappedFrameBuffer import MappedFrameBuffer
diff --git a/src/py/meson.build b/src/py/meson.build
new file mode 100644
index 00000000..a4586b4a
--- /dev/null
+++ b/src/py/meson.build
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: CC0-1.0
+
+subdir('libcamera')
diff --git a/src/qcam/assets/feathericons/README.md b/src/qcam/assets/feathericons/README.md
deleted file mode 100644
index ce7664f6..00000000
--- a/src/qcam/assets/feathericons/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Icons from https://feathericons.com/
-License: MIT
-
-Generate the QRC file with:
- rcc --project
diff --git a/src/qcam/assets/feathericons/feathericons.qrc b/src/qcam/assets/feathericons/feathericons.qrc
deleted file mode 100644
index c4eb7a0b..00000000
--- a/src/qcam/assets/feathericons/feathericons.qrc
+++ /dev/null
@@ -1,9 +0,0 @@
-<!DOCTYPE RCC><RCC version="1.0">
-<qresource>
-<file>./camera-off.svg</file>
-<file>./play-circle.svg</file>
-<file>./save.svg</file>
-<file>./stop-circle.svg</file>
-<file>./x-circle.svg</file>
-</qresource>
-</RCC>
diff --git a/src/qcam/format_converter.cpp b/src/qcam/format_converter.cpp
deleted file mode 100644
index bf887ad4..00000000
--- a/src/qcam/format_converter.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * format_convert.cpp - qcam - Convert buffer to RGB
- */
-
-#include "format_converter.h"
-
-#include <errno.h>
-
-#include <QImage>
-
-#define RGBSHIFT 8
-#ifndef MAX
-#define MAX(a,b) ((a)>(b)?(a):(b))
-#endif
-#ifndef MIN
-#define MIN(a,b) ((a)<(b)?(a):(b))
-#endif
-#ifndef CLAMP
-#define CLAMP(a,low,high) MAX((low),MIN((high),(a)))
-#endif
-#ifndef CLIP
-#define CLIP(x) CLAMP(x,0,255)
-#endif
-
-int FormatConverter::configure(const libcamera::PixelFormat &format,
- const QSize &size)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- formatFamily_ = NV;
- horzSubSample_ = 2;
- vertSubSample_ = 2;
- nvSwap_ = false;
- break;
- case DRM_FORMAT_NV21:
- formatFamily_ = NV;
- horzSubSample_ = 2;
- vertSubSample_ = 2;
- nvSwap_ = true;
- break;
- case DRM_FORMAT_NV16:
- formatFamily_ = NV;
- horzSubSample_ = 2;
- vertSubSample_ = 1;
- nvSwap_ = false;
- break;
- case DRM_FORMAT_NV61:
- formatFamily_ = NV;
- horzSubSample_ = 2;
- vertSubSample_ = 1;
- nvSwap_ = true;
- break;
- case DRM_FORMAT_NV24:
- formatFamily_ = NV;
- horzSubSample_ = 1;
- vertSubSample_ = 1;
- nvSwap_ = false;
- break;
- case DRM_FORMAT_NV42:
- formatFamily_ = NV;
- horzSubSample_ = 1;
- vertSubSample_ = 1;
- nvSwap_ = true;
- break;
-
- case DRM_FORMAT_RGB888:
- formatFamily_ = RGB;
- r_pos_ = 2;
- g_pos_ = 1;
- b_pos_ = 0;
- bpp_ = 3;
- break;
- case DRM_FORMAT_BGR888:
- formatFamily_ = RGB;
- r_pos_ = 0;
- g_pos_ = 1;
- b_pos_ = 2;
- bpp_ = 3;
- break;
- case DRM_FORMAT_ARGB8888:
- formatFamily_ = RGB;
- r_pos_ = 2;
- g_pos_ = 1;
- b_pos_ = 0;
- bpp_ = 4;
- break;
- case DRM_FORMAT_RGBA8888:
- formatFamily_ = RGB;
- r_pos_ = 3;
- g_pos_ = 2;
- b_pos_ = 1;
- bpp_ = 4;
- break;
- case DRM_FORMAT_ABGR8888:
- formatFamily_ = RGB;
- r_pos_ = 0;
- g_pos_ = 1;
- b_pos_ = 2;
- bpp_ = 4;
- break;
- case DRM_FORMAT_BGRA8888:
- formatFamily_ = RGB;
- r_pos_ = 1;
- g_pos_ = 2;
- b_pos_ = 3;
- bpp_ = 4;
- break;
-
- case DRM_FORMAT_VYUY:
- formatFamily_ = YUV;
- y_pos_ = 1;
- cb_pos_ = 2;
- break;
- case DRM_FORMAT_YVYU:
- formatFamily_ = YUV;
- y_pos_ = 0;
- cb_pos_ = 3;
- break;
- case DRM_FORMAT_UYVY:
- formatFamily_ = YUV;
- y_pos_ = 1;
- cb_pos_ = 0;
- break;
- case DRM_FORMAT_YUYV:
- formatFamily_ = YUV;
- y_pos_ = 0;
- cb_pos_ = 1;
- break;
-
- case DRM_FORMAT_MJPEG:
- formatFamily_ = MJPEG;
- break;
-
- default:
- return -EINVAL;
- };
-
- format_ = format;
- width_ = size.width();
- height_ = size.height();
-
- return 0;
-}
-
-void FormatConverter::convert(const unsigned char *src, size_t size,
- QImage *dst)
-{
- switch (formatFamily_) {
- case MJPEG:
- dst->loadFromData(src, size, "JPEG");
- break;
- case YUV:
- convertYUV(src, dst->bits());
- break;
- case RGB:
- convertRGB(src, dst->bits());
- break;
- case NV:
- convertNV(src, dst->bits());
- break;
- };
-}
-
-static void yuv_to_rgb(int y, int u, int v, int *r, int *g, int *b)
-{
- int c = y - 16;
- int d = u - 128;
- int e = v - 128;
- *r = CLIP(( 298 * c + 409 * e + 128) >> RGBSHIFT);
- *g = CLIP(( 298 * c - 100 * d - 208 * e + 128) >> RGBSHIFT);
- *b = CLIP(( 298 * c + 516 * d + 128) >> RGBSHIFT);
-}
-
-void FormatConverter::convertNV(const unsigned char *src, unsigned char *dst)
-{
- unsigned int c_stride = width_ * (2 / horzSubSample_);
- unsigned int c_inc = horzSubSample_ == 1 ? 2 : 0;
- unsigned int cb_pos = nvSwap_ ? 1 : 0;
- unsigned int cr_pos = nvSwap_ ? 0 : 1;
- const unsigned char *src_c = src + width_ * height_;
- int r, g, b;
-
- for (unsigned int y = 0; y < height_; y++) {
- const unsigned char *src_y = src + y * width_;
- const unsigned char *src_cb = src_c + (y / vertSubSample_) *
- c_stride + cb_pos;
- const unsigned char *src_cr = src_c + (y / vertSubSample_) *
- c_stride + cr_pos;
-
- for (unsigned int x = 0; x < width_; x += 2) {
- yuv_to_rgb(*src_y, *src_cb, *src_cr, &r, &g, &b);
- dst[0] = b;
- dst[1] = g;
- dst[2] = r;
- dst[3] = 0xff;
- src_y++;
- src_cb += c_inc;
- src_cr += c_inc;
- dst += 4;
-
- yuv_to_rgb(*src_y, *src_cb, *src_cr, &r, &g, &b);
- dst[0] = b;
- dst[1] = g;
- dst[2] = r;
- dst[3] = 0xff;
- src_y++;
- src_cb += 2;
- src_cr += 2;
- dst += 4;
- }
- }
-}
-
-void FormatConverter::convertRGB(const unsigned char *src, unsigned char *dst)
-{
- unsigned int x, y;
- int r, g, b;
-
- for (y = 0; y < height_; y++) {
- for (x = 0; x < width_; x++) {
- r = src[bpp_ * x + r_pos_];
- g = src[bpp_ * x + g_pos_];
- b = src[bpp_ * x + b_pos_];
-
- dst[4 * x + 0] = b;
- dst[4 * x + 1] = g;
- dst[4 * x + 2] = r;
- dst[4 * x + 3] = 0xff;
- }
-
- src += width_ * bpp_;
- dst += width_ * 4;
- }
-}
-
-void FormatConverter::convertYUV(const unsigned char *src, unsigned char *dst)
-{
- unsigned int src_x, src_y, dst_x, dst_y;
- unsigned int src_stride;
- unsigned int dst_stride;
- unsigned int cr_pos;
- int r, g, b, y, cr, cb;
-
- cr_pos = (cb_pos_ + 2) % 4;
- src_stride = width_ * 2;
- dst_stride = width_ * 4;
-
- for (src_y = 0, dst_y = 0; dst_y < height_; src_y++, dst_y++) {
- for (src_x = 0, dst_x = 0; dst_x < width_; ) {
- cb = src[src_y * src_stride + src_x * 4 + cb_pos_];
- cr = src[src_y * src_stride + src_x * 4 + cr_pos];
-
- y = src[src_y * src_stride + src_x * 4 + y_pos_];
- yuv_to_rgb(y, cb, cr, &r, &g, &b);
- dst[dst_y * dst_stride + 4 * dst_x + 0] = b;
- dst[dst_y * dst_stride + 4 * dst_x + 1] = g;
- dst[dst_y * dst_stride + 4 * dst_x + 2] = r;
- dst[dst_y * dst_stride + 4 * dst_x + 3] = 0xff;
- dst_x++;
-
- y = src[src_y * src_stride + src_x * 4 + y_pos_ + 2];
- yuv_to_rgb(y, cb, cr, &r, &g, &b);
- dst[dst_y * dst_stride + 4 * dst_x + 0] = b;
- dst[dst_y * dst_stride + 4 * dst_x + 1] = g;
- dst[dst_y * dst_stride + 4 * dst_x + 2] = r;
- dst[dst_y * dst_stride + 4 * dst_x + 3] = 0xff;
- dst_x++;
-
- src_x++;
- }
- }
-}
diff --git a/src/qcam/format_converter.h b/src/qcam/format_converter.h
deleted file mode 100644
index 5e28adf0..00000000
--- a/src/qcam/format_converter.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * format_convert.h - qcam - Convert buffer to RGB
- */
-#ifndef __QCAM_FORMAT_CONVERTER_H__
-#define __QCAM_FORMAT_CONVERTER_H__
-
-#include <stddef.h>
-
-#include <QSize>
-
-#include <libcamera/pixelformats.h>
-
-class QImage;
-
-class FormatConverter
-{
-public:
- int configure(const libcamera::PixelFormat &format, const QSize &size);
-
- void convert(const unsigned char *src, size_t size, QImage *dst);
-
-private:
- enum FormatFamily {
- MJPEG,
- NV,
- RGB,
- YUV,
- };
-
- void convertNV(const unsigned char *src, unsigned char *dst);
- void convertRGB(const unsigned char *src, unsigned char *dst);
- void convertYUV(const unsigned char *src, unsigned char *dst);
-
- libcamera::PixelFormat format_;
- unsigned int width_;
- unsigned int height_;
-
- enum FormatFamily formatFamily_;
-
- /* NV parameters */
- unsigned int horzSubSample_;
- unsigned int vertSubSample_;
- bool nvSwap_;
-
- /* RGB parameters */
- unsigned int bpp_;
- unsigned int r_pos_;
- unsigned int g_pos_;
- unsigned int b_pos_;
-
- /* YUV parameters */
- unsigned int y_pos_;
- unsigned int cb_pos_;
-};
-
-#endif /* __QCAM_FORMAT_CONVERTER_H__ */
diff --git a/src/qcam/main.cpp b/src/qcam/main.cpp
deleted file mode 100644
index 862d714f..00000000
--- a/src/qcam/main.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * main.cpp - cam - The libcamera swiss army knife
- */
-
-#include <signal.h>
-#include <string.h>
-
-#include <QApplication>
-#include <QtDebug>
-
-#include <libcamera/camera_manager.h>
-
-#include "main_window.h"
-#include "../cam/options.h"
-
-void signalHandler(int signal)
-{
- qInfo() << "Exiting";
- qApp->quit();
-}
-
-OptionsParser::Options parseOptions(int argc, char *argv[])
-{
- KeyValueParser sizeParser;
- sizeParser.addOption("width", OptionInteger, "Width in pixels",
- ArgumentRequired);
- sizeParser.addOption("height", OptionInteger, "Height in pixels",
- ArgumentRequired);
-
- OptionsParser parser;
- parser.addOption(OptCamera, OptionString,
- "Specify which camera to operate on", "camera",
- ArgumentRequired, "camera");
- parser.addOption(OptHelp, OptionNone, "Display this help message",
- "help");
- parser.addOption(OptSize, &sizeParser, "Set the stream size",
- "size", true);
-
- OptionsParser::Options options = parser.parse(argc, argv);
- if (options.isSet(OptHelp))
- parser.usage();
-
- return options;
-}
-
-int main(int argc, char **argv)
-{
- QApplication app(argc, argv);
- int ret;
-
- OptionsParser::Options options = parseOptions(argc, argv);
- if (!options.valid())
- return EXIT_FAILURE;
- if (options.isSet(OptHelp))
- return 0;
-
- struct sigaction sa = {};
- sa.sa_handler = &signalHandler;
- sigaction(SIGINT, &sa, nullptr);
-
- CameraManager *cm = new CameraManager();
-
- ret = cm->start();
- if (ret) {
- qInfo() << "Failed to start camera manager:"
- << strerror(-ret);
- return EXIT_FAILURE;
- }
-
- MainWindow *mainWindow = new MainWindow(cm, options);
- mainWindow->show();
- ret = app.exec();
- delete mainWindow;
-
- cm->stop();
- delete cm;
-
- return ret;
-}
diff --git a/src/qcam/main_window.cpp b/src/qcam/main_window.cpp
deleted file mode 100644
index cf39ed7a..00000000
--- a/src/qcam/main_window.cpp
+++ /dev/null
@@ -1,554 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * main_window.cpp - qcam - Main application window
- */
-
-#include "main_window.h"
-
-#include <iomanip>
-#include <string>
-#include <sys/mman.h>
-
-#include <QComboBox>
-#include <QCoreApplication>
-#include <QFileDialog>
-#include <QImage>
-#include <QImageWriter>
-#include <QInputDialog>
-#include <QMutexLocker>
-#include <QStandardPaths>
-#include <QTimer>
-#include <QToolBar>
-#include <QToolButton>
-#include <QtDebug>
-
-#include <libcamera/camera_manager.h>
-#include <libcamera/version.h>
-
-using namespace libcamera;
-
-/**
- * \brief Custom QEvent to signal capture completion
- */
-class CaptureEvent : public QEvent
-{
-public:
- CaptureEvent()
- : QEvent(type())
- {
- }
-
- static Type type()
- {
- static int type = QEvent::registerEventType();
- return static_cast<Type>(type);
- }
-};
-
-MainWindow::MainWindow(CameraManager *cm, const OptionsParser::Options &options)
- : options_(options), cm_(cm), allocator_(nullptr), isCapturing_(false)
-{
- int ret;
-
- /*
- * Initialize the UI: Create the toolbar, set the window title and
- * create the viewfinder widget.
- */
- createToolbars();
-
- title_ = "QCam " + QString::fromStdString(CameraManager::version());
- setWindowTitle(title_);
- connect(&titleTimer_, SIGNAL(timeout()), this, SLOT(updateTitle()));
-
- viewfinder_ = new ViewFinder(this);
- connect(viewfinder_, &ViewFinder::renderComplete,
- this, &MainWindow::queueRequest);
- setCentralWidget(viewfinder_);
- adjustSize();
-
- /* Open the camera and start capture. */
- ret = openCamera();
- if (ret < 0)
- quit();
-
- startStopAction_->setChecked(true);
-}
-
-MainWindow::~MainWindow()
-{
- if (camera_) {
- stopCapture();
- camera_->release();
- camera_.reset();
- }
-}
-
-bool MainWindow::event(QEvent *e)
-{
- if (e->type() == CaptureEvent::type()) {
- processCapture();
- return true;
- }
-
- return QMainWindow::event(e);
-}
-
-int MainWindow::createToolbars()
-{
- QAction *action;
-
- toolbar_ = addToolBar("Main");
-
- /* Disable right click context menu. */
- toolbar_->setContextMenuPolicy(Qt::PreventContextMenu);
-
- /* Quit action. */
- action = toolbar_->addAction(QIcon::fromTheme("application-exit",
- QIcon(":x-circle.svg")),
- "Quit");
- action->setShortcut(Qt::CTRL | Qt::Key_Q);
- connect(action, &QAction::triggered, this, &MainWindow::quit);
-
- /* Camera selector. */
- QComboBox *cameraCombo = new QComboBox();
- connect(cameraCombo, QOverload<int>::of(&QComboBox::activated),
- this, &MainWindow::switchCamera);
-
- for (const std::shared_ptr<Camera> &cam : cm_->cameras())
- cameraCombo->addItem(QString::fromStdString(cam->name()));
-
- toolbar_->addWidget(cameraCombo);
-
- toolbar_->addSeparator();
-
- /* Start/Stop action. */
- iconPlay_ = QIcon::fromTheme("media-playback-start",
- QIcon(":play-circle.svg"));
- iconStop_ = QIcon::fromTheme("media-playback-stop",
- QIcon(":stop-circle.svg"));
-
- action = toolbar_->addAction(iconPlay_, "Start Capture");
- action->setCheckable(true);
- action->setShortcut(Qt::Key_Space);
- connect(action, &QAction::toggled, this, &MainWindow::toggleCapture);
- startStopAction_ = action;
-
- /* Save As... action. */
- action = toolbar_->addAction(QIcon::fromTheme("document-save-as",
- QIcon(":save.svg")),
- "Save As...");
- action->setShortcut(QKeySequence::SaveAs);
- connect(action, &QAction::triggered, this, &MainWindow::saveImageAs);
-
- return 0;
-}
-
-void MainWindow::quit()
-{
- QTimer::singleShot(0, QCoreApplication::instance(),
- &QCoreApplication::quit);
-}
-
-void MainWindow::updateTitle()
-{
- /* Calculate the average frame rate over the last period. */
- unsigned int duration = frameRateInterval_.elapsed();
- unsigned int frames = framesCaptured_ - previousFrames_;
- double fps = frames * 1000.0 / duration;
-
- /* Restart counters. */
- frameRateInterval_.start();
- previousFrames_ = framesCaptured_;
-
- setWindowTitle(title_ + " : " + QString::number(fps, 'f', 2) + " fps");
-}
-
-/* -----------------------------------------------------------------------------
- * Camera Selection
- */
-
-void MainWindow::switchCamera(int index)
-{
- /* Get and acquire the new camera. */
- const auto &cameras = cm_->cameras();
- if (static_cast<unsigned int>(index) >= cameras.size())
- return;
-
- const std::shared_ptr<Camera> &cam = cameras[index];
-
- if (cam->acquire()) {
- qInfo() << "Failed to acquire camera" << cam->name().c_str();
- return;
- }
-
- qInfo() << "Switching to camera" << cam->name().c_str();
-
- /*
- * Stop the capture session, release the current camera, replace it with
- * the new camera and start a new capture session.
- */
- startStopAction_->setChecked(false);
-
- camera_->release();
- camera_ = cam;
-
- startStopAction_->setChecked(true);
-}
-
-std::string MainWindow::chooseCamera()
-{
- QStringList cameras;
- bool result;
-
- /* If only one camera is available, use it automatically. */
- if (cm_->cameras().size() == 1)
- return cm_->cameras()[0]->name();
-
- /* Present a dialog box to pick a camera. */
- for (const std::shared_ptr<Camera> &cam : cm_->cameras())
- cameras.append(QString::fromStdString(cam->name()));
-
- QString name = QInputDialog::getItem(this, "Select Camera",
- "Camera:", cameras, 0,
- false, &result);
- if (!result)
- return std::string();
-
- return name.toStdString();
-}
-
-int MainWindow::openCamera()
-{
- std::string cameraName;
-
- /*
- * Use the camera specified on the command line, if any, or display the
- * camera selection dialog box otherwise.
- */
- if (options_.isSet(OptCamera))
- cameraName = static_cast<std::string>(options_[OptCamera]);
- else
- cameraName = chooseCamera();
-
- if (cameraName == "")
- return -EINVAL;
-
- /* Get and acquire the camera. */
- camera_ = cm_->get(cameraName);
- if (!camera_) {
- qInfo() << "Camera" << cameraName.c_str() << "not found";
- return -ENODEV;
- }
-
- if (camera_->acquire()) {
- qInfo() << "Failed to acquire camera";
- camera_.reset();
- return -EBUSY;
- }
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * Capture Start & Stop
- */
-
-void MainWindow::toggleCapture(bool start)
-{
- if (start) {
- startCapture();
- startStopAction_->setIcon(iconStop_);
- startStopAction_->setText("Stop Capture");
- } else {
- stopCapture();
- startStopAction_->setIcon(iconPlay_);
- startStopAction_->setText("Start Capture");
- }
-}
-
-/**
- * \brief Start capture with the current camera
- *
- * This function shall not be called directly, use toggleCapture() instead.
- */
-int MainWindow::startCapture()
-{
- int ret;
-
- /* Configure the camera. */
- config_ = camera_->generateConfiguration({ StreamRole::Viewfinder });
-
- StreamConfiguration &cfg = config_->at(0);
-
- if (options_.isSet(OptSize)) {
- const std::vector<OptionValue> &sizeOptions =
- options_[OptSize].toArray();
-
- /* Set desired stream size if requested. */
- for (const auto &value : sizeOptions) {
- KeyValueParser::Options opt = value.toKeyValues();
-
- if (opt.isSet("width"))
- cfg.size.width = opt["width"];
-
- if (opt.isSet("height"))
- cfg.size.height = opt["height"];
- }
- }
-
- /* Use a format supported by the viewfinder if available. */
- std::vector<PixelFormat> formats = cfg.formats().pixelformats();
- for (const PixelFormat &format : viewfinder_->nativeFormats()) {
- auto match = std::find_if(formats.begin(), formats.end(),
- [&](const PixelFormat &f) {
- return f == format;
- });
- if (match != formats.end()) {
- cfg.pixelFormat = format;
- break;
- }
- }
-
- CameraConfiguration::Status validation = config_->validate();
- if (validation == CameraConfiguration::Invalid) {
- qWarning() << "Failed to create valid camera configuration";
- return -EINVAL;
- }
-
- if (validation == CameraConfiguration::Adjusted)
- qInfo() << "Stream configuration adjusted to "
- << cfg.toString().c_str();
-
- ret = camera_->configure(config_.get());
- if (ret < 0) {
- qInfo() << "Failed to configure camera";
- return ret;
- }
-
- /* Configure the viewfinder. */
- Stream *stream = cfg.stream();
- ret = viewfinder_->setFormat(cfg.pixelFormat,
- QSize(cfg.size.width, cfg.size.height));
- if (ret < 0) {
- qInfo() << "Failed to set viewfinder format";
- return ret;
- }
-
- adjustSize();
-
- /* Allocate buffers and requests. */
- allocator_ = new FrameBufferAllocator(camera_);
- ret = allocator_->allocate(stream);
- if (ret < 0) {
- qWarning() << "Failed to allocate capture buffers";
- return ret;
- }
-
- std::vector<Request *> requests;
- for (const std::unique_ptr<FrameBuffer> &buffer : allocator_->buffers(stream)) {
- Request *request = camera_->createRequest();
- if (!request) {
- qWarning() << "Can't create request";
- ret = -ENOMEM;
- goto error;
- }
-
- ret = request->addBuffer(stream, buffer.get());
- if (ret < 0) {
- qWarning() << "Can't set buffer for request";
- goto error;
- }
-
- requests.push_back(request);
-
- /* Map memory buffers and cache the mappings. */
- const FrameBuffer::Plane &plane = buffer->planes().front();
- void *memory = mmap(NULL, plane.length, PROT_READ, MAP_SHARED,
- plane.fd.fd(), 0);
- mappedBuffers_[buffer.get()] = { memory, plane.length };
- }
-
- /* Start the title timer and the camera. */
- titleTimer_.start(2000);
- frameRateInterval_.start();
- previousFrames_ = 0;
- framesCaptured_ = 0;
- lastBufferTime_ = 0;
-
- ret = camera_->start();
- if (ret) {
- qInfo() << "Failed to start capture";
- goto error;
- }
-
- camera_->requestCompleted.connect(this, &MainWindow::requestComplete);
-
- /* Queue all requests. */
- for (Request *request : requests) {
- ret = camera_->queueRequest(request);
- if (ret < 0) {
- qWarning() << "Can't queue request";
- goto error_disconnect;
- }
- }
-
- isCapturing_ = true;
-
- return 0;
-
-error_disconnect:
- camera_->requestCompleted.disconnect(this, &MainWindow::requestComplete);
- camera_->stop();
-
-error:
- for (Request *request : requests)
- delete request;
-
- for (auto &iter : mappedBuffers_) {
- const MappedBuffer &buffer = iter.second;
- munmap(buffer.memory, buffer.size);
- }
- mappedBuffers_.clear();
-
- delete allocator_;
- allocator_ = nullptr;
-
- return ret;
-}
-
-/**
- * \brief Stop ongoing capture
- *
- * This function may be called directly when tearing down the MainWindow. Use
- * toggleCapture() instead in all other cases.
- */
-void MainWindow::stopCapture()
-{
- if (!isCapturing_)
- return;
-
- viewfinder_->stop();
-
- int ret = camera_->stop();
- if (ret)
- qInfo() << "Failed to stop capture";
-
- camera_->requestCompleted.disconnect(this, &MainWindow::requestComplete);
-
- for (auto &iter : mappedBuffers_) {
- const MappedBuffer &buffer = iter.second;
- munmap(buffer.memory, buffer.size);
- }
- mappedBuffers_.clear();
-
- delete allocator_;
-
- isCapturing_ = false;
-
- config_.reset();
-
- /*
- * A CaptureEvent may have been posted before we stopped the camera,
- * but not processed yet. Clear the queue of done buffers to avoid
- * racing with the event handler.
- */
- doneQueue_.clear();
-
- titleTimer_.stop();
- setWindowTitle(title_);
-}
-
-/* -----------------------------------------------------------------------------
- * Image Save
- */
-
-void MainWindow::saveImageAs()
-{
- QImage image = viewfinder_->getCurrentImage();
- QString defaultPath = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation);
-
- QString filename = QFileDialog::getSaveFileName(this, "Save Image", defaultPath,
- "Image Files (*.png *.jpg *.jpeg)");
- if (filename.isEmpty())
- return;
-
- QImageWriter writer(filename);
- writer.setQuality(95);
- writer.write(image);
-}
-
-/* -----------------------------------------------------------------------------
- * Request Completion Handling
- */
-
-void MainWindow::requestComplete(Request *request)
-{
- if (request->status() == Request::RequestCancelled)
- return;
-
- /*
- * We're running in the libcamera thread context, expensive operations
- * are not allowed. Add the buffer to the done queue and post a
- * CaptureEvent for the application thread to handle.
- */
- const std::map<Stream *, FrameBuffer *> &buffers = request->buffers();
- FrameBuffer *buffer = buffers.begin()->second;
-
- {
- QMutexLocker locker(&mutex_);
- doneQueue_.enqueue(buffer);
- }
-
- QCoreApplication::postEvent(this, new CaptureEvent);
-}
-
-void MainWindow::processCapture()
-{
- /*
- * Retrieve the next buffer from the done queue. The queue may be empty
- * if stopCapture() has been called while a CaptureEvent was posted but
- * not processed yet. Return immediately in that case.
- */
- FrameBuffer *buffer;
-
- {
- QMutexLocker locker(&mutex_);
- if (doneQueue_.isEmpty())
- return;
-
- buffer = doneQueue_.dequeue();
- }
-
- framesCaptured_++;
-
- const FrameMetadata &metadata = buffer->metadata();
-
- double fps = metadata.timestamp - lastBufferTime_;
- fps = lastBufferTime_ && fps ? 1000000000.0 / fps : 0.0;
- lastBufferTime_ = metadata.timestamp;
-
- qInfo() << "seq:" << qSetFieldWidth(6) << qSetPadChar('0')
- << metadata.sequence << reset
- << "bytesused:" << metadata.planes[0].bytesused
- << "timestamp:" << metadata.timestamp
- << "fps:" << fixed << qSetRealNumberPrecision(2) << fps;
-
- /* Render the frame on the viewfinder. */
- viewfinder_->render(buffer, &mappedBuffers_[buffer]);
-}
-
-void MainWindow::queueRequest(FrameBuffer *buffer)
-{
- Request *request = camera_->createRequest();
- if (!request) {
- qWarning() << "Can't create request";
- return;
- }
-
- Stream *stream = config_->at(0).stream();
- request->addBuffer(stream, buffer);
-
- camera_->queueRequest(request);
-}
diff --git a/src/qcam/main_window.h b/src/qcam/main_window.h
deleted file mode 100644
index 5d6251c8..00000000
--- a/src/qcam/main_window.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * main_window.h - qcam - Main application window
- */
-#ifndef __QCAM_MAIN_WINDOW_H__
-#define __QCAM_MAIN_WINDOW_H__
-
-#include <memory>
-
-#include <QElapsedTimer>
-#include <QIcon>
-#include <QMainWindow>
-#include <QMutex>
-#include <QObject>
-#include <QQueue>
-#include <QTimer>
-
-#include <libcamera/buffer.h>
-#include <libcamera/camera.h>
-#include <libcamera/camera_manager.h>
-#include <libcamera/framebuffer_allocator.h>
-#include <libcamera/stream.h>
-
-#include "../cam/options.h"
-#include "viewfinder.h"
-
-using namespace libcamera;
-
-class QAction;
-
-enum {
- OptCamera = 'c',
- OptHelp = 'h',
- OptSize = 's',
-};
-
-class MainWindow : public QMainWindow
-{
- Q_OBJECT
-
-public:
- MainWindow(CameraManager *cm, const OptionsParser::Options &options);
- ~MainWindow();
-
- bool event(QEvent *e) override;
-
-private Q_SLOTS:
- void quit();
- void updateTitle();
-
- void switchCamera(int index);
- void toggleCapture(bool start);
-
- void saveImageAs();
-
- void queueRequest(FrameBuffer *buffer);
-
-private:
- int createToolbars();
-
- std::string chooseCamera();
- int openCamera();
-
- int startCapture();
- void stopCapture();
-
- void requestComplete(Request *request);
- void processCapture();
-
- /* UI elements */
- QToolBar *toolbar_;
- QAction *startStopAction_;
- ViewFinder *viewfinder_;
-
- QIcon iconPlay_;
- QIcon iconStop_;
-
- QString title_;
- QTimer titleTimer_;
-
- /* Options */
- const OptionsParser::Options &options_;
-
- /* Camera manager, camera, configuration and buffers */
- CameraManager *cm_;
- std::shared_ptr<Camera> camera_;
- FrameBufferAllocator *allocator_;
-
- std::unique_ptr<CameraConfiguration> config_;
- std::map<FrameBuffer *, MappedBuffer> mappedBuffers_;
-
- /* Capture state, buffers queue and statistics */
- bool isCapturing_;
- QQueue<FrameBuffer *> doneQueue_;
- QMutex mutex_; /* Protects doneQueue_ */
-
- uint64_t lastBufferTime_;
- QElapsedTimer frameRateInterval_;
- uint32_t previousFrames_;
- uint32_t framesCaptured_;
-};
-
-#endif /* __QCAM_MAIN_WINDOW__ */
diff --git a/src/qcam/meson.build b/src/qcam/meson.build
deleted file mode 100644
index c256d06f..00000000
--- a/src/qcam/meson.build
+++ /dev/null
@@ -1,44 +0,0 @@
-qcam_sources = files([
- 'format_converter.cpp',
- 'main.cpp',
- 'main_window.cpp',
- '../cam/options.cpp',
- 'viewfinder.cpp',
-])
-
-qcam_moc_headers = files([
- 'main_window.h',
- 'viewfinder.h',
-])
-
-qcam_resources = files([
- 'assets/feathericons/feathericons.qrc',
-])
-
-qt5 = import('qt5')
-qt5_dep = dependency('qt5',
- method : 'pkg-config',
- modules : ['Core', 'Gui', 'Widgets'],
- required : false)
-
-if qt5_dep.found()
- qt5_cpp_args = [ '-DQT_NO_KEYWORDS' ]
-
- # gcc 9 introduced a deprecated-copy warning that is triggered by Qt until
- # Qt 5.13. clang 10 introduced the same warning, but detects more issues
- # that are not fixed in Qt yet. Disable the warning manually in both cases.
- if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and
- qt5_dep.version().version_compare('<5.13')) or
- (cc.get_id() == 'clang' and cc.version().version_compare('>=10.0')))
- qt5_cpp_args += [ '-Wno-deprecated-copy' ]
- endif
-
- resources = qt5.preprocess(moc_headers: qcam_moc_headers,
- qresources : qcam_resources,
- dependencies: qt5_dep)
-
- qcam = executable('qcam', qcam_sources, resources,
- install : true,
- dependencies : [libcamera_dep, qt5_dep],
- cpp_args : qt5_cpp_args)
-endif
diff --git a/src/qcam/viewfinder.cpp b/src/qcam/viewfinder.cpp
deleted file mode 100644
index 0d68f62e..00000000
--- a/src/qcam/viewfinder.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * viewfinder.cpp - qcam - Viewfinder
- */
-
-#include "viewfinder.h"
-
-#include <stdint.h>
-#include <utility>
-
-#include <QImage>
-#include <QImageWriter>
-#include <QMap>
-#include <QMutexLocker>
-#include <QPainter>
-#include <QtDebug>
-
-#include "format_converter.h"
-
-static const QMap<libcamera::PixelFormat, QImage::Format> nativeFormats
-{
-#if QT_VERSION >= QT_VERSION_CHECK(5, 2, 0)
- { libcamera::PixelFormat{ DRM_FORMAT_ABGR8888 }, QImage::Format_RGBA8888 },
-#endif
- { libcamera::PixelFormat{ DRM_FORMAT_ARGB8888 }, QImage::Format_RGB32 },
-#if QT_VERSION >= QT_VERSION_CHECK(5, 14, 0)
- { libcamera::PixelFormat{ DRM_FORMAT_BGR888 }, QImage::Format_BGR888 },
-#endif
- { libcamera::PixelFormat{ DRM_FORMAT_RGB888 }, QImage::Format_RGB888 },
-};
-
-ViewFinder::ViewFinder(QWidget *parent)
- : QWidget(parent), buffer_(nullptr)
-{
- icon_ = QIcon(":camera-off.svg");
-}
-
-ViewFinder::~ViewFinder()
-{
-}
-
-const QList<libcamera::PixelFormat> &ViewFinder::nativeFormats() const
-{
- static const QList<libcamera::PixelFormat> formats = ::nativeFormats.keys();
- return formats;
-}
-
-int ViewFinder::setFormat(const libcamera::PixelFormat &format,
- const QSize &size)
-{
- image_ = QImage();
-
- /*
- * If format conversion is needed, configure the converter and allocate
- * the destination image.
- */
- if (!::nativeFormats.contains(format)) {
- int ret = converter_.configure(format, size);
- if (ret < 0)
- return ret;
-
- image_ = QImage(size, QImage::Format_RGB32);
-
- qInfo() << "Using software format conversion from"
- << format.toString().c_str();
- } else {
- qInfo() << "Zero-copy enabled";
- }
-
- format_ = format;
- size_ = size;
-
- updateGeometry();
- return 0;
-}
-
-void ViewFinder::render(libcamera::FrameBuffer *buffer, MappedBuffer *map)
-{
- if (buffer->planes().size() != 1) {
- qWarning() << "Multi-planar buffers are not supported";
- return;
- }
-
- unsigned char *memory = static_cast<unsigned char *>(map->memory);
- size_t size = buffer->metadata().planes[0].bytesused;
-
- {
- QMutexLocker locker(&mutex_);
-
- if (::nativeFormats.contains(format_)) {
- /*
- * If the frame format is identical to the display
- * format, create a QImage that references the frame
- * and store a reference to the frame buffer. The
- * previously stored frame buffer, if any, will be
- * released.
- *
- * \todo Get the stride from the buffer instead of
- * computing it naively
- */
- image_ = QImage(memory, size_.width(), size_.height(),
- size / size_.height(),
- ::nativeFormats[format_]);
- std::swap(buffer, buffer_);
- } else {
- /*
- * Otherwise, convert the format and release the frame
- * buffer immediately.
- */
- converter_.convert(memory, size, &image_);
- }
- }
-
- update();
-
- if (buffer)
- renderComplete(buffer);
-}
-
-void ViewFinder::stop()
-{
- image_ = QImage();
-
- if (buffer_) {
- renderComplete(buffer_);
- buffer_ = nullptr;
- }
-
- update();
-}
-
-QImage ViewFinder::getCurrentImage()
-{
- QMutexLocker locker(&mutex_);
-
- return image_.copy();
-}
-
-void ViewFinder::paintEvent(QPaintEvent *)
-{
- QPainter painter(this);
-
- /* If we have an image, draw it. */
- if (!image_.isNull()) {
- painter.drawImage(rect(), image_, image_.rect());
- return;
- }
-
- /*
- * Otherwise, draw the camera stopped icon. Render it to the pixmap if
- * the size has changed.
- */
- constexpr int margin = 20;
-
- if (vfSize_ != size() || pixmap_.isNull()) {
- QSize vfSize = size() - QSize{ 2 * margin, 2 * margin };
- QSize pixmapSize{ 1, 1 };
- pixmapSize.scale(vfSize, Qt::KeepAspectRatio);
- pixmap_ = icon_.pixmap(pixmapSize);
-
- vfSize_ = size();
- }
-
- QPoint point{ margin, margin };
- if (pixmap_.width() < width() - 2 * margin)
- point.setX((width() - pixmap_.width()) / 2);
- else
- point.setY((height() - pixmap_.height()) / 2);
-
- painter.setBackgroundMode(Qt::OpaqueMode);
- painter.drawPixmap(point, pixmap_);
-}
-
-QSize ViewFinder::sizeHint() const
-{
- return size_.isValid() ? size_ : QSize(640, 480);
-}
diff --git a/src/qcam/viewfinder.h b/src/qcam/viewfinder.h
deleted file mode 100644
index b3f1d25d..00000000
--- a/src/qcam/viewfinder.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019, Google Inc.
- *
- * viewfinder.h - qcam - Viewfinder
- */
-#ifndef __QCAM_VIEWFINDER_H__
-#define __QCAM_VIEWFINDER_H__
-
-#include <stddef.h>
-
-#include <QIcon>
-#include <QList>
-#include <QImage>
-#include <QMutex>
-#include <QSize>
-#include <QWidget>
-
-#include <libcamera/buffer.h>
-#include <libcamera/pixelformats.h>
-
-#include "format_converter.h"
-
-class QImage;
-
-struct MappedBuffer {
- void *memory;
- size_t size;
-};
-
-class ViewFinder : public QWidget
-{
- Q_OBJECT
-
-public:
- ViewFinder(QWidget *parent);
- ~ViewFinder();
-
- const QList<libcamera::PixelFormat> &nativeFormats() const;
-
- int setFormat(const libcamera::PixelFormat &format, const QSize &size);
- void render(libcamera::FrameBuffer *buffer, MappedBuffer *map);
- void stop();
-
- QImage getCurrentImage();
-
-Q_SIGNALS:
- void renderComplete(libcamera::FrameBuffer *buffer);
-
-protected:
- void paintEvent(QPaintEvent *) override;
- QSize sizeHint() const override;
-
-private:
- FormatConverter converter_;
-
- libcamera::PixelFormat format_;
- QSize size_;
-
- /* Camera stopped icon */
- QSize vfSize_;
- QIcon icon_;
- QPixmap pixmap_;
-
- /* Buffer and render image */
- libcamera::FrameBuffer *buffer_;
- QImage image_;
- QMutex mutex_; /* Prevent concurrent access to image_ */
-};
-
-#endif /* __QCAM_VIEWFINDER__ */
diff --git a/src/v4l2/libcamerify.in b/src/v4l2/libcamerify.in
new file mode 100755
index 00000000..c4ea273f
--- /dev/null
+++ b/src/v4l2/libcamerify.in
@@ -0,0 +1,47 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+help() {
+ echo "$0: Load an application with libcamera V4L2 compatibility layer preload"
+ echo " $0 [OPTIONS...] executable [args]"
+ echo " -d, --debug Increase log level"
+}
+
+debug=0
+while [ $# -gt 0 ]; do
+ case $1 in
+ -d|--debug)
+ debug=$((debug+1))
+ ;;
+ -h)
+ help;
+ exit 0
+ ;;
+ --)
+ shift;
+ break;;
+ -*)
+ echo "Unrecognised option: $1";
+ help;
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+[ $debug -gt 0 ] && loglevel=V4L2Compat:0
+[ $debug -gt 1 ] && loglevel=0
+[ "$loglevel" != "" ] && export LIBCAMERA_LOG_LEVELS=$loglevel
+
+if [ "$LD_PRELOAD" = "" ] ; then
+ LD_PRELOAD='@LIBCAMERA_V4L2_SO@'
+else
+ LD_PRELOAD="$LD_PRELOAD "'@LIBCAMERA_V4L2_SO@'
+fi
+
+export LD_PRELOAD
+
+exec "$@"
diff --git a/src/v4l2/meson.build b/src/v4l2/meson.build
index efab968f..2c040414 100644
--- a/src/v4l2/meson.build
+++ b/src/v4l2/meson.build
@@ -1,5 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+v4l2_enabled = get_option('v4l2').allowed()
+
+if not v4l2_enabled
+ subdir_done()
+endif
+
v4l2_compat_sources = files([
'v4l2_camera.cpp',
+ 'v4l2_camera_file.cpp',
'v4l2_camera_proxy.cpp',
'v4l2_compat.cpp',
'v4l2_compat_manager.cpp',
@@ -13,6 +22,8 @@ v4l2_compat_cpp_args = [
# file operations, disable transparent large file support.
'-U_FILE_OFFSET_BITS',
'-D_FILE_OFFSET_BITS=32',
+ '-D_LARGEFILE64_SOURCE',
+ '-U_TIME_BITS',
'-fvisibility=hidden',
]
@@ -20,6 +31,18 @@ v4l2_compat = shared_library('v4l2-compat',
v4l2_compat_sources,
name_prefix : '',
install : true,
- include_directories : libcamera_internal_includes,
- dependencies : [ libcamera_dep, libdl ],
+ install_dir : libcamera_libexecdir,
+ dependencies : [libcamera_private, libdl],
cpp_args : v4l2_compat_cpp_args)
+
+# Provide a wrapper script to make it easy to run applications with the V4L2
+# adaptation layer preloaded.
+
+cdata = configuration_data()
+cdata.set('LIBCAMERA_V4L2_SO', get_option('prefix') / libcamera_libexecdir / 'v4l2-compat.so')
+
+configure_file(input : 'libcamerify.in',
+ output : 'libcamerify',
+ configuration : cdata,
+ install_dir : get_option('bindir'),
+ install_tag : 'bin')
diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
index ecbb70ac..94d138cd 100644
--- a/src/v4l2/v4l2_camera.cpp
+++ b/src/v4l2/v4l2_camera.cpp
@@ -1,22 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.cpp - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
#include "v4l2_camera.h"
#include <errno.h>
+#include <unistd.h>
-#include "log.h"
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
using namespace libcamera;
-LOG_DECLARE_CATEGORY(V4L2Compat);
+LOG_DECLARE_CATEGORY(V4L2Compat)
V4L2Camera::V4L2Camera(std::shared_ptr<Camera> camera)
- : camera_(camera), isRunning_(false), bufferAllocator_(nullptr)
+ : camera_(camera), controls_(controls::controls), isRunning_(false),
+ bufferAllocator_(nullptr), efd_(-1), bufferAvailableCount_(0)
{
camera_->requestCompleted.connect(this, &V4L2Camera::requestComplete);
}
@@ -26,9 +30,8 @@ V4L2Camera::~V4L2Camera()
close();
}
-int V4L2Camera::open()
+int V4L2Camera::open(StreamConfiguration *streamConfig)
{
- /* \todo Support multiple open. */
if (camera_->acquire() < 0) {
LOG(V4L2Compat, Error) << "Failed to acquire camera";
return -EINVAL;
@@ -42,31 +45,38 @@ int V4L2Camera::open()
bufferAllocator_ = new FrameBufferAllocator(camera_);
+ *streamConfig = config_->at(0);
return 0;
}
void V4L2Camera::close()
{
+ requestPool_.clear();
+
delete bufferAllocator_;
bufferAllocator_ = nullptr;
camera_->release();
}
-void V4L2Camera::getStreamConfig(StreamConfiguration *streamConfig)
+void V4L2Camera::bind(int efd)
{
- *streamConfig = config_->at(0);
+ efd_ = efd;
+}
+
+void V4L2Camera::unbind()
+{
+ efd_ = -1;
}
std::vector<V4L2Camera::Buffer> V4L2Camera::completedBuffers()
{
std::vector<Buffer> v;
- bufferLock_.lock();
+ MutexLocker lock(bufferLock_);
for (std::unique_ptr<Buffer> &metadata : completedBuffers_)
v.push_back(*metadata.get());
completedBuffers_.clear();
- bufferLock_.unlock();
return v;
}
@@ -84,7 +94,17 @@ void V4L2Camera::requestComplete(Request *request)
completedBuffers_.push_back(std::move(metadata));
bufferLock_.unlock();
- bufferSema_.release();
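+ /* An 8-byte write increments the eventfd counter, raising POLLIN for pollers. */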
+ uint64_t data = 1;
+ int ret = ::write(efd_, &data, sizeof(data));
+ if (ret != sizeof(data))
+ LOG(V4L2Compat, Error) << "Failed to signal eventfd POLLIN";
+
+ request->reuse();
+ {
+ MutexLocker locker(bufferMutex_);
+ bufferAvailableCount_++;
+ }
+ bufferCV_.notify_all();
}
int V4L2Camera::configure(StreamConfiguration *streamConfigOut,
@@ -118,29 +138,65 @@ int V4L2Camera::configure(StreamConfiguration *streamConfigOut,
return 0;
}
+int V4L2Camera::validateConfiguration(const PixelFormat &pixelFormat,
+ const Size &size,
+ StreamConfiguration *streamConfigOut)
+{
+ std::unique_ptr<CameraConfiguration> config =
+ camera_->generateConfiguration({ StreamRole::Viewfinder });
+ StreamConfiguration &cfg = config->at(0);
+ cfg.size = size;
+ cfg.pixelFormat = pixelFormat;
+ cfg.bufferCount = 1;
+
+ CameraConfiguration::Status validation = config->validate();
+ if (validation == CameraConfiguration::Invalid)
+ return -EINVAL;
+
+ *streamConfigOut = cfg;
+
+ return 0;
+}
+
int V4L2Camera::allocBuffers(unsigned int count)
{
- Stream *stream = *camera_->streams().begin();
+ Stream *stream = config_->at(0).stream();
- return bufferAllocator_->allocate(stream);
+ int ret = bufferAllocator_->allocate(stream);
+ if (ret < 0)
+ return ret;
+
+ for (unsigned int i = 0; i < count; i++) {
+ std::unique_ptr<Request> request = camera_->createRequest(i);
+ if (!request) {
+ requestPool_.clear();
+ return -ENOMEM;
+ }
+ requestPool_.push_back(std::move(request));
+ }
+
+ return ret;
}
void V4L2Camera::freeBuffers()
{
- Stream *stream = *camera_->streams().begin();
+ pendingRequests_.clear();
+ requestPool_.clear();
+
+ Stream *stream = config_->at(0).stream();
bufferAllocator_->free(stream);
}
-FileDescriptor V4L2Camera::getBufferFd(unsigned int index)
+int V4L2Camera::getBufferFd(unsigned int index)
{
- Stream *stream = *camera_->streams().begin();
+ Stream *stream = config_->at(0).stream();
const std::vector<std::unique_ptr<FrameBuffer>> &buffers =
bufferAllocator_->buffers(stream);
if (buffers.size() <= index)
- return FileDescriptor();
+ return -1;
- return buffers[index]->planes()[0].fd;
+ return buffers[index]->planes()[0].fd.get();
}
int V4L2Camera::streamOn()
@@ -148,15 +204,17 @@ int V4L2Camera::streamOn()
if (isRunning_)
return 0;
- int ret = camera_->start();
+ int ret = camera_->start(&controls_);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
+ controls_.clear();
+
isRunning_ = true;
- for (std::unique_ptr<Request> &req : pendingRequests_) {
+ for (Request *req : pendingRequests_) {
/* \todo What should we do if this returns -EINVAL? */
- ret = camera_->queueRequest(req.release());
+ ret = camera_->queueRequest(req);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
}
@@ -168,27 +226,35 @@ int V4L2Camera::streamOn()
int V4L2Camera::streamOff()
{
- /* \todo Restore buffers to reqbufs state? */
- if (!isRunning_)
+ if (!isRunning_) {
+ for (std::unique_ptr<Request> &req : requestPool_)
+ req->reuse();
+
return 0;
+ }
+
+ pendingRequests_.clear();
int ret = camera_->stop();
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
- isRunning_ = false;
+ {
+ MutexLocker locker(bufferMutex_);
+ isRunning_ = false;
+ }
+ bufferCV_.notify_all();
return 0;
}
int V4L2Camera::qbuf(unsigned int index)
{
- std::unique_ptr<Request> request =
- std::unique_ptr<Request>(camera_->createRequest(index));
- if (!request) {
- LOG(V4L2Compat, Error) << "Can't create request";
- return -ENOMEM;
+ if (index >= requestPool_.size()) {
+ LOG(V4L2Compat, Error) << "Invalid index";
+ return -EINVAL;
}
+ Request *request = requestPool_[index].get();
Stream *stream = config_->at(0).stream();
FrameBuffer *buffer = bufferAllocator_->buffers(stream)[index].get();
@@ -199,11 +265,13 @@ int V4L2Camera::qbuf(unsigned int index)
}
if (!isRunning_) {
- pendingRequests_.push_back(std::move(request));
+ pendingRequests_.push_back(request);
return 0;
}
- ret = camera_->queueRequest(request.release());
+ request->controls().merge(std::move(controls_));
+
+ ret = camera_->queueRequest(request);
if (ret < 0) {
LOG(V4L2Compat, Error) << "Can't queue request";
return ret == -EACCES ? -EBUSY : ret;
@@ -211,3 +279,28 @@ int V4L2Camera::qbuf(unsigned int index)
return 0;
}
+
+void V4L2Camera::waitForBufferAvailable()
+{
+ MutexLocker locker(bufferMutex_);
+ bufferCV_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(bufferMutex_) {
+ return bufferAvailableCount_ >= 1 || !isRunning_;
+ });
+ if (isRunning_)
+ bufferAvailableCount_--;
+}
+
+bool V4L2Camera::isBufferAvailable()
+{
+ MutexLocker locker(bufferMutex_);
+ if (bufferAvailableCount_ < 1)
+ return false;
+
+ bufferAvailableCount_--;
+ return true;
+}
+
+bool V4L2Camera::isRunning()
+{
+ return isRunning_;
+}
diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
index 130995d9..9bd161b9 100644
--- a/src/v4l2/v4l2_camera.h
+++ b/src/v4l2/v4l2_camera.h
@@ -1,75 +1,96 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera.h - V4L2 compatibility camera
+ * V4L2 compatibility camera
*/
-#ifndef __V4L2_CAMERA_H__
-#define __V4L2_CAMERA_H__
+#pragma once
#include <deque>
-#include <mutex>
-#include <utility>
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/semaphore.h>
+#include <libcamera/base/shared_fd.h>
-#include <libcamera/buffer.h>
#include <libcamera/camera.h>
-#include <libcamera/file_descriptor.h>
+#include <libcamera/controls.h>
+#include <libcamera/framebuffer.h>
#include <libcamera/framebuffer_allocator.h>
-#include "semaphore.h"
-
-using namespace libcamera;
-
class V4L2Camera
{
public:
struct Buffer {
- Buffer(unsigned int index, const FrameMetadata &data)
- : index(index), data(data)
+ Buffer(unsigned int index, const libcamera::FrameMetadata &data)
+ : index_(index), data_(data)
{
}
- unsigned int index;
- FrameMetadata data;
+ unsigned int index_;
+ libcamera::FrameMetadata data_;
};
- V4L2Camera(std::shared_ptr<Camera> camera);
+ V4L2Camera(std::shared_ptr<libcamera::Camera> camera);
~V4L2Camera();
- int open();
+ int open(libcamera::StreamConfiguration *streamConfig);
void close();
- void getStreamConfig(StreamConfiguration *streamConfig);
- std::vector<Buffer> completedBuffers();
+ void bind(int efd);
+ void unbind();
- int configure(StreamConfiguration *streamConfigOut,
- const Size &size, const PixelFormat &pixelformat,
+ std::vector<Buffer> completedBuffers() LIBCAMERA_TSA_EXCLUDES(bufferLock_);
+
+ int configure(libcamera::StreamConfiguration *streamConfigOut,
+ const libcamera::Size &size,
+ const libcamera::PixelFormat &pixelformat,
unsigned int bufferCount);
+ int validateConfiguration(const libcamera::PixelFormat &pixelformat,
+ const libcamera::Size &size,
+ libcamera::StreamConfiguration *streamConfigOut);
+
+ libcamera::ControlList &controls() { return controls_; }
+ const libcamera::ControlInfoMap &controlInfo() { return camera_->controls(); }
int allocBuffers(unsigned int count);
void freeBuffers();
- FileDescriptor getBufferFd(unsigned int index);
+ int getBufferFd(unsigned int index);
int streamOn();
int streamOff();
int qbuf(unsigned int index);
- Semaphore bufferSema_;
+ void waitForBufferAvailable() LIBCAMERA_TSA_EXCLUDES(bufferMutex_);
+ bool isBufferAvailable() LIBCAMERA_TSA_EXCLUDES(bufferMutex_);
+
+ bool isRunning();
private:
- void requestComplete(Request *request);
+ void requestComplete(libcamera::Request *request)
+ LIBCAMERA_TSA_EXCLUDES(bufferLock_);
- std::shared_ptr<Camera> camera_;
- std::unique_ptr<CameraConfiguration> config_;
+ std::shared_ptr<libcamera::Camera> camera_;
+ std::unique_ptr<libcamera::CameraConfiguration> config_;
+
+ libcamera::ControlList controls_;
bool isRunning_;
- std::mutex bufferLock_;
- FrameBufferAllocator *bufferAllocator_;
+ libcamera::Mutex bufferLock_;
+ libcamera::FrameBufferAllocator *bufferAllocator_;
- std::deque<std::unique_ptr<Request>> pendingRequests_;
- std::deque<std::unique_ptr<Buffer>> completedBuffers_;
-};
+ std::vector<std::unique_ptr<libcamera::Request>> requestPool_;
+
+ std::deque<libcamera::Request *> pendingRequests_;
+ std::deque<std::unique_ptr<Buffer>> completedBuffers_
+ LIBCAMERA_TSA_GUARDED_BY(bufferLock_);
-#endif /* __V4L2_CAMERA_H__ */
+ int efd_;
+
+ libcamera::Mutex bufferMutex_;
+ libcamera::ConditionVariable bufferCV_;
+ unsigned int bufferAvailableCount_ LIBCAMERA_TSA_GUARDED_BY(bufferMutex_);
+};
diff --git a/src/v4l2/v4l2_camera_file.cpp b/src/v4l2/v4l2_camera_file.cpp
new file mode 100644
index 00000000..d8fe854b
--- /dev/null
+++ b/src/v4l2/v4l2_camera_file.cpp
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * V4L2 compatibility camera file information
+ */
+
+#include "v4l2_camera_file.h"
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <linux/videodev2.h>
+
+#include "v4l2_camera_proxy.h"
+
+using namespace libcamera;
+
+V4L2CameraFile::V4L2CameraFile(int dirfd, const char *path, int efd,
+ bool nonBlocking, V4L2CameraProxy *proxy)
+ : proxy_(proxy), nonBlocking_(nonBlocking), efd_(efd),
+ priority_(V4L2_PRIORITY_DEFAULT)
+{
+ proxy_->open(this);
+
+ if (path[0] != '/') {
+ if (dirfd == AT_FDCWD) {
+ char *cwd = getcwd(nullptr, 0);
+ if (cwd) {
+ description_ = std::string(cwd) + "/";
+ free(cwd);
+ } else {
+ description_ = std::string("(unreachable)/");
+ }
+ } else {
+ description_ = "(dirfd:" + std::to_string(dirfd) + ")/";
+ }
+ }
+
+ description_ += std::string(path) + " (fd:" + std::to_string(efd) + ")";
+}
+
+V4L2CameraFile::~V4L2CameraFile()
+{
+ proxy_->close(this);
+}
+
+const std::string &V4L2CameraFile::description() const
+{
+ return description_;
+}
diff --git a/src/v4l2/v4l2_camera_file.h b/src/v4l2/v4l2_camera_file.h
new file mode 100644
index 00000000..1212989e
--- /dev/null
+++ b/src/v4l2/v4l2_camera_file.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * V4L2 compatibility camera file information
+ */
+
+#pragma once
+
+#include <string>
+
+#include <linux/videodev2.h>
+
+class V4L2CameraProxy;
+
+class V4L2CameraFile
+{
+public:
+ V4L2CameraFile(int dirfd, const char *path, int efd, bool nonBlocking,
+ V4L2CameraProxy *proxy);
+ ~V4L2CameraFile();
+
+ V4L2CameraProxy *proxy() const { return proxy_; }
+
+ bool nonBlocking() const { return nonBlocking_; }
+ int efd() const { return efd_; }
+
+ enum v4l2_priority priority() const { return priority_; }
+ void setPriority(enum v4l2_priority priority) { priority_ = priority; }
+
+ const std::string &description() const;
+
+private:
+ V4L2CameraProxy *proxy_;
+
+ std::string description_;
+ bool nonBlocking_;
+ int efd_;
+ enum v4l2_priority priority_;
+};
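
The per-file priority stored here feeds the proxy's maxPriority() check. A
hypothetical client-side view of the resulting semantics, with "/dev/video0"
standing in for any node handled by the compatibility layer:

	#include <fcntl.h>
	#include <sys/ioctl.h>

	#include <linux/videodev2.h>

	void prioritySketch()
	{
		int fdA = open("/dev/video0", O_RDWR);
		int fdB = open("/dev/video0", O_RDWR);

		enum v4l2_priority prio = V4L2_PRIORITY_RECORD;
		ioctl(fdA, VIDIOC_S_PRIORITY, &prio);	/* fdA now holds the highest priority */

		struct v4l2_format fmt = {};
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		ioctl(fdB, VIDIOC_G_FMT, &fmt);	/* fine, read-only ioctls are unaffected */
		ioctl(fdB, VIDIOC_S_FMT, &fmt);	/* fails with EBUSY: fdB's priority is lower */
	}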
diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
index 47d0528b..559ffc61 100644
--- a/src/v4l2/v4l2_camera_proxy.cpp
+++ b/src/v4l2/v4l2_camera_proxy.cpp
@@ -1,70 +1,97 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.cpp - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
#include "v4l2_camera_proxy.h"
#include <algorithm>
-#include <array>
#include <errno.h>
-#include <linux/videodev2.h>
+#include <numeric>
+#include <set>
#include <string.h>
#include <sys/mman.h>
+#include <unistd.h>
+
+#include <linux/videodev2.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/object.h>
+#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
-#include <libcamera/object.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/v4l2_pixelformat.h"
-#include "log.h"
-#include "utils.h"
#include "v4l2_camera.h"
+#include "v4l2_camera_file.h"
#include "v4l2_compat_manager.h"
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
using namespace libcamera;
+using namespace std::literals::chrono_literals;
-LOG_DECLARE_CATEGORY(V4L2Compat);
+LOG_DECLARE_CATEGORY(V4L2Compat)
V4L2CameraProxy::V4L2CameraProxy(unsigned int index,
std::shared_ptr<Camera> camera)
: refcount_(0), index_(index), bufferCount_(0), currentBuf_(0),
- vcam_(std::make_unique<V4L2Camera>(camera))
+ vcam_(std::make_unique<V4L2Camera>(camera)), owner_(nullptr)
{
querycap(camera);
}
-int V4L2CameraProxy::open(bool nonBlocking)
+int V4L2CameraProxy::open(V4L2CameraFile *file)
{
- LOG(V4L2Compat, Debug) << "Servicing open";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
- int ret = vcam_->open();
- if (ret < 0) {
- errno = -ret;
- return -1;
+ MutexLocker locker(proxyMutex_);
+
+ if (refcount_++) {
+ files_.insert(file);
+ return 0;
}
- nonBlocking_ = nonBlocking;
+ /*
+ * We open the camera here, once, and keep it open until the last
+ * V4L2CameraFile is closed. The proxy is initially not owned by any
+ * file. The first file that calls reqbufs with count > 0 or s_fmt
+ * will become the owner, and no other file will be allowed to call
+ * buffer-related ioctls (except querybuf), set the format, or start or
+ * stop the stream until ownership is released with a call to reqbufs
+ * with count = 0.
+ */
+
+ int ret = vcam_->open(&streamConfig_);
+ if (ret < 0) {
+ refcount_--;
+ return ret;
+ }
- vcam_->getStreamConfig(&streamConfig_);
setFmtFromConfig(streamConfig_);
- sizeimage_ = calculateSizeImage(streamConfig_);
- refcount_++;
+ files_.insert(file);
return 0;
}
-void V4L2CameraProxy::dup()
+void V4L2CameraProxy::close(V4L2CameraFile *file)
{
- refcount_++;
-}
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
-void V4L2CameraProxy::close()
-{
- LOG(V4L2Compat, Debug) << "Servicing close";
+ MutexLocker locker(proxyMutex_);
+
+ files_.erase(file);
+
+ release(file);
if (--refcount_ > 0)
return;
@@ -72,13 +99,24 @@ void V4L2CameraProxy::close()
vcam_->close();
}
-void *V4L2CameraProxy::mmap(void *addr, size_t length, int prot, int flags,
- off_t offset)
+void *V4L2CameraProxy::mmap(V4L2CameraFile *file, void *addr, size_t length,
+ int prot, int flags, off64_t offset)
{
- LOG(V4L2Compat, Debug) << "Servicing mmap";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
- /* \todo Validate prot and flags properly. */
- if (prot != (PROT_READ | PROT_WRITE)) {
+ MutexLocker locker(proxyMutex_);
+
+ /*
+ * Mimic the videobuf2 behaviour, which requires PROT_READ and
+ * MAP_SHARED.
+ */
+ if (!(prot & PROT_READ)) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+
+ if (!(flags & MAP_SHARED)) {
errno = EINVAL;
return MAP_FAILED;
}
@@ -90,14 +128,14 @@ void *V4L2CameraProxy::mmap(void *addr, size_t length, int prot, int flags,
return MAP_FAILED;
}
- FileDescriptor fd = vcam_->getBufferFd(index);
- if (!fd.isValid()) {
+ int fd = vcam_->getBufferFd(index);
+ if (fd < 0) {
errno = EINVAL;
return MAP_FAILED;
}
void *map = V4L2CompatManager::instance()->fops().mmap(addr, length, prot,
- flags, fd.fd(), 0);
+ flags, fd, 0);
if (map == MAP_FAILED)
return map;
@@ -107,9 +145,12 @@ void *V4L2CameraProxy::mmap(void *addr, size_t length, int prot, int flags,
return map;
}
-int V4L2CameraProxy::munmap(void *addr, size_t length)
+int V4L2CameraProxy::munmap(V4L2CameraFile *file, void *addr, size_t length)
{
- LOG(V4L2Compat, Debug) << "Servicing munmap";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ MutexLocker locker(proxyMutex_);
auto iter = mmaps_.find(addr);
if (iter == mmaps_.end() || length != sizeimage_) {
@@ -137,31 +178,46 @@ bool V4L2CameraProxy::validateMemoryType(uint32_t memory)
return memory == V4L2_MEMORY_MMAP;
}
-void V4L2CameraProxy::setFmtFromConfig(StreamConfiguration &streamConfig)
-{
- curV4L2Format_.fmt.pix.width = streamConfig.size.width;
- curV4L2Format_.fmt.pix.height = streamConfig.size.height;
- curV4L2Format_.fmt.pix.pixelformat = drmToV4L2(streamConfig.pixelFormat);
- curV4L2Format_.fmt.pix.field = V4L2_FIELD_NONE;
- curV4L2Format_.fmt.pix.bytesperline =
- bplMultiplier(curV4L2Format_.fmt.pix.pixelformat) *
- curV4L2Format_.fmt.pix.width;
- curV4L2Format_.fmt.pix.sizeimage =
- imageSize(curV4L2Format_.fmt.pix.pixelformat,
- curV4L2Format_.fmt.pix.width,
- curV4L2Format_.fmt.pix.height);
- curV4L2Format_.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
-}
-
-unsigned int V4L2CameraProxy::calculateSizeImage(StreamConfiguration &streamConfig)
+void V4L2CameraProxy::setFmtFromConfig(const StreamConfiguration &streamConfig)
{
- /*
- * \todo Merge this method with setFmtFromConfig (need imageSize to
- * support all libcamera formats first, or filter out MJPEG for now).
- */
- return imageSize(drmToV4L2(streamConfig.pixelFormat),
- streamConfig.size.width,
- streamConfig.size.height);
+ const Size &size = streamConfig.size;
+
+ v4l2PixFormat_.width = size.width;
+ v4l2PixFormat_.height = size.height;
+ v4l2PixFormat_.pixelformat = V4L2PixelFormat::fromPixelFormat(streamConfig.pixelFormat)[0];
+ v4l2PixFormat_.field = V4L2_FIELD_NONE;
+ v4l2PixFormat_.bytesperline = streamConfig.stride;
+ v4l2PixFormat_.sizeimage = streamConfig.frameSize;
+ v4l2PixFormat_.colorspace = V4L2_COLORSPACE_SRGB;
+ v4l2PixFormat_.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ v4l2PixFormat_.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ v4l2PixFormat_.quantization = V4L2_QUANTIZATION_DEFAULT;
+ v4l2PixFormat_.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ sizeimage_ = streamConfig.frameSize;
+
+ const ControlInfoMap &controls = vcam_->controlInfo();
+ const auto &it = controls.find(&controls::FrameDurationLimits);
+
+ if (it != controls.end()) {
+ const int64_t duration = it->second.def().get<int64_t>();
+
+ v4l2TimePerFrame_.numerator = duration;
+ v4l2TimePerFrame_.denominator = 1000000;
+	} else {
+		/*
+		 * Default to 30fps (a 33333µs frame duration) if the camera
+		 * doesn't expose the FrameDurationLimits control.
+		 *
+		 * \todo Remove this once all pipeline handlers implement the
+		 * control
+		 */
+		LOG(V4L2Compat, Warning)
+			<< "Camera does not support FrameDurationLimits";
+
+		v4l2TimePerFrame_.numerator = 33333;
+		v4l2TimePerFrame_.denominator = 1000000;
+	}
}
void V4L2CameraProxy::querycap(std::shared_ptr<Camera> camera)
@@ -171,13 +227,15 @@ void V4L2CameraProxy::querycap(std::shared_ptr<Camera> camera)
utils::strlcpy(reinterpret_cast<char *>(capabilities_.driver), driver.c_str(),
sizeof(capabilities_.driver));
- utils::strlcpy(reinterpret_cast<char *>(capabilities_.card), camera->name().c_str(),
+ utils::strlcpy(reinterpret_cast<char *>(capabilities_.card), camera->id().c_str(),
sizeof(capabilities_.card));
utils::strlcpy(reinterpret_cast<char *>(capabilities_.bus_info), bus_info.c_str(),
sizeof(capabilities_.bus_info));
/* \todo Put this in a header/config somewhere. */
capabilities_.version = KERNEL_VERSION(5, 2, 0);
- capabilities_.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ capabilities_.device_caps = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_STREAMING
+ | V4L2_CAP_EXT_PIX_FORMAT;
capabilities_.capabilities = capabilities_.device_caps
| V4L2_CAP_DEVICE_CAPS;
memset(capabilities_.reserved, 0, sizeof(capabilities_.reserved));
@@ -187,15 +245,19 @@ void V4L2CameraProxy::updateBuffers()
{
std::vector<V4L2Camera::Buffer> completedBuffers = vcam_->completedBuffers();
for (const V4L2Camera::Buffer &buffer : completedBuffers) {
- const FrameMetadata &fmd = buffer.data;
- struct v4l2_buffer &buf = buffers_[buffer.index];
+ const FrameMetadata &fmd = buffer.data_;
+ struct v4l2_buffer &buf = buffers_[buffer.index_];
switch (fmd.status) {
case FrameMetadata::FrameSuccess:
- buf.bytesused = fmd.planes[0].bytesused;
+ buf.bytesused = std::accumulate(fmd.planes().begin(),
+ fmd.planes().end(), 0,
+ [](unsigned int total, const auto &plane) {
+ return total + plane.bytesused;
+ });
buf.field = V4L2_FIELD_NONE;
buf.timestamp.tv_sec = fmd.timestamp / 1000000000;
- buf.timestamp.tv_usec = fmd.timestamp % 1000000;
+ buf.timestamp.tv_usec = (fmd.timestamp / 1000) % 1000000;
buf.sequence = fmd.sequence;
buf.flags |= V4L2_BUF_FLAG_DONE;
@@ -209,128 +271,237 @@ void V4L2CameraProxy::updateBuffers()
}
}
-int V4L2CameraProxy::vidioc_querycap(struct v4l2_capability *arg)
+int V4L2CameraProxy::vidioc_querycap(V4L2CameraFile *file, struct v4l2_capability *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_querycap";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
*arg = capabilities_;
return 0;
}
-int V4L2CameraProxy::vidioc_enum_fmt(struct v4l2_fmtdesc *arg)
+int V4L2CameraProxy::vidioc_enum_framesizes(V4L2CameraFile *file, struct v4l2_frmsizeenum *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->pixel_format);
+ PixelFormat format = v4l2Format.toPixelFormat();
+ /*
+ * \todo This might need to be expanded as few pipeline handlers
+ * report StreamFormats.
+ */
+ const std::vector<Size> &frameSizes = streamConfig_.formats().sizes(format);
+
+ if (arg->index >= frameSizes.size())
+ return -EINVAL;
+
+ arg->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ arg->discrete.width = frameSizes[arg->index].width;
+ arg->discrete.height = frameSizes[arg->index].height;
+ memset(arg->reserved, 0, sizeof(arg->reserved));
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_enum_fmt(V4L2CameraFile *file, struct v4l2_fmtdesc *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_enum_fmt";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
if (!validateBufferType(arg->type) ||
- arg->index > streamConfig_.formats().pixelformats().size())
+ arg->index >= streamConfig_.formats().pixelformats().size())
return -EINVAL;
- /* \todo Add map from format to description. */
- utils::strlcpy(reinterpret_cast<char *>(arg->description), "Video Format Description",
- sizeof(arg->description));
- arg->pixelformat = drmToV4L2(streamConfig_.formats().pixelformats()[arg->index]);
+ PixelFormat format = streamConfig_.formats().pixelformats()[arg->index];
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat::fromPixelFormat(format)[0];
+
+ arg->flags = format == formats::MJPEG ? V4L2_FMT_FLAG_COMPRESSED : 0;
+ utils::strlcpy(reinterpret_cast<char *>(arg->description),
+ v4l2Format.description(), sizeof(arg->description));
+ arg->pixelformat = v4l2Format;
+
+ memset(arg->reserved, 0, sizeof(arg->reserved));
return 0;
}
-int V4L2CameraProxy::vidioc_g_fmt(struct v4l2_format *arg)
+int V4L2CameraProxy::vidioc_g_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_g_fmt";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
if (!validateBufferType(arg->type))
return -EINVAL;
memset(&arg->fmt, 0, sizeof(arg->fmt));
- arg->fmt.pix = curV4L2Format_.fmt.pix;
+ arg->fmt.pix = v4l2PixFormat_;
return 0;
}
-void V4L2CameraProxy::tryFormat(struct v4l2_format *arg)
+int V4L2CameraProxy::tryFormat(struct v4l2_format *arg)
{
- PixelFormat format = v4l2ToDrm(arg->fmt.pix.pixelformat);
- const std::vector<PixelFormat> &formats =
- streamConfig_.formats().pixelformats();
- if (std::find(formats.begin(), formats.end(), format) == formats.end())
- format = streamConfig_.formats().pixelformats()[0];
-
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
+ PixelFormat format = v4l2Format.toPixelFormat();
Size size(arg->fmt.pix.width, arg->fmt.pix.height);
- const std::vector<Size> &sizes = streamConfig_.formats().sizes(format);
- if (std::find(sizes.begin(), sizes.end(), size) == sizes.end())
- size = streamConfig_.formats().sizes(format)[0];
- arg->fmt.pix.width = size.width;
- arg->fmt.pix.height = size.height;
- arg->fmt.pix.pixelformat = drmToV4L2(format);
+ StreamConfiguration config;
+ int ret = vcam_->validateConfiguration(format, size, &config);
+ if (ret < 0) {
+ LOG(V4L2Compat, Error)
+ << "Failed to negotiate a valid format: "
+ << format;
+ return -EINVAL;
+ }
+
+ arg->fmt.pix.width = config.size.width;
+ arg->fmt.pix.height = config.size.height;
+ arg->fmt.pix.pixelformat = V4L2PixelFormat::fromPixelFormat(config.pixelFormat)[0];
arg->fmt.pix.field = V4L2_FIELD_NONE;
- arg->fmt.pix.bytesperline = bplMultiplier(drmToV4L2(format)) *
- arg->fmt.pix.width;
- arg->fmt.pix.sizeimage = imageSize(drmToV4L2(format),
- arg->fmt.pix.width,
- arg->fmt.pix.height);
+ arg->fmt.pix.bytesperline = config.stride;
+ arg->fmt.pix.sizeimage = config.frameSize;
arg->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ arg->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ arg->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ arg->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ arg->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
}
-int V4L2CameraProxy::vidioc_s_fmt(struct v4l2_format *arg)
+int V4L2CameraProxy::vidioc_s_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_s_fmt";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
if (!validateBufferType(arg->type))
return -EINVAL;
- tryFormat(arg);
+ if (file->priority() < maxPriority())
+ return -EBUSY;
+
+ int ret = acquire(file);
+ if (ret < 0)
+ return ret;
+
+ ret = tryFormat(arg);
+ if (ret < 0)
+ return ret;
Size size(arg->fmt.pix.width, arg->fmt.pix.height);
- int ret = vcam_->configure(&streamConfig_, size,
- v4l2ToDrm(arg->fmt.pix.pixelformat),
- bufferCount_);
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
+ ret = vcam_->configure(&streamConfig_, size, v4l2Format.toPixelFormat(),
+ bufferCount_);
if (ret < 0)
return -EINVAL;
- unsigned int sizeimage = calculateSizeImage(streamConfig_);
- if (sizeimage == 0)
+ setFmtFromConfig(streamConfig_);
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_try_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!validateBufferType(arg->type))
return -EINVAL;
- sizeimage_ = sizeimage;
+ int ret = tryFormat(arg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
- setFmtFromConfig(streamConfig_);
+enum v4l2_priority V4L2CameraProxy::maxPriority()
+{
+ auto max = std::max_element(files_.begin(), files_.end(),
+ [](const V4L2CameraFile *a, const V4L2CameraFile *b) {
+ return a->priority() < b->priority();
+ });
+ return max != files_.end() ? (*max)->priority() : V4L2_PRIORITY_UNSET;
+}
+
+int V4L2CameraProxy::vidioc_g_priority(V4L2CameraFile *file, enum v4l2_priority *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ *arg = maxPriority();
return 0;
}
-int V4L2CameraProxy::vidioc_try_fmt(struct v4l2_format *arg)
+int V4L2CameraProxy::vidioc_s_priority(V4L2CameraFile *file, enum v4l2_priority *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_try_fmt";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
- if (!validateBufferType(arg->type))
+ if (*arg > V4L2_PRIORITY_RECORD)
return -EINVAL;
- tryFormat(arg);
+ if (file->priority() < maxPriority())
+ return -EBUSY;
+
+ file->setPriority(*arg);
return 0;
}
-int V4L2CameraProxy::freeBuffers()
+int V4L2CameraProxy::vidioc_enuminput(V4L2CameraFile *file, struct v4l2_input *arg)
{
- LOG(V4L2Compat, Debug) << "Freeing libcamera bufs";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
- int ret = vcam_->streamOff();
- if (ret < 0) {
- LOG(V4L2Compat, Error) << "Failed to stop stream";
- return ret;
- }
- vcam_->freeBuffers();
- bufferCount_ = 0;
+ if (arg->index != 0)
+ return -EINVAL;
+
+ memset(arg, 0, sizeof(*arg));
+
+ utils::strlcpy(reinterpret_cast<char *>(arg->name),
+ reinterpret_cast<char *>(capabilities_.card),
+ sizeof(arg->name));
+ arg->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
-int V4L2CameraProxy::vidioc_reqbufs(struct v4l2_requestbuffers *arg)
+int V4L2CameraProxy::vidioc_g_input(V4L2CameraFile *file, int *arg)
{
- int ret;
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
- LOG(V4L2Compat, Debug) << "Servicing vidioc_reqbufs";
+ *arg = 0;
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_s_input(V4L2CameraFile *file, int *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (*arg != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+void V4L2CameraProxy::freeBuffers()
+{
+ vcam_->freeBuffers();
+ buffers_.clear();
+ bufferCount_ = 0;
+}
+
+int V4L2CameraProxy::vidioc_reqbufs(V4L2CameraFile *file, struct v4l2_requestbuffers *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
if (!validateBufferType(arg->type) ||
!validateMemoryType(arg->memory))
@@ -338,20 +509,38 @@ int V4L2CameraProxy::vidioc_reqbufs(struct v4l2_requestbuffers *arg)
LOG(V4L2Compat, Debug) << arg->count << " buffers requested ";
+ if (file->priority() < maxPriority())
+ return -EBUSY;
+
+ if (!hasOwnership(file) && owner_)
+ return -EBUSY;
+
arg->capabilities = V4L2_BUF_CAP_SUPPORTS_MMAP;
+ arg->flags = 0;
+ memset(arg->reserved, 0, sizeof(arg->reserved));
- if (arg->count == 0)
- return freeBuffers();
+ if (arg->count == 0) {
+ /* \todo Add buffer orphaning support */
+ if (!mmaps_.empty())
+ return -EBUSY;
- Size size(curV4L2Format_.fmt.pix.width, curV4L2Format_.fmt.pix.height);
- ret = vcam_->configure(&streamConfig_, size,
- v4l2ToDrm(curV4L2Format_.fmt.pix.pixelformat),
- arg->count);
- if (ret < 0)
- return -EINVAL;
+ if (vcam_->isRunning())
+ return -EBUSY;
+
+ freeBuffers();
+ release(file);
- sizeimage_ = calculateSizeImage(streamConfig_);
- if (sizeimage_ == 0)
+ return 0;
+ }
+
+ if (bufferCount_ > 0)
+ freeBuffers();
+
+ Size size(v4l2PixFormat_.width, v4l2PixFormat_.height);
+ V4L2PixelFormat v4l2Format = V4L2PixelFormat(v4l2PixFormat_.pixelformat);
+ int ret = vcam_->configure(&streamConfig_, size,
+ v4l2Format.toPixelFormat(), arg->count);
+ if (ret < 0)
return -EINVAL;
setFmtFromConfig(streamConfig_);
@@ -369,22 +558,29 @@ int V4L2CameraProxy::vidioc_reqbufs(struct v4l2_requestbuffers *arg)
for (unsigned int i = 0; i < arg->count; i++) {
struct v4l2_buffer buf = {};
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.length = curV4L2Format_.fmt.pix.sizeimage;
+ buf.length = v4l2PixFormat_.sizeimage;
buf.memory = V4L2_MEMORY_MMAP;
- buf.m.offset = i * curV4L2Format_.fmt.pix.sizeimage;
+ buf.m.offset = i * v4l2PixFormat_.sizeimage;
buf.index = i;
+ buf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
buffers_[i] = buf;
}
LOG(V4L2Compat, Debug) << "Allocated " << arg->count << " buffers";
+ acquire(file);
+
return 0;
}
-int V4L2CameraProxy::vidioc_querybuf(struct v4l2_buffer *arg)
+int V4L2CameraProxy::vidioc_querybuf(V4L2CameraFile *file, struct v4l2_buffer *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_querybuf";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
if (!validateBufferType(arg->type) ||
arg->index >= bufferCount_)
@@ -397,10 +593,52 @@ int V4L2CameraProxy::vidioc_querybuf(struct v4l2_buffer *arg)
return 0;
}
-int V4L2CameraProxy::vidioc_qbuf(struct v4l2_buffer *arg)
+int V4L2CameraProxy::vidioc_prepare_buf(V4L2CameraFile *file, struct v4l2_buffer *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_qbuf, index = "
- << arg->index;
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__
+ << "(index=" << arg->index << ")";
+
+ if (!hasOwnership(file))
+ return -EBUSY;
+
+ if (arg->index >= bufferCount_)
+ return -EINVAL;
+
+ if (arg->flags & V4L2_BUF_FLAG_REQUEST_FD)
+ return -EINVAL;
+
+ if (!validateBufferType(arg->type) ||
+ !validateMemoryType(arg->memory))
+ return -EINVAL;
+
+ struct v4l2_buffer &buffer = buffers_[arg->index];
+
+ if (buffer.flags & V4L2_BUF_FLAG_QUEUED ||
+ buffer.flags & V4L2_BUF_FLAG_PREPARED)
+ return -EINVAL;
+
+ buffer.flags |= V4L2_BUF_FLAG_PREPARED;
+
+ arg->flags = buffer.flags;
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_qbuf(V4L2CameraFile *file, struct v4l2_buffer *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__
+ << "(index=" << arg->index << ")";
+
+ if (arg->index >= bufferCount_)
+ return -EINVAL;
+
+ if (buffers_[arg->index].flags & V4L2_BUF_FLAG_QUEUED)
+ return -EINVAL;
+
+ if (!hasOwnership(file))
+ return -EBUSY;
if (!validateBufferType(arg->type) ||
!validateMemoryType(arg->memory) ||
@@ -411,55 +649,130 @@ int V4L2CameraProxy::vidioc_qbuf(struct v4l2_buffer *arg)
if (ret < 0)
return ret;
- arg->flags |= V4L2_BUF_FLAG_QUEUED;
- arg->flags &= ~V4L2_BUF_FLAG_DONE;
+ buffers_[arg->index].flags |= V4L2_BUF_FLAG_QUEUED;
+
+ arg->flags = buffers_[arg->index].flags;
return ret;
}
-int V4L2CameraProxy::vidioc_dqbuf(struct v4l2_buffer *arg)
+int V4L2CameraProxy::vidioc_dqbuf(V4L2CameraFile *file, struct v4l2_buffer *arg,
+ Mutex *lock)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_dqbuf";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (arg->index >= bufferCount_)
+ return -EINVAL;
+
+ if (!hasOwnership(file))
+ return -EBUSY;
+
+ if (!vcam_->isRunning())
+ return -EINVAL;
if (!validateBufferType(arg->type) ||
!validateMemoryType(arg->memory))
return -EINVAL;
- if (nonBlocking_ && !vcam_->bufferSema_.tryAcquire())
+ if (!file->nonBlocking()) {
+ lock->unlock();
+ vcam_->waitForBufferAvailable();
+ lock->lock();
+ } else if (!vcam_->isBufferAvailable())
return -EAGAIN;
- else
- vcam_->bufferSema_.acquire();
+
+ /*
+	 * We need to check again here, in case the stream was turned off while
+	 * we were blocked on waitForBufferAvailable().
+ */
+ if (!vcam_->isRunning())
+ return -EINVAL;
updateBuffers();
struct v4l2_buffer &buf = buffers_[currentBuf_];
- buf.flags &= ~V4L2_BUF_FLAG_QUEUED;
+ buf.flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_PREPARED);
buf.length = sizeimage_;
*arg = buf;
currentBuf_ = (currentBuf_ + 1) % bufferCount_;
+ uint64_t data;
+ int ret = ::read(file->efd(), &data, sizeof(data));
+ if (ret != sizeof(data))
+ LOG(V4L2Compat, Error) << "Failed to clear eventfd POLLIN";
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_expbuf(V4L2CameraFile *file, struct v4l2_exportbuffer *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!hasOwnership(file))
+ return -EBUSY;
+
+ /* \todo Verify that the memory type is MMAP when adding DMABUF support */
+ if (!validateBufferType(arg->type))
+ return -EINVAL;
+
+ if (arg->index >= bufferCount_)
+ return -EINVAL;
+
+ if (arg->flags & ~(O_CLOEXEC | O_ACCMODE))
+ return -EINVAL;
+
+ memset(arg->reserved, 0, sizeof(arg->reserved));
+
+ /* \todo honor the O_ACCMODE flags passed to this function */
+ arg->fd = fcntl(vcam_->getBufferFd(arg->index),
+ arg->flags & O_CLOEXEC ? F_DUPFD_CLOEXEC : F_DUPFD, 0);
+
return 0;
}
-int V4L2CameraProxy::vidioc_streamon(int *arg)
+int V4L2CameraProxy::vidioc_streamon(V4L2CameraFile *file, int *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_streamon";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (bufferCount_ == 0)
+ return -EINVAL;
if (!validateBufferType(*arg))
return -EINVAL;
+ if (file->priority() < maxPriority())
+ return -EBUSY;
+
+ if (!hasOwnership(file))
+ return -EBUSY;
+
+ if (vcam_->isRunning())
+ return 0;
+
+ currentBuf_ = 0;
+
return vcam_->streamOn();
}
-int V4L2CameraProxy::vidioc_streamoff(int *arg)
+int V4L2CameraProxy::vidioc_streamoff(V4L2CameraFile *file, int *arg)
{
- LOG(V4L2Compat, Debug) << "Servicing vidioc_streamoff";
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
if (!validateBufferType(*arg))
return -EINVAL;
+ if (file->priority() < maxPriority())
+ return -EBUSY;
+
+ if (!hasOwnership(file) && owner_)
+ return -EBUSY;
+
int ret = vcam_->streamOff();
for (struct v4l2_buffer &buf : buffers_)
@@ -468,42 +781,169 @@ int V4L2CameraProxy::vidioc_streamoff(int *arg)
return ret;
}
-int V4L2CameraProxy::ioctl(unsigned long request, void *arg)
+int V4L2CameraProxy::vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
+{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!validateBufferType(arg->type))
+ return -EINVAL;
+
+ memset(&arg->parm, 0, sizeof(arg->parm));
+
+ arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+ return 0;
+}
+
+int V4L2CameraProxy::vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
{
+ LOG(V4L2Compat, Debug)
+ << "[" << file->description() << "] " << __func__ << "()";
+
+ if (!validateBufferType(arg->type))
+ return -EINVAL;
+
+ /*
+ * Store the frame duration if it is valid, otherwise keep the current
+ * value.
+ *
+ * \todo The provided value should be adjusted based on the camera
+ * capabilities.
+ */
+ if (arg->parm.capture.timeperframe.numerator &&
+ arg->parm.capture.timeperframe.denominator)
+ v4l2TimePerFrame_ = arg->parm.capture.timeperframe;
+
+ memset(&arg->parm, 0, sizeof(arg->parm));
+
+ arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+ /* Apply the frame duration. */
+ utils::Duration frameDuration = 1.0s * v4l2TimePerFrame_.numerator
+ / v4l2TimePerFrame_.denominator;
+ int64_t uDuration = frameDuration.get<std::micro>();
+ vcam_->controls().set(controls::FrameDurationLimits, { uDuration, uDuration });
+
+ return 0;
+}
+
+const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
+ VIDIOC_QUERYCAP,
+ VIDIOC_ENUM_FRAMESIZES,
+ VIDIOC_ENUM_FMT,
+ VIDIOC_G_FMT,
+ VIDIOC_S_FMT,
+ VIDIOC_TRY_FMT,
+ VIDIOC_G_PRIORITY,
+ VIDIOC_S_PRIORITY,
+ VIDIOC_ENUMINPUT,
+ VIDIOC_G_INPUT,
+ VIDIOC_S_INPUT,
+ VIDIOC_REQBUFS,
+ VIDIOC_QUERYBUF,
+ VIDIOC_PREPARE_BUF,
+ VIDIOC_QBUF,
+ VIDIOC_DQBUF,
+ VIDIOC_EXPBUF,
+ VIDIOC_STREAMON,
+ VIDIOC_STREAMOFF,
+ VIDIOC_G_PARM,
+ VIDIOC_S_PARM,
+};
+
+int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void *arg)
+{
+ MutexLocker locker(proxyMutex_);
+
+ /*
+ * The Linux Kernel only processes 32 bits of an IOCTL.
+ *
+	 * Prevent the unexpected sign-extension that would occur if an
+	 * application passed a signed int as the ioctl request: on 64-bit
+	 * architectures it would sign-extend to an incorrect unsigned long
+	 * value. Explicitly casting to an unsigned int here truncates the
+	 * request to its lower 32 bits.
+ */
+ unsigned int request = longRequest;
+
+ if (!arg && (_IOC_DIR(request) & _IOC_WRITE)) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (supportedIoctls_.find(request) == supportedIoctls_.end()) {
+ errno = ENOTTY;
+ return -1;
+ }
+
+ if (!arg && (_IOC_DIR(request) & _IOC_READ)) {
+ errno = EFAULT;
+ return -1;
+ }
+
int ret;
switch (request) {
case VIDIOC_QUERYCAP:
- ret = vidioc_querycap(static_cast<struct v4l2_capability *>(arg));
+ ret = vidioc_querycap(file, static_cast<struct v4l2_capability *>(arg));
+ break;
+ case VIDIOC_ENUM_FRAMESIZES:
+ ret = vidioc_enum_framesizes(file, static_cast<struct v4l2_frmsizeenum *>(arg));
break;
case VIDIOC_ENUM_FMT:
- ret = vidioc_enum_fmt(static_cast<struct v4l2_fmtdesc *>(arg));
+ ret = vidioc_enum_fmt(file, static_cast<struct v4l2_fmtdesc *>(arg));
break;
case VIDIOC_G_FMT:
- ret = vidioc_g_fmt(static_cast<struct v4l2_format *>(arg));
+ ret = vidioc_g_fmt(file, static_cast<struct v4l2_format *>(arg));
break;
case VIDIOC_S_FMT:
- ret = vidioc_s_fmt(static_cast<struct v4l2_format *>(arg));
+ ret = vidioc_s_fmt(file, static_cast<struct v4l2_format *>(arg));
break;
case VIDIOC_TRY_FMT:
- ret = vidioc_try_fmt(static_cast<struct v4l2_format *>(arg));
+ ret = vidioc_try_fmt(file, static_cast<struct v4l2_format *>(arg));
+ break;
+ case VIDIOC_G_PRIORITY:
+ ret = vidioc_g_priority(file, static_cast<enum v4l2_priority *>(arg));
+ break;
+ case VIDIOC_S_PRIORITY:
+ ret = vidioc_s_priority(file, static_cast<enum v4l2_priority *>(arg));
+ break;
+ case VIDIOC_ENUMINPUT:
+ ret = vidioc_enuminput(file, static_cast<struct v4l2_input *>(arg));
+ break;
+ case VIDIOC_G_INPUT:
+ ret = vidioc_g_input(file, static_cast<int *>(arg));
+ break;
+ case VIDIOC_S_INPUT:
+ ret = vidioc_s_input(file, static_cast<int *>(arg));
break;
case VIDIOC_REQBUFS:
- ret = vidioc_reqbufs(static_cast<struct v4l2_requestbuffers *>(arg));
+ ret = vidioc_reqbufs(file, static_cast<struct v4l2_requestbuffers *>(arg));
break;
case VIDIOC_QUERYBUF:
- ret = vidioc_querybuf(static_cast<struct v4l2_buffer *>(arg));
+ ret = vidioc_querybuf(file, static_cast<struct v4l2_buffer *>(arg));
break;
case VIDIOC_QBUF:
- ret = vidioc_qbuf(static_cast<struct v4l2_buffer *>(arg));
+ ret = vidioc_qbuf(file, static_cast<struct v4l2_buffer *>(arg));
break;
case VIDIOC_DQBUF:
- ret = vidioc_dqbuf(static_cast<struct v4l2_buffer *>(arg));
+ ret = vidioc_dqbuf(file, static_cast<struct v4l2_buffer *>(arg), &proxyMutex_);
+ break;
+ case VIDIOC_EXPBUF:
+ ret = vidioc_expbuf(file, static_cast<struct v4l2_exportbuffer *>(arg));
break;
case VIDIOC_STREAMON:
- ret = vidioc_streamon(static_cast<int *>(arg));
+ ret = vidioc_streamon(file, static_cast<int *>(arg));
break;
case VIDIOC_STREAMOFF:
- ret = vidioc_streamoff(static_cast<int *>(arg));
+ ret = vidioc_streamoff(file, static_cast<int *>(arg));
+ break;
+ case VIDIOC_G_PARM:
+ ret = vidioc_g_parm(file, static_cast<struct v4l2_streamparm *>(arg));
+ break;
+ case VIDIOC_S_PARM:
+ ret = vidioc_s_parm(file, static_cast<struct v4l2_streamparm *>(arg));
break;
default:
ret = -ENOTTY;
@@ -518,94 +958,43 @@ int V4L2CameraProxy::ioctl(unsigned long request, void *arg)
return ret;
}
-struct PixelFormatPlaneInfo {
- unsigned int bitsPerPixel;
- unsigned int hSubSampling;
- unsigned int vSubSampling;
-};
-
-struct PixelFormatInfo {
- PixelFormat format;
- uint32_t v4l2Format;
- unsigned int numPlanes;
- std::array<PixelFormatPlaneInfo, 3> planes;
-};
-
-namespace {
-
-static const std::array<PixelFormatInfo, 13> pixelFormatInfo = {{
- /* RGB formats. */
- { PixelFormat(DRM_FORMAT_RGB888), V4L2_PIX_FMT_BGR24, 1, {{ { 24, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_BGR888), V4L2_PIX_FMT_RGB24, 1, {{ { 24, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_BGRA8888), V4L2_PIX_FMT_ARGB32, 1, {{ { 32, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- /* YUV packed formats. */
- { PixelFormat(DRM_FORMAT_UYVY), V4L2_PIX_FMT_UYVY, 1, {{ { 16, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_VYUY), V4L2_PIX_FMT_VYUY, 1, {{ { 16, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_YUYV), V4L2_PIX_FMT_YUYV, 1, {{ { 16, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_YVYU), V4L2_PIX_FMT_YVYU, 1, {{ { 16, 1, 1 }, { 0, 0, 0 }, { 0, 0, 0 } }} },
- /* YUY planar formats. */
- { PixelFormat(DRM_FORMAT_NV12), V4L2_PIX_FMT_NV12, 2, {{ { 8, 1, 1 }, { 16, 2, 2 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_NV21), V4L2_PIX_FMT_NV21, 2, {{ { 8, 1, 1 }, { 16, 2, 2 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_NV16), V4L2_PIX_FMT_NV16, 2, {{ { 8, 1, 1 }, { 16, 2, 1 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_NV61), V4L2_PIX_FMT_NV61, 2, {{ { 8, 1, 1 }, { 16, 2, 1 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_NV24), V4L2_PIX_FMT_NV24, 2, {{ { 8, 1, 1 }, { 16, 2, 1 }, { 0, 0, 0 } }} },
- { PixelFormat(DRM_FORMAT_NV42), V4L2_PIX_FMT_NV42, 2, {{ { 8, 1, 1 }, { 16, 1, 1 }, { 0, 0, 0 } }} },
-}};
-
-} /* namespace */
-
-/* \todo make libcamera export these */
-unsigned int V4L2CameraProxy::bplMultiplier(uint32_t format)
-{
- auto info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](const PixelFormatInfo &info) {
- return info.v4l2Format == format;
- });
- if (info == pixelFormatInfo.end())
- return 0;
-
- return info->planes[0].bitsPerPixel / 8;
+bool V4L2CameraProxy::hasOwnership(V4L2CameraFile *file)
+{
+ return owner_ == file;
}
-unsigned int V4L2CameraProxy::imageSize(uint32_t format, unsigned int width,
- unsigned int height)
+/**
+ * \brief Acquire exclusive ownership of the V4L2Camera
+ *
+ * \return Zero on success or if ownership is already held, or a negative
+ * error code on failure
+ *
+ * This is sufficient for poll()ing for buffers. Events, however, are signaled
+ * on the file level, so all fds must be signaled. poll()ing from a different
+ * fd than the one that locks the device is a corner case, and is currently not
+ * supported.
+ */
+int V4L2CameraProxy::acquire(V4L2CameraFile *file)
{
- auto info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](const PixelFormatInfo &info) {
- return info.v4l2Format == format;
- });
- if (info == pixelFormatInfo.end())
+ if (owner_ == file)
return 0;
- unsigned int multiplier = 0;
- for (unsigned int i = 0; i < info->numPlanes; ++i)
- multiplier += info->planes[i].bitsPerPixel
- / info->planes[i].hSubSampling
- / info->planes[i].vSubSampling;
+ if (owner_)
+ return -EBUSY;
- return width * height * multiplier / 8;
-}
+ vcam_->bind(file->efd());
-PixelFormat V4L2CameraProxy::v4l2ToDrm(uint32_t format)
-{
- auto info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](const PixelFormatInfo &info) {
- return info.v4l2Format == format;
- });
- if (info == pixelFormatInfo.end())
- return PixelFormat();
+ owner_ = file;
- return info->format;
+ return 0;
}
-uint32_t V4L2CameraProxy::drmToV4L2(const PixelFormat &format)
+void V4L2CameraProxy::release(V4L2CameraFile *file)
{
- auto info = std::find_if(pixelFormatInfo.begin(), pixelFormatInfo.end(),
- [format](const PixelFormatInfo &info) {
- return info.format == format;
- });
- if (info == pixelFormatInfo.end())
- return format;
+ if (owner_ != file)
+ return;
+
+ vcam_->unbind();
- return info->v4l2Format;
+ owner_ = nullptr;
}
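
The acquire()/release() pair above yields the following lifecycle, as seen by
two clients sharing one camera (an illustrative sketch, fdA and fdB being two
open file descriptors on the same device node):

	#include <sys/ioctl.h>

	#include <linux/videodev2.h>

	void ownershipSketch(int fdA, int fdB)
	{
		struct v4l2_requestbuffers req = {};
		req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		req.memory = V4L2_MEMORY_MMAP;

		req.count = 4;
		ioctl(fdA, VIDIOC_REQBUFS, &req);	/* fdA becomes the owner (acquire()) */
		ioctl(fdB, VIDIOC_REQBUFS, &req);	/* fails with EBUSY while fdA owns the proxy */

		struct v4l2_buffer buf = {};
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fdB, VIDIOC_QUERYBUF, &buf);	/* allowed, querybuf is exempt */

		req.count = 0;
		ioctl(fdA, VIDIOC_REQBUFS, &req);	/* frees the buffers, release()s ownership */

		req.count = 4;
		ioctl(fdB, VIDIOC_REQBUFS, &req);	/* now succeeds, fdB becomes the owner */
	}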
diff --git a/src/v4l2/v4l2_camera_proxy.h b/src/v4l2/v4l2_camera_proxy.h
index e15b230d..5aa352c3 100644
--- a/src/v4l2/v4l2_camera_proxy.h
+++ b/src/v4l2/v4l2_camera_proxy.h
@@ -1,82 +1,111 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_camera_proxy.h - Proxy to V4L2 compatibility camera
+ * Proxy to V4L2 compatibility camera
*/
-#ifndef __V4L2_CAMERA_PROXY_H__
-#define __V4L2_CAMERA_PROXY_H__
+#pragma once
#include <linux/videodev2.h>
#include <map>
#include <memory>
+#include <set>
#include <sys/types.h>
#include <vector>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera.h>
#include "v4l2_camera.h"
-using namespace libcamera;
+class V4L2CameraFile;
class V4L2CameraProxy
{
public:
- V4L2CameraProxy(unsigned int index, std::shared_ptr<Camera> camera);
+ V4L2CameraProxy(unsigned int index, std::shared_ptr<libcamera::Camera> camera);
- int open(bool nonBlocking);
- void dup();
- void close();
- void *mmap(void *addr, size_t length, int prot, int flags, off_t offset);
- int munmap(void *addr, size_t length);
+ int open(V4L2CameraFile *file) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
+ void close(V4L2CameraFile *file) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
+ void *mmap(V4L2CameraFile *file, void *addr, size_t length, int prot,
+ int flags, off64_t offset) LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
+ int munmap(V4L2CameraFile *file, void *addr, size_t length)
+ LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
- int ioctl(unsigned long request, void *arg);
+ int ioctl(V4L2CameraFile *file, unsigned long request, void *arg)
+ LIBCAMERA_TSA_EXCLUDES(proxyMutex_);
private:
bool validateBufferType(uint32_t type);
bool validateMemoryType(uint32_t memory);
- void setFmtFromConfig(StreamConfiguration &streamConfig);
- unsigned int calculateSizeImage(StreamConfiguration &streamConfig);
- void querycap(std::shared_ptr<Camera> camera);
- void tryFormat(struct v4l2_format *arg);
+ void setFmtFromConfig(const libcamera::StreamConfiguration &streamConfig);
+ void querycap(std::shared_ptr<libcamera::Camera> camera);
+ int tryFormat(struct v4l2_format *arg);
+ enum v4l2_priority maxPriority();
void updateBuffers();
- int freeBuffers();
-
- int vidioc_querycap(struct v4l2_capability *arg);
- int vidioc_enum_fmt(struct v4l2_fmtdesc *arg);
- int vidioc_g_fmt(struct v4l2_format *arg);
- int vidioc_s_fmt(struct v4l2_format *arg);
- int vidioc_try_fmt(struct v4l2_format *arg);
- int vidioc_reqbufs(struct v4l2_requestbuffers *arg);
- int vidioc_querybuf(struct v4l2_buffer *arg);
- int vidioc_qbuf(struct v4l2_buffer *arg);
- int vidioc_dqbuf(struct v4l2_buffer *arg);
- int vidioc_streamon(int *arg);
- int vidioc_streamoff(int *arg);
-
- static unsigned int bplMultiplier(uint32_t format);
- static unsigned int imageSize(uint32_t format, unsigned int width,
- unsigned int height);
-
- static PixelFormat v4l2ToDrm(uint32_t format);
- static uint32_t drmToV4L2(const PixelFormat &format);
+ void freeBuffers();
+
+ int vidioc_querycap(V4L2CameraFile *file, struct v4l2_capability *arg);
+ int vidioc_enum_framesizes(V4L2CameraFile *file, struct v4l2_frmsizeenum *arg);
+ int vidioc_enum_fmt(V4L2CameraFile *file, struct v4l2_fmtdesc *arg);
+ int vidioc_g_fmt(V4L2CameraFile *file, struct v4l2_format *arg);
+ int vidioc_s_fmt(V4L2CameraFile *file, struct v4l2_format *arg);
+ int vidioc_try_fmt(V4L2CameraFile *file, struct v4l2_format *arg);
+ int vidioc_g_priority(V4L2CameraFile *file, enum v4l2_priority *arg);
+ int vidioc_s_priority(V4L2CameraFile *file, enum v4l2_priority *arg);
+ int vidioc_enuminput(V4L2CameraFile *file, struct v4l2_input *arg);
+ int vidioc_g_input(V4L2CameraFile *file, int *arg);
+ int vidioc_s_input(V4L2CameraFile *file, int *arg);
+ int vidioc_reqbufs(V4L2CameraFile *file, struct v4l2_requestbuffers *arg);
+ int vidioc_querybuf(V4L2CameraFile *file, struct v4l2_buffer *arg);
+ int vidioc_prepare_buf(V4L2CameraFile *file, struct v4l2_buffer *arg);
+ int vidioc_qbuf(V4L2CameraFile *file, struct v4l2_buffer *arg);
+ int vidioc_dqbuf(V4L2CameraFile *file, struct v4l2_buffer *arg,
+ libcamera::Mutex *lock) LIBCAMERA_TSA_REQUIRES(*lock);
+ int vidioc_expbuf(V4L2CameraFile *file, struct v4l2_exportbuffer *arg);
+ int vidioc_streamon(V4L2CameraFile *file, int *arg);
+ int vidioc_streamoff(V4L2CameraFile *file, int *arg);
+ int vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
+ int vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
+
+ bool hasOwnership(V4L2CameraFile *file);
+ int acquire(V4L2CameraFile *file);
+ void release(V4L2CameraFile *file);
+
+ static const std::set<unsigned long> supportedIoctls_;
unsigned int refcount_;
unsigned int index_;
- bool nonBlocking_;
- struct v4l2_format curV4L2Format_;
- StreamConfiguration streamConfig_;
- struct v4l2_capability capabilities_;
+ libcamera::StreamConfiguration streamConfig_;
unsigned int bufferCount_;
unsigned int currentBuf_;
unsigned int sizeimage_;
+ struct v4l2_capability capabilities_;
+ struct v4l2_pix_format v4l2PixFormat_;
+ struct v4l2_fract v4l2TimePerFrame_;
+
std::vector<struct v4l2_buffer> buffers_;
std::map<void *, unsigned int> mmaps_;
+ std::set<V4L2CameraFile *> files_;
+
std::unique_ptr<V4L2Camera> vcam_;
-};
-#endif /* __V4L2_CAMERA_PROXY_H__ */
+ /*
+ * This is the exclusive owner of this V4L2CameraProxy instance.
+ * When there is no owner, anybody can call any ioctl before reqbufs.
+ * The first file to call reqbufs with count > 0 or s_fmt will become
+ * the owner, and when the owner calls reqbufs with count = 0 it will
+ * release ownership. Any buffer-related ioctl (except querybuf) or
+ * s_fmt that is called by a non-owner while there exists an owner
+ * will return -EBUSY.
+ */
+ V4L2CameraFile *owner_;
+
+	/* This mutex serializes access to the proxy. */
+ libcamera::Mutex proxyMutex_;
+};
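
The LIBCAMERA_TSA_* annotations used throughout this header wrap Clang's
thread safety attributes (guarded_by, locks_excluded and friends, as provided
by libcamera/base/thread_annotations.h). A minimal sketch of what the analysis
enforces when building with clang and -Wthread-safety:

	#include <libcamera/base/mutex.h>

	class Example
	{
	public:
		void good()
		{
			libcamera::MutexLocker locker(mutex_);
			value_ = 1;	/* OK, mutex_ is held across the write */
		}

		void bad()
		{
			value_ = 1;	/* warning: writing value_ requires holding mutex_ */
		}

	private:
		libcamera::Mutex mutex_;
		int value_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = 0;
	};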
diff --git a/src/v4l2/v4l2_compat.cpp b/src/v4l2/v4l2_compat.cpp
index a162037f..ff833f57 100644
--- a/src/v4l2/v4l2_compat.cpp
+++ b/src/v4l2/v4l2_compat.cpp
@@ -1,18 +1,22 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat.cpp - V4L2 compatibility layer
+ * V4L2 compatibility layer
*/
#include "v4l2_compat_manager.h"
-#include <errno.h>
+#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
+
+#include <libcamera/base/utils.h>
#define LIBCAMERA_PUBLIC __attribute__((visibility("default")))
@@ -26,38 +30,98 @@ using namespace libcamera;
va_end(ap); \
}
+namespace {
+
+/*
+ * Determine if the flags require a further mode argument that needs to be
+ * parsed from va_args.
+ */
+bool needs_mode(int flags)
+{
+ return (flags & O_CREAT) || ((flags & O_TMPFILE) == O_TMPFILE);
+}
+
+} /* namespace */
+
extern "C" {
LIBCAMERA_PUBLIC int open(const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
oflag, mode);
}
-/* _FORTIFY_SOURCE redirects open to __open_2 */
-LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
+#ifndef open64
+LIBCAMERA_PUBLIC int open64(const char *path, int oflag, ...)
{
- return open(path, oflag);
+ mode_t mode = 0;
+ if (needs_mode(oflag))
+ extract_va_arg(mode_t, mode, oflag);
+
+ return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
+ oflag | O_LARGEFILE, mode);
}
+#endif
LIBCAMERA_PUBLIC int openat(int dirfd, const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(dirfd, path, oflag, mode);
}
+#ifndef openat64
+LIBCAMERA_PUBLIC int openat64(int dirfd, const char *path, int oflag, ...)
+{
+ mode_t mode = 0;
+ if (needs_mode(oflag))
+ extract_va_arg(mode_t, mode, oflag);
+
+ return V4L2CompatManager::instance()->openat(dirfd, path,
+ oflag | O_LARGEFILE, mode);
+}
+#endif
+
+/*
+ * _FORTIFY_SOURCE redirects open* to __open*_2. Disable the
+ * -Wmissing-declarations warnings, as the functions won't be declared if
+ * _FORTIFY_SOURCE is not in use.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-declarations"
+
+LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return open(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return open64(path, oflag);
+}
+
LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
{
+ assert(!needs_mode(oflag));
return openat(dirfd, path, oflag);
}
+LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return openat64(dirfd, path, oflag);
+}
+
+#pragma GCC diagnostic pop
+
LIBCAMERA_PUBLIC int dup(int oldfd)
{
return V4L2CompatManager::instance()->dup(oldfd);
@@ -75,12 +139,25 @@ LIBCAMERA_PUBLIC void *mmap(void *addr, size_t length, int prot, int flags,
fd, offset);
}
+#ifndef mmap64
+LIBCAMERA_PUBLIC void *mmap64(void *addr, size_t length, int prot, int flags,
+ int fd, off64_t offset)
+{
+ return V4L2CompatManager::instance()->mmap(addr, length, prot, flags,
+ fd, offset);
+}
+#endif
+
LIBCAMERA_PUBLIC int munmap(void *addr, size_t length)
{
return V4L2CompatManager::instance()->munmap(addr, length);
}
+#if HAVE_POSIX_IOCTL
+LIBCAMERA_PUBLIC int ioctl(int fd, int request, ...)
+#else
LIBCAMERA_PUBLIC int ioctl(int fd, unsigned long request, ...)
+#endif
{
void *arg;
extract_va_arg(void *, arg, request);
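
A subtlety in the open() family changes above: needs_mode() compares
(flags & O_TMPFILE) == O_TMPFILE instead of testing a single bit because Linux
defines O_TMPFILE as (__O_TMPFILE | O_DIRECTORY). A plain bitwise AND would
therefore also match a directory open that carries no mode argument:

	#include <fcntl.h>
	#include <unistd.h>

	void needsModeSketch()
	{
		/* No mode follows in the va_args: needs_mode() must return false. */
		int dir = open("/tmp", O_RDONLY | O_DIRECTORY);

		/* A mode follows: needs_mode() must return true. */
		int tmp = open("/tmp", O_TMPFILE | O_RDWR, 0600);

		close(dir);
		close(tmp);
	}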
diff --git a/src/v4l2/v4l2_compat_manager.cpp b/src/v4l2/v4l2_compat_manager.cpp
index 961d06b3..f53fb300 100644
--- a/src/v4l2/v4l2_compat_manager.cpp
+++ b/src/v4l2/v4l2_compat_manager.cpp
@@ -1,8 +1,8 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.cpp - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
#include "v4l2_compat_manager.h"
@@ -10,7 +10,6 @@
#include <dlfcn.h>
#include <fcntl.h>
#include <map>
-#include <stdarg.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
@@ -19,10 +18,14 @@
#include <sys/types.h>
#include <unistd.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
+#include <libcamera/property_ids.h>
-#include "log.h"
+#include "v4l2_camera_file.h"
using namespace libcamera;
@@ -39,17 +42,17 @@ void get_symbol(T &func, const char *name)
V4L2CompatManager::V4L2CompatManager()
: cm_(nullptr)
{
- get_symbol(fops_.openat, "openat");
+ get_symbol(fops_.openat, "openat64");
get_symbol(fops_.dup, "dup");
get_symbol(fops_.close, "close");
get_symbol(fops_.ioctl, "ioctl");
- get_symbol(fops_.mmap, "mmap");
+ get_symbol(fops_.mmap, "mmap64");
get_symbol(fops_.munmap, "munmap");
}
V4L2CompatManager::~V4L2CompatManager()
{
- devices_.clear();
+ files_.clear();
mmaps_.clear();
if (cm_) {
@@ -79,11 +82,10 @@ int V4L2CompatManager::start()
* For each Camera registered in the system, a V4L2CameraProxy gets
* created here to wrap a camera device.
*/
- unsigned int index = 0;
- for (auto &camera : cm_->cameras()) {
+ auto cameras = cm_->cameras();
+ for (auto [index, camera] : utils::enumerate(cameras)) {
V4L2CameraProxy *proxy = new V4L2CameraProxy(index, camera);
proxies_.emplace_back(proxy);
- ++index;
}
return 0;
@@ -95,13 +97,13 @@ V4L2CompatManager *V4L2CompatManager::instance()
return &instance;
}
-V4L2CameraProxy *V4L2CompatManager::getProxy(int fd)
+std::shared_ptr<V4L2CameraFile> V4L2CompatManager::cameraFile(int fd)
{
- auto device = devices_.find(fd);
- if (device == devices_.end())
+ auto file = files_.find(fd);
+ if (file == files_.end())
return nullptr;
- return device->second;
+ return file->second;
}
int V4L2CompatManager::getCameraIndex(int fd)
@@ -111,15 +113,35 @@ int V4L2CompatManager::getCameraIndex(int fd)
if (ret < 0)
return -1;
- std::shared_ptr<Camera> target = cm_->get(statbuf.st_rdev);
- if (!target)
- return -1;
+ const dev_t devnum = statbuf.st_rdev;
- unsigned int index = 0;
- for (auto &camera : cm_->cameras()) {
- if (camera == target)
- return index;
- ++index;
+ /*
+	 * Iterate over each known camera and identify whether it reports this
+	 * node's device number in its list of SystemDevices.
+ */
+ auto cameras = cm_->cameras();
+ for (auto [index, camera] : utils::enumerate(cameras)) {
+ Span<const int64_t> devices = camera->properties()
+ .get(properties::SystemDevices)
+ .value_or(Span<int64_t>{});
+
+ /*
+ * While there may be multiple cameras that could reference the
+ * same device node, we take a first match as a best effort for
+ * now.
+ *
+ * \todo Each camera can be accessed through any of the video
+ * device nodes that it uses. This may confuse applications.
+ * Consider reworking the V4L2 adaptation layer to instead
+ * expose each Camera instance through a single video device
+ * node (with a consistent and stable mapping). The other
+ * device nodes could possibly be hidden from the application
+ * by intercepting additional calls to the C library.
+ */
+ for (const int64_t dev : devices) {
+ if (dev == static_cast<int64_t>(devnum))
+ return index;
+ }
}
return -1;
@@ -142,27 +164,24 @@ int V4L2CompatManager::openat(int dirfd, const char *path, int oflag, mode_t mod
ret = getCameraIndex(fd);
if (ret < 0) {
- LOG(V4L2Compat, Info) << "No camera found for " << path;
+ LOG(V4L2Compat, Debug) << "No camera found for " << path;
return fd;
}
fops_.close(fd);
- unsigned int camera_index = static_cast<unsigned int>(ret);
-
- V4L2CameraProxy *proxy = proxies_[camera_index].get();
- ret = proxy->open(oflag & O_NONBLOCK);
- if (ret < 0)
- return ret;
-
- int efd = eventfd(0, oflag & (O_CLOEXEC | O_NONBLOCK));
- if (efd < 0) {
- proxy->close();
+ int efd = eventfd(0, EFD_SEMAPHORE |
+ ((oflag & O_CLOEXEC) ? EFD_CLOEXEC : 0) |
+ ((oflag & O_NONBLOCK) ? EFD_NONBLOCK : 0));
+ if (efd < 0)
return efd;
- }
- devices_.emplace(efd, proxy);
+ V4L2CameraProxy *proxy = proxies_[ret].get();
+ files_.emplace(efd, std::make_shared<V4L2CameraFile>(dirfd, path, efd,
+ oflag & O_NONBLOCK,
+ proxy));
+ LOG(V4L2Compat, Debug) << "Opened " << path << " -> fd " << efd;
return efd;
}
@@ -172,40 +191,36 @@ int V4L2CompatManager::dup(int oldfd)
if (newfd < 0)
return newfd;
- auto device = devices_.find(oldfd);
- if (device != devices_.end()) {
- V4L2CameraProxy *proxy = device->second;
- devices_[newfd] = proxy;
- proxy->dup();
- }
+ auto file = files_.find(oldfd);
+ if (file != files_.end())
+ files_[newfd] = file->second;
return newfd;
}
int V4L2CompatManager::close(int fd)
{
- V4L2CameraProxy *proxy = getProxy(fd);
- if (proxy) {
- proxy->close();
- devices_.erase(fd);
- return 0;
- }
+ auto file = files_.find(fd);
+ if (file != files_.end())
+ files_.erase(file);
+ /* We still need to close the eventfd. */
return fops_.close(fd);
}
void *V4L2CompatManager::mmap(void *addr, size_t length, int prot, int flags,
- int fd, off_t offset)
+ int fd, off64_t offset)
{
- V4L2CameraProxy *proxy = getProxy(fd);
- if (!proxy)
+ std::shared_ptr<V4L2CameraFile> file = cameraFile(fd);
+ if (!file)
return fops_.mmap(addr, length, prot, flags, fd, offset);
- void *map = proxy->mmap(addr, length, prot, flags, offset);
+ void *map = file->proxy()->mmap(file.get(), addr, length, prot, flags,
+ offset);
if (map == MAP_FAILED)
return map;
- mmaps_[map] = proxy;
+ mmaps_[map] = file;
return map;
}
@@ -215,9 +230,9 @@ int V4L2CompatManager::munmap(void *addr, size_t length)
if (device == mmaps_.end())
return fops_.munmap(addr, length);
- V4L2CameraProxy *proxy = device->second;
+ V4L2CameraFile *file = device->second.get();
- int ret = proxy->munmap(addr, length);
+ int ret = file->proxy()->munmap(file, addr, length);
if (ret < 0)
return ret;
@@ -228,9 +243,9 @@ int V4L2CompatManager::munmap(void *addr, size_t length)
int V4L2CompatManager::ioctl(int fd, unsigned long request, void *arg)
{
- V4L2CameraProxy *proxy = getProxy(fd);
- if (!proxy)
+ std::shared_ptr<V4L2CameraFile> file = cameraFile(fd);
+ if (!file)
return fops_.ioctl(fd, request, arg);
- return proxy->ioctl(request, arg);
+ return file->proxy()->ioctl(file.get(), request, arg);
}
diff --git a/src/v4l2/v4l2_compat_manager.h b/src/v4l2/v4l2_compat_manager.h
index 872c7c3b..f7c6f122 100644
--- a/src/v4l2/v4l2_compat_manager.h
+++ b/src/v4l2/v4l2_compat_manager.h
@@ -1,12 +1,11 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_compat_manager.h - V4L2 compatibility manager
+ * V4L2 compatibility manager
*/
-#ifndef __V4L2_COMPAT_MANAGER_H__
-#define __V4L2_COMPAT_MANAGER_H__
+#pragma once
#include <fcntl.h>
#include <map>
@@ -18,8 +17,6 @@
#include "v4l2_camera_proxy.h"
-using namespace libcamera;
-
class V4L2CompatManager
{
public:
@@ -30,7 +27,7 @@ public:
using close_func_t = int (*)(int fd);
using ioctl_func_t = int (*)(int fd, unsigned long request, ...);
using mmap_func_t = void *(*)(void *addr, size_t length, int prot,
- int flags, int fd, off_t offset);
+ int flags, int fd, off64_t offset);
using munmap_func_t = int (*)(void *addr, size_t length);
openat_func_t openat;
@@ -43,7 +40,6 @@ public:
static V4L2CompatManager *instance();
- V4L2CameraProxy *getProxy(int fd);
const FileOperations &fops() const { return fops_; }
int openat(int dirfd, const char *path, int oflag, mode_t mode);
@@ -51,7 +47,7 @@ public:
int dup(int oldfd);
int close(int fd);
void *mmap(void *addr, size_t length, int prot, int flags,
- int fd, off_t offset);
+ int fd, off64_t offset);
int munmap(void *addr, size_t length);
int ioctl(int fd, unsigned long request, void *arg);
@@ -61,14 +57,13 @@ private:
int start();
int getCameraIndex(int fd);
+ std::shared_ptr<V4L2CameraFile> cameraFile(int fd);
FileOperations fops_;
- CameraManager *cm_;
+ libcamera::CameraManager *cm_;
std::vector<std::unique_ptr<V4L2CameraProxy>> proxies_;
- std::map<int, V4L2CameraProxy *> devices_;
- std::map<void *, V4L2CameraProxy *> mmaps_;
+ std::map<int, std::shared_ptr<V4L2CameraFile>> files_;
+ std::map<void *, std::shared_ptr<V4L2CameraFile>> mmaps_;
};
-
-#endif /* __V4L2_COMPAT_MANAGER_H__ */
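
A closing note on the files_ map introduced above: because it stores
std::shared_ptr<V4L2CameraFile>, dup() only shares the file object, and the
camera is released when the last duplicated descriptor goes away (a sketch
with illustrative values):

	int fd = open("/dev/video0", O_RDWR);	/* files_[fd] -> file, use_count == 1 */
	int fd2 = dup(fd);			/* files_[fd2] -> same file, use_count == 2 */

	close(fd);	/* erased from files_; the camera stays open */
	close(fd2);	/* last reference dropped: ~V4L2CameraFile() calls proxy->close() */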