From 0455bbbf518cc834bd72ac65e13c9ed40bf21f3c Mon Sep 17 00:00:00 2001 From: Naushir Patuck Date: Wed, 8 Nov 2023 13:44:20 +0000 Subject: build: controls: Rework how controls and properties are generated Add support for using separate YAML files for controls and properties generation. The mapping of vendor/pipeline handler to control file is done through the controls_map variable in include/libcamera/meson.build. This simplifies management of vendor control definitions and avoids possible merge conflicts when changing the control_ids.yaml file for core and draft controls. With this change, libcamera and draft controls and properties files are designated the 'libcamera' vendor tag. In this change, we also rename control_ids.yaml -> control_ids_core.yaml and property_ids.yaml -> property_ids_core.yaml to designate these as core libcamera controls. Signed-off-by: Naushir Patuck Reviewed-by: Jacopo Mondi Reviewed-by: Kieran Bingham --- Documentation/guides/pipeline-handler.rst | 8 +- include/libcamera/meson.build | 50 +- meson.build | 2 + src/libcamera/control_ids.yaml | 1100 ----------------------------- src/libcamera/control_ids_core.yaml | 1100 +++++++++++++++++++++++++++++ src/libcamera/meson.build | 21 +- src/libcamera/property_ids.yaml | 737 ------------------- src/libcamera/property_ids_core.yaml | 737 +++++++++++++++++++ src/py/libcamera/gen-py-controls.py | 10 +- src/py/libcamera/meson.build | 12 +- utils/gen-controls.py | 14 +- 11 files changed, 1924 insertions(+), 1867 deletions(-) delete mode 100644 src/libcamera/control_ids.yaml create mode 100644 src/libcamera/control_ids_core.yaml delete mode 100644 src/libcamera/property_ids.yaml create mode 100644 src/libcamera/property_ids_core.yaml diff --git a/Documentation/guides/pipeline-handler.rst b/Documentation/guides/pipeline-handler.rst index 10b9c75c..66d428a1 100644 --- a/Documentation/guides/pipeline-handler.rst +++ b/Documentation/guides/pipeline-handler.rst @@ -587,12 +587,12 @@ immutable properties of the ``Camera`` device. The libcamera controls and properties are defined in YAML form which is processed to automatically generate documentation and interfaces. Controls are -defined by the src/libcamera/`control_ids.yaml`_ file and camera properties -are defined by src/libcamera/`properties_ids.yaml`_. +defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties +are defined by src/libcamera/`properties_ids_core.yaml`_. .. _controls framework: https://libcamera.org/api-html/controls_8h.html -.. _control_ids.yaml: https://libcamera.org/api-html/control__ids_8h.html -.. _properties_ids.yaml: https://libcamera.org/api-html/property__ids_8h.html +.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html +.. _properties_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html Pipeline handlers can optionally register the list of controls an application can set as well as a list of immutable camera properties. 
Being both diff --git a/include/libcamera/meson.build b/include/libcamera/meson.build index 5fb772e6..c46a4e70 100644 --- a/include/libcamera/meson.build +++ b/include/libcamera/meson.build @@ -32,22 +32,54 @@ install_headers(libcamera_public_headers, libcamera_headers_install_dir = get_option('includedir') / libcamera_include_dir -# control_ids.h and property_ids.h and associated modes -control_source_files = { - 'control_ids': 'controls', - 'property_ids': 'properties', +controls_map = { + 'controls': { + 'core': 'control_ids_core.yaml', + }, + + 'properties': { + 'core': 'property_ids_core.yaml', + } } control_headers = [] +controls_files = [] +properties_files = [] + +foreach mode, entry : controls_map + files_list = [] + input_files = [] + foreach vendor, header : entry + if vendor != 'core' and vendor != 'draft' + if vendor not in pipelines + continue + endif + endif + + if header in files_list + continue + endif + + files_list += header + input_files += files('../../src/libcamera/' + header) + endforeach + + outfile = '' + if mode == 'controls' + outfile = 'control_ids.h' + controls_files += files_list + else + outfile = 'property_ids.h' + properties_files += files_list + endif -foreach header, mode : control_source_files - input_files = files('../../src/libcamera/' + header +'.yaml') - template_file = files(header + '.h.in') + template_file = files(outfile + '.in') control_headers += custom_target(header + '_h', input : input_files, - output : header + '.h', + output : outfile, command : [gen_controls, '-o', '@OUTPUT@', - '--mode', mode, '-t', template_file, '@INPUT@'], + '--mode', mode, '-t', template_file, + '@INPUT@'], install : true, install_dir : libcamera_headers_install_dir) endforeach diff --git a/meson.build b/meson.build index e9a1c7e3..ee57cb78 100644 --- a/meson.build +++ b/meson.build @@ -267,6 +267,8 @@ py_mod.find_installation('python3', modules : py_modules) summary({ 'Enabled pipelines': pipelines, 'Enabled IPA modules': enabled_ipa_names, + 'Controls files': controls_files, + 'Properties files': properties_files, 'Hotplug support': libudev.found(), 'Tracing support': tracing_enabled, 'Android support': android_enabled, diff --git a/src/libcamera/control_ids.yaml b/src/libcamera/control_ids.yaml deleted file mode 100644 index ff74ce1d..00000000 --- a/src/libcamera/control_ids.yaml +++ /dev/null @@ -1,1100 +0,0 @@ -# SPDX-License-Identifier: LGPL-2.1-or-later -# -# Copyright (C) 2019, Google Inc. -# -%YAML 1.1 ---- -# Unless otherwise stated, all controls are bi-directional, i.e. they can be -# set through Request::controls() and returned out through Request::metadata(). -vendor: libcamera -controls: - - AeEnable: - type: bool - description: | - Enable or disable the AE. - - \sa ExposureTime AnalogueGain - - - AeLocked: - type: bool - description: | - Report the lock status of a running AE algorithm. - - If the AE algorithm is locked the value shall be set to true, if it's - converging it shall be set to false. If the AE algorithm is not - running the control shall not be present in the metadata control list. - - \sa AeEnable - - # AeMeteringMode needs further attention: - # - Auto-generate max enum value. - # - Better handling of custom types. - - AeMeteringMode: - type: int32_t - description: | - Specify a metering mode for the AE algorithm to use. The metering - modes determine which parts of the image are used to determine the - scene brightness. Metering modes may be platform specific and not - all metering modes may be supported. 
- enum: - - name: MeteringCentreWeighted - value: 0 - description: Centre-weighted metering mode. - - name: MeteringSpot - value: 1 - description: Spot metering mode. - - name: MeteringMatrix - value: 2 - description: Matrix metering mode. - - name: MeteringCustom - value: 3 - description: Custom metering mode. - - # AeConstraintMode needs further attention: - # - Auto-generate max enum value. - # - Better handling of custom types. - - AeConstraintMode: - type: int32_t - description: | - Specify a constraint mode for the AE algorithm to use. These determine - how the measured scene brightness is adjusted to reach the desired - target exposure. Constraint modes may be platform specific, and not - all constraint modes may be supported. - enum: - - name: ConstraintNormal - value: 0 - description: Default constraint mode. - This mode aims to balance the exposure of different parts of the - image so as to reach a reasonable average level. However, highlights - in the image may appear over-exposed and lowlights may appear - under-exposed. - - name: ConstraintHighlight - value: 1 - description: Highlight constraint mode. - This mode adjusts the exposure levels in order to try and avoid - over-exposing the brightest parts (highlights) of an image. - Other non-highlight parts of the image may appear under-exposed. - - name: ConstraintShadows - value: 2 - description: Shadows constraint mode. - This mode adjusts the exposure levels in order to try and avoid - under-exposing the dark parts (shadows) of an image. Other normally - exposed parts of the image may appear over-exposed. - - name: ConstraintCustom - value: 3 - description: Custom constraint mode. - - # AeExposureMode needs further attention: - # - Auto-generate max enum value. - # - Better handling of custom types. - - AeExposureMode: - type: int32_t - description: | - Specify an exposure mode for the AE algorithm to use. These specify - how the desired total exposure is divided between the shutter time - and the sensor's analogue gain. The exposure modes are platform - specific, and not all exposure modes may be supported. - enum: - - name: ExposureNormal - value: 0 - description: Default exposure mode. - - name: ExposureShort - value: 1 - description: Exposure mode allowing only short exposure times. - - name: ExposureLong - value: 2 - description: Exposure mode allowing long exposure times. - - name: ExposureCustom - value: 3 - description: Custom exposure mode. - - - ExposureValue: - type: float - description: | - Specify an Exposure Value (EV) parameter. The EV parameter will only be - applied if the AE algorithm is currently enabled. - - By convention EV adjusts the exposure as log2. For example - EV = [-2, -1, 0.5, 0, 0.5, 1, 2] results in an exposure adjustment - of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x]. - - \sa AeEnable - - - ExposureTime: - type: int32_t - description: | - Exposure time (shutter speed) for the frame applied in the sensor - device. This value is specified in micro-seconds. - - Setting this value means that it is now fixed and the AE algorithm may - not change it. Setting it back to zero returns it to the control of the - AE algorithm. - - \sa AnalogueGain AeEnable - - \todo Document the interactions between AeEnable and setting a fixed - value for this control. Consider interactions with other AE features, - such as aperture and aperture/shutter priority mode, and decide if - control of which features should be automatically adjusted shouldn't - better be handled through a separate AE mode control. 
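As an illustration of how applications consume these definitions, the sketch below fixes the exposure through the generated C++ control IDs (AnalogueGain is defined next; the setManualExposure() helper is hypothetical, while the ControlList API and control IDs are libcamera's own):

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    /* Fix a 10 ms exposure and unity analogue gain for this request. */
    void setManualExposure(Request *request)
    {
            ControlList &ctrls = request->controls();
            ctrls.set(controls::ExposureTime, 10000); /* microseconds */
            ctrls.set(controls::AnalogueGain, 1.0f);
            /* Setting either control back to 0 returns it to the AE. */
    }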
- - - AnalogueGain: - type: float - description: | - Analogue gain value applied in the sensor device. - The value of the control specifies the gain multiplier applied to all - colour channels. This value cannot be lower than 1.0. - - Setting this value means that it is now fixed and the AE algorithm may - not change it. Setting it back to zero returns it to the control of the - AE algorithm. - - \sa ExposureTime AeEnable - - \todo Document the interactions between AeEnable and setting a fixed - value for this control. Consider interactions with other AE features, - such as aperture and aperture/shutter priority mode, and decide if - control of which features should be automatically adjusted shouldn't - better be handled through a separate AE mode control. - - - AeFlickerMode: - type: int32_t - description: | - Set the flicker mode, which determines whether, and how, the AGC/AEC - algorithm attempts to hide flicker effects caused by the duty cycle of - artificial lighting. - - Although implementation dependent, many algorithms for "flicker - avoidance" work by restricting this exposure time to integer multiples - of the cycle period, wherever possible. - - Implementations may not support all of the flicker modes listed below. - - By default the system will start in FlickerAuto mode if this is - supported, otherwise the flicker mode will be set to FlickerOff. - - enum: - - name: FlickerOff - value: 0 - description: No flicker avoidance is performed. - - name: FlickerManual - value: 1 - description: Manual flicker avoidance. - Suppress flicker effects caused by lighting running with a period - specified by the AeFlickerPeriod control. - \sa AeFlickerPeriod - - name: FlickerAuto - value: 2 - description: Automatic flicker period detection and avoidance. - The system will automatically determine the most likely value of - flicker period, and avoid flicker of this frequency. Once flicker - is being corrected, it is implementation dependent whether the - system is still able to detect a change in the flicker period. - \sa AeFlickerDetected - - - AeFlickerPeriod: - type: int32_t - description: Manual flicker period in microseconds. - This value sets the current flicker period to avoid. It is used when - AeFlickerMode is set to FlickerManual. - - To cancel 50Hz mains flicker, this should be set to 10000 (corresponding - to 100Hz), or 8333 (120Hz) for 60Hz mains. - - Setting the mode to FlickerManual when no AeFlickerPeriod has ever been - set means that no flicker cancellation occurs (until the value of this - control is updated). - - Switching to modes other than FlickerManual has no effect on the - value of the AeFlickerPeriod control. - - \sa AeFlickerMode - - - AeFlickerDetected: - type: int32_t - description: Flicker period detected in microseconds. - The value reported here indicates the currently detected flicker - period, or zero if no flicker at all is detected. - - When AeFlickerMode is set to FlickerAuto, there may be a period during - which the value reported here remains zero. Once a non-zero value is - reported, then this is the flicker period that has been detected and is - now being cancelled. - - In the case of 50Hz mains flicker, the value would be 10000 - (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker. - - It is implementation dependent whether the system can continue to detect - flicker of different periods when another frequency is already being - cancelled. 
- - \sa AeFlickerMode - - - Brightness: - type: float - description: | - Specify a fixed brightness parameter. Positive values (up to 1.0) - produce brighter images; negative values (up to -1.0) produce darker - images and 0.0 leaves pixels unchanged. - - - Contrast: - type: float - description: | - Specify a fixed contrast parameter. Normal contrast is given by the - value 1.0; larger values produce images with more contrast. - - - Lux: - type: float - description: | - Report an estimate of the current illuminance level in lux. The Lux - control can only be returned in metadata. - - - AwbEnable: - type: bool - description: | - Enable or disable the AWB. - - \sa ColourGains - - # AwbMode needs further attention: - # - Auto-generate max enum value. - # - Better handling of custom types. - - AwbMode: - type: int32_t - description: | - Specify the range of illuminants to use for the AWB algorithm. The modes - supported are platform specific, and not all modes may be supported. - enum: - - name: AwbAuto - value: 0 - description: Search over the whole colour temperature range. - - name: AwbIncandescent - value: 1 - description: Incandescent AWB lamp mode. - - name: AwbTungsten - value: 2 - description: Tungsten AWB lamp mode. - - name: AwbFluorescent - value: 3 - description: Fluorescent AWB lamp mode. - - name: AwbIndoor - value: 4 - description: Indoor AWB lighting mode. - - name: AwbDaylight - value: 5 - description: Daylight AWB lighting mode. - - name: AwbCloudy - value: 6 - description: Cloudy AWB lighting mode. - - name: AwbCustom - value: 7 - description: Custom AWB mode. - - - AwbLocked: - type: bool - description: | - Report the lock status of a running AWB algorithm. - - If the AWB algorithm is locked the value shall be set to true, if it's - converging it shall be set to false. If the AWB algorithm is not - running the control shall not be present in the metadata control list. - - \sa AwbEnable - - - ColourGains: - type: float - description: | - Pair of gain values for the Red and Blue colour channels, in that - order. ColourGains can only be applied in a Request when the AWB is - disabled. - - \sa AwbEnable - size: [2] - - - ColourTemperature: - type: int32_t - description: Report the current estimate of the colour temperature, in - kelvin, for this frame. The ColourTemperature control can only be - returned in metadata. - - - Saturation: - type: float - description: | - Specify a fixed saturation parameter. Normal saturation is given by - the value 1.0; larger values produce more saturated colours; 0.0 - produces a greyscale image. - - - SensorBlackLevels: - type: int32_t - description: | - Reports the sensor black levels used for processing a frame, in the - order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit - pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels - control can only be returned in metadata. - size: [4] - - - Sharpness: - type: float - description: | - A value of 0.0 means no sharpening. The minimum value means - minimal sharpening, and shall be 0.0 unless the camera can't - disable sharpening completely. The default value shall give a - "reasonable" level of sharpening, suitable for most use cases. - The maximum value may apply extremely high levels of sharpening, - higher than anyone could reasonably want. Negative values are - not allowed. Note also that sharpening is not applied to raw - streams. - - - FocusFoM: - type: int32_t - description: | - Reports a Figure of Merit (FoM) to indicate how in-focus the frame is. 
- A larger FocusFoM value indicates a more in-focus frame. This singular - value may be based on a combination of statistics gathered from - multiple focus regions within an image. The number of focus regions and - method of combination is platform dependent. In this respect, it is not - necessarily aimed at providing a way to implement a focus algorithm by - the application, rather an indication of how in-focus a frame is. - - - ColourCorrectionMatrix: - type: float - description: | - The 3x3 matrix that converts camera RGB to sRGB within the - imaging pipeline. This should describe the matrix that is used - after pixels have been white-balanced, but before any gamma - transformation. The 3x3 matrix is stored in conventional reading - order in an array of 9 floating point values. - - size: [3,3] - - - ScalerCrop: - type: Rectangle - description: | - Sets the image portion that will be scaled to form the whole of - the final output image. The (x,y) location of this rectangle is - relative to the PixelArrayActiveAreas that is being used. The units - remain native sensor pixels, even if the sensor is being used in - a binning or skipping mode. - - This control is only present when the pipeline supports scaling. Its - maximum valid value is given by the properties::ScalerCropMaximum - property, and the two can be used to implement digital zoom. - - - DigitalGain: - type: float - description: | - Digital gain value applied during the processing steps applied - to the image as captured from the sensor. - - The global digital gain factor is applied to all the colour channels - of the RAW image. Different pipeline models are free to - specify how the global gain factor applies to each separate - channel. - - If an imaging pipeline applies digital gain in distinct - processing steps, this value indicates their total sum. - Pipelines are free to decide how to adjust each processing - step to respect the received gain factor and shall report - their total value in the request metadata. - - - FrameDuration: - type: int64_t - description: | - The instantaneous frame duration from start of frame exposure to start - of next exposure, expressed in microseconds. This control is meant to - be returned in metadata. - - - FrameDurationLimits: - type: int64_t - description: | - The minimum and maximum (in that order) frame duration, expressed in - microseconds. - - When provided by applications, the control specifies the sensor frame - duration interval the pipeline has to use. This limits the largest - exposure time the sensor can use. For example, if a maximum frame - duration of 33ms is requested (corresponding to 30 frames per second), - the sensor will not be able to raise the exposure time above 33ms. - A fixed frame duration is achieved by setting the minimum and maximum - values to be the same. Setting both values to 0 reverts to using the - camera defaults. - - The maximum frame duration provides the absolute limit to the shutter - speed computed by the AE algorithm and it overrides any exposure mode - setting specified with controls::AeExposureMode. Similarly, when a - manual exposure time is set through controls::ExposureTime, it also - gets clipped to the limits set by this control. When reported in - metadata, the control expresses the minimum and maximum frame - durations used after being clipped to the sensor provided frame - duration limits. 
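A typical use of this control pair is locking the frame rate. A minimal sketch, assuming the usual microsecond arithmetic (the fixFrameRate() helper is hypothetical):

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    /* Lock the camera to 30 frames per second. */
    void fixFrameRate(Request *request)
    {
            int64_t frameTime = 1000000 / 30; /* 33333 us */
            /* Equal minimum and maximum yield a fixed frame duration. */
            request->controls().set(controls::FrameDurationLimits,
                                    { frameTime, frameTime });
    }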
- - \sa AeExposureMode - \sa ExposureTime - - \todo Define how to calculate the capture frame rate by - defining controls to report additional delays introduced by - the capture pipeline or post-processing stages (ie JPEG - conversion, frame scaling). - - \todo Provide an explicit definition of default control values, for - this and all other controls. - - size: [2] - - - SensorTemperature: - type: float - description: | - Temperature measure from the camera sensor in Celsius. This is typically - obtained by a thermal sensor present on-die or in the camera module. The - range of reported temperatures is device dependent. - - The SensorTemperature control will only be returned in metadata if a - themal sensor is present. - - - SensorTimestamp: - type: int64_t - description: | - The time when the first row of the image sensor active array is exposed. - - The timestamp, expressed in nanoseconds, represents a monotonically - increasing counter since the system boot time, as defined by the - Linux-specific CLOCK_BOOTTIME clock id. - - The SensorTimestamp control can only be returned in metadata. - - \todo Define how the sensor timestamp has to be used in the reprocessing - use case. - - - AfMode: - type: int32_t - description: | - Control to set the mode of the AF (autofocus) algorithm. - - An implementation may choose not to implement all the modes. - - enum: - - name: AfModeManual - value: 0 - description: | - The AF algorithm is in manual mode. In this mode it will never - perform any action nor move the lens of its own accord, but an - application can specify the desired lens position using the - LensPosition control. - - In this mode the AfState will always report AfStateIdle. - - If the camera is started in AfModeManual, it will move the focus - lens to the position specified by the LensPosition control. - - This mode is the recommended default value for the AfMode control. - External cameras (as reported by the Location property set to - CameraLocationExternal) may use a different default value. - - name: AfModeAuto - value: 1 - description: | - The AF algorithm is in auto mode. This means that the algorithm - will never move the lens or change state unless the AfTrigger - control is used. The AfTrigger control can be used to initiate a - focus scan, the results of which will be reported by AfState. - - If the autofocus algorithm is moved from AfModeAuto to another - mode while a scan is in progress, the scan is cancelled - immediately, without waiting for the scan to finish. - - When first entering this mode the AfState will report - AfStateIdle. When a trigger control is sent, AfState will - report AfStateScanning for a period before spontaneously - changing to AfStateFocused or AfStateFailed, depending on - the outcome of the scan. It will remain in this state until - another scan is initiated by the AfTrigger control. If a scan is - cancelled (without changing to another mode), AfState will return - to AfStateIdle. - - name: AfModeContinuous - value: 2 - description: | - The AF algorithm is in continuous mode. This means that the lens can - re-start a scan spontaneously at any moment, without any user - intervention. The AfState still reports whether the algorithm is - currently scanning or not, though the application has no ability to - initiate or cancel scans, nor to move the lens for itself. - - However, applications can pause the AF algorithm from continuously - scanning by using the AfPause control. 
This allows video or still - images to be captured whilst guaranteeing that the focus is fixed. - - When set to AfModeContinuous, the system will immediately initiate a - scan so AfState will report AfStateScanning, and will settle on one - of AfStateFocused or AfStateFailed, depending on the scan result. - - - AfRange: - type: int32_t - description: | - Control to set the range of focus distances that is scanned. An - implementation may choose not to implement all the options here. - enum: - - name: AfRangeNormal - value: 0 - description: | - A wide range of focus distances is scanned, all the way from - infinity down to close distances, though depending on the - implementation, possibly not including the very closest macro - positions. - - name: AfRangeMacro - value: 1 - description: Only close distances are scanned. - - name: AfRangeFull - value: 2 - description: | - The full range of focus distances is scanned just as with - AfRangeNormal but this time including the very closest macro - positions. - - - AfSpeed: - type: int32_t - description: | - Control that determines whether the AF algorithm is to move the lens - as quickly as possible or more steadily. For example, during video - recording it may be desirable not to move the lens too abruptly, but - when in a preview mode (waiting for a still capture) it may be - helpful to move the lens as quickly as is reasonably possible. - enum: - - name: AfSpeedNormal - value: 0 - description: Move the lens at its usual speed. - - name: AfSpeedFast - value: 1 - description: Move the lens more quickly. - - - AfMetering: - type: int32_t - description: | - Instruct the AF algorithm how it should decide which parts of the image - should be used to measure focus. - enum: - - name: AfMeteringAuto - value: 0 - description: The AF algorithm should decide for itself where it will - measure focus. - - name: AfMeteringWindows - value: 1 - description: The AF algorithm should use the rectangles defined by - the AfWindows control to measure focus. If no windows are specified - the behaviour is platform dependent. - - - AfWindows: - type: Rectangle - description: | - Sets the focus windows used by the AF algorithm when AfMetering is set - to AfMeteringWindows. The units used are pixels within the rectangle - returned by the ScalerCropMaximum property. - - In order to be activated, a rectangle must be programmed with non-zero - width and height. Internally, these rectangles are intersected with the - ScalerCropMaximum rectangle. If the window becomes empty after this - operation, then the window is ignored. If all the windows end up being - ignored, then the behaviour is platform dependent. - - On platforms that support the ScalerCrop control (for implementing - digital zoom, for example), no automatic recalculation or adjustment of - AF windows is performed internally if the ScalerCrop is changed. If any - window lies outside the output image after the scaler crop has been - applied, it is up to the application to recalculate them. - - The details of how the windows are used are platform dependent. We note - that when there is more than one AF window, a typical implementation - might find the optimal focus position for each one and finally select - the window where the focal distance for the objects shown in that part - of the image are closest to the camera. - - size: [n] - - - AfTrigger: - type: int32_t - description: | - This control starts an autofocus scan when AfMode is set to AfModeAuto, - and can also be used to terminate a scan early. 
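A one-shot scan could therefore be requested as in the sketch below (the startAfScan() helper is hypothetical; the AfTriggerStart value is defined just after this):

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    /* Start a single autofocus scan; poll AfState in the metadata. */
    void startAfScan(Request *request)
    {
            ControlList &ctrls = request->controls();
            ctrls.set(controls::AfMode, controls::AfModeAuto);
            ctrls.set(controls::AfTrigger, controls::AfTriggerStart);
    }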
- - It is ignored if AfMode is set to AfModeManual or AfModeContinuous. - - enum: - - name: AfTriggerStart - value: 0 - description: Start an AF scan. Ignored if a scan is in progress. - - name: AfTriggerCancel - value: 1 - description: Cancel an AF scan. This does not cause the lens to move - anywhere else. Ignored if no scan is in progress. - - - AfPause: - type: int32_t - description: | - This control has no effect except when in continuous autofocus mode - (AfModeContinuous). It can be used to pause any lens movements while - (for example) images are captured. The algorithm remains inactive - until it is instructed to resume. - - enum: - - name: AfPauseImmediate - value: 0 - description: | - Pause the continuous autofocus algorithm immediately, whether or not - any kind of scan is underway. AfPauseState will subsequently report - AfPauseStatePaused. AfState may report any of AfStateScanning, - AfStateFocused or AfStateFailed, depending on the algorithm's state - when it received this control. - - name: AfPauseDeferred - value: 1 - description: | - This is similar to AfPauseImmediate, and if the AfState is currently - reporting AfStateFocused or AfStateFailed it will remain in that - state and AfPauseState will report AfPauseStatePaused. - - However, if the algorithm is scanning (AfStateScanning), - AfPauseState will report AfPauseStatePausing until the scan is - finished, at which point AfState will report one of AfStateFocused - or AfStateFailed, and AfPauseState will change to - AfPauseStatePaused. - - - name: AfPauseResume - value: 2 - description: | - Resume continuous autofocus operation. The algorithm starts again - from exactly where it left off, and AfPauseState will report - AfPauseStateRunning. - - - LensPosition: - type: float - description: | - Acts as a control to instruct the lens to move to a particular position - and also reports back the position of the lens for each frame. - - The LensPosition control is ignored unless the AfMode is set to - AfModeManual, though the value is reported back unconditionally in all - modes. - - This value, which is generally a non-integer, is the reciprocal of the - focal distance in metres, also known as dioptres. That is, to set a - focal distance D, the lens position LP is given by - - \f$LP = \frac{1\mathrm{m}}{D}\f$ - - For example: - - 0 moves the lens to infinity. - 0.5 moves the lens to focus on objects 2m away. - 2 moves the lens to focus on objects 50cm away. - And larger values will focus the lens closer. - - The default value of the control should indicate a good general position - for the lens, often corresponding to the hyperfocal distance (the - closest position for which objects at infinity are still acceptably - sharp). The minimum will often be zero (meaning infinity), and the - maximum value defines the closest focus position. - - \todo Define a property to report the Hyperfocal distance of calibrated - lenses. - - - AfState: - type: int32_t - description: | - Reports the current state of the AF algorithm in conjunction with the - reported AfMode value and (in continuous AF mode) the AfPauseState - value. The possible state changes are described below, though we note - the following state transitions that occur when the AfMode is changed. - - If the AfMode is set to AfModeManual, then the AfState will always - report AfStateIdle (even if the lens is subsequently moved). Changing to - the AfModeManual state does not initiate any lens movement. 
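For example, manual focus on an object roughly 50 cm away combines AfModeManual with a LensPosition of 2.0 dioptres (1 / 0.5 m); the focusAtHalfMetre() helper below is hypothetical:

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    /* Move the lens to focus at about 50 cm (2.0 dioptres). */
    void focusAtHalfMetre(Request *request)
    {
            ControlList &ctrls = request->controls();
            ctrls.set(controls::AfMode, controls::AfModeManual);
            ctrls.set(controls::LensPosition, 2.0f);
    }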
- - If the AfMode is set to AfModeAuto then the AfState will report - AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent together - then AfState will omit AfStateIdle and move straight to AfStateScanning - (and start a scan). - - If the AfMode is set to AfModeContinuous then the AfState will initially - report AfStateScanning. - - enum: - - name: AfStateIdle - value: 0 - description: | - The AF algorithm is in manual mode (AfModeManual) or in auto mode - (AfModeAuto) and a scan has not yet been triggered, or an - in-progress scan was cancelled. - - name: AfStateScanning - value: 1 - description: | - The AF algorithm is in auto mode (AfModeAuto), and a scan has been - started using the AfTrigger control. The scan can be cancelled by - sending AfTriggerCancel at which point the algorithm will either - move back to AfStateIdle or, if the scan actually completes before - the cancel request is processed, to one of AfStateFocused or - AfStateFailed. - - Alternatively the AF algorithm could be in continuous mode - (AfModeContinuous) at which point it may enter this state - spontaneously whenever it determines that a rescan is needed. - - name: AfStateFocused - value: 2 - description: | - The AF algorithm is in auto (AfModeAuto) or continuous - (AfModeContinuous) mode and a scan has completed with the result - that the algorithm believes the image is now in focus. - - name: AfStateFailed - value: 3 - description: | - The AF algorithm is in auto (AfModeAuto) or continuous - (AfModeContinuous) mode and a scan has completed with the result - that the algorithm did not find a good focus position. - - - AfPauseState: - type: int32_t - description: | - Only applicable in continuous (AfModeContinuous) mode, this reports - whether the algorithm is currently running, paused or pausing (that is, - will pause as soon as any in-progress scan completes). - - Any change to AfMode will cause AfPauseStateRunning to be reported. - - enum: - - name: AfPauseStateRunning - value: 0 - description: | - Continuous AF is running and the algorithm may restart a scan - spontaneously. - - name: AfPauseStatePausing - value: 1 - description: | - Continuous AF has been sent an AfPauseDeferred control, and will - pause as soon as any in-progress scan completes (and then report - AfPauseStatePaused). No new scans will be start spontaneously until - the AfPauseResume control is sent. - - name: AfPauseStatePaused - value: 2 - description: | - Continuous AF is paused. No further state changes or lens movements - will occur until the AfPauseResume control is sent. - - - HdrMode: - type: int32_t - description: | - Control to set the mode to be used for High Dynamic Range (HDR) - imaging. HDR techniques typically include multiple exposure, image - fusion and tone mapping techniques to improve the dynamic range of the - resulting images. - - When using an HDR mode, images are captured with different sets of AGC - settings called HDR channels. Channels indicate in particular the type - of exposure (short, medium or long) used to capture the raw image, - before fusion. Each HDR image is tagged with the corresponding channel - using the HdrChannel control. - - \sa HdrChannel - - enum: - - name: HdrModeOff - value: 0 - description: | - HDR is disabled. Metadata for this frame will not include the - HdrChannel control. - - name: HdrModeMultiExposureUnmerged - value: 1 - description: | - Multiple exposures will be generated in an alternating fashion. 
- However, they will not be merged together and will be returned to - the application as they are. Each image will be tagged with the - correct HDR channel, indicating what kind of exposure it is. The - tag should be the same as in the HdrModeMultiExposure case. - - The expectation is that an application using this mode would merge - the frames to create HDR images for itself if it requires them. - - name: HdrModeMultiExposure - value: 2 - description: | - Multiple exposures will be generated and merged to create HDR - images. Each image will be tagged with the HDR channel (long, medium - or short) that arrived and which caused this image to be output. - - Systems that use two channels for HDR will return images tagged - alternately as the short and long channel. Systems that use three - channels for HDR will cycle through the short, medium and long - channel before repeating. - - name: HdrModeSingleExposure - value: 3 - description: | - Multiple frames all at a single exposure will be used to create HDR - images. These images should be reported as all corresponding to the - HDR short channel. - - name: HdrModeNight - value: 4 - description: | - Multiple frames will be combined to produce "night mode" images. It - is up to the implementation exactly which HDR channels it uses, and - the images will all be tagged accordingly with the correct HDR - channel information. - - - HdrChannel: - type: int32_t - description: | - This value is reported back to the application so that it can discover - whether this capture corresponds to the short or long exposure image (or - any other image used by the HDR procedure). An application can monitor - the HDR channel to discover when the differently exposed images have - arrived. - - This metadata is only available when an HDR mode has been enabled. - - \sa HdrMode - - enum: - - name: HdrChannelNone - value: 0 - description: | - This image does not correspond to any of the captures used to create - an HDR image. - - name: HdrChannelShort - value: 1 - description: | - This is a short exposure image. - - name: HdrChannelMedium - value: 2 - description: | - This is a medium exposure image. - - name: HdrChannelLong - value: 3 - description: | - This is a long exposure image. - - # ---------------------------------------------------------------------------- - # Draft controls section - - - AePrecaptureTrigger: - type: int32_t - draft: true - description: | - Control for AE metering trigger. Currently identical to - ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER. - - Whether the camera device will trigger a precapture metering sequence - when it processes this request. - enum: - - name: AePrecaptureTriggerIdle - value: 0 - description: The trigger is idle. - - name: AePrecaptureTriggerStart - value: 1 - description: The pre-capture AE metering is started by the camera. - - name: AePrecaptureTriggerCancel - value: 2 - description: | - The camera will cancel any active or completed metering sequence. - The AE algorithm is reset to its initial state. - - - NoiseReductionMode: - type: int32_t - draft: true - description: | - Control to select the noise reduction algorithm mode. Currently - identical to ANDROID_NOISE_REDUCTION_MODE. - - Mode of operation for the noise reduction algorithm. - enum: - - name: NoiseReductionModeOff - value: 0 - description: No noise reduction is applied - - name: NoiseReductionModeFast - value: 1 - description: | - Noise reduction is applied without reducing the frame rate. 
- - name: NoiseReductionModeHighQuality - value: 2 - description: | - High quality noise reduction at the expense of frame rate. - - name: NoiseReductionModeMinimal - value: 3 - description: | - Minimal noise reduction is applied without reducing the frame rate. - - name: NoiseReductionModeZSL - value: 4 - description: | - Noise reduction is applied at different levels to different streams. - - - ColorCorrectionAberrationMode: - type: int32_t - draft: true - description: | - Control to select the color correction aberration mode. Currently - identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE. - - Mode of operation for the chromatic aberration correction algorithm. - enum: - - name: ColorCorrectionAberrationOff - value: 0 - description: No aberration correction is applied. - - name: ColorCorrectionAberrationFast - value: 1 - description: Aberration correction will not slow down the frame rate. - - name: ColorCorrectionAberrationHighQuality - value: 2 - description: | - High quality aberration correction which might reduce the frame - rate. - - - AeState: - type: int32_t - draft: true - description: | - Control to report the current AE algorithm state. Currently identical to - ANDROID_CONTROL_AE_STATE. - - Current state of the AE algorithm. - enum: - - name: AeStateInactive - value: 0 - description: The AE algorithm is inactive. - - name: AeStateSearching - value: 1 - description: The AE algorithm has not converged yet. - - name: AeStateConverged - value: 2 - description: The AE algorithm has converged. - - name: AeStateLocked - value: 3 - description: The AE algorithm is locked. - - name: AeStateFlashRequired - value: 4 - description: The AE algorithm would need a flash for good results - - name: AeStatePrecapture - value: 5 - description: | - The AE algorithm has started a pre-capture metering session. - \sa AePrecaptureTrigger - - - AwbState: - type: int32_t - draft: true - description: | - Control to report the current AWB algorithm state. Currently identical - to ANDROID_CONTROL_AWB_STATE. - - Current state of the AWB algorithm. - enum: - - name: AwbStateInactive - value: 0 - description: The AWB algorithm is inactive. - - name: AwbStateSearching - value: 1 - description: The AWB algorithm has not converged yet. - - name: AwbConverged - value: 2 - description: The AWB algorithm has converged. - - name: AwbLocked - value: 3 - description: The AWB algorithm is locked. - - - SensorRollingShutterSkew: - type: int64_t - draft: true - description: | - Control to report the time between the start of exposure of the first - row and the start of exposure of the last row. Currently identical to - ANDROID_SENSOR_ROLLING_SHUTTER_SKEW - - - LensShadingMapMode: - type: int32_t - draft: true - description: | - Control to report if the lens shading map is available. Currently - identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE. - enum: - - name: LensShadingMapModeOff - value: 0 - description: No lens shading map mode is available. - - name: LensShadingMapModeOn - value: 1 - description: The lens shading map mode is available. - - - PipelineDepth: - type: int32_t - draft: true - description: | - Specifies the number of pipeline stages the frame went through from when - it was exposed to when the final completed result was available to the - framework. Always less than or equal to PipelineMaxDepth. Currently - identical to ANDROID_REQUEST_PIPELINE_DEPTH. - - The typical value for this control is 3 as a frame is first exposed, - captured and then processed in a single pass through the ISP. 
Any - additional processing step performed after the ISP pass (in example face - detection, additional format conversions etc) count as an additional - pipeline stage. - - - MaxLatency: - type: int32_t - draft: true - description: | - The maximum number of frames that can occur after a request (different - than the previous) has been submitted, and before the result's state - becomes synchronized. A value of -1 indicates unknown latency, and 0 - indicates per-frame control. Currently identical to - ANDROID_SYNC_MAX_LATENCY. - - - TestPatternMode: - type: int32_t - draft: true - description: | - Control to select the test pattern mode. Currently identical to - ANDROID_SENSOR_TEST_PATTERN_MODE. - enum: - - name: TestPatternModeOff - value: 0 - description: | - No test pattern mode is used. The camera device returns frames from - the image sensor. - - name: TestPatternModeSolidColor - value: 1 - description: | - Each pixel in [R, G_even, G_odd, B] is replaced by its respective - color channel provided in test pattern data. - \todo Add control for test pattern data. - - name: TestPatternModeColorBars - value: 2 - description: | - All pixel data is replaced with an 8-bar color pattern. The vertical - bars (left-to-right) are as follows; white, yellow, cyan, green, - magenta, red, blue and black. Each bar should take up 1/8 of the - sensor pixel array width. When this is not possible, the bar size - should be rounded down to the nearest integer and the pattern can - repeat on the right side. Each bar's height must always take up the - full sensor pixel array height. - - name: TestPatternModeColorBarsFadeToGray - value: 3 - description: | - The test pattern is similar to TestPatternModeColorBars, - except that each bar should start at its specified color at the top - and fade to gray at the bottom. Furthermore each bar is further - subdevided into a left and right half. The left half should have a - smooth gradient, and the right half should have a quantized - gradient. In particular, the right half's should consist of blocks - of the same color for 1/16th active sensor pixel array width. The - least significant bits in the quantized gradient should be copied - from the most significant bits of the smooth gradient. The height of - each bar should always be a multiple of 128. When this is not the - case, the pattern should repeat at the bottom of the image. - - name: TestPatternModePn9 - value: 4 - description: | - All pixel data is replaced by a pseudo-random sequence generated - from a PN9 512-bit sequence (typically implemented in hardware with - a linear feedback shift register). The generator should be reset at - the beginning of each frame, and thus each subsequent raw frame with - this test pattern should be exactly the same as the last. - - name: TestPatternModeCustom1 - value: 256 - description: | - The first custom test pattern. All custom patterns that are - available only on this camera device are at least this numeric - value. All of the custom test patterns will be static (that is the - raw image must not vary from frame to frame). - -... diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml new file mode 100644 index 00000000..ff74ce1d --- /dev/null +++ b/src/libcamera/control_ids_core.yaml @@ -0,0 +1,1100 @@ +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Copyright (C) 2019, Google Inc. +# +%YAML 1.1 +--- +# Unless otherwise stated, all controls are bi-directional, i.e. 
they can be +# set through Request::controls() and returned out through Request::metadata(). +vendor: libcamera +controls: + - AeEnable: + type: bool + description: | + Enable or disable the AE. + + \sa ExposureTime AnalogueGain + + - AeLocked: + type: bool + description: | + Report the lock status of a running AE algorithm. + + If the AE algorithm is locked the value shall be set to true, if it's + converging it shall be set to false. If the AE algorithm is not + running the control shall not be present in the metadata control list. + + \sa AeEnable + + # AeMeteringMode needs further attention: + # - Auto-generate max enum value. + # - Better handling of custom types. + - AeMeteringMode: + type: int32_t + description: | + Specify a metering mode for the AE algorithm to use. The metering + modes determine which parts of the image are used to determine the + scene brightness. Metering modes may be platform specific and not + all metering modes may be supported. + enum: + - name: MeteringCentreWeighted + value: 0 + description: Centre-weighted metering mode. + - name: MeteringSpot + value: 1 + description: Spot metering mode. + - name: MeteringMatrix + value: 2 + description: Matrix metering mode. + - name: MeteringCustom + value: 3 + description: Custom metering mode. + + # AeConstraintMode needs further attention: + # - Auto-generate max enum value. + # - Better handling of custom types. + - AeConstraintMode: + type: int32_t + description: | + Specify a constraint mode for the AE algorithm to use. These determine + how the measured scene brightness is adjusted to reach the desired + target exposure. Constraint modes may be platform specific, and not + all constraint modes may be supported. + enum: + - name: ConstraintNormal + value: 0 + description: Default constraint mode. + This mode aims to balance the exposure of different parts of the + image so as to reach a reasonable average level. However, highlights + in the image may appear over-exposed and lowlights may appear + under-exposed. + - name: ConstraintHighlight + value: 1 + description: Highlight constraint mode. + This mode adjusts the exposure levels in order to try and avoid + over-exposing the brightest parts (highlights) of an image. + Other non-highlight parts of the image may appear under-exposed. + - name: ConstraintShadows + value: 2 + description: Shadows constraint mode. + This mode adjusts the exposure levels in order to try and avoid + under-exposing the dark parts (shadows) of an image. Other normally + exposed parts of the image may appear over-exposed. + - name: ConstraintCustom + value: 3 + description: Custom constraint mode. + + # AeExposureMode needs further attention: + # - Auto-generate max enum value. + # - Better handling of custom types. + - AeExposureMode: + type: int32_t + description: | + Specify an exposure mode for the AE algorithm to use. These specify + how the desired total exposure is divided between the shutter time + and the sensor's analogue gain. The exposure modes are platform + specific, and not all exposure modes may be supported. + enum: + - name: ExposureNormal + value: 0 + description: Default exposure mode. + - name: ExposureShort + value: 1 + description: Exposure mode allowing only short exposure times. + - name: ExposureLong + value: 2 + description: Exposure mode allowing long exposure times. + - name: ExposureCustom + value: 3 + description: Custom exposure mode. + + - ExposureValue: + type: float + description: | + Specify an Exposure Value (EV) parameter. 
The EV parameter will only be + applied if the AE algorithm is currently enabled. + + By convention EV adjusts the exposure as log2. For example + EV = [-2, -1, 0.5, 0, 0.5, 1, 2] results in an exposure adjustment + of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x]. + + \sa AeEnable + + - ExposureTime: + type: int32_t + description: | + Exposure time (shutter speed) for the frame applied in the sensor + device. This value is specified in micro-seconds. + + Setting this value means that it is now fixed and the AE algorithm may + not change it. Setting it back to zero returns it to the control of the + AE algorithm. + + \sa AnalogueGain AeEnable + + \todo Document the interactions between AeEnable and setting a fixed + value for this control. Consider interactions with other AE features, + such as aperture and aperture/shutter priority mode, and decide if + control of which features should be automatically adjusted shouldn't + better be handled through a separate AE mode control. + + - AnalogueGain: + type: float + description: | + Analogue gain value applied in the sensor device. + The value of the control specifies the gain multiplier applied to all + colour channels. This value cannot be lower than 1.0. + + Setting this value means that it is now fixed and the AE algorithm may + not change it. Setting it back to zero returns it to the control of the + AE algorithm. + + \sa ExposureTime AeEnable + + \todo Document the interactions between AeEnable and setting a fixed + value for this control. Consider interactions with other AE features, + such as aperture and aperture/shutter priority mode, and decide if + control of which features should be automatically adjusted shouldn't + better be handled through a separate AE mode control. + + - AeFlickerMode: + type: int32_t + description: | + Set the flicker mode, which determines whether, and how, the AGC/AEC + algorithm attempts to hide flicker effects caused by the duty cycle of + artificial lighting. + + Although implementation dependent, many algorithms for "flicker + avoidance" work by restricting this exposure time to integer multiples + of the cycle period, wherever possible. + + Implementations may not support all of the flicker modes listed below. + + By default the system will start in FlickerAuto mode if this is + supported, otherwise the flicker mode will be set to FlickerOff. + + enum: + - name: FlickerOff + value: 0 + description: No flicker avoidance is performed. + - name: FlickerManual + value: 1 + description: Manual flicker avoidance. + Suppress flicker effects caused by lighting running with a period + specified by the AeFlickerPeriod control. + \sa AeFlickerPeriod + - name: FlickerAuto + value: 2 + description: Automatic flicker period detection and avoidance. + The system will automatically determine the most likely value of + flicker period, and avoid flicker of this frequency. Once flicker + is being corrected, it is implementation dependent whether the + system is still able to detect a change in the flicker period. + \sa AeFlickerDetected + + - AeFlickerPeriod: + type: int32_t + description: Manual flicker period in microseconds. + This value sets the current flicker period to avoid. It is used when + AeFlickerMode is set to FlickerManual. + + To cancel 50Hz mains flicker, this should be set to 10000 (corresponding + to 100Hz), or 8333 (120Hz) for 60Hz mains. 
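For instance, 50 Hz mains flicker could be cancelled as sketched below (the cancel50HzFlicker() helper is hypothetical):

    #include <libcamera/control_ids.h>
    #include <libcamera/request.h>

    using namespace libcamera;

    /* Lighting on 50 Hz mains pulses at 100 Hz, i.e. every 10000 us. */
    void cancel50HzFlicker(Request *request)
    {
            ControlList &ctrls = request->controls();
            ctrls.set(controls::AeFlickerMode, controls::FlickerManual);
            ctrls.set(controls::AeFlickerPeriod, 10000);
    }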
+ + Setting the mode to FlickerManual when no AeFlickerPeriod has ever been + set means that no flicker cancellation occurs (until the value of this + control is updated). + + Switching to modes other than FlickerManual has no effect on the + value of the AeFlickerPeriod control. + + \sa AeFlickerMode + + - AeFlickerDetected: + type: int32_t + description: Flicker period detected in microseconds. + The value reported here indicates the currently detected flicker + period, or zero if no flicker at all is detected. + + When AeFlickerMode is set to FlickerAuto, there may be a period during + which the value reported here remains zero. Once a non-zero value is + reported, then this is the flicker period that has been detected and is + now being cancelled. + + In the case of 50Hz mains flicker, the value would be 10000 + (corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker. + + It is implementation dependent whether the system can continue to detect + flicker of different periods when another frequency is already being + cancelled. + + \sa AeFlickerMode + + - Brightness: + type: float + description: | + Specify a fixed brightness parameter. Positive values (up to 1.0) + produce brighter images; negative values (up to -1.0) produce darker + images and 0.0 leaves pixels unchanged. + + - Contrast: + type: float + description: | + Specify a fixed contrast parameter. Normal contrast is given by the + value 1.0; larger values produce images with more contrast. + + - Lux: + type: float + description: | + Report an estimate of the current illuminance level in lux. The Lux + control can only be returned in metadata. + + - AwbEnable: + type: bool + description: | + Enable or disable the AWB. + + \sa ColourGains + + # AwbMode needs further attention: + # - Auto-generate max enum value. + # - Better handling of custom types. + - AwbMode: + type: int32_t + description: | + Specify the range of illuminants to use for the AWB algorithm. The modes + supported are platform specific, and not all modes may be supported. + enum: + - name: AwbAuto + value: 0 + description: Search over the whole colour temperature range. + - name: AwbIncandescent + value: 1 + description: Incandescent AWB lamp mode. + - name: AwbTungsten + value: 2 + description: Tungsten AWB lamp mode. + - name: AwbFluorescent + value: 3 + description: Fluorescent AWB lamp mode. + - name: AwbIndoor + value: 4 + description: Indoor AWB lighting mode. + - name: AwbDaylight + value: 5 + description: Daylight AWB lighting mode. + - name: AwbCloudy + value: 6 + description: Cloudy AWB lighting mode. + - name: AwbCustom + value: 7 + description: Custom AWB mode. + + - AwbLocked: + type: bool + description: | + Report the lock status of a running AWB algorithm. + + If the AWB algorithm is locked the value shall be set to true, if it's + converging it shall be set to false. If the AWB algorithm is not + running the control shall not be present in the metadata control list. + + \sa AwbEnable + + - ColourGains: + type: float + description: | + Pair of gain values for the Red and Blue colour channels, in that + order. ColourGains can only be applied in a Request when the AWB is + disabled. + + \sa AwbEnable + size: [2] + + - ColourTemperature: + type: int32_t + description: Report the current estimate of the colour temperature, in + kelvin, for this frame. The ColourTemperature control can only be + returned in metadata. + + - Saturation: + type: float + description: | + Specify a fixed saturation parameter. 
Normal saturation is given by + the value 1.0; larger values produce more saturated colours; 0.0 + produces a greyscale image. + + - SensorBlackLevels: + type: int32_t + description: | + Reports the sensor black levels used for processing a frame, in the + order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit + pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels + control can only be returned in metadata. + size: [4] + + - Sharpness: + type: float + description: | + A value of 0.0 means no sharpening. The minimum value means + minimal sharpening, and shall be 0.0 unless the camera can't + disable sharpening completely. The default value shall give a + "reasonable" level of sharpening, suitable for most use cases. + The maximum value may apply extremely high levels of sharpening, + higher than anyone could reasonably want. Negative values are + not allowed. Note also that sharpening is not applied to raw + streams. + + - FocusFoM: + type: int32_t + description: | + Reports a Figure of Merit (FoM) to indicate how in-focus the frame is. + A larger FocusFoM value indicates a more in-focus frame. This singular + value may be based on a combination of statistics gathered from + multiple focus regions within an image. The number of focus regions and + method of combination is platform dependent. In this respect, it is not + necessarily aimed at providing a way to implement a focus algorithm by + the application, rather an indication of how in-focus a frame is. + + - ColourCorrectionMatrix: + type: float + description: | + The 3x3 matrix that converts camera RGB to sRGB within the + imaging pipeline. This should describe the matrix that is used + after pixels have been white-balanced, but before any gamma + transformation. The 3x3 matrix is stored in conventional reading + order in an array of 9 floating point values. + + size: [3,3] + + - ScalerCrop: + type: Rectangle + description: | + Sets the image portion that will be scaled to form the whole of + the final output image. The (x,y) location of this rectangle is + relative to the PixelArrayActiveAreas that is being used. The units + remain native sensor pixels, even if the sensor is being used in + a binning or skipping mode. + + This control is only present when the pipeline supports scaling. Its + maximum valid value is given by the properties::ScalerCropMaximum + property, and the two can be used to implement digital zoom. + + - DigitalGain: + type: float + description: | + Digital gain value applied during the processing steps applied + to the image as captured from the sensor. + + The global digital gain factor is applied to all the colour channels + of the RAW image. Different pipeline models are free to + specify how the global gain factor applies to each separate + channel. + + If an imaging pipeline applies digital gain in distinct + processing steps, this value indicates their total sum. + Pipelines are free to decide how to adjust each processing + step to respect the received gain factor and shall report + their total value in the request metadata. + + - FrameDuration: + type: int64_t + description: | + The instantaneous frame duration from start of frame exposure to start + of next exposure, expressed in microseconds. This control is meant to + be returned in metadata. + + - FrameDurationLimits: + type: int64_t + description: | + The minimum and maximum (in that order) frame duration, expressed in + microseconds. 
+
+        When provided by applications, the control specifies the sensor frame
+        duration interval the pipeline has to use. This limits the largest
+        exposure time the sensor can use. For example, if a maximum frame
+        duration of 33ms is requested (corresponding to 30 frames per second),
+        the sensor will not be able to raise the exposure time above 33ms.
+        A fixed frame duration is achieved by setting the minimum and maximum
+        values to be the same. Setting both values to 0 reverts to using the
+        camera defaults.
+
+        The maximum frame duration provides the absolute limit to the shutter
+        speed computed by the AE algorithm and it overrides any exposure mode
+        setting specified with controls::AeExposureMode. Similarly, when a
+        manual exposure time is set through controls::ExposureTime, it also
+        gets clipped to the limits set by this control. When reported in
+        metadata, the control expresses the minimum and maximum frame
+        durations used after being clipped to the sensor provided frame
+        duration limits.
+
+        \sa AeExposureMode
+        \sa ExposureTime
+
+        \todo Define how to calculate the capture frame rate by
+        defining controls to report additional delays introduced by
+        the capture pipeline or post-processing stages (i.e. JPEG
+        conversion, frame scaling).
+
+        \todo Provide an explicit definition of default control values, for
+        this and all other controls.
+
+      size: [2]
+
+  - SensorTemperature:
+      type: float
+      description: |
+        Temperature measured from the camera sensor in Celsius. This is typically
+        obtained by a thermal sensor present on-die or in the camera module. The
+        range of reported temperatures is device dependent.
+
+        The SensorTemperature control will only be returned in metadata if a
+        thermal sensor is present.
+
+  - SensorTimestamp:
+      type: int64_t
+      description: |
+        The time when the first row of the image sensor active array is exposed.
+
+        The timestamp, expressed in nanoseconds, represents a monotonically
+        increasing counter since the system boot time, as defined by the
+        Linux-specific CLOCK_BOOTTIME clock id.
+
+        The SensorTimestamp control can only be returned in metadata.
+
+        \todo Define how the sensor timestamp has to be used in the reprocessing
+        use case.
+
+  - AfMode:
+      type: int32_t
+      description: |
+        Control to set the mode of the AF (autofocus) algorithm.
+
+        An implementation may choose not to implement all the modes.
+
+      enum:
+        - name: AfModeManual
+          value: 0
+          description: |
+            The AF algorithm is in manual mode. In this mode it will never
+            perform any action nor move the lens of its own accord, but an
+            application can specify the desired lens position using the
+            LensPosition control.
+
+            In this mode the AfState will always report AfStateIdle.
+
+            If the camera is started in AfModeManual, it will move the focus
+            lens to the position specified by the LensPosition control.
+
+            This mode is the recommended default value for the AfMode control.
+            External cameras (as reported by the Location property set to
+            CameraLocationExternal) may use a different default value.
+        - name: AfModeAuto
+          value: 1
+          description: |
+            The AF algorithm is in auto mode. This means that the algorithm
+            will never move the lens or change state unless the AfTrigger
+            control is used. The AfTrigger control can be used to initiate a
+            focus scan, the results of which will be reported by AfState.
+
+            If the autofocus algorithm is moved from AfModeAuto to another
+            mode while a scan is in progress, the scan is cancelled
+            immediately, without waiting for the scan to finish.
+ + When first entering this mode the AfState will report + AfStateIdle. When a trigger control is sent, AfState will + report AfStateScanning for a period before spontaneously + changing to AfStateFocused or AfStateFailed, depending on + the outcome of the scan. It will remain in this state until + another scan is initiated by the AfTrigger control. If a scan is + cancelled (without changing to another mode), AfState will return + to AfStateIdle. + - name: AfModeContinuous + value: 2 + description: | + The AF algorithm is in continuous mode. This means that the lens can + re-start a scan spontaneously at any moment, without any user + intervention. The AfState still reports whether the algorithm is + currently scanning or not, though the application has no ability to + initiate or cancel scans, nor to move the lens for itself. + + However, applications can pause the AF algorithm from continuously + scanning by using the AfPause control. This allows video or still + images to be captured whilst guaranteeing that the focus is fixed. + + When set to AfModeContinuous, the system will immediately initiate a + scan so AfState will report AfStateScanning, and will settle on one + of AfStateFocused or AfStateFailed, depending on the scan result. + + - AfRange: + type: int32_t + description: | + Control to set the range of focus distances that is scanned. An + implementation may choose not to implement all the options here. + enum: + - name: AfRangeNormal + value: 0 + description: | + A wide range of focus distances is scanned, all the way from + infinity down to close distances, though depending on the + implementation, possibly not including the very closest macro + positions. + - name: AfRangeMacro + value: 1 + description: Only close distances are scanned. + - name: AfRangeFull + value: 2 + description: | + The full range of focus distances is scanned just as with + AfRangeNormal but this time including the very closest macro + positions. + + - AfSpeed: + type: int32_t + description: | + Control that determines whether the AF algorithm is to move the lens + as quickly as possible or more steadily. For example, during video + recording it may be desirable not to move the lens too abruptly, but + when in a preview mode (waiting for a still capture) it may be + helpful to move the lens as quickly as is reasonably possible. + enum: + - name: AfSpeedNormal + value: 0 + description: Move the lens at its usual speed. + - name: AfSpeedFast + value: 1 + description: Move the lens more quickly. + + - AfMetering: + type: int32_t + description: | + Instruct the AF algorithm how it should decide which parts of the image + should be used to measure focus. + enum: + - name: AfMeteringAuto + value: 0 + description: The AF algorithm should decide for itself where it will + measure focus. + - name: AfMeteringWindows + value: 1 + description: The AF algorithm should use the rectangles defined by + the AfWindows control to measure focus. If no windows are specified + the behaviour is platform dependent. + + - AfWindows: + type: Rectangle + description: | + Sets the focus windows used by the AF algorithm when AfMetering is set + to AfMeteringWindows. The units used are pixels within the rectangle + returned by the ScalerCropMaximum property. + + In order to be activated, a rectangle must be programmed with non-zero + width and height. Internally, these rectangles are intersected with the + ScalerCropMaximum rectangle. If the window becomes empty after this + operation, then the window is ignored. 
If all the windows end up being
+        ignored, then the behaviour is platform dependent.
+
+        On platforms that support the ScalerCrop control (for implementing
+        digital zoom, for example), no automatic recalculation or adjustment of
+        AF windows is performed internally if the ScalerCrop is changed. If any
+        window lies outside the output image after the scaler crop has been
+        applied, it is up to the application to recalculate them.
+
+        The details of how the windows are used are platform dependent. We note
+        that when there is more than one AF window, a typical implementation
+        might find the optimal focus position for each one and finally select
+        the window where the focal distance of the objects shown in that part
+        of the image is closest to the camera.
+
+      size: [n]
+
+  - AfTrigger:
+      type: int32_t
+      description: |
+        This control starts an autofocus scan when AfMode is set to AfModeAuto,
+        and can also be used to terminate a scan early.
+
+        It is ignored if AfMode is set to AfModeManual or AfModeContinuous.
+
+      enum:
+        - name: AfTriggerStart
+          value: 0
+          description: Start an AF scan. Ignored if a scan is in progress.
+        - name: AfTriggerCancel
+          value: 1
+          description: Cancel an AF scan. This does not cause the lens to move
+            anywhere else. Ignored if no scan is in progress.
+
+  - AfPause:
+      type: int32_t
+      description: |
+        This control has no effect except when in continuous autofocus mode
+        (AfModeContinuous). It can be used to pause any lens movements while
+        (for example) images are captured. The algorithm remains inactive
+        until it is instructed to resume.
+
+      enum:
+        - name: AfPauseImmediate
+          value: 0
+          description: |
+            Pause the continuous autofocus algorithm immediately, whether or not
+            any kind of scan is underway. AfPauseState will subsequently report
+            AfPauseStatePaused. AfState may report any of AfStateScanning,
+            AfStateFocused or AfStateFailed, depending on the algorithm's state
+            when it received this control.
+        - name: AfPauseDeferred
+          value: 1
+          description: |
+            This is similar to AfPauseImmediate, and if the AfState is currently
+            reporting AfStateFocused or AfStateFailed it will remain in that
+            state and AfPauseState will report AfPauseStatePaused.
+
+            However, if the algorithm is scanning (AfStateScanning),
+            AfPauseState will report AfPauseStatePausing until the scan is
+            finished, at which point AfState will report one of AfStateFocused
+            or AfStateFailed, and AfPauseState will change to
+            AfPauseStatePaused.
+
+        - name: AfPauseResume
+          value: 2
+          description: |
+            Resume continuous autofocus operation. The algorithm starts again
+            from exactly where it left off, and AfPauseState will report
+            AfPauseStateRunning.
+
+  - LensPosition:
+      type: float
+      description: |
+        Acts as a control to instruct the lens to move to a particular position
+        and also reports back the position of the lens for each frame.
+
+        The LensPosition control is ignored unless the AfMode is set to
+        AfModeManual, though the value is reported back unconditionally in all
+        modes.
+
+        This value, which is generally a non-integer, is the reciprocal of the
+        focal distance in metres, also known as dioptres. That is, to set a
+        focal distance D, the lens position LP is given by
+
+        \f$LP = \frac{1\mathrm{m}}{D}\f$
+
+        For example:
+
+        0 moves the lens to infinity.
+        0.5 moves the lens to focus on objects 2m away.
+        2 moves the lens to focus on objects 50cm away.
+        And larger values will focus the lens closer.
+
+        The default value of the control should indicate a good general position
+        for the lens, often corresponding to the hyperfocal distance (the
+        closest position for which objects at infinity are still acceptably
+        sharp). The minimum will often be zero (meaning infinity), and the
+        maximum value defines the closest focus position.
+
+        \todo Define a property to report the Hyperfocal distance of calibrated
+        lenses.
+
+  - AfState:
+      type: int32_t
+      description: |
+        Reports the current state of the AF algorithm in conjunction with the
+        reported AfMode value and (in continuous AF mode) the AfPauseState
+        value. The possible state changes are described below, though we note
+        the following state transitions that occur when the AfMode is changed.
+
+        If the AfMode is set to AfModeManual, then the AfState will always
+        report AfStateIdle (even if the lens is subsequently moved). Changing to
+        the AfModeManual state does not initiate any lens movement.
+
+        If the AfMode is set to AfModeAuto then the AfState will report
+        AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent together
+        then AfState will omit AfStateIdle and move straight to AfStateScanning
+        (and start a scan).
+
+        If the AfMode is set to AfModeContinuous then the AfState will initially
+        report AfStateScanning.
+
+      enum:
+        - name: AfStateIdle
+          value: 0
+          description: |
+            The AF algorithm is in manual mode (AfModeManual) or in auto mode
+            (AfModeAuto) and a scan has not yet been triggered, or an
+            in-progress scan was cancelled.
+        - name: AfStateScanning
+          value: 1
+          description: |
+            The AF algorithm is in auto mode (AfModeAuto), and a scan has been
+            started using the AfTrigger control. The scan can be cancelled by
+            sending AfTriggerCancel at which point the algorithm will either
+            move back to AfStateIdle or, if the scan actually completes before
+            the cancel request is processed, to one of AfStateFocused or
+            AfStateFailed.
+
+            Alternatively the AF algorithm could be in continuous mode
+            (AfModeContinuous) at which point it may enter this state
+            spontaneously whenever it determines that a rescan is needed.
+        - name: AfStateFocused
+          value: 2
+          description: |
+            The AF algorithm is in auto (AfModeAuto) or continuous
+            (AfModeContinuous) mode and a scan has completed with the result
+            that the algorithm believes the image is now in focus.
+        - name: AfStateFailed
+          value: 3
+          description: |
+            The AF algorithm is in auto (AfModeAuto) or continuous
+            (AfModeContinuous) mode and a scan has completed with the result
+            that the algorithm did not find a good focus position.
+
+  - AfPauseState:
+      type: int32_t
+      description: |
+        Only applicable in continuous (AfModeContinuous) mode, this reports
+        whether the algorithm is currently running, paused or pausing (that is,
+        will pause as soon as any in-progress scan completes).
+
+        Any change to AfMode will cause AfPauseStateRunning to be reported.
+
+      enum:
+        - name: AfPauseStateRunning
+          value: 0
+          description: |
+            Continuous AF is running and the algorithm may restart a scan
+            spontaneously.
+        - name: AfPauseStatePausing
+          value: 1
+          description: |
+            Continuous AF has been sent an AfPauseDeferred control, and will
+            pause as soon as any in-progress scan completes (and then report
+            AfPauseStatePaused). No new scans will start spontaneously until
+            the AfPauseResume control is sent.
+        - name: AfPauseStatePaused
+          value: 2
+          description: |
+            Continuous AF is paused. No further state changes or lens movements
+            will occur until the AfPauseResume control is sent.
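+
+  # Illustrative, non-normative sketch of driving the autofocus controls
+  # documented above from application code. It assumes a valid
+  # libcamera::Request pointer named "request"; the variable name is a
+  # placeholder, not part of any control definition.
+  #
+  #   /* Manual focus: focus on objects 50cm away (2 dioptres). */
+  #   request->controls().set(controls::AfMode, controls::AfModeManual);
+  #   request->controls().set(controls::LensPosition, 2.0f);
+  #
+  #   /* One-shot scan: trigger autofocus and watch AfState in metadata. */
+  #   request->controls().set(controls::AfMode, controls::AfModeAuto);
+  #   request->controls().set(controls::AfTrigger, controls::AfTriggerStart);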
+ + - HdrMode: + type: int32_t + description: | + Control to set the mode to be used for High Dynamic Range (HDR) + imaging. HDR techniques typically include multiple exposure, image + fusion and tone mapping techniques to improve the dynamic range of the + resulting images. + + When using an HDR mode, images are captured with different sets of AGC + settings called HDR channels. Channels indicate in particular the type + of exposure (short, medium or long) used to capture the raw image, + before fusion. Each HDR image is tagged with the corresponding channel + using the HdrChannel control. + + \sa HdrChannel + + enum: + - name: HdrModeOff + value: 0 + description: | + HDR is disabled. Metadata for this frame will not include the + HdrChannel control. + - name: HdrModeMultiExposureUnmerged + value: 1 + description: | + Multiple exposures will be generated in an alternating fashion. + However, they will not be merged together and will be returned to + the application as they are. Each image will be tagged with the + correct HDR channel, indicating what kind of exposure it is. The + tag should be the same as in the HdrModeMultiExposure case. + + The expectation is that an application using this mode would merge + the frames to create HDR images for itself if it requires them. + - name: HdrModeMultiExposure + value: 2 + description: | + Multiple exposures will be generated and merged to create HDR + images. Each image will be tagged with the HDR channel (long, medium + or short) that arrived and which caused this image to be output. + + Systems that use two channels for HDR will return images tagged + alternately as the short and long channel. Systems that use three + channels for HDR will cycle through the short, medium and long + channel before repeating. + - name: HdrModeSingleExposure + value: 3 + description: | + Multiple frames all at a single exposure will be used to create HDR + images. These images should be reported as all corresponding to the + HDR short channel. + - name: HdrModeNight + value: 4 + description: | + Multiple frames will be combined to produce "night mode" images. It + is up to the implementation exactly which HDR channels it uses, and + the images will all be tagged accordingly with the correct HDR + channel information. + + - HdrChannel: + type: int32_t + description: | + This value is reported back to the application so that it can discover + whether this capture corresponds to the short or long exposure image (or + any other image used by the HDR procedure). An application can monitor + the HDR channel to discover when the differently exposed images have + arrived. + + This metadata is only available when an HDR mode has been enabled. + + \sa HdrMode + + enum: + - name: HdrChannelNone + value: 0 + description: | + This image does not correspond to any of the captures used to create + an HDR image. + - name: HdrChannelShort + value: 1 + description: | + This is a short exposure image. + - name: HdrChannelMedium + value: 2 + description: | + This is a medium exposure image. + - name: HdrChannelLong + value: 3 + description: | + This is a long exposure image. + + # ---------------------------------------------------------------------------- + # Draft controls section + + - AePrecaptureTrigger: + type: int32_t + draft: true + description: | + Control for AE metering trigger. Currently identical to + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER. + + Whether the camera device will trigger a precapture metering sequence + when it processes this request. 
+      enum:
+        - name: AePrecaptureTriggerIdle
+          value: 0
+          description: The trigger is idle.
+        - name: AePrecaptureTriggerStart
+          value: 1
+          description: The pre-capture AE metering is started by the camera.
+        - name: AePrecaptureTriggerCancel
+          value: 2
+          description: |
+            The camera will cancel any active or completed metering sequence.
+            The AE algorithm is reset to its initial state.
+
+  - NoiseReductionMode:
+      type: int32_t
+      draft: true
+      description: |
+        Control to select the noise reduction algorithm mode. Currently
+        identical to ANDROID_NOISE_REDUCTION_MODE.
+
+        Mode of operation for the noise reduction algorithm.
+      enum:
+        - name: NoiseReductionModeOff
+          value: 0
+          description: No noise reduction is applied.
+        - name: NoiseReductionModeFast
+          value: 1
+          description: |
+            Noise reduction is applied without reducing the frame rate.
+        - name: NoiseReductionModeHighQuality
+          value: 2
+          description: |
+            High quality noise reduction at the expense of frame rate.
+        - name: NoiseReductionModeMinimal
+          value: 3
+          description: |
+            Minimal noise reduction is applied without reducing the frame rate.
+        - name: NoiseReductionModeZSL
+          value: 4
+          description: |
+            Noise reduction is applied at different levels to different streams.
+
+  - ColorCorrectionAberrationMode:
+      type: int32_t
+      draft: true
+      description: |
+        Control to select the color correction aberration mode. Currently
+        identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE.
+
+        Mode of operation for the chromatic aberration correction algorithm.
+      enum:
+        - name: ColorCorrectionAberrationOff
+          value: 0
+          description: No aberration correction is applied.
+        - name: ColorCorrectionAberrationFast
+          value: 1
+          description: Aberration correction will not slow down the frame rate.
+        - name: ColorCorrectionAberrationHighQuality
+          value: 2
+          description: |
+            High quality aberration correction which might reduce the frame
+            rate.
+
+  - AeState:
+      type: int32_t
+      draft: true
+      description: |
+        Control to report the current AE algorithm state. Currently identical to
+        ANDROID_CONTROL_AE_STATE.
+
+        Current state of the AE algorithm.
+      enum:
+        - name: AeStateInactive
+          value: 0
+          description: The AE algorithm is inactive.
+        - name: AeStateSearching
+          value: 1
+          description: The AE algorithm has not converged yet.
+        - name: AeStateConverged
+          value: 2
+          description: The AE algorithm has converged.
+        - name: AeStateLocked
+          value: 3
+          description: The AE algorithm is locked.
+        - name: AeStateFlashRequired
+          value: 4
+          description: The AE algorithm would need a flash for good results.
+        - name: AeStatePrecapture
+          value: 5
+          description: |
+            The AE algorithm has started a pre-capture metering session.
+            \sa AePrecaptureTrigger
+
+  - AwbState:
+      type: int32_t
+      draft: true
+      description: |
+        Control to report the current AWB algorithm state. Currently identical
+        to ANDROID_CONTROL_AWB_STATE.
+
+        Current state of the AWB algorithm.
+      enum:
+        - name: AwbStateInactive
+          value: 0
+          description: The AWB algorithm is inactive.
+        - name: AwbStateSearching
+          value: 1
+          description: The AWB algorithm has not converged yet.
+        - name: AwbConverged
+          value: 2
+          description: The AWB algorithm has converged.
+        - name: AwbLocked
+          value: 3
+          description: The AWB algorithm is locked.
+
+  - SensorRollingShutterSkew:
+      type: int64_t
+      draft: true
+      description: |
+        Control to report the time between the start of exposure of the first
+        row and the start of exposure of the last row.
Currently identical to
+        ANDROID_SENSOR_ROLLING_SHUTTER_SKEW.
+
+  - LensShadingMapMode:
+      type: int32_t
+      draft: true
+      description: |
+        Control to report if the lens shading map is available. Currently
+        identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE.
+      enum:
+        - name: LensShadingMapModeOff
+          value: 0
+          description: No lens shading map mode is available.
+        - name: LensShadingMapModeOn
+          value: 1
+          description: The lens shading map mode is available.
+
+  - PipelineDepth:
+      type: int32_t
+      draft: true
+      description: |
+        Specifies the number of pipeline stages the frame went through from when
+        it was exposed to when the final completed result was available to the
+        framework. Always less than or equal to PipelineMaxDepth. Currently
+        identical to ANDROID_REQUEST_PIPELINE_DEPTH.
+
+        The typical value for this control is 3 as a frame is first exposed,
+        captured and then processed in a single pass through the ISP. Any
+        additional processing step performed after the ISP pass (for example
+        face detection, additional format conversions, etc.) counts as an
+        additional pipeline stage.
+
+  - MaxLatency:
+      type: int32_t
+      draft: true
+      description: |
+        The maximum number of frames that can occur after a request (different
+        from the previous) has been submitted, and before the result's state
+        becomes synchronized. A value of -1 indicates unknown latency, and 0
+        indicates per-frame control. Currently identical to
+        ANDROID_SYNC_MAX_LATENCY.
+
+  - TestPatternMode:
+      type: int32_t
+      draft: true
+      description: |
+        Control to select the test pattern mode. Currently identical to
+        ANDROID_SENSOR_TEST_PATTERN_MODE.
+      enum:
+        - name: TestPatternModeOff
+          value: 0
+          description: |
+            No test pattern mode is used. The camera device returns frames from
+            the image sensor.
+        - name: TestPatternModeSolidColor
+          value: 1
+          description: |
+            Each pixel in [R, G_even, G_odd, B] is replaced by its respective
+            color channel provided in test pattern data.
+            \todo Add control for test pattern data.
+        - name: TestPatternModeColorBars
+          value: 2
+          description: |
+            All pixel data is replaced with an 8-bar color pattern. The vertical
+            bars (left-to-right) are as follows: white, yellow, cyan, green,
+            magenta, red, blue and black. Each bar should take up 1/8 of the
+            sensor pixel array width. When this is not possible, the bar size
+            should be rounded down to the nearest integer and the pattern can
+            repeat on the right side. Each bar's height must always take up the
+            full sensor pixel array height.
+        - name: TestPatternModeColorBarsFadeToGray
+          value: 3
+          description: |
+            The test pattern is similar to TestPatternModeColorBars,
+            except that each bar should start at its specified color at the top
+            and fade to gray at the bottom. Furthermore, each bar is further
+            subdivided into a left and right half. The left half should have a
+            smooth gradient, and the right half should have a quantized
+            gradient. In particular, the right half should consist of blocks
+            of the same color for 1/16th of the active sensor pixel array
+            width. The least significant bits in the quantized gradient should
+            be copied from the most significant bits of the smooth gradient.
+            The height of each bar should always be a multiple of 128. When
+            this is not the case, the pattern should repeat at the bottom of
+            the image.
+        - name: TestPatternModePn9
+          value: 4
+          description: |
+            All pixel data is replaced by a pseudo-random sequence generated
+            from a PN9 512-bit sequence (typically implemented in hardware with
+            a linear feedback shift register).
The generator should be reset at + the beginning of each frame, and thus each subsequent raw frame with + this test pattern should be exactly the same as the last. + - name: TestPatternModeCustom1 + value: 256 + description: | + The first custom test pattern. All custom patterns that are + available only on this camera device are at least this numeric + value. All of the custom test patterns will be static (that is the + raw image must not vary from frame to frame). + +... diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build index 05ee38da..6d9902e6 100644 --- a/src/libcamera/meson.build +++ b/src/libcamera/meson.build @@ -127,12 +127,23 @@ endif control_sources = [] -foreach source, mode : control_source_files - input_files = files(source +'.yaml') - template_file = files(source + '.cpp.in') - control_sources += custom_target(source + '_cpp', +controls_mode_files = { + 'controls' : controls_files, + 'properties' : properties_files, +} + +foreach mode, input_files : controls_mode_files + input_files = files(input_files) + + if mode == 'controls' + template_file = files('control_ids.cpp.in') + else + template_file = files('property_ids.cpp.in') + endif + + control_sources += custom_target(mode + '_cpp', input : input_files, - output : source + '.cpp', + output : mode + '_ids.cpp', command : [gen_controls, '-o', '@OUTPUT@', '--mode', mode, '-t', template_file, '@INPUT@']) endforeach diff --git a/src/libcamera/property_ids.yaml b/src/libcamera/property_ids.yaml deleted file mode 100644 index 45f3609b..00000000 --- a/src/libcamera/property_ids.yaml +++ /dev/null @@ -1,737 +0,0 @@ -# SPDX-License-Identifier: LGPL-2.1-or-later -# -# Copyright (C) 2019, Google Inc. -# -%YAML 1.1 ---- -vendor: libcamera -controls: - - Location: - type: int32_t - description: | - Camera mounting location - enum: - - name: CameraLocationFront - value: 0 - description: | - The camera is mounted on the front side of the device, facing the - user - - name: CameraLocationBack - value: 1 - description: | - The camera is mounted on the back side of the device, facing away - from the user - - name: CameraLocationExternal - value: 2 - description: | - The camera is attached to the device in a way that allows it to - be moved freely - - - Rotation: - type: int32_t - description: | - The camera physical mounting rotation. It is expressed as the angular - difference in degrees between two reference systems, one relative to the - camera module, and one defined on the external world scene to be - captured when projected on the image sensor pixel array. - - A camera sensor has a 2-dimensional reference system 'Rc' defined by - its pixel array read-out order. The origin is set to the first pixel - being read out, the X-axis points along the column read-out direction - towards the last columns, and the Y-axis along the row read-out - direction towards the last row. - - A typical example for a sensor with a 2592x1944 pixel array matrix - observed from the front is - - 2591 X-axis 0 - <------------------------+ 0 - .......... ... ..........! - .......... ... ..........! Y-axis - ... ! - .......... ... ..........! - .......... ... ..........! 1943 - V - - - The external world scene reference system 'Rs' is a 2-dimensional - reference system on the focal plane of the camera module. The origin is - placed on the top-left corner of the visible scene, the X-axis points - towards the right, and the Y-axis points towards the bottom of the - scene. 
The top, bottom, left and right directions are intentionally not - defined and depend on the environment in which the camera is used. - - A typical example of a (very common) picture of a shark swimming from - left to right, as seen from the camera, is - - 0 X-axis - 0 +-------------------------------------> - ! - ! - ! - ! |\____)\___ - ! ) _____ __`< - ! |/ )/ - ! - ! - ! - V - Y-axis - - With the reference system 'Rs' placed on the camera focal plane. - - ¸.·˙! - ¸.·˙ ! - _ ¸.·˙ ! - +-/ \-+¸.·˙ ! - | (o) | ! Camera focal plane - +-----+˙·.¸ ! - ˙·.¸ ! - ˙·.¸ ! - ˙·.¸! - - When projected on the sensor's pixel array, the image and the associated - reference system 'Rs' are typically (but not always) inverted, due to - the camera module's lens optical inversion effect. - - Assuming the above represented scene of the swimming shark, the lens - inversion projects the scene and its reference system onto the sensor - pixel array, seen from the front of the camera sensor, as follow - - Y-axis - ^ - ! - ! - ! - ! |\_____)\__ - ! ) ____ ___.< - ! |/ )/ - ! - ! - ! - 0 +-------------------------------------> - 0 X-axis - - Note the shark being upside-down. - - The resulting projected reference system is named 'Rp'. - - The camera rotation property is then defined as the angular difference - in the counter-clockwise direction between the camera reference system - 'Rc' and the projected scene reference system 'Rp'. It is expressed in - degrees as a number in the range [0, 360[. - - Examples - - 0 degrees camera rotation - - - Y-Rp - ^ - Y-Rc ! - ^ ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! 0 +-------------------------------------> - ! 0 X-Rp - 0 +-------------------------------------> - 0 X-Rc - - - X-Rc 0 - <------------------------------------+ 0 - X-Rp 0 ! - <------------------------------------+ 0 ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! V - ! Y-Rc - V - Y-Rp - - 90 degrees camera rotation - - 0 Y-Rc - 0 +--------------------> - ! Y-Rp - ! ^ - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! 0 +-------------------------------------> - ! 0 X-Rp - ! - ! - ! - ! - V - X-Rc - - 180 degrees camera rotation - - 0 - <------------------------------------+ 0 - X-Rc ! - Y-Rp ! - ^ ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! V - ! Y-Rc - 0 +-------------------------------------> - 0 X-Rp - - 270 degrees camera rotation - - 0 Y-Rc - 0 +--------------------> - ! 0 - ! <-----------------------------------+ 0 - ! X-Rp ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! ! - ! V - ! Y-Rp - ! - ! - ! - ! - V - X-Rc - - - Example one - Webcam - - A camera module installed on the user facing part of a laptop screen - casing used for video calls. The captured images are meant to be - displayed in landscape mode (width > height) on the laptop screen. - - The camera is typically mounted upside-down to compensate the lens - optical inversion effect. - - Y-Rp - Y-Rc ^ - ^ ! - ! ! - ! ! |\_____)\__ - ! ! ) ____ ___.< - ! ! |/ )/ - ! ! - ! ! - ! ! - ! 0 +-------------------------------------> - ! 0 X-Rp - 0 +-------------------------------------> - 0 X-Rc - - The two reference systems are aligned, the resulting camera rotation is - 0 degrees, no rotation correction needs to be applied to the resulting - image once captured to memory buffers to correctly display it to users. - - +--------------------------------------+ - ! ! - ! ! - ! ! - ! |\____)\___ ! - ! ) _____ __`< ! - ! |/ )/ ! - ! ! - ! ! - ! ! 
- +--------------------------------------+ - - If the camera sensor is not mounted upside-down to compensate for the - lens optical inversion, the two reference systems will not be aligned, - with 'Rp' being rotated 180 degrees relatively to 'Rc'. - - - X-Rc 0 - <------------------------------------+ 0 - ! - Y-Rp ! - ^ ! - ! ! - ! |\_____)\__ ! - ! ) ____ ___.< ! - ! |/ )/ ! - ! ! - ! ! - ! V - ! Y-Rc - 0 +-------------------------------------> - 0 X-Rp - - The image once captured to memory will then be rotated by 180 degrees - - +--------------------------------------+ - ! ! - ! ! - ! ! - ! __/(_____/| ! - ! >.___ ____ ( ! - ! \( \| ! - ! ! - ! ! - ! ! - +--------------------------------------+ - - A software rotation correction of 180 degrees should be applied to - correctly display the image. - - +--------------------------------------+ - ! ! - ! ! - ! ! - ! |\____)\___ ! - ! ) _____ __`< ! - ! |/ )/ ! - ! ! - ! ! - ! ! - +--------------------------------------+ - - Example two - Phone camera - - A camera installed on the back side of a mobile device facing away from - the user. The captured images are meant to be displayed in portrait mode - (height > width) to match the device screen orientation and the device - usage orientation used when taking the picture. - - The camera sensor is typically mounted with its pixel array longer side - aligned to the device longer side, upside-down mounted to compensate for - the lens optical inversion effect. - - 0 Y-Rc - 0 +--------------------> - ! Y-Rp - ! ^ - ! ! - ! ! - ! ! - ! ! |\_____)\__ - ! ! ) ____ ___.< - ! ! |/ )/ - ! ! - ! ! - ! ! - ! 0 +-------------------------------------> - ! 0 X-Rp - ! - ! - ! - ! - V - X-Rc - - The two reference systems are not aligned and the 'Rp' reference - system is rotated by 90 degrees in the counter-clockwise direction - relatively to the 'Rc' reference system. - - The image once captured to memory will be rotated. - - +-------------------------------------+ - | _ _ | - | \ / | - | | | | - | | | | - | | > | - | < | | - | | | | - | . | - | V | - +-------------------------------------+ - - A correction of 90 degrees in counter-clockwise direction has to be - applied to correctly display the image in portrait mode on the device - screen. - - +--------------------+ - | | - | | - | | - | | - | | - | | - | |\____)\___ | - | ) _____ __`< | - | |/ )/ | - | | - | | - | | - | | - | | - +--------------------+ - - - Model: - type: string - description: | - The model name shall to the extent possible describe the sensor. For - most devices this is the model name of the sensor. While for some - devices the sensor model is unavailable as the sensor or the entire - camera is part of a larger unit and exposed as a black-box to the - system. In such cases the model name of the smallest device that - contains the camera sensor shall be used. - - The model name is not meant to be a camera name displayed to the - end-user, but may be combined with other camera information to create a - camera name. - - The model name is not guaranteed to be unique in the system nor is - it guaranteed to be stable or have any other properties required to make - it a good candidate to be used as a permanent identifier of a camera. - - The model name shall describe the camera in a human readable format and - shall be encoded in ASCII. - - Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'. - - - UnitCellSize: - type: Size - description: | - The pixel unit cell physical size, in nanometers. 
- - The UnitCellSize properties defines the horizontal and vertical sizes of - a single pixel unit, including its active and non-active parts. In - other words, it expresses the horizontal and vertical distance between - the top-left corners of adjacent pixels. - - The property can be used to calculate the physical size of the sensor's - pixel array area and for calibration purposes. - - - PixelArraySize: - type: Size - description: | - The camera sensor pixel array readable area vertical and horizontal - sizes, in pixels. - - The PixelArraySize property defines the size in pixel units of the - readable part of full pixel array matrix, including optical black - pixels used for calibration, pixels which are not considered valid for - capture and active pixels containing valid image data. - - The property describes the maximum size of the raw data captured by the - camera, which might not correspond to the physical size of the sensor - pixel array matrix, as some portions of the physical pixel array matrix - are not accessible and cannot be transmitted out. - - For example, let's consider a pixel array matrix assembled as follows - - +--------------------------------------------------+ - |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| - |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - ... ... ... ... ... - - ... ... ... ... ... - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| - |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| - |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| - +--------------------------------------------------+ - - starting with two lines of non-readable pixels (x), followed by N lines - of readable data (D) surrounded by two columns of non-readable pixels on - each side, and ending with two more lines of non-readable pixels. Only - the readable portion is transmitted to the receiving side, defining the - sizes of the largest possible buffer of raw data that can be presented - to applications. - - PixelArraySize.width - /----------------------------------------------/ - +----------------------------------------------+ / - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height - ... ... ... ... ... - ... ... ... ... ... - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | - +----------------------------------------------+ / - - This defines a rectangle whose top-left corner is placed in position (0, - 0) and whose vertical and horizontal sizes are defined by this property. - All other rectangles that describe portions of the pixel array, such as - the optical black pixels rectangles and active pixel areas, are defined - relatively to this rectangle. - - All the coordinates are expressed relative to the default sensor readout - direction, without any transformation (such as horizontal and vertical - flipping) applied. 
When mapping them to the raw pixel buffer, - applications shall take any configured transformation into account. - - \todo Rename this property to Size once we will have property - categories (i.e. Properties::PixelArray::Size) - - - PixelArrayOpticalBlackRectangles: - type: Rectangle - size: [n] - description: | - The pixel array region(s) which contain optical black pixels - considered valid for calibration purposes. - - This property describes the position and size of optical black pixel - regions in the raw data buffer as stored in memory, which might differ - from their actual physical location in the pixel array matrix. - - It is important to note, in fact, that camera sensors might - automatically reorder or skip portions of their pixels array matrix when - transmitting data to the receiver. For instance, a sensor may merge the - top and bottom optical black rectangles into a single rectangle, - transmitted at the beginning of the frame. - - The pixel array contains several areas with different purposes, - interleaved by lines and columns which are said not to be valid for - capturing purposes. Invalid lines and columns are defined as invalid as - they could be positioned too close to the chip margins or to the optical - black shielding placed on top of optical black pixels. - - PixelArraySize.width - /----------------------------------------------/ - x1 x2 - +--o---------------------------------------o---+ / - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - y1 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | - |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | - |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | - y2 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - y3 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | - |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | PixelArraySize.height - |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | - ... ... ... ... ... - ... ... ... ... ... - y4 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | - +----------------------------------------------+ / - - The readable pixel array matrix is composed by - 2 invalid lines (I) - 4 lines of valid optical black pixels (O) - 2 invalid lines (I) - n lines of valid pixel data (P) - 2 invalid lines (I) - - And the position of the optical black pixel rectangles is defined by - - PixelArrayOpticalBlackRectangles = { - { x1, y1, x2 - x1 + 1, y2 - y1 + 1 }, - { x1, y3, 2, y4 - y3 + 1 }, - { x2, y3, 2, y4 - y3 + 1 }, - }; - - If the camera, when capturing the full pixel array matrix, automatically - skips the invalid lines and columns, producing the following data - buffer, when captured to memory - - PixelArraySize.width - /----------------------------------------------/ - x1 - +--------------------------------------------o-+ / - |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | - |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | - |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | - |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | - y1 oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | - |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | - |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | PixelArraySize.height - ... ... ... ... ... | - ... ... ... ... ... 
| - |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | - |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | - +----------------------------------------------+ / - - then the invalid lines and columns should not be reported as part of the - PixelArraySize property in first place. - - In this case, the position of the black pixel rectangles will be - - PixelArrayOpticalBlackRectangles = { - { 0, 0, y1 + 1, PixelArraySize[0] }, - { 0, y1, 2, PixelArraySize[1] - y1 + 1 }, - { x1, y1, 2, PixelArraySize[1] - y1 + 1 }, - }; - - \todo Rename this property to Size once we will have property - categories (i.e. Properties::PixelArray::OpticalBlackRectangles) - - - PixelArrayActiveAreas: - type: Rectangle - size: [n] - description: | - The PixelArrayActiveAreas property defines the (possibly multiple and - overlapping) portions of the camera sensor readable pixel matrix - which are considered valid for image acquisition purposes. - - This property describes an arbitrary number of overlapping rectangles, - with each rectangle representing the maximum image size that the camera - sensor can produce for a particular aspect ratio. They are defined - relatively to the PixelArraySize rectangle. - - When multiple rectangles are reported, they shall be ordered from the - tallest to the shortest. - - Example 1 - A camera sensor which only produces images in the 4:3 image resolution - will report a single PixelArrayActiveAreas rectangle, from which all - other image formats are obtained by either cropping the field-of-view - and/or applying pixel sub-sampling techniques such as pixel skipping or - binning. - - PixelArraySize.width - /----------------/ - x1 x2 - (0,0)-> +-o------------o-+ / - y1 o +------------+ | | - | |////////////| | | - | |////////////| | | PixelArraySize.height - | |////////////| | | - y2 o +------------+ | | - +----------------+ / - - The property reports a single rectangle - - PixelArrayActiveAreas = (x1, y1, x2 - x1 + 1, y2 - y1 + 1) - - Example 2 - A camera sensor which can produce images in different native - resolutions will report several overlapping rectangles, one for each - natively supported resolution. - - PixelArraySize.width - /------------------/ - x1 x2 x3 x4 - (0,0)-> +o---o------o---o+ / - y1 o +------+ | | - | |//////| | | - y2 o+---+------+---+| | - ||///|//////|///|| | PixelArraySize.height - y3 o+---+------+---+| | - | |//////| | | - y4 o +------+ | | - +----+------+----+ / - - The property reports two rectangles - - PixelArrayActiveAreas = ((x2, y1, x3 - x2 + 1, y4 - y1 + 1), - (x1, y2, x4 - x1 + 1, y3 - y2 + 1)) - - The first rectangle describes the maximum field-of-view of all image - formats in the 4:3 resolutions, while the second one describes the - maximum field of view for all image formats in the 16:9 resolutions. - - Multiple rectangles shall only be reported when the sensor can't capture - the pixels in the corner regions. If all the pixels in the (x1,y1) - - (x4,y4) area can be captured, the PixelArrayActiveAreas property shall - contains the single rectangle (x1,y1) - (x4,y4). - - \todo Rename this property to ActiveAreas once we will have property - categories (i.e. Properties::PixelArray::ActiveAreas) - - - ScalerCropMaximum: - type: Rectangle - description: | - The maximum valid rectangle for the controls::ScalerCrop control. This - reflects the minimum mandatory cropping applied in the camera sensor and - the rest of the pipeline. Just as the ScalerCrop control, it defines a - rectangle taken from the sensor's active pixel array. 
- - This property is valid only after the camera has been successfully - configured and its value may change whenever a new configuration is - applied. - - \todo Turn this property into a "maximum control value" for the - ScalerCrop control once "dynamic" controls have been implemented. - - - SensorSensitivity: - type: float - description: | - The relative sensitivity of the chosen sensor mode. - - Some sensors have readout modes with different sensitivities. For example, - a binned camera mode might, with the same exposure and gains, produce - twice the signal level of the full resolution readout. This would be - signalled by the binned mode, when it is chosen, indicating a value here - that is twice that of the full resolution mode. This value will be valid - after the configure method has returned successfully. - - - SystemDevices: - type: int64_t - size: [n] - description: | - A list of integer values of type dev_t denoting the major and minor - device numbers of the underlying devices used in the operation of this - camera. - - Different cameras may report identical devices. - - # ---------------------------------------------------------------------------- - # Draft properties section - - - ColorFilterArrangement: - type: int32_t - draft: true - description: | - The arrangement of color filters on sensor; represents the colors in the - top-left 2x2 section of the sensor, in reading order. Currently - identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT. - enum: - - name: RGGB - value: 0 - description: RGGB Bayer pattern - - name: GRBG - value: 1 - description: GRBG Bayer pattern - - name: GBRG - value: 2 - description: GBRG Bayer pattern - - name: BGGR - value: 3 - description: BGGR Bayer pattern - - name: RGB - value: 4 - description: | - Sensor is not Bayer; output has 3 16-bit values for each pixel, - instead of just 1 16-bit value per pixel. - - name: MONO - value: 5 - description: | - Sensor is not Bayer; output consists of a single colour channel. - -... diff --git a/src/libcamera/property_ids_core.yaml b/src/libcamera/property_ids_core.yaml new file mode 100644 index 00000000..45f3609b --- /dev/null +++ b/src/libcamera/property_ids_core.yaml @@ -0,0 +1,737 @@ +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Copyright (C) 2019, Google Inc. +# +%YAML 1.1 +--- +vendor: libcamera +controls: + - Location: + type: int32_t + description: | + Camera mounting location + enum: + - name: CameraLocationFront + value: 0 + description: | + The camera is mounted on the front side of the device, facing the + user + - name: CameraLocationBack + value: 1 + description: | + The camera is mounted on the back side of the device, facing away + from the user + - name: CameraLocationExternal + value: 2 + description: | + The camera is attached to the device in a way that allows it to + be moved freely + + - Rotation: + type: int32_t + description: | + The camera physical mounting rotation. It is expressed as the angular + difference in degrees between two reference systems, one relative to the + camera module, and one defined on the external world scene to be + captured when projected on the image sensor pixel array. + + A camera sensor has a 2-dimensional reference system 'Rc' defined by + its pixel array read-out order. The origin is set to the first pixel + being read out, the X-axis points along the column read-out direction + towards the last columns, and the Y-axis along the row read-out + direction towards the last row. 
+ + A typical example for a sensor with a 2592x1944 pixel array matrix + observed from the front is + + 2591 X-axis 0 + <------------------------+ 0 + .......... ... ..........! + .......... ... ..........! Y-axis + ... ! + .......... ... ..........! + .......... ... ..........! 1943 + V + + + The external world scene reference system 'Rs' is a 2-dimensional + reference system on the focal plane of the camera module. The origin is + placed on the top-left corner of the visible scene, the X-axis points + towards the right, and the Y-axis points towards the bottom of the + scene. The top, bottom, left and right directions are intentionally not + defined and depend on the environment in which the camera is used. + + A typical example of a (very common) picture of a shark swimming from + left to right, as seen from the camera, is + + 0 X-axis + 0 +-------------------------------------> + ! + ! + ! + ! |\____)\___ + ! ) _____ __`< + ! |/ )/ + ! + ! + ! + V + Y-axis + + With the reference system 'Rs' placed on the camera focal plane. + + ¸.·˙! + ¸.·˙ ! + _ ¸.·˙ ! + +-/ \-+¸.·˙ ! + | (o) | ! Camera focal plane + +-----+˙·.¸ ! + ˙·.¸ ! + ˙·.¸ ! + ˙·.¸! + + When projected on the sensor's pixel array, the image and the associated + reference system 'Rs' are typically (but not always) inverted, due to + the camera module's lens optical inversion effect. + + Assuming the above represented scene of the swimming shark, the lens + inversion projects the scene and its reference system onto the sensor + pixel array, seen from the front of the camera sensor, as follow + + Y-axis + ^ + ! + ! + ! + ! |\_____)\__ + ! ) ____ ___.< + ! |/ )/ + ! + ! + ! + 0 +-------------------------------------> + 0 X-axis + + Note the shark being upside-down. + + The resulting projected reference system is named 'Rp'. + + The camera rotation property is then defined as the angular difference + in the counter-clockwise direction between the camera reference system + 'Rc' and the projected scene reference system 'Rp'. It is expressed in + degrees as a number in the range [0, 360[. + + Examples + + 0 degrees camera rotation + + + Y-Rp + ^ + Y-Rc ! + ^ ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! 0 +-------------------------------------> + ! 0 X-Rp + 0 +-------------------------------------> + 0 X-Rc + + + X-Rc 0 + <------------------------------------+ 0 + X-Rp 0 ! + <------------------------------------+ 0 ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! V + ! Y-Rc + V + Y-Rp + + 90 degrees camera rotation + + 0 Y-Rc + 0 +--------------------> + ! Y-Rp + ! ^ + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! 0 +-------------------------------------> + ! 0 X-Rp + ! + ! + ! + ! + V + X-Rc + + 180 degrees camera rotation + + 0 + <------------------------------------+ 0 + X-Rc ! + Y-Rp ! + ^ ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! V + ! Y-Rc + 0 +-------------------------------------> + 0 X-Rp + + 270 degrees camera rotation + + 0 Y-Rc + 0 +--------------------> + ! 0 + ! <-----------------------------------+ 0 + ! X-Rp ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! ! + ! V + ! Y-Rp + ! + ! + ! + ! + V + X-Rc + + + Example one - Webcam + + A camera module installed on the user facing part of a laptop screen + casing used for video calls. The captured images are meant to be + displayed in landscape mode (width > height) on the laptop screen. + + The camera is typically mounted upside-down to compensate the lens + optical inversion effect. + + Y-Rp + Y-Rc ^ + ^ ! + ! ! + ! ! |\_____)\__ + ! ! ) ____ ___.< + ! ! |/ )/ + ! ! 
+ ! ! + ! ! + ! 0 +-------------------------------------> + ! 0 X-Rp + 0 +-------------------------------------> + 0 X-Rc + + The two reference systems are aligned, the resulting camera rotation is + 0 degrees, no rotation correction needs to be applied to the resulting + image once captured to memory buffers to correctly display it to users. + + +--------------------------------------+ + ! ! + ! ! + ! ! + ! |\____)\___ ! + ! ) _____ __`< ! + ! |/ )/ ! + ! ! + ! ! + ! ! + +--------------------------------------+ + + If the camera sensor is not mounted upside-down to compensate for the + lens optical inversion, the two reference systems will not be aligned, + with 'Rp' being rotated 180 degrees relatively to 'Rc'. + + + X-Rc 0 + <------------------------------------+ 0 + ! + Y-Rp ! + ^ ! + ! ! + ! |\_____)\__ ! + ! ) ____ ___.< ! + ! |/ )/ ! + ! ! + ! ! + ! V + ! Y-Rc + 0 +-------------------------------------> + 0 X-Rp + + The image once captured to memory will then be rotated by 180 degrees + + +--------------------------------------+ + ! ! + ! ! + ! ! + ! __/(_____/| ! + ! >.___ ____ ( ! + ! \( \| ! + ! ! + ! ! + ! ! + +--------------------------------------+ + + A software rotation correction of 180 degrees should be applied to + correctly display the image. + + +--------------------------------------+ + ! ! + ! ! + ! ! + ! |\____)\___ ! + ! ) _____ __`< ! + ! |/ )/ ! + ! ! + ! ! + ! ! + +--------------------------------------+ + + Example two - Phone camera + + A camera installed on the back side of a mobile device facing away from + the user. The captured images are meant to be displayed in portrait mode + (height > width) to match the device screen orientation and the device + usage orientation used when taking the picture. + + The camera sensor is typically mounted with its pixel array longer side + aligned to the device longer side, upside-down mounted to compensate for + the lens optical inversion effect. + + 0 Y-Rc + 0 +--------------------> + ! Y-Rp + ! ^ + ! ! + ! ! + ! ! + ! ! |\_____)\__ + ! ! ) ____ ___.< + ! ! |/ )/ + ! ! + ! ! + ! ! + ! 0 +-------------------------------------> + ! 0 X-Rp + ! + ! + ! + ! + V + X-Rc + + The two reference systems are not aligned and the 'Rp' reference + system is rotated by 90 degrees in the counter-clockwise direction + relatively to the 'Rc' reference system. + + The image once captured to memory will be rotated. + + +-------------------------------------+ + | _ _ | + | \ / | + | | | | + | | | | + | | > | + | < | | + | | | | + | . | + | V | + +-------------------------------------+ + + A correction of 90 degrees in counter-clockwise direction has to be + applied to correctly display the image in portrait mode on the device + screen. + + +--------------------+ + | | + | | + | | + | | + | | + | | + | |\____)\___ | + | ) _____ __`< | + | |/ )/ | + | | + | | + | | + | | + | | + +--------------------+ + + - Model: + type: string + description: | + The model name shall to the extent possible describe the sensor. For + most devices this is the model name of the sensor. While for some + devices the sensor model is unavailable as the sensor or the entire + camera is part of a larger unit and exposed as a black-box to the + system. In such cases the model name of the smallest device that + contains the camera sensor shall be used. + + The model name is not meant to be a camera name displayed to the + end-user, but may be combined with other camera information to create a + camera name. 
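+
+        As a non-normative illustration, an application could read this
+        property through the C++ API, assuming a valid Camera instance named
+        "camera" (a placeholder name); in recent libcamera versions the call
+        yields a std::optional<std::string>:
+
+          const auto model = camera->properties().get(properties::Model);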
+ + The model name is not guaranteed to be unique in the system nor is + it guaranteed to be stable or have any other properties required to make + it a good candidate to be used as a permanent identifier of a camera. + + The model name shall describe the camera in a human readable format and + shall be encoded in ASCII. + + Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'. + + - UnitCellSize: + type: Size + description: | + The pixel unit cell physical size, in nanometers. + + The UnitCellSize properties defines the horizontal and vertical sizes of + a single pixel unit, including its active and non-active parts. In + other words, it expresses the horizontal and vertical distance between + the top-left corners of adjacent pixels. + + The property can be used to calculate the physical size of the sensor's + pixel array area and for calibration purposes. + + - PixelArraySize: + type: Size + description: | + The camera sensor pixel array readable area vertical and horizontal + sizes, in pixels. + + The PixelArraySize property defines the size in pixel units of the + readable part of full pixel array matrix, including optical black + pixels used for calibration, pixels which are not considered valid for + capture and active pixels containing valid image data. + + The property describes the maximum size of the raw data captured by the + camera, which might not correspond to the physical size of the sensor + pixel array matrix, as some portions of the physical pixel array matrix + are not accessible and cannot be transmitted out. + + For example, let's consider a pixel array matrix assembled as follows + + +--------------------------------------------------+ + |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| + |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + ... ... ... ... ... + + ... ... ... ... ... + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| + |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| + |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| + +--------------------------------------------------+ + + starting with two lines of non-readable pixels (x), followed by N lines + of readable data (D) surrounded by two columns of non-readable pixels on + each side, and ending with two more lines of non-readable pixels. Only + the readable portion is transmitted to the receiving side, defining the + sizes of the largest possible buffer of raw data that can be presented + to applications. + + PixelArraySize.width + /----------------------------------------------/ + +----------------------------------------------+ / + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | + |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height + ... ... ... ... ... + ... ... ... ... ... 
+               |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+               |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+               +----------------------------------------------+ /
+
+        This defines a rectangle whose top-left corner is placed in position
+        (0, 0) and whose vertical and horizontal sizes are defined by this
+        property. All other rectangles that describe portions of the pixel
+        array, such as the optical black pixels rectangles and active pixel
+        areas, are defined relative to this rectangle.
+
+        All the coordinates are expressed relative to the default sensor readout
+        direction, without any transformation (such as horizontal and vertical
+        flipping) applied. When mapping them to the raw pixel buffer,
+        applications shall take any configured transformation into account.
+
+        \todo Rename this property to Size once we have property
+        categories (i.e. Properties::PixelArray::Size)
+
+  - PixelArrayOpticalBlackRectangles:
+      type: Rectangle
+      size: [n]
+      description: |
+        The pixel array region(s) which contain optical black pixels
+        considered valid for calibration purposes.
+
+        This property describes the position and size of optical black pixel
+        regions in the raw data buffer as stored in memory, which might differ
+        from their actual physical location in the pixel array matrix.
+
+        It is important to note that camera sensors might automatically
+        reorder or skip portions of their pixel array matrix when transmitting
+        data to the receiver. For instance, a sensor may merge the
+        top and bottom optical black rectangles into a single rectangle,
+        transmitted at the beginning of the frame.
+
+        The pixel array contains several areas with different purposes,
+        interleaved with lines and columns which are not valid for capture
+        purposes. Lines and columns are defined as invalid when they are
+        positioned too close to the chip margins or to the optical black
+        shielding placed on top of optical black pixels.
+
+                        PixelArraySize.width
+               /----------------------------------------------/
+                  x1                                        x2
+            +--o---------------------------------------o---+   /
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+     y1     oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII|   |
+            |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII|   |
+            |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII|   |
+     y2     oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII|   |
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+     y3     |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII|   |
+            |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII|   | PixelArraySize.height
+            |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII|   |
+            ...          ...           ...      ...    ...
+            ...          ...           ...      ...    ...
+     y4     |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII|   |
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+            |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII|   |
+            +----------------------------------------------+   /
+
+        The readable pixel array matrix is composed of
+        2 invalid lines (I)
+        4 lines of valid optical black pixels (O)
+        2 invalid lines (I)
+        n lines of valid pixel data (P)
+        2 invalid lines (I)
+
+        And the position of the optical black pixel rectangles is defined by
+
+            PixelArrayOpticalBlackRectangles = {
+               { x1, y1, x2 - x1 + 1, y2 - y1 + 1 },
+               { x1, y3, 2, y4 - y3 + 1 },
+               { x2, y3, 2, y4 - y3 + 1 },
+            };
+
+        If the camera automatically skips the invalid lines and columns when
+        capturing the full pixel array matrix, producing the following data
+        buffer when captured to memory
+
+                        PixelArraySize.width
+               /----------------------------------------------/
+                                                            x1
+            +--------------------------------------------o-+   /
+            |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO|   |
+            |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO|   |
+            |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO|   |
+            |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO|   |
+     y1     oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO|   |
+            |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO|   |
+            |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO|   | PixelArraySize.height
+            ...          ...           ...      ...    ...     |
+            ...          ...           ...      ...    ...     |
+            |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO|   |
+            |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO|   |
+            +----------------------------------------------+   /
+
+        then the invalid lines and columns should not be reported as part of
+        the PixelArraySize property in the first place.
+
+        In this case, with y1 denoting the first row of valid pixel data and
+        x1 the first column of the right-hand optical black band, the position
+        of the optical black pixel rectangles will be
+
+            PixelArrayOpticalBlackRectangles = {
+               { 0, 0, PixelArraySize[0], y1 },
+               { 0, y1, 2, PixelArraySize[1] - y1 },
+               { x1, y1, 2, PixelArraySize[1] - y1 },
+            };
+
+        \todo Rename this property to OpticalBlackRectangles once we have
+        property categories (i.e. Properties::PixelArray::OpticalBlackRectangles)
+
+  - PixelArrayActiveAreas:
+      type: Rectangle
+      size: [n]
+      description: |
+        The PixelArrayActiveAreas property defines the (possibly multiple and
+        overlapping) portions of the camera sensor readable pixel matrix
+        which are considered valid for image acquisition purposes.
+
+        This property describes an arbitrary number of overlapping rectangles,
+        with each rectangle representing the maximum image size that the camera
+        sensor can produce for a particular aspect ratio. They are defined
+        relative to the PixelArraySize rectangle.
+
+        When multiple rectangles are reported, they shall be ordered from the
+        tallest to the shortest.
+
+        Example 1
+        A camera sensor which only produces images in the 4:3 image resolution
+        will report a single PixelArrayActiveAreas rectangle, from which all
+        other image formats are obtained by cropping the field-of-view and/or
+        applying pixel sub-sampling techniques such as pixel skipping or
+        binning.
+
+                     PixelArraySize.width
+                      /----------------/
+                        x1          x2
+        (0,0)-> +-o------------o-+  /
+            y1  o +------------+ |  |
+                | |////////////| |  |
+                | |////////////| |  | PixelArraySize.height
+                | |////////////| |  |
+            y2  o +------------+ |  |
+                +----------------+  /
+
+        The property reports a single rectangle
+
+                 PixelArrayActiveAreas = (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
+
+        Example 2
+        A camera sensor which can produce images in different native
+        resolutions will report several overlapping rectangles, one for each
+        natively supported resolution.
+
+                        PixelArraySize.width
+                       /------------------/
+                         x1  x2    x3  x4
+            (0,0)-> +o---o------o---o+  /
+                y1  o    +------+    |  |
+                    |    |//////|    |  |
+                y2  o+---+------+---+|  |
+                    ||///|//////|///||  | PixelArraySize.height
+                y3  o+---+------+---+|  |
+                    |    |//////|    |  |
+                y4  o    +------+    |  |
+                    +----+------+----+  /
+
+        The property reports two rectangles
+
+        PixelArrayActiveAreas = ((x2, y1, x3 - x2 + 1, y4 - y1 + 1),
+                                 (x1, y2, x4 - x1 + 1, y3 - y2 + 1))
+
+        The first rectangle describes the maximum field-of-view of all image
+        formats in the 4:3 resolutions, while the second one describes the
+        maximum field-of-view for all image formats in the 16:9 resolutions.
+
+        Multiple rectangles shall only be reported when the sensor can't capture
+        the pixels in the corner regions. If all the pixels in the (x1,y1) -
+        (x4,y4) area can be captured, the PixelArrayActiveAreas property shall
+        contain the single rectangle (x1,y1) - (x4,y4).
+
+        \todo Rename this property to ActiveAreas once we have property
+        categories (i.e. Properties::PixelArray::ActiveAreas)
+
+  - ScalerCropMaximum:
+      type: Rectangle
+      description: |
+        The maximum valid rectangle for the controls::ScalerCrop control. This
+        reflects the minimum mandatory cropping applied in the camera sensor and
+        the rest of the pipeline. Like the ScalerCrop control, it defines a
+        rectangle taken from the sensor's active pixel array.
+
+        This property is valid only after the camera has been successfully
+        configured and its value may change whenever a new configuration is
+        applied.
+
+        \todo Turn this property into a "maximum control value" for the
+        ScalerCrop control once "dynamic" controls have been implemented.
+
+  - SensorSensitivity:
+      type: float
+      description: |
+        The relative sensitivity of the chosen sensor mode.
+
+        Some sensors have readout modes with different sensitivities. For
+        example, a binned camera mode might, with the same exposure and gains,
+        produce twice the signal level of the full resolution readout. This
+        would be signalled by the binned mode, when it is chosen, indicating a
+        value here that is twice that of the full resolution mode. This value
+        will be valid after the configure method has returned successfully.
+
+  - SystemDevices:
+      type: int64_t
+      size: [n]
+      description: |
+        A list of integer values of type dev_t denoting the major and minor
+        device numbers of the underlying devices used in the operation of this
+        camera.
+
+        Different cameras may report identical devices.
+
+  # ----------------------------------------------------------------------------
+  # Draft properties section
+
+  - ColorFilterArrangement:
+      type: int32_t
+      draft: true
+      description: |
+        The arrangement of color filters on the sensor; represents the colors
+        in the top-left 2x2 section of the sensor, in reading order. Currently
+        identical to ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT.
+      enum:
+        - name: RGGB
+          value: 0
+          description: RGGB Bayer pattern
+        - name: GRBG
+          value: 1
+          description: GRBG Bayer pattern
+        - name: GBRG
+          value: 2
+          description: GBRG Bayer pattern
+        - name: BGGR
+          value: 3
+          description: BGGR Bayer pattern
+        - name: RGB
+          value: 4
+          description: |
+            Sensor is not Bayer; output has 3 16-bit values for each pixel,
+            instead of just 1 16-bit value per pixel.
+        - name: MONO
+          value: 5
+          description: |
+            Sensor is not Bayer; output consists of a single colour channel.
+
+...
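
The UnitCellSize and PixelArraySize properties above can be combined to
compute the physical size of the pixel array, as the UnitCellSize
description suggests. A minimal Python sketch, using hypothetical values in
the style of an imx219-class sensor (real values come from the camera's
property list at runtime):

    # Derive the sensor's physical dimensions from UnitCellSize (nanometers)
    # and PixelArraySize (pixels). All values below are hypothetical.
    unit_cell_nm = (1120, 1120)     # UnitCellSize: 1.12 um square unit cells
    pixel_array = (3280, 2464)      # PixelArraySize: width x height in pixels

    # nanometers -> millimetres: pixel count times cell size, divided by 1e6.
    width_mm = pixel_array[0] * unit_cell_nm[0] / 1e6
    height_mm = pixel_array[1] * unit_cell_nm[1] / 1e6
    print(f'pixel array physical size: {width_mm:.2f} x {height_mm:.2f} mm')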
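
The inclusive coordinate arithmetic in the first
PixelArrayOpticalBlackRectangles example can likewise be checked with a
short sketch; the coordinate values below are hypothetical and only mirror
the formulas given in the description:

    # Rectangles are (x, y, width, height). x1/x2 mark the columns where the
    # left and right optical black bands start, y1/y2 the first and last rows
    # of the top band, and y3/y4 the rows flanked by the side bands, all
    # inclusive. Values are made up for illustration.
    x1, x2 = 2, 44
    y1, y2 = 2, 5
    y3, y4 = 8, 107

    rectangles = [
        (x1, y1, x2 - x1 + 1, y2 - y1 + 1),  # top band
        (x1, y3, 2, y4 - y3 + 1),            # left two-column band
        (x2, y3, 2, y4 - y3 + 1),            # right two-column band
    ]
    for x, y, w, h in rectangles:
        print(f'{w}x{h} optical black region at ({x}, {y})')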
diff --git a/src/py/libcamera/gen-py-controls.py b/src/py/libcamera/gen-py-controls.py
index cfcfd4d1..8ae8d512 100755
--- a/src/py/libcamera/gen-py-controls.py
+++ b/src/py/libcamera/gen-py-controls.py
@@ -95,7 +95,7 @@ def main(argv):
                         help='Output file name. Defaults to standard output if not specified.')
     parser.add_argument('--template', '-t', type=str, required=True,
                         help='Template file name.')
-    parser.add_argument('input', type=str,
+    parser.add_argument('input', type=str, nargs='+',
                         help='Input file name.')
 
     args = parser.parse_args(argv[1:])
@@ -103,11 +103,11 @@ def main(argv):
         print(f'Invalid mode option "{args.mode}"', file=sys.stderr)
         return -1
 
-    data = open(args.input, 'rb').read()
-
     controls = {}
-    vendor = yaml.safe_load(data)['vendor']
-    controls[vendor] = yaml.safe_load(data)['controls']
+    for input in args.input:
+        data = open(input, 'rb').read()
+        vendor = yaml.safe_load(data)['vendor']
+        controls[vendor] = yaml.safe_load(data)['controls']
 
     data = generate_py(controls, args.mode)
 
diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build
index 1c3ea184..31af63ec 100644
--- a/src/py/libcamera/meson.build
+++ b/src/py/libcamera/meson.build
@@ -28,11 +28,15 @@ pycamera_sources = files([
 
 # Generate controls
 
-gen_py_controls_input_files = files('../../libcamera/control_ids.yaml')
+gen_py_controls_input_files = []
 gen_py_controls_template = files('py_controls_generated.cpp.in')
 
 gen_py_controls = files('gen-py-controls.py')
 
+foreach file : controls_files
+    gen_py_controls_input_files += files('../../libcamera/' + file)
+endforeach
+
 pycamera_sources += custom_target('py_gen_controls',
                                   input : gen_py_controls_input_files,
                                   output : ['py_controls_generated.cpp'],
@@ -41,9 +45,13 @@ pycamera_sources += custom_target('py_gen_controls',
 
 # Generate properties
 
-gen_py_property_enums_input_files = files('../../libcamera/property_ids.yaml')
+gen_py_property_enums_input_files = []
 gen_py_properties_template = files('py_properties_generated.cpp.in')
 
+foreach file : properties_files
+    gen_py_property_enums_input_files += files('../../libcamera/' + file)
+endforeach
+
 pycamera_sources += custom_target('py_gen_properties',
                                   input : gen_py_property_enums_input_files,
                                   output : ['py_properties_generated.cpp'],
diff --git a/utils/gen-controls.py b/utils/gen-controls.py
index 56e0c7ba..2a633cc0 100755
--- a/utils/gen-controls.py
+++ b/utils/gen-controls.py
@@ -12,6 +12,7 @@ import operator
 import string
 import sys
 import yaml
+import os
 
 
 class ControlEnum(object):
@@ -342,15 +343,18 @@ def main(argv):
                         help='Output file name. Defaults to standard output if not specified.')
     parser.add_argument('--template', '-t', dest='template', type=str, required=True,
                         help='Template file name.')
-    parser.add_argument('input', type=str,
+    parser.add_argument('input', type=str, nargs='+',
                         help='Input file name.')
 
     args = parser.parse_args(argv[1:])
 
-    data = open(args.input, 'rb').read()
-    vendor = yaml.safe_load(data)['vendor']
-    controls = yaml.safe_load(data)['controls']
-    controls = [Control(*ctrl.popitem(), vendor) for ctrl in controls]
+    controls = []
+    for input in args.input:
+        with open(input, 'rb') as f:
+            data = f.read()
+            vendor = yaml.safe_load(data)['vendor']
+            ctrls = yaml.safe_load(data)['controls']
+            controls = controls + [Control(*ctrl.popitem(), vendor) for ctrl in ctrls]
 
     if args.template.endswith('.cpp.in'):
         data = generate_cpp(controls)
-- 
cgit v1.2.1
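
With this change applied, both generator scripts accept any number of input
YAML files, one per vendor, and merge their contents keyed by each file's
'vendor' tag (core and draft definitions use the 'libcamera' tag). A minimal
Python sketch of that merge behaviour; control_ids_vendor.yaml is a
hypothetical file standing in for a real per-pipeline vendor file:

    # Sketch of the multi-file merge now performed by gen-py-controls.py.
    # Meson invokes the generators roughly as:
    #   gen-controls.py --mode controls -o control_ids.h \
    #       -t control_ids.h.in control_ids_core.yaml control_ids_vendor.yaml
    import yaml

    inputs = ['src/libcamera/control_ids_core.yaml',
              'src/libcamera/control_ids_vendor.yaml']  # hypothetical vendor file

    controls = {}
    for path in inputs:
        with open(path, 'rb') as f:
            doc = yaml.safe_load(f)
        # Each input file carries a 'vendor' tag naming its control namespace.
        controls[doc['vendor']] = doc['controls']

    for vendor, ctrls in controls.items():
        print(f'{vendor}: {len(ctrls)} controls defined')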