Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/Doxyfile-common.in (renamed from Documentation/Doxyfile.in)  25
-rw-r--r--  Documentation/Doxyfile-internal.in  33
-rw-r--r--  Documentation/Doxyfile-public.in  20
-rw-r--r--  Documentation/api-html/index.rst  4
-rw-r--r--  Documentation/camera-sensor-model.rst  2
-rw-r--r--  Documentation/code-of-conduct.rst  2
-rw-r--r--  Documentation/coding-style.rst  6
-rw-r--r--  Documentation/conf.py  10
-rw-r--r--  Documentation/design/ae.rst  331
-rw-r--r--  Documentation/docs.rst  400
-rw-r--r--  Documentation/documentation-contents.rst  35
-rw-r--r--  Documentation/environment_variables.rst  20
-rw-r--r--  Documentation/feature_requirements.rst  150
-rwxr-xr-x  Documentation/gen-doxyfile.py  46
-rw-r--r--  Documentation/getting-started.rst  1
-rw-r--r--  Documentation/guides/application-developer.rst  18
-rw-r--r--  Documentation/guides/introduction.rst  319
-rw-r--r--  Documentation/guides/ipa.rst  2
-rw-r--r--  Documentation/guides/pipeline-handler.rst  55
-rw-r--r--  Documentation/guides/tracing.rst  2
-rw-r--r--  Documentation/index.rst  27
-rw-r--r--  Documentation/internal-api-html/index.rst  8
-rw-r--r--  Documentation/introduction.rst  224
-rw-r--r--  Documentation/lens_driver_requirements.rst  2
-rw-r--r--  Documentation/libcamera_architecture.rst  168
-rw-r--r--  Documentation/mainpage.dox  33
-rw-r--r--  Documentation/mali-c55.dot  25
-rw-r--r--  Documentation/meson.build  98
-rw-r--r--  Documentation/python-bindings.rst  2
-rw-r--r--  Documentation/sensor_driver_requirements.rst  2
-rw-r--r--  Documentation/software-isp-benchmarking.rst  79
-rw-r--r--  Documentation/theme/static/css/theme.css  6
-rw-r--r--  Documentation/thread-safety.dox  44
33 files changed, 1388 insertions, 811 deletions
diff --git a/Documentation/Doxyfile.in b/Documentation/Doxyfile-common.in
index 2be8d47b..045c19dd 100644
--- a/Documentation/Doxyfile.in
+++ b/Documentation/Doxyfile-common.in
@@ -20,36 +20,19 @@ TOC_INCLUDE_HEADINGS = 0
CASE_SENSE_NAMES = YES
QUIET = YES
-
-INPUT = "@TOP_SRCDIR@/include/libcamera" \
- "@TOP_SRCDIR@/src/ipa/ipu3" \
- "@TOP_SRCDIR@/src/ipa/libipa" \
- "@TOP_SRCDIR@/src/libcamera" \
- "@TOP_BUILDDIR@/include/libcamera" \
- "@TOP_BUILDDIR@/src/libcamera"
+WARN_AS_ERROR = @WARN_AS_ERROR@
FILE_PATTERNS = *.c \
*.cpp \
+ *.dox \
*.h
RECURSIVE = YES
-EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
- @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
- @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
- @TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
- @TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
- @TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
- @TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
- @TOP_SRCDIR@/src/libcamera/pipeline/ \
- @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
- @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
- @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
- @TOP_BUILDDIR@/src/libcamera/proxy/
-
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
@TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
@TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
+ @TOP_BUILDDIR@/include/libcamera/ipa/mali-c55_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
@@ -69,8 +52,6 @@ EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
EXCLUDE_SYMLINKS = YES
-HTML_OUTPUT = api-html
-
GENERATE_LATEX = NO
MACRO_EXPANSION = YES
diff --git a/Documentation/Doxyfile-internal.in b/Documentation/Doxyfile-internal.in
new file mode 100644
index 00000000..5343bc2b
--- /dev/null
+++ b/Documentation/Doxyfile-internal.in
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
+@INCLUDE = Doxyfile-common
+
+HIDE_UNDOC_CLASSES = NO
+HIDE_UNDOC_MEMBERS = NO
+HTML_OUTPUT = internal-api-html
+INTERNAL_DOCS = YES
+ENABLED_SECTIONS = internal
+
+INPUT = "@TOP_SRCDIR@/Documentation" \
+ "@TOP_SRCDIR@/include/libcamera" \
+ "@TOP_SRCDIR@/src/ipa/ipu3" \
+ "@TOP_SRCDIR@/src/ipa/libipa" \
+ "@TOP_SRCDIR@/src/libcamera" \
+ "@TOP_BUILDDIR@/include/libcamera" \
+ "@TOP_BUILDDIR@/src/libcamera"
+
+EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
+ @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
+ @TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
+ @TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
+ @TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
+ @TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
+ @TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
+ @TOP_SRCDIR@/src/libcamera/pipeline/ \
+ @TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_legacy.cpp \
+ @TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_raw.cpp \
+ @TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
+ @TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
+ @TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
+ @TOP_BUILDDIR@/src/libcamera/proxy/
diff --git a/Documentation/Doxyfile-public.in b/Documentation/Doxyfile-public.in
new file mode 100644
index 00000000..36bb5758
--- /dev/null
+++ b/Documentation/Doxyfile-public.in
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: CC-BY-SA-4.0
+
+@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
+@INCLUDE = Doxyfile-common
+
+HIDE_UNDOC_CLASSES = YES
+HIDE_UNDOC_MEMBERS = YES
+HTML_OUTPUT = api-html
+INTERNAL_DOCS = NO
+
+INPUT = "@TOP_SRCDIR@/Documentation" \
+ ${inputs}
+
+EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/class.h \
+ @TOP_SRCDIR@/include/libcamera/base/object.h \
+ @TOP_SRCDIR@/include/libcamera/base/span.h \
+ @TOP_SRCDIR@/src/libcamera/base/class.cpp \
+ @TOP_SRCDIR@/src/libcamera/base/object.cpp
+
+PREDEFINED += __DOXYGEN_PUBLIC__
diff --git a/Documentation/api-html/index.rst b/Documentation/api-html/index.rst
index 9e630fc0..2f09833d 100644
--- a/Documentation/api-html/index.rst
+++ b/Documentation/api-html/index.rst
@@ -2,7 +2,7 @@
.. _api:
-API
-===
+API Reference
+=============
:: Placeholder for Doxygen documentation
diff --git a/Documentation/camera-sensor-model.rst b/Documentation/camera-sensor-model.rst
index b66c880a..87a25bf4 100644
--- a/Documentation/camera-sensor-model.rst
+++ b/Documentation/camera-sensor-model.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _camera-sensor-model:
.. todo: Move to Doxygen-generated documentation
diff --git a/Documentation/code-of-conduct.rst b/Documentation/code-of-conduct.rst
index 38b7d7ad..0edd1e99 100644
--- a/Documentation/code-of-conduct.rst
+++ b/Documentation/code-of-conduct.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-4.0
+.. include:: documentation-contents.rst
+
.. _code-of-conduct:
Contributor Covenant Code of Conduct
diff --git a/Documentation/coding-style.rst b/Documentation/coding-style.rst
index 053fdd99..6ac3a4a0 100644
--- a/Documentation/coding-style.rst
+++ b/Documentation/coding-style.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _coding-style-guidelines:
Coding Style Guidelines
@@ -59,7 +61,7 @@ document:
underscores in between
* All formatting rules specified in the selected sections of the Linux kernel
Code Style for indentation, braces, spacing, etc
-* Header guards are formatted as '__LIBCAMERA_FILE_NAME_H__'
+* Headers are guarded by the use of '#pragma once'
Order of Includes
~~~~~~~~~~~~~~~~~
@@ -215,7 +217,7 @@ shall be avoided when possible, but are allowed when required (for instance to
implement factories with auto-registration). They shall not depend on any other
global variable, should run a minimal amount of code in the constructor and
destructor, and code that contains dependencies should be moved to a later
-point in time.
+point in time.
Error Handling
~~~~~~~~~~~~~~
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 7eeea7f3..089f114c 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -37,8 +37,11 @@ author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
+ 'sphinx.ext.graphviz'
]
+graphviz_output_format = 'svg'
+
# Add any paths that contain templates here, relative to this directory.
templates_path = []
@@ -61,7 +64,12 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = [
+ '_build',
+ 'Thumbs.db',
+ '.DS_Store',
+ 'documentation-contents.rst',
+]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
diff --git a/Documentation/design/ae.rst b/Documentation/design/ae.rst
new file mode 100644
index 00000000..df9b1fa7
--- /dev/null
+++ b/Documentation/design/ae.rst
@@ -0,0 +1,331 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+Design of Exposure and Gain controls
+====================================
+
+This document explains the design and rationale of the controls related to
+exposure and gain. This includes the all-encompassing auto-exposure (AE), the
+manual exposure control, and the manual gain control.
+
+Description of the problem
+--------------------------
+
+Sub controls
+^^^^^^^^^^^^
+
+More than one control makes up the total exposure: exposure time, gain, and
+aperture (though for now we will not consider aperture). We already
+had individual controls for setting the values of manual exposure and manual
+gain, but for switching between auto mode and manual mode we only had a
+high-level boolean AeEnable control that would set *both* exposure and gain to
+auto mode or manual mode; we had no way to set one to auto and the other to
+manual.
+
+So, we need to introduce two new controls to act as "levers" to indicate
+individually for exposure and gain if the value would come from AEGC or if it
+would come from the manual control value.
+
+Aperture priority
+^^^^^^^^^^^^^^^^^
+
+We may eventually need to support aperture, so whatever solution we adopt for
+having only some controls on auto and the others on manual needs to be
+extensible.
+
+Flickering when going from auto to manual
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When a manual exposure or gain value is requested by the application, it takes
+a few frames' worth of time to take effect. This means that during a
+transition from auto to manual, there would be flickering in the control values
+and the transition would not be smooth.
+
+Take for instance the following flow, where we start on auto exposure (which
+for the purposes of the example increments by 1 each frame) and we want to
+switch seamlessly to manual exposure, which involves copying the exposure value
+computed by the auto exposure algorithm:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+
+ Mode requested: Auto Auto Auto Manual Manual Manual Manual
+ Exp requested: N/A N/A N/A 2 2 2 2
+ Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
+
+ Mode used: Auto Auto Auto Auto Auto Manual Manual
+ Exp used: 0 1 2 3 4 2 2
+
+As we can see, after frame N+2 completes, we copy the exposure value that was
+used for frame N+2 (which was computed by the AE algorithm), and queue that
+value into request N+3 with manual mode on. However, as it takes two frames
+for the exposure to be set, the exposure still changes since it is set by AE,
+and we get a flicker in the exposure during the switch from auto to manual.
+
+A solution is to *not submit* any exposure value when manual mode is enabled,
+and wait until the manual mode has been "applied" before copying the exposure
+value:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+
+ Mode requested: Auto Auto Auto Manual Manual Manual Manual
+ Exp requested: N/A N/A N/A None None None 5
+ Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
+
+ Mode used: Auto Auto Auto Auto Auto Manual Manual
+ Exp used: 0 1 2 3 4 5 5
+
+In practice, this works. However, libcamera has a policy where once a control
+is submitted, its value is saved and does not need to be resubmitted. If the
+manual exposure value was set while auto mode was on, in theory the value would
+be saved, so when manual mode is enabled, the exposure value that was
+previously set would immediately be used. Clearly this solution isn't correct,
+but it can serve as the basis for a proper solution, with some more rigorous
+rules.
+
+Existing solutions
+------------------
+
+Raspberry Pi
+^^^^^^^^^^^^
+
+The Raspberry Pi IPA gets around the lack of individual AeEnable controls for
+exposure and gain by using magic values. When AeEnable is false, if one of the
+manual control values was set to 0 then the value computed by AEGC would be
+used for just that control. This solution isn't desirable, as it prevents
+that magic value from being used as a valid value.
+
+To get around the flickering issue, when AeEnable is false, the Raspberry Pi
+AEGC simply stops updating the values to be set, without restoring the
+previously set manual exposure time and gain. This works, but is not a proper
+solution.
+
+Android
+^^^^^^^
+
+The Android HAL specification requires that exposure and gain (sensitivity)
+must both be manual or both be auto. It cannot be that one is manual while the
+other is auto, so they simply don't support sub controls.
+
+For the flickering issue, the Android HAL has an AeLock control. To transition
+from auto to manual, the application would keep AE on auto, and turn on the
+lock. Once the lock has propagated through, then the value can be copied from
+the result into the request and the lock disabled and the mode set to manual.
+
+The problem with this solution is, besides the extra complexity, that it is
+ambiguous what happens if there is a state transition from manual to locked
+(even though it's a state transition that doesn't make sense). If locked is
+defined to "use the last automatically computed values" then it could use the
+values from the last time AE was set to auto, or it would be undefined if AE
+was never auto (e.g. it started out as manual), or if AE is implemented to run
+in the background it could just use the current values that are computed. If
+locked is defined to "use the last value that was set" there would be less
+ambiguity. Still, it's better if we can make it impossible to execute this
+nonsensical state transition, and if we can reduce the complexity of having
+this extra control or extra setting on a lever.
+
+Summary of goals
+----------------
+
+- We need a lock of some sort, to instruct the AEGC to not update output
+ results
+
+- We need manual modes, to override the values computed by the AEGC
+
+- We need to support seamless transitions from auto to manual, and do so
+ without flickering
+
+- We need custom minimum values for the manual controls; that is, no magic
+ values for enabling/disabling auto
+
+- All of these need to be done with AE sub-controls (exposure time, analogue
+ gain) and be extensible to aperture in the future
+
+Our solution
+------------
+
+A diagram of our solution:
+
+::
+
+ +----------------------------+-------------+------------------+-----------------+
+ | INPUT | ALGORITHM | RESULT | OUTPUT |
+ +----------------------------+-------------+------------------+-----------------+
+
+ ExposureTimeMode ExposureTimeMode
+ ---------------------+----------------------------------------+----------------->
+ 0: Auto | |
+ 1: Manual | V
+ | |\
+ | | \
+ | /----------------------------------> | 1| ExposureTime
+ | | +-------------+ exposure time | | -------------->
+ \--)--> | | --------------> | 0|
+ ExposureTime | | | | /
+ ------------------------+--> | | |/
+ | | AeState
+ | AEGC | ----------------------------------->
+ AnalogueGain | |
+ ------------------------+--> | | |\
+ | | | | \
+ /--)--> | | --------------> | 0| AnalogueGain
+ | | +-------------+ analogue gain | | -------------->
+ | \----------------------------------> | 1|
+ | | /
+ | |/
+ | ^
+ AnalogueGainMode | | AnalogueGainMode
+ ---------------------+----------------------------------------+----------------->
+ 0: Auto
+ 1: Manual
+
+ AeEnable
+ - True -> ExposureTimeMode:Auto + AnalogueGainMode:Auto
+ - False -> ExposureTimeMode:Manual + AnalogueGainMode:Manual
+
+
+The diagram is divided into four sections horizontally:
+
+- Input: The values received from the request controls
+
+- Algorithm: The algorithm itself
+
+- Result: The values calculated by the algorithm
+
+- Output: The values reported in result metadata and applied to the device
+
+The four input controls are divided between manual values (ExposureTime and
+AnalogueGain), and operation modes (ExposureTimeMode and AnalogueGainMode). The
+former are the manual values, the latter control how they're applied. The two
+modes are independent from each other, and each can take one of two values:
+
+- Auto (0): The AEGC computes the value normally. The AEGC result is applied
+  to the output. The manual value is ignored *and is not retained*.
+
+- Manual (1): The AEGC uses the manual value internally. The corresponding
+  manual control from the request is applied to the output. The AEGC result
+  is ignored.
+
+The AeState control reports the state of the unified AEGC block. If both
+ExposureTimeMode and AnalogueGainMode are set to manual then it will report
+Idle. If at least one of the two is set to auto, then AeState will report
+whether the AEGC has converged (Converged) or not (Searching). This control
+replaces the old AeLocked control, which was insufficient for reporting the
+AE state.
+
+There is a caveat to manual mode: the manual control value is not retained if
+it is set during auto mode. This means that if manual mode is entered without
+also setting the manual value, then it will enter a state similar to "locked",
+where the last automatically computed value while the mode was auto will be
+used. Once the manual value is set, then that will be used and retained as
+usual.
+
+This simulates an auto -> locked -> manual or auto -> manual state transition,
+and makes it impossible to do the nonsensical manual -> locked state
+transition.
+
+AeEnable still exists to allow applications to set the mode of all the
+sub-controls at once. Besides being convenient, this will also be useful
+when we eventually implement an aperture control. Applications written
+before the aperture control becomes available would still be able to set
+the aperture mode to auto or manual, as opposed to having the aperture stuck
+at auto while the application really wanted manual. Although the aperture would
+still be stuck at an uncontrollable value, at least it would be at a static
+usable value as opposed to varying via the AEGC algorithm.
+
+With this solution, the earlier example would become:
+
+::
+
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ | N+2 | | N+3 | | N+4 | | N+5 | | N+6 | | N+7 | | N+8 | | N+9 | | N+10|
+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
+ Mode requested: Auto Manual Manual Manual Manual Manual Manual Manual Manual
+ Exp requested: N/A None None None None 10 None 10 10
+ Set in Frame: N+4 N+5 N+6 N+7 N+8 N+9 N+10 N+11 N+12
+
+ Mode used: Auto Auto Auto Manual Manual Manual Manual Manual Manual
+ Exp used: 2 3 4 5 5 5 5 10 10
+
+This example is extended by a few frames to exhibit the simulated "locked"
+state. At frame N+5 the application has confirmed that the manual mode has been
+entered, but does not provide a manual value until request N+7. Thus, the value
+used in requests N+5 and N+6 (where the mode is manual but no manual value has
+been supplied) comes from the last value that was used when the mode was auto,
+which comes from frame N+4.
+
+Then, in N+7, a manual value of 10 is supplied. It takes until frame N+9 for
+the exposure to be applied. N+8 does not supply a manual value, but the last
+supplied value is retained, so a manual value of 10 is still used and set in
+frame N+10.
+
+Although this behavior is the same as what we had with waiting for the manual
+mode to propagate (in the section "Description of the problem"), this time it
+is correct, as we have specifically defined that a manual value supplied while
+the mode is auto is not retained.
+
+Description of the controls
+---------------------------
+
+As described above, libcamera offers the following controls related to exposure
+and gain:
+
+- AnalogueGain
+
+- AnalogueGainMode
+
+- ExposureTime
+
+- ExposureTimeMode
+
+- AeState
+
+- AeEnable
+
+Auto-exposure and auto-gain can be enabled and disabled separately using the
+ExposureTimeMode and AnalogueGainMode controls respectively. The AeEnable
+control can also be used, as it sets both of the modes simultaneously. The
+AeEnable control is not returned in metadata.
+
+When the respective mode is set to auto, the respective value that is computed
+by the AEGC algorithm is applied to the image sensor. Any value that is
+supplied in the manual ExposureTime/AnalogueGain control is ignored and not
+retained. Another way to understand this is that when the mode transitions from
+auto to manual, the internally stored control value is overwritten with the
+last value computed by the auto algorithm.
+
+This means that when we transition from auto to manual without supplying a
+manual control value, the last value that was set by the AEGC algorithm will
+continue to be used. This enables a flickerless transition from auto to
+manual as described earlier. If the camera started out in manual mode and no
+corresponding value has been supplied yet, then a best-effort default value
+shall be set.
+
+The manual control value can be set in the same request as setting the mode to
+manual if the desired manual control value is already known.
+
+Transitioning from manual to auto shall be implicitly flickerless, as the AEGC
+algorithms are expected to start running from the last manual value.
+
+The AeState metadata reports the state of the AE algorithm. As AE cannot
+compute exposure and gain separately, the state of the AE component is
+unified. There are three states: Idle, Searching, and Converged.
+
+The state shall be Idle if both ExposureTimeMode and AnalogueGainMode
+are set to Manual. If the camera only supports one of the two controls,
+then the state shall be Idle if that one control is set to Manual. If
+the camera does not support Manual for at least one of the two controls,
+then the state will never be Idle, as AE will always be running.
+
+The state shall be Searching if at least one of exposure or gain calculated
+by the AE algorithm is used (that is, at least one of the two modes is Auto),
+*and* the value(s) have not converged yet.
+
+The state shall be Converged if at least one of exposure or gain calculated
+by the AE algorithm is used (that is, at least one of the two modes is Auto),
+*and* the value(s) have converged.
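+
+As an illustration of the transition rules above, here is a minimal C++
+sketch of the flickerless auto-to-manual switch, assuming the controls land
+in libcamera::controls under the names used in this design (ExposureTimeMode,
+ExposureTime and the ExposureTimeModeAuto/ExposureTimeModeManual enumerators);
+the request plumbing around it is application-specific:
+
+.. code:: cpp
+
+    #include <libcamera/control_ids.h>
+    #include <libcamera/controls.h>
+    #include <libcamera/request.h>
+
+    using namespace libcamera;
+
+    /* Step 1: request manual mode without supplying an exposure time.
+     * The AEGC keeps applying its last computed value, which is the
+     * simulated "locked" state described above. */
+    void requestManualMode(Request *request)
+    {
+        request->controls().set(controls::ExposureTimeMode,
+                                controls::ExposureTimeModeManual);
+    }
+
+    /* Step 2: once a completed request's metadata confirms that manual
+     * mode has taken effect, copy the reported exposure time into the
+     * next request so that the value is retained from then on. */
+    void pinExposure(Request *next, Request *completed)
+    {
+        auto mode = completed->metadata().get(controls::ExposureTimeMode);
+        if (mode && *mode == controls::ExposureTimeModeManual) {
+            auto exposure = completed->metadata().get(controls::ExposureTime);
+            if (exposure)
+                next->controls().set(controls::ExposureTime, *exposure);
+        }
+    }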
diff --git a/Documentation/docs.rst b/Documentation/docs.rst
deleted file mode 100644
index a6e8a59a..00000000
--- a/Documentation/docs.rst
+++ /dev/null
@@ -1,400 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
-
-.. contents::
- :local:
-
-*************
-Documentation
-*************
-
-.. toctree::
- :hidden:
-
- API <api-html/index>
-
-API
-===
-
-The libcamera API is extensively documented using Doxygen. The :ref:`API
-nightly build <api>` contains the most up-to-date API documentation, built from
-the latest master branch.
-
-Feature Requirements
-====================
-
-Device enumeration
-------------------
-
-The library shall support enumerating all camera devices available in the
-system, including both fixed cameras and hotpluggable cameras. It shall
-support cameras plugged and unplugged after the initialization of the
-library, and shall offer a mechanism to notify applications of camera plug
-and unplug.
-
-The following types of cameras shall be supported:
-
-* Internal cameras designed for point-and-shoot still image and video
- capture usage, either controlled directly by the CPU, or exposed through
- an internal USB bus as a UVC device.
-
-* External UVC cameras designed for video conferencing usage.
-
-Other types of camera, including analog cameras, depth cameras, thermal
-cameras, external digital picture or movie cameras, are out of scope for
-this project.
-
-A hardware device that includes independent camera sensors, such as front
-and back sensors in a phone, shall be considered as multiple camera devices
-for the purpose of this library.
-
-Independent Camera Devices
---------------------------
-
-When multiple cameras are present in the system and are able to operate
-independently from each other, the library shall expose them as multiple
-camera devices and support parallel operation without any additional usage
-restriction apart from the limitations inherent to the hardware (such as
-memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
-
-Independent processes shall be able to use independent cameras devices
-without interfering with each other. A single camera device shall be
-usable by a single process at a time.
-
-Multiple streams support
-------------------------
-
-The library shall support multiple video streams running in parallel
-for each camera device, within the limits imposed by the system.
-
-Per frame controls
-------------------
-
-The library shall support controlling capture parameters for each stream
-on a per-frame basis, on a best effort basis based on the capabilities of the
-hardware and underlying software stack (including kernel drivers and
-firmware). It shall apply capture parameters to the frame they target, and
-report the value of the parameters that have effectively been used for each
-captured frame.
-
-When a camera device supports multiple streams, the library shall allow both
-control of each stream independently, and control of multiple streams
-together. Streams that are controlled together shall be synchronized. No
-synchronization is required for streams controlled independently.
-
-Capability Enumeration
-----------------------
-
-The library shall expose capabilities of each camera device in a way that
-allows applications to discover those capabilities dynamically. Applications
-shall be allowed to cache capabilities for as long as they are using the
-library. If capabilities can change at runtime, the library shall offer a
-mechanism to notify applications of such changes. Applications shall not
-cache capabilities in long term storage between runs.
-
-Capabilities shall be discovered dynamically at runtime from the device when
-possible, and may come, in part or in full, from platform configuration
-data.
-
-Device Profiles
----------------
-
-The library may define different camera device profiles, each with a minimum
-set of required capabilities. Applications may use those profiles to quickly
-determine the level of features exposed by a device without parsing the full
-list of capabilities. Camera devices may implement additional capabilities
-on top of the minimum required set for the profile they expose.
-
-3A and Image Enhancement Algorithms
------------------------------------
-
-The camera devices shall implement auto exposure, auto gain and auto white
-balance. Camera devices that include a focus lens shall implement auto
-focus. Additional image enhancement algorithms, such as noise reduction or
-video stabilization, may be implemented.
-
-All algorithms may be implemented in hardware or firmware outside of the
-library, or in software in the library. They shall all be controllable by
-applications.
-
-The library shall be architectured to isolate the 3A and image enhancement
-algorithms in a component with a documented API, respectively called the 3A
-component and the 3A API. The 3A API shall be stable, and shall allow both
-open-source and closed-source implementations of the 3A component.
-
-The library may include statically-linked open-source 3A components, and
-shall support dynamically-linked open-source and closed-source 3A
-components.
-
-Closed-source 3A Component Sandboxing
--------------------------------------
-
-For security purposes, it may be desired to run closed-source 3A components
-in a separate process. The 3A API would in such a case be transported over
-IPC. The 3A API shall make it possible to use any IPC mechanism that
-supports passing file descriptors.
-
-The library may implement an IPC mechanism, and shall support third-party
-platform-specific IPC mechanisms through the implementation of a
-platform-specific 3A API wrapper. No modification to the library shall be
-needed to use such third-party IPC mechanisms.
-
-The 3A component shall not directly access any device node on the system.
-Such accesses shall instead be performed through the 3A API. The library
-shall validate all accesses and restrict them to what is absolutely required
-by 3A components.
-
-V4L2 Compatibility Layer
-------------------------
-
-The project shall support traditional V4L2 application through an additional
-libcamera wrapper library. The wrapper library shall trap all accesses to
-camera devices through `LD_PRELOAD`, and route them through libcamera to
-emulate a high-level V4L2 camera device. It shall expose camera device
-features on a best-effort basis, and aim for the level of features
-traditionally available from a UVC camera designed for video conferencing.
-
-Android Camera HAL v3 Compatibility
------------------------------------
-
-The library API shall expose all the features required to implement an
-Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
-omitted as long as they can be implemented separately in the HAL, such as
-JPEG encoding, or YUV reprocessing.
-
-
-Camera Stack
-============
-
-::
-
- a c / +-------------+ +-------------+ +-------------+ +-------------+
- p a | | Native | | Framework | | Native | | Android |
- p t | | V4L2 | | Application | | libcamera | | Camera |
- l i | | Application | | (gstreamer) | | Application | | Framework |
- i o \ +-------------+ +-------------+ +-------------+ +-------------+
- n ^ ^ ^ ^
- | | | |
- l a | | | |
- i d v v | v
- b a / +-------------+ +-------------+ | +-------------+
- c p | | V4L2 | | Camera | | | Android |
- a t | | Compat. | | Framework | | | Camera |
- m a | | | | (gstreamer) | | | HAL |
- e t \ +-------------+ +-------------+ | +-------------+
- r i ^ ^ | ^
- a o | | | |
- n | | | |
- / | ,................................................
- | | ! : Language : !
- l f | | ! : Bindings : !
- i r | | ! : (optional) : !
- b a | | \...............................................'
- c m | | | | |
- a e | | | | |
- m w | v v v v
- e o | +----------------------------------------------------------------+
- r r | | |
- a k | | libcamera |
- | | |
- \ +----------------------------------------------------------------+
- ^ ^ ^
- Userspace | | |
- ------------------------ | ---------------- | ---------------- | ---------------
- Kernel | | |
- v v v
- +-----------+ +-----------+ +-----------+
- | Media | <--> | Video | <--> | V4L2 |
- | Device | | Device | | Subdev |
- +-----------+ +-----------+ +-----------+
-
-The camera stack comprises four software layers. From bottom to top:
-
-* The kernel drivers control the camera hardware and expose a
- low-level interface to userspace through the Linux kernel V4L2
- family of APIs (Media Controller API, V4L2 Video Device API and
- V4L2 Subdev API).
-
-* The libcamera framework is the core part of the stack. It
- handles all control of the camera devices in its core component,
- libcamera, and exposes a native C++ API to upper layers. Optional
- language bindings allow interfacing to libcamera from other
- programming languages.
-
- Those components live in the same source code repository and
- all together constitute the libcamera framework.
-
-* The libcamera adaptation is an umbrella term designating the
- components that interface to libcamera in other frameworks.
- Notable examples are a V4L2 compatibility layer, a gstreamer
- libcamera element, and an Android camera HAL implementation based
- on libcamera.
-
- Those components can live in the libcamera project source code
- in separate repositories, or move to their respective project's
- repository (for instance the gstreamer libcamera element).
-
-* The applications and upper level frameworks are based on the
- libcamera framework or libcamera adaptation, and are outside of
- the scope of the libcamera project.
-
-
-libcamera Architecture
-======================
-
-::
-
- ---------------------------< libcamera Public API >---------------------------
- ^ ^
- | |
- v v
- +-------------+ +-------------------------------------------------+
- | Camera | | Camera Device |
- | Devices | | +---------------------------------------------+ |
- | Manager | | | Device-Agnostic | |
- +-------------+ | | | |
- ^ | | +------------------------+ |
- | | | | ~~~~~~~~~~~~~~~~~~~~~ |
- | | | | { +---------------+ } |
- | | | | } | ////Image//// | { |
- | | | | <-> | /Processing// | } |
- | | | | } | /Algorithms// | { |
- | | | | { +---------------+ } |
- | | | | ~~~~~~~~~~~~~~~~~~~~~ |
- | | | | ======================== |
- | | | | +---------------+ |
- | | | | | //Pipeline/// | |
- | | | | <-> | ///Handler/// | |
- | | | | | ///////////// | |
- | | +--------------------+ +---------------+ |
- | | Device-Specific |
- | +-------------------------------------------------+
- | ^ ^
- | | |
- v v v
- +--------------------------------------------------------------------+
- | Helpers and Support Classes |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
- | | Support | | Allocator | | IPC | | Manager | |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | +-------------+ +-------------+ |
- | | Pipeline | | ... | |
- | | Runner | | | |
- | +-------------+ +-------------+ |
- +--------------------------------------------------------------------+
-
- /// Device-Specific Components
- ~~~ Sandboxing
-
-While offering a unified API towards upper layers, and presenting
-itself as a single library, libcamera isn't monolithic. It exposes
-multiple components through its public API, is built around a set of
-separate helpers internally, uses device-specific components and can
-load dynamic plugins.
-
-Camera Devices Manager
- The Camera Devices Manager provides a view of available cameras
- in the system. It performs cold enumeration and runtime camera
- management, and supports a hotplug notification mechanism in its
- public API.
-
- To avoid the cost associated with cold enumeration of all devices
- at application start, and to arbitrate concurrent access to camera
- devices, the Camera Devices Manager could later be split to a
- separate service, possibly with integration in platform-specific
- device management.
-
-Camera Device
- The Camera Device represents a camera device to upper layers. It
- exposes full control of the device through the public API, and is
- thus the highest level object exposed by libcamera.
-
- Camera Device instances are created by the Camera Devices
- Manager. An optional function to create new instances could be exposed
- through the public API to speed up initialization when the upper
- layer knows how to directly address camera devices present in the
- system.
-
-Pipeline Handler
- The Pipeline Handler manages complex pipelines exposed by the kernel drivers
- through the Media Controller and V4L2 APIs. It abstracts pipeline handling to
- hide device-specific details to the rest of the library, and implements both
- pipeline configuration based on stream configuration, and pipeline runtime
- execution and scheduling when needed by the device.
-
- This component is device-specific and is part of the libcamera code base. As
- such it is covered by the same free software license as the rest of libcamera
- and needs to be contributed upstream by device vendors. The Pipeline Handler
- lives in the same process as the rest of the library, and has access to all
- helpers and kernel camera-related devices.
-
-Image Processing Algorithms
- Together with the hardware image processing and hardware statistics
- collection, the Image Processing Algorithms implement 3A (Auto-Exposure,
- Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
- and interact with the kernel camera devices to control hardware image
- processing based on the parameters supplied by upper layers, closing the
- control loop of the ISP.
-
- This component is device-specific and is loaded as an external plugin. It can
- be part of the libcamera code base, in which case it is covered by the same
- license, or provided externally as an open-source or closed-source component.
-
- The component is sandboxed and can only interact with libcamera through
- internal APIs specifically marked as such. In particular it will have no
- direct access to kernel camera devices, and all its accesses to image and
- metadata will be mediated by dmabuf instances explicitly passed to the
- component. The component must be prepared to run in a process separate from
- the main libcamera process, and to have a very restricted view of the system,
- including no access to networking APIs and limited access to file systems.
-
- The sandboxing mechanism isn't defined by libcamera. One example
- implementation will be provided as part of the project, and platforms vendors
- will be able to provide their own sandboxing mechanism as a plugin.
-
- libcamera should provide a basic implementation of Image Processing
- Algorithms, to serve as a reference for the internal API. Device vendors are
- expected to provide a full-fledged implementation compatible with their
- Pipeline Handler. One goal of the libcamera project is to create an
- environment in which the community will be able to compete with the
- closed-source vendor binaries and develop a high quality open source
- implementation.
-
-Helpers and Support Classes
- While Pipeline Handlers are device-specific, implementations are expected to
- share code due to usage of identical APIs towards the kernel camera drivers
- and the Image Processing Algorithms. This includes without limitation handling
- of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
- discovery, configuration and scheduling. Such code will be factored out to
- helpers when applicable.
-
- Other parts of libcamera will also benefit from factoring code out to
- self-contained support classes, even if such code is present only once in the
- code base, in order to keep the source code clean and easy to read. This
- should be the case for instance for plugin management.
-
-
-V4L2 Compatibility Layer
-------------------------
-
-V4L2 compatibility is achieved through a shared library that traps all
-accesses to camera devices and routes them to libcamera to emulate high-level
-V4L2 camera devices. It is injected in a process address space through
-`LD_PRELOAD` and is completely transparent for applications.
-
-The compatibility layer exposes camera device features on a best-effort basis,
-and aims for the level of features traditionally available from a UVC camera
-designed for video conferencing.
-
-
-Android Camera HAL
-------------------
-
-Camera support for Android is achieved through a generic Android
-camera HAL implementation on top of libcamera. The HAL will implement internally
-features required by Android and missing from libcamera, such as JPEG encoding
-support.
-
-The Android camera HAL implementation will initially target the
-LIMITED hardware level, with support for the FULL level then being gradually
-implemented.
diff --git a/Documentation/documentation-contents.rst b/Documentation/documentation-contents.rst
new file mode 100644
index 00000000..5c111849
--- /dev/null
+++ b/Documentation/documentation-contents.rst
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. container:: documentation-nav
+
+ * **Documentation for Users**
+ * :doc:`Introduction </introduction>`
+ * :doc:`/feature_requirements`
+ * :doc:`/guides/application-developer`
+ * :doc:`/python-bindings`
+ * :doc:`/environment_variables`
+ * :doc:`/api-html/index`
+ * :doc:`/code-of-conduct`
+ * |
+ * **Documentation for Developers**
+ * :doc:`/libcamera_architecture`
+ * :doc:`/guides/pipeline-handler`
+ * :doc:`/guides/ipa`
+ * :doc:`/camera-sensor-model`
+ * :doc:`/guides/tracing`
+ * :doc:`/software-isp-benchmarking`
+ * :doc:`/coding-style`
+ * :doc:`/internal-api-html/index`
+ * |
+ * **Documentation for System Integrators**
+ * :doc:`/lens_driver_requirements`
+ * :doc:`/sensor_driver_requirements`
+
+..
+ The following directive adds the "documentation" class to all of the pages
+ generated by sphinx. This is not relevant in libcamera nor addressed in the
+ theme's CSS, since all of the pages here are documentation. It **is** used
+ to properly format the documentation pages on libcamera.org and so should not
+ be removed.
+
+.. rst-class:: documentation
diff --git a/Documentation/environment_variables.rst b/Documentation/environment_variables.rst
index a9b230bc..6f123558 100644
--- a/Documentation/environment_variables.rst
+++ b/Documentation/environment_variables.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
Environment variables
=====================
@@ -37,11 +39,29 @@ LIBCAMERA_IPA_MODULE_PATH
Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib``
+LIBCAMERA_IPA_PROXY_PATH
+ Define a custom full path for a proxy worker for a given executable name.
+
+ Example value: ``${HOME}/.libcamera/proxy/worker:/opt/libcamera/vendor/proxy/worker``
+
+LIBCAMERA_PIPELINES_MATCH_LIST
+ Define an ordered list of pipeline names to be used to match the media
+ devices in the system. The pipeline handler names used to populate the
+ variable are the ones passed to the REGISTER_PIPELINE_HANDLER() macro in the
+ source code.
+
+ Example value: ``rkisp1,simple``
+
LIBCAMERA_RPI_CONFIG_FILE
Define a custom configuration file to use in the Raspberry Pi pipeline handler.
Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml``
+LIBCAMERA_<NAME>_TUNING_FILE
+ Define a custom IPA tuning file to use with the pipeline handler `NAME`.
+
+ Example value: ``/usr/local/share/libcamera/ipa/rpi/vc4/custom_sensor.json``
+
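+Since these variables are read by libcamera itself at runtime, a test harness
+can also set them in-process before the camera manager is started. A minimal,
+hypothetical C++ sketch (the match list value is only an example):
+
+.. code:: cpp
+
+    #include <cstdlib>
+
+    #include <libcamera/camera_manager.h>
+
+    int main()
+    {
+        /* Must be set before the CameraManager enumerates devices. */
+        setenv("LIBCAMERA_PIPELINES_MATCH_LIST", "rkisp1,simple", 1);
+
+        libcamera::CameraManager cm;
+        if (cm.start())
+            return 1;
+
+        /* ... use cm.cameras() ... */
+
+        cm.stop();
+        return 0;
+    }
+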
Further details
---------------
diff --git a/Documentation/feature_requirements.rst b/Documentation/feature_requirements.rst
new file mode 100644
index 00000000..e6b74a62
--- /dev/null
+++ b/Documentation/feature_requirements.rst
@@ -0,0 +1,150 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+Feature Requirements
+====================
+
+Device enumeration
+------------------
+
+The library shall support enumerating all camera devices available in the
+system, including both fixed cameras and hotpluggable cameras. It shall
+support cameras plugged and unplugged after the initialization of the
+library, and shall offer a mechanism to notify applications of camera plug
+and unplug.
+
+The following types of cameras shall be supported:
+
+* Internal cameras designed for point-and-shoot still image and video
+ capture usage, either controlled directly by the CPU, or exposed through
+ an internal USB bus as a UVC device.
+
+* External UVC cameras designed for video conferencing usage.
+
+Other types of camera, including analog cameras, depth cameras, thermal
+cameras, external digital picture or movie cameras, are out of scope for
+this project.
+
+A hardware device that includes independent camera sensors, such as front
+and back sensors in a phone, shall be considered as multiple camera devices
+for the purpose of this library.
+
+Independent Camera Devices
+--------------------------
+
+When multiple cameras are present in the system and are able to operate
+independently from each other, the library shall expose them as multiple
+camera devices and support parallel operation without any additional usage
+restriction apart from the limitations inherent to the hardware (such as
+memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
+
+Independent processes shall be able to use independent camera devices
+without interfering with each other. A single camera device shall be
+usable by a single process at a time.
+
+Multiple streams support
+------------------------
+
+The library shall support multiple video streams running in parallel
+for each camera device, within the limits imposed by the system.
+
+Per frame controls
+------------------
+
+The library shall support controlling capture parameters for each stream
+on a per-frame basis, on a best effort basis based on the capabilities of the
+hardware and underlying software stack (including kernel drivers and
+firmware). It shall apply capture parameters to the frame they target, and
+report the value of the parameters that have effectively been used for each
+captured frame.
+
+When a camera device supports multiple streams, the library shall allow both
+control of each stream independently, and control of multiple streams
+together. Streams that are controlled together shall be synchronized. No
+synchronization is required for streams controlled independently.
+
+Capability Enumeration
+----------------------
+
+The library shall expose capabilities of each camera device in a way that
+allows applications to discover those capabilities dynamically. Applications
+shall be allowed to cache capabilities for as long as they are using the
+library. If capabilities can change at runtime, the library shall offer a
+mechanism to notify applications of such changes. Applications shall not
+cache capabilities in long term storage between runs.
+
+Capabilities shall be discovered dynamically at runtime from the device when
+possible, and may come, in part or in full, from platform configuration
+data.
+
+Device Profiles
+---------------
+
+The library may define different camera device profiles, each with a minimum
+set of required capabilities. Applications may use those profiles to quickly
+determine the level of features exposed by a device without parsing the full
+list of capabilities. Camera devices may implement additional capabilities
+on top of the minimum required set for the profile they expose.
+
+3A and Image Enhancement Algorithms
+-----------------------------------
+
+The library shall provide a basic implementation of Image Processing Algorithms
+to serve as a reference for the internal API. This shall include auto exposure,
+auto gain and auto white balance. Camera devices that include a focus lens shall
+implement auto focus. Additional image enhancement algorithms, such as noise
+reduction or video stabilization, may be implemented. Device vendors are
+expected to provide a fully-fledged implementation compatible with their
+Pipeline Handler. One goal of the libcamera project is to create an environment
+in which the community will be able to compete with the closed-source vendor
+binaries and develop a high quality open source implementation.
+
+All algorithms may be implemented in hardware or firmware outside of the
+library, or in software in the library. They shall all be controllable by
+applications.
+
+The library shall be architectured to isolate the 3A and image enhancement
+algorithms in a component with a documented API, respectively called the 3A
+component and the 3A API. The 3A API shall be stable, and shall allow both
+open-source and closed-source implementations of the 3A component.
+
+The library may include statically-linked open-source 3A components, and
+shall support dynamically-linked open-source and closed-source 3A
+components.
+
+Closed-source 3A Component Sandboxing
+-------------------------------------
+
+For security purposes, it may be desired to run closed-source 3A components
+in a separate process. The 3A API would in such a case be transported over
+IPC. The 3A API shall make it possible to use any IPC mechanism that
+supports passing file descriptors.
+
+The library may implement an IPC mechanism, and shall support third-party
+platform-specific IPC mechanisms through the implementation of a
+platform-specific 3A API wrapper. No modification to the library shall be
+needed to use such third-party IPC mechanisms.
+
+The 3A component shall not directly access any device node on the system.
+Such accesses shall instead be performed through the 3A API. The library
+shall validate all accesses and restrict them to what is absolutely required
+by 3A components.
+
+V4L2 Compatibility Layer
+------------------------
+
+The project shall support traditional V4L2 applications through an additional
+libcamera wrapper library. The wrapper library shall trap all accesses to
+camera devices through `LD_PRELOAD`, and route them through libcamera to
+emulate a high-level V4L2 camera device. It shall expose camera device
+features on a best-effort basis, and aim for the level of features
+traditionally available from a UVC camera designed for video conferencing.
+
+Android Camera HAL v3 Compatibility
+-----------------------------------
+
+The library API shall expose all the features required to implement an
+Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
+omitted as long as they can be implemented separately in the HAL, such as
+JPEG encoding, or YUV reprocessing.
diff --git a/Documentation/gen-doxyfile.py b/Documentation/gen-doxyfile.py
new file mode 100755
index 00000000..c265bc2f
--- /dev/null
+++ b/Documentation/gen-doxyfile.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2024, Google Inc.
+#
+# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+#
+# Generate Doxyfile from a template
+
+import argparse
+import os
+import string
+import sys
+
+
+def fill_template(template, data):
+
+    # Read the template and substitute ${placeholders} with the
+    # values from the data mapping.
+    with open(template, 'rb') as f:
+        template = f.read().decode('utf-8')
+
+    return string.Template(template).substitute(data)
+
+
+def main(argv):
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-o', dest='output', metavar='file',
+ type=argparse.FileType('w', encoding='utf-8'),
+ default=sys.stdout,
+ help='Output file name (default: standard output)')
+ parser.add_argument('template', metavar='doxyfile.tmpl', type=str,
+ help='Doxyfile template')
+ parser.add_argument('inputs', type=str, nargs='*',
+ help='Input files')
+
+ args = parser.parse_args(argv[1:])
+
+ inputs = [f'"{os.path.realpath(input)}"' for input in args.inputs]
+ data = fill_template(args.template, {'inputs': (' \\\n' + ' ' * 25).join(inputs)})
+ args.output.write(data)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/Documentation/getting-started.rst b/Documentation/getting-started.rst
index 987f43f7..63b050eb 100644
--- a/Documentation/getting-started.rst
+++ b/Documentation/getting-started.rst
@@ -1,4 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+
.. Getting started information is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-getting-started
diff --git a/Documentation/guides/application-developer.rst b/Documentation/guides/application-developer.rst
index 9a9905b1..6501345a 100644
--- a/Documentation/guides/application-developer.rst
+++ b/Documentation/guides/application-developer.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Using libcamera in a C++ application
====================================
@@ -116,19 +118,21 @@ available.
.. code:: cpp
- if (cm->cameras().empty()) {
+ auto cameras = cm->cameras();
+ if (cameras.empty()) {
std::cout << "No cameras were identified on the system."
<< std::endl;
cm->stop();
return EXIT_FAILURE;
}
- std::string cameraId = cm->cameras()[0]->id();
- camera = cm->get(cameraId);
+ std::string cameraId = cameras[0]->id();
+ camera = cm->get(cameraId);
/*
- * Note that is equivalent to:
- * camera = cm->cameras()[0];
+ * Note that `camera` may not compare equal to `cameras[0]`.
+ * In fact, it might simply be a `nullptr`, as the particular
+ * device might have disappeared (and reappeared) in the meantime.
*/
Once a camera has been selected an application needs to acquire an exclusive
@@ -479,7 +483,7 @@ instance. An example of how to write image data to disk is available in the
`FileSink class`_ which is a part of the ``cam`` utility application in the
libcamera repository.
-.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp
+.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/apps/cam/file_sink.cpp
With the handling of this request completed, it is possible to re-use the
request and the associated buffers and re-queue it to the camera
@@ -614,7 +618,7 @@ accordingly. In this example, the application file has been named
simple_cam = executable('simple-cam',
'simple-cam.cpp',
- dependencies: dependency('libcamera', required : true))
+ dependencies: dependency('libcamera'))
The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to
locate the ``libcamera`` library, which the test application will be
diff --git a/Documentation/guides/introduction.rst b/Documentation/guides/introduction.rst
deleted file mode 100644
index 700ec2d3..00000000
--- a/Documentation/guides/introduction.rst
+++ /dev/null
@@ -1,319 +0,0 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
-
-Developers guide to libcamera
-=============================
-
-The Linux kernel handles multimedia devices through the 'Linux media' subsystem
-and provides a set of APIs (application programming interfaces) known
-collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
-which provide an interface to interact and control media devices.
-
-Included in this subsystem are drivers for camera sensors, CSI2 (Camera
-Serial Interface) receivers, and ISPs (Image Signal Processors)
-
-The usage of these drivers to provide a functioning camera stack is a
-responsibility that lies in userspace which is commonly implemented separately
-by vendors without a common architecture or API for application developers.
-
-libcamera provides a complete camera stack for Linux based systems to abstract
-functionality desired by camera application developers and process the
-configuration of hardware and image control algorithms required to obtain
-desirable results from the camera.
-
-.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
-.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
-
-
-In this developers guide, we will explore the `Camera Stack`_ and how it is
-can be visualised at a high level, and explore the internal `Architecture`_ of
-the libcamera library with its components. The current `Platform Support`_ is
-detailed, as well as an overview of the `Licensing`_ requirements of the
-project.
-
-This introduction is followed by a walkthrough tutorial to newcomers wishing to
-support a new platform with the `Pipeline Handler Writers Guide`_ and for those
-looking to make use of the libcamera native API an `Application Writers Guide`_
-provides a tutorial of the key APIs exposed by libcamera.
-
-.. _Pipeline Handler Writers Guide: pipeline-handler.html
-.. _Application Writers Guide: application-developer.html
-
-.. TODO: Correctly link to the other articles of the guide
-
-Camera Stack
-------------
-
-The libcamera library is implemented in userspace, and makes use of underlying
-kernel drivers that directly interact with hardware.
-
-Applications can make use of libcamera through the native `libcamera API`_'s or
-through an adaptation layer integrating libcamera into a larger framework.
-
-.. _libcamera API: https://www.libcamera.org/api-html/index.html
-
-::
-
- Application Layer
- / +--------------+ +--------------+ +--------------+ +--------------+
- | | Native | | Framework | | Native | | Android |
- | | V4L2 | | Application | | libcamera | | Camera |
- | | Application | | (gstreamer) | | Application | | Framework |
- \ +--------------+ +--------------+ +--------------+ +--------------+
-
- ^ ^ ^ ^
- | | | |
- | | | |
- v v | v
- Adaptation Layer |
- / +--------------+ +--------------+ | +--------------+
- | | V4L2 | | gstreamer | | | Android |
- | | Compatibility| | element | | | Camera |
- | | (preload) | |(libcamerasrc)| | | HAL |
- \ +--------------+ +--------------+ | +--------------+
- |
- ^ ^ | ^
- | | | |
- | | | |
- v v v v
- libcamera Framework
- / +--------------------------------------------------------------------+
- | | |
- | | libcamera |
- | | |
- \ +--------------------------------------------------------------------+
-
- ^ ^ ^
- Userspace | | |
- --------------------- | ---------------- | ---------------- | ---------------
- Kernel | | |
- v v v
-
- +-----------+ +-----------+ +-----------+
- | Media | <--> | Video | <--> | V4L2 |
- | Device | | Device | | Subdev |
- +-----------+ +-----------+ +-----------+
-
-The camera stack comprises of four software layers. From bottom to top:
-
-* The kernel drivers control the camera hardware and expose a low-level
- interface to userspace through the Linux kernel V4L2 family of APIs
- (Media Controller API, V4L2 Video Device API and V4L2 Subdev API).
-
-* The libcamera framework is the core part of the stack. It handles all control
- of the camera devices in its core component, libcamera, and exposes a native
- C++ API to upper layers.
-
-* The libcamera adaptation layer is an umbrella term designating the components
- that interface to libcamera in other frameworks. Notable examples are the V4L2
- compatibility layer, the gstreamer libcamera element, and the Android camera
- HAL implementation based on libcamera which are provided as a part of the
- libcamera project.
-
-* The applications and upper level frameworks are based on the libcamera
- framework or libcamera adaptation, and are outside of the scope of the
- libcamera project, however example native applications (cam, qcam) are
- provided for testing.
-
-
-V4L2 Compatibility Layer
- V4L2 compatibility is achieved through a shared library that traps all
- accesses to camera devices and routes them to libcamera to emulate high-level
- V4L2 camera devices. It is injected in a process address space through
- ``LD_PRELOAD`` and is completely transparent for applications.
-
- The compatibility layer exposes camera device features on a best-effort basis,
- and aims for the level of features traditionally available from a UVC camera
- designed for video conferencing.
-
-Android Camera HAL
- Camera support for Android is achieved through a generic Android camera HAL
- implementation on top of libcamera. The HAL implements features required by
- Android and out of scope from libcamera, such as JPEG encoding support.
-
- This component is used to provide support for ChromeOS platforms
-
-GStreamer element (gstlibcamerasrc)
- A `GStreamer element`_ is provided to allow capture from libcamera supported
- devices through GStreamer pipelines, and connect to other elements for further
- processing.
-
- Development of this element is ongoing and is limited to a single stream.
-
-Native libcamera API
- Applications can make use of the libcamera API directly using the C++
- API. An example application and walkthrough using the libcamera API can be
- followed in the `Application Writers Guide`_
-
-.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
-
-Architecture
-------------
-
-While offering a unified API towards upper layers, and presenting itself as a
-single library, libcamera isn't monolithic. It exposes multiple components
-through its public API and is built around a set of separate helpers internally.
-Hardware abstractions are handled through the use of device-specific components
-where required and dynamically loadable plugins are used to separate image
-processing algorithms from the core libcamera codebase.
-
-::
-
- --------------------------< libcamera Public API >---------------------------
- ^ ^
- | |
- v v
- +-------------+ +---------------------------------------------------+
- | Camera | | Camera Device |
- | Manager | | +-----------------------------------------------+ |
- +-------------+ | | Device-Agnostic | |
- ^ | | | |
- | | | +--------------------------+ |
- | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
- | | | | { +-----------------+ } |
- | | | | } | //// Image //// | { |
- | | | | <-> | / Processing // | } |
- | | | | } | / Algorithms // | { |
- | | | | { +-----------------+ } |
- | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
- | | | | ========================== |
- | | | | +-----------------+ |
- | | | | | // Pipeline /// | |
- | | | | <-> | /// Handler /// | |
- | | | | | /////////////// | |
- | | +--------------------+ +-----------------+ |
- | | Device-Specific |
- | +---------------------------------------------------+
- | ^ ^
- | | |
- v v v
- +--------------------------------------------------------------------+
- | Helpers and Support Classes |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
- | | Support | | Allocator | | IPC | | Manager | |
- | +-------------+ +-------------+ +-------------+ +-------------+ |
- | +-------------+ +-------------+ |
- | | Pipeline | | ... | |
- | | Runner | | | |
- | +-------------+ +-------------+ |
- +--------------------------------------------------------------------+
-
- /// Device-Specific Components
- ~~~ Sandboxing
-
-
-Camera Manager
- The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
- manage each Camera that libcamera supports. The Camera Manager supports
- hotplug detection and notification events when supported by the underlying
- kernel devices.
-
- There is only ever one instance of the Camera Manager running per application.
- Each application's instance of the Camera Manager ensures that only a single
- application can take control of a camera device at once.
-
- Read the `Camera Manager API`_ documentation for more details.
-
-.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
-
-Camera Device
- The Camera class represents a single item of camera hardware that is capable
- of producing one or more image streams, and provides the API to interact with
- the underlying device.
-
- If a system has multiple instances of the same hardware attached, each has its
- own instance of the camera class.
-
- The API exposes full control of the device to upper layers of libcamera through
- the public API, making it the highest level object libcamera exposes, and the
- object that all other API operations interact with from configuration to
- capture.
-
- Read the `Camera API`_ documentation for more details.
-
-.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
-
-Pipeline Handler
- The Pipeline Handler manages the complex pipelines exposed by the kernel
- drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
- handling to hide device-specific details from the rest of the library, and
- implements both pipeline configuration based on stream configuration, and
- pipeline runtime execution and scheduling when needed by the device.
-
- The Pipeline Handler lives in the same process as the rest of the library, and
- has access to all helpers and kernel camera-related devices.
-
- Hardware abstraction is handled by device specific Pipeline Handlers which are
- derived from the Pipeline Handler base class allowing commonality to be shared
- among the implementations.
-
- Derived pipeline handlers create Camera device instances based on the devices
- they detect and support on the running system, and are responsible for
- managing the interactions with a camera device.
-
- More details can be found in the `PipelineHandler API`_ documentation, and the
- `Pipeline Handler Writers Guide`_.
-
-.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
-
-Image Processing Algorithms
- An image processing algorithm (IPA) component is a loadable plugin that
- implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other
- algorithms.
-
- The algorithms run on the CPU and interact with the camera devices through the
- Pipeline Handler to control hardware image processing based on the parameters
- supplied by upper layers, maintaining state and closing the control loop
- of the ISP.
-
- The component is sandboxed and can only interact with libcamera through the
- API provided by the Pipeline Handler and an IPA has no direct access to kernel
- camera devices.
-
- Open source IPA modules built with libcamera can be run in the same process
- space as libcamera, however external IPA modules are run in a separate process
- from the main libcamera process. IPA modules have a restricted view of the
- system, including no access to networking APIs and limited access to file
- systems.
-
- IPA modules are only required for platforms and devices with an ISP controlled
- by the host CPU. Camera sensors which have an integrated ISP are not
- controlled through the IPA module.
-
-Platform Support
-----------------
-
-The library currently supports the following hardware platforms specifically
-with dedicated pipeline handlers:
-
- - Intel IPU3 (ipu3)
- - Rockchip RK3399 (rkisp1)
- - RaspberryPi 3 and 4 (rpi/vc4)
-
-Furthermore, generic platform support is provided for the following:
-
- - USB video device class cameras (uvcvideo)
- - iMX7, Allwinner Sun6i (simple)
- - Virtual media controller driver for test use cases (vimc)
-
-Licensing
----------
-
-The libcamera core, is covered by the `LGPL-2.1-or-later`_ license. Pipeline
-Handlers are a part of the libcamera code base and need to be contributed
-upstream by device vendors. IPA modules included in libcamera are covered by a
-free software license, however third-parties may develop IPA modules outside of
-libcamera and distribute them under a closed-source license, provided they do
-not include source code from the libcamera project.
-
-The libcamera project itself contains multiple libraries, applications and
-utilities. Licenses are expressed through SPDX tags in text-based files that
-support comments, and through the .reuse/dep5 file otherwise. A copy of all
-licenses are stored in the LICENSES directory, and a full summary of the
-licensing used throughout the project can be found in the COPYING.rst document.
-
-Applications which link dynamically against libcamera and use only the public
-API are an independent work of the authors and have no license restrictions
-imposed upon them from libcamera.
-
-.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
diff --git a/Documentation/guides/ipa.rst b/Documentation/guides/ipa.rst
index 25deadef..cd640563 100644
--- a/Documentation/guides/ipa.rst
+++ b/Documentation/guides/ipa.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
IPA Writer's Guide
==================
diff --git a/Documentation/guides/pipeline-handler.rst b/Documentation/guides/pipeline-handler.rst
index 728e9676..fe752975 100644
--- a/Documentation/guides/pipeline-handler.rst
+++ b/Documentation/guides/pipeline-handler.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Pipeline Handler Writers Guide
==============================
@@ -151,13 +153,14 @@ integrates with the libcamera build system, and a *vivid.cpp* file that matches
the name of the pipeline.
In the *meson.build* file, add the *vivid.cpp* file as a build source for
-libcamera by adding it to the global meson ``libcamera_sources`` variable:
+libcamera by adding it to the global meson ``libcamera_internal_sources``
+variable:
.. code-block:: none
# SPDX-License-Identifier: CC0-1.0
- libcamera_sources += files([
+ libcamera_internal_sources += files([
'vivid.cpp',
])
@@ -183,7 +186,7 @@ to the libcamera build options in the top level ``meson_options.txt``.
option('pipelines',
type : 'array',
- choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
+ choices : ['ipu3', 'rkisp1', 'rpi/pisp', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
description : 'Select which pipeline handlers to include')
@@ -258,7 +261,7 @@ implementations for the overridden class members.
return false;
}
- REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid)
+ REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
} /* namespace libcamera */
@@ -266,6 +269,8 @@ Note that you must register the ``PipelineHandler`` subclass with the pipeline
handler factory using the `REGISTER_PIPELINE_HANDLER`_ macro which
registers it and creates a global symbol to reference the class and make it
available to try and match devices.
+String "vivid" is the name assigned to the pipeline, matching the pipeline
+subdirectory name in the source tree.
.. _REGISTER_PIPELINE_HANDLER: https://libcamera.org/api-html/pipeline__handler_8h.html
@@ -516,14 +521,14 @@ handler and camera manager using `registerCamera`_.
Finally with a successful construction, we return 'true' indicating that the
PipelineHandler successfully matched and constructed a device.
-.. _Camera::create: https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a453740e0d2a2f495048ae307a85a2574
+.. _Camera::create: https://libcamera.org/internal-api-html/classlibcamera_1_1Camera.html#adf5e6c22411f953bfaa1ae21155d6c31
.. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b
.. code-block:: cpp
std::set<Stream *> streams{ &data->stream_ };
- std::shared_ptr<Camera> camera = Camera::create(this, data->video_->deviceName(), streams);
- registerCamera(std::move(camera), std::move(data));
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
+ registerCamera(std::move(camera));
return true;
@@ -549,8 +554,7 @@ Our match function should now look like the following:
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
- const std::string &id = data->video_->deviceName();
- std::shared_ptr<Camera> camera = Camera::create(data.release(), id, streams);
+ std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
registerCamera(std::move(camera));
return true;
@@ -588,11 +592,11 @@ immutable properties of the ``Camera`` device.
The libcamera controls and properties are defined in YAML form which is
processed to automatically generate documentation and interfaces. Controls are
defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties
-are defined by src/libcamera/`properties_ids_core.yaml`_.
+are defined by src/libcamera/`property_ids_core.yaml`_.
.. _controls framework: https://libcamera.org/api-html/controls_8h.html
.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html
-.. _properties_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
+.. _property_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
Pipeline handlers can optionally register the list of controls an application
can set as well as a list of immutable camera properties. Being both
@@ -795,8 +799,7 @@ derived class, and assign it to a base class pointer.
.. code-block:: cpp
- VividCameraData *data = cameraData(camera);
- CameraConfiguration *config = new VividCameraConfiguration();
+ auto config = std::make_unique<VividCameraConfiguration>();
A ``CameraConfiguration`` is specific to each pipeline, so you can only create
it from the pipeline handler code path. Applications can also generate an empty
@@ -824,9 +827,7 @@ To generate a ``StreamConfiguration``, you need a list of pixel formats and
frame sizes which are supported as outputs of the stream. You can fetch a map of
the ``V4LPixelFormat`` and ``SizeRange`` supported by the underlying output
device, but the pipeline handler needs to convert this to a
-``libcamera::PixelFormat`` type to pass to applications. We do this here using
-``std::transform`` to convert the formats and populate a new ``PixelFormat`` map
-as shown below.
+``libcamera::PixelFormat`` type to pass to applications.
Continue adding the following code example to our ``generateConfiguration``
implementation.
@@ -836,14 +837,12 @@ implementation.
std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
data->video_->formats();
std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
- std::transform(v4l2Formats.begin(), v4l2Formats.end(),
- std::inserter(deviceFormats, deviceFormats.begin()),
- [&](const decltype(v4l2Formats)::value_type &format) {
- return decltype(deviceFormats)::value_type{
- format.first.toPixelFormat(),
- format.second
- };
- });
+
+ for (auto &[v4l2PixelFormat, sizes] : v4l2Formats) {
+ PixelFormat pixelFormat = v4l2PixelFormat.toPixelFormat();
+ if (pixelFormat.isValid())
+ deviceFormats.try_emplace(pixelFormat, std::move(sizes));
+ }
The `StreamFormats`_ class holds information about the pixel formats and frame
sizes that a stream can support. The class groups size information by the pixel
@@ -933,9 +932,9 @@ Add the following function implementation to your file:
StreamConfiguration &cfg = config_[0];
- const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
+ const std::vector<libcamera::PixelFormat> &formats = cfg.formats().pixelformats();
if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
- cfg.pixelFormat = cfg.formats().pixelformats()[0];
+ cfg.pixelFormat = formats[0];
LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
status = Adjusted;
}
@@ -1345,7 +1344,7 @@ before being set.
continue;
}
- int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ int32_t value = std::lround(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@@ -1409,7 +1408,7 @@ value translation operations:
.. code-block:: cpp
- #include <math.h>
+ #include <cmath>
Frame completion and event handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/guides/tracing.rst b/Documentation/guides/tracing.rst
index ae960d85..537dce50 100644
--- a/Documentation/guides/tracing.rst
+++ b/Documentation/guides/tracing.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: ../documentation-contents.rst
+
Tracing Guide
=============
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 63fac72d..251112fb 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -1,26 +1,31 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
-.. Front page matter is defined in the project README file.
-.. include:: ../README.rst
- :start-after: .. section-begin-libcamera
- :end-before: .. section-end-libcamera
+.. include:: introduction.rst
.. toctree::
:maxdepth: 1
:caption: Contents:
Home <self>
- Docs <docs>
Contribute <contributing>
Getting Started <getting-started>
- Developer Guide <guides/introduction>
Application Writer's Guide <guides/application-developer>
- Pipeline Handler Writer's Guide <guides/pipeline-handler>
- IPA Writer's guide <guides/ipa>
- Tracing guide <guides/tracing>
+ Camera Sensor Model <camera-sensor-model>
Environment variables <environment_variables>
- Sensor driver requirements <sensor_driver_requirements>
+ Feature Requirements <feature_requirements>
+ IPA Writer's guide <guides/ipa>
Lens driver requirements <lens_driver_requirements>
+ libcamera Architecture <libcamera_architecture>
+ Pipeline Handler Writer's Guide <guides/pipeline-handler>
Python Bindings <python-bindings>
- Camera Sensor Model <camera-sensor-model>
+ Sensor driver requirements <sensor_driver_requirements>
+ SoftwareISP Benchmarking <software-isp-benchmarking>
+ Tracing guide <guides/tracing>
+
+ Design document: AE <design/ae>
+
+.. toctree::
+ :hidden:
+
+ introduction
diff --git a/Documentation/internal-api-html/index.rst b/Documentation/internal-api-html/index.rst
new file mode 100644
index 00000000..43768648
--- /dev/null
+++ b/Documentation/internal-api-html/index.rst
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. _internal-api:
+
+Internal API Reference
+======================
+
+:: Placeholder for Doxygen documentation
diff --git a/Documentation/introduction.rst b/Documentation/introduction.rst
new file mode 100644
index 00000000..82aa11a3
--- /dev/null
+++ b/Documentation/introduction.rst
@@ -0,0 +1,224 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+************
+Introduction
+************
+
+.. toctree::
+ :hidden:
+
+ API <api-html/index>
+ Internal API <internal-api-html/index>
+
+What is libcamera?
+==================
+
+libcamera is an open source complex camera support library for Linux, Android
+and ChromeOS. The library interfaces with Linux kernel device drivers and
+provides an intuitive API to developers in order to reduce the complexity
+involved in capturing images from complex cameras on Linux systems.
+
+What is a "complex camera"?
+===========================
+
+A modern "camera" tends to infact be several different pieces of hardware which
+must all be controlled together in order to produce and capture images of
+appropriate quality. A hardware pipeline typically consists of a camera sensor
+that captures raw frames and transmits them on a bus, a receiver that decodes
+the bus signals, and an image signal processor that processes raw frames to
+produce usable images in a standard format. The Linux kernel handles these
+multimedia devices through the 'Linux media' subsystem and provides a set of
+application programming interfaces known collectively as the
+V4L2 (`Video for Linux 2`_) and the `Media Controller`_ APIs, which provide an
+interface to interact and control media devices.
+
+.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
+.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
+
+Included in this subsystem are drivers for camera sensors, CSI2 (Camera
+Serial Interface) receivers, and ISPs (Image Signal Processors).
+
+The usage of these drivers to provide a functioning camera stack is a
+responsibility that lies in userspace, and is commonly implemented separately
+by vendors without a common architecture or API for application developers. This
+adds a lot of complexity to the task, particularly when considering that the
+differences in hardware pipelines and their representation in the kernel's APIs
+often necessitate bespoke handling.
+
+What is libcamera for?
+======================
+
+libcamera provides a complete camera stack for Linux-based systems to abstract
+the configuration of hardware and image control algorithms required to obtain
+desirable results from the camera through the kernel's APIs, reducing those
+operations to a simple and consistent method for developers. In short, instead
+of having to deal with this:
+
+.. graphviz:: mali-c55.dot
+
+you can instead simply deal with:
+
+.. code-block:: python
+
+ >>> import libcamera as lc
+ >>> camera_manager = lc.CameraManager.singleton()
+ [0:15:59.582029920] [504] INFO Camera camera_manager.cpp:313 libcamera v0.3.0+182-01e57380
+ >>> for camera in camera_manager.cameras:
+ ... print(f' - {camera.id}')
+ ...
+ - mali-c55 tpg
+ - imx415 1-001a
+
+The library handles the rest for you. These documentation pages give more
+information on the internal workings of libcamera (and the kernel camera stack
+that lies behind it) as well as guidance on using libcamera in an application or
+extending the library with support for your hardware (through the pipeline
+handler and IPA module writer's guides).
+
+How should I use it?
+====================
+
+There are a few ways you might want to use libcamera, depending on your
+application. It's always possible to use the library directly, and you can find
+detailed information on how to do so in the
+:doc:`application writer's guide <guides/application-developer>`.
+
+It is often more appropriate to use one of the frameworks with libcamera
+support. For example, an application powering an embedded media device
+incorporating capture, encoding and streaming of both video and audio would
+benefit from using `GStreamer`_, for which libcamera provides a plugin.
+Similarly, an application for user-facing devices like a laptop would likely
+benefit from accessing cameras through the XDG camera portal and `pipewire`_,
+which brings the advantages of resource sharing (multiple applications
+accessing the stream at the same time) and access control.
+
+.. _GStreamer: https://gstreamer.freedesktop.org/
+.. _pipewire: https://pipewire.org/
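+
+For instance, with the GStreamer element installed, frames from a libcamera
+device can be displayed with a pipeline along the lines of the following
+sketch (the sink element to use depends on the platform):
+
+.. code-block:: shell
+
+   gst-launch-1.0 libcamerasrc ! videoconvert ! autovideosink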
+
+Camera Stack
+============
+
+::
+
+ a c / +-------------+ +-------------+ +-------------+ +-------------+
+ p a | | Native | | Framework | | Native | | Android |
+ p t | | V4L2 | | Application | | libcamera | | Camera |
+ l i | | Application | | (gstreamer) | | Application | | Framework |
+ i o \ +-------------+ +-------------+ +-------------+ +-------------+
+ n ^ ^ ^ ^
+ | | | |
+ l a | | | |
+ i d v v | v
+ b a / +-------------+ +-------------+ | +-------------+
+ c p | | V4L2 | | Camera | | | Android |
+ a t | | Compat. | | Framework | | | Camera |
+ m a | | | | (gstreamer) | | | HAL |
+ e t \ +-------------+ +-------------+ | +-------------+
+ r i ^ ^ | ^
+ a o | | | |
+ n | | | |
+ / | ,................................................
+ | | ! : Language : !
+ l f | | ! : Bindings : !
+ i r | | ! : (optional) : !
+ b a | | \...............................................'
+ c m | | | | |
+ a e | | | | |
+ m w | v v v v
+ e o | +----------------------------------------------------------------+
+ r r | | |
+ a k | | libcamera |
+ | | |
+ \ +----------------------------------------------------------------+
+ ^ ^ ^
+ Userspace | | |
+ ------------------------ | ---------------- | ---------------- | ---------------
+ Kernel | | |
+ v v v
+ +-----------+ +-----------+ +-----------+
+ | Media | <--> | Video | <--> | V4L2 |
+ | Device | | Device | | Subdev |
+ +-----------+ +-----------+ +-----------+
+
+The camera stack comprises four software layers. From bottom to top:
+
+* The kernel drivers control the camera hardware and expose a
+ low-level interface to userspace through the Linux kernel V4L2
+ family of APIs (Media Controller API, V4L2 Video Device API and
+ V4L2 Subdev API).
+
+* The libcamera framework is the core part of the stack. It
+ handles all control of the camera devices in its core component,
+ libcamera, and exposes a native C++ API to upper layers. Optional
+ language bindings allow interfacing to libcamera from other
+ programming languages.
+
+ Those components live in the same source code repository and
+ all together constitute the libcamera framework.
+
+* The libcamera adaptation is an umbrella term designating the
+ components that interface to libcamera in other frameworks.
+ Notable examples are a V4L2 compatibility layer, a gstreamer
+ libcamera element, and an Android camera HAL implementation based
+ on libcamera.
+
+ Those components can live in the libcamera project source code
+ in separate repositories, or move to their respective project's
+ repository (for instance the gstreamer libcamera element).
+
+* The applications and upper level frameworks are based on the
+ libcamera framework or libcamera adaptation, and are outside of
+ the scope of the libcamera project.
+
+V4L2 Compatibility Layer
+ V4L2 compatibility is achieved through a shared library that traps all
+ accesses to camera devices and routes them to libcamera to emulate high-level
+ V4L2 camera devices. It is injected in a process address space through
+ ``LD_PRELOAD`` and is completely transparent for applications.
+
+ The compatibility layer exposes camera device features on a best-effort basis,
+ and aims for the level of features traditionally available from a UVC camera
+ designed for video conferencing.
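+
+ For example, a sketch of such an invocation, assuming the compatibility
+ layer is installed as ``v4l2-compat.so`` (the exact path varies by
+ distribution):
+
+ .. code-block:: shell
+
+    LD_PRELOAD=/usr/lib/libcamera/v4l2-compat.so v4l2-ctl --list-devices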
+
+Android Camera HAL
+ Camera support for Android is achieved through a generic Android camera HAL
+ implementation on top of libcamera. The HAL implements features required by
+ Android and out of scope from libcamera, such as JPEG encoding support.
+
+ This component is used to provide support for ChromeOS platforms.
+
+GStreamer element (gstlibcamerasrc)
+ A `GStreamer element`_ is provided to allow capture from libcamera supported
+ devices through GStreamer pipelines, and connect to other elements for further
+ processing.
+
+Native libcamera API
+ Applications can make use of the libcamera API directly using the C++
+ API. An example application and walkthrough using the libcamera API can be
+ followed in the :doc:`Application writer's guide </guides/application-developer>`.
+
+.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
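+
+As a minimal sketch of the native API (error handling omitted for brevity),
+the following program lists the cameras detected on the system:
+
+.. code-block:: cpp
+
+   #include <iostream>
+
+   #include <libcamera/libcamera.h>
+
+   int main()
+   {
+           libcamera::CameraManager manager;
+           manager.start();
+
+           /* Print the unique identifier of every detected camera. */
+           for (const auto &camera : manager.cameras())
+                   std::cout << camera->id() << std::endl;
+
+           manager.stop();
+           return 0;
+   }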
+
+Licensing
+=========
+
+The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
+Handlers are a part of the libcamera code base and need to be contributed
+upstream by device vendors. IPA modules included in libcamera are covered by a
+free software license, however third-parties may develop IPA modules outside of
+libcamera and distribute them under a closed-source license, provided they do
+not include source code from the libcamera project.
+
+The libcamera project itself contains multiple libraries, applications and
+utilities. Licenses are expressed through SPDX tags in text-based files that
+support comments, and through the .reuse/dep5 file otherwise. A copy of all
+licenses is stored in the LICENSES directory, and a full summary of the
+licensing used throughout the project can be found in the COPYING.rst document.
+
+Applications which link dynamically against libcamera and use only the public
+API are an independent work of the authors and have no license restrictions
+imposed upon them from libcamera.
+
+.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
diff --git a/Documentation/lens_driver_requirements.rst b/Documentation/lens_driver_requirements.rst
index b96e502d..85fef76f 100644
--- a/Documentation/lens_driver_requirements.rst
+++ b/Documentation/lens_driver_requirements.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _lens-driver-requirements:
Lens Driver Requirements
diff --git a/Documentation/libcamera_architecture.rst b/Documentation/libcamera_architecture.rst
new file mode 100644
index 00000000..abbb0d17
--- /dev/null
+++ b/Documentation/libcamera_architecture.rst
@@ -0,0 +1,168 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+libcamera Architecture
+======================
+
+While offering a unified API towards upper layers, and presenting itself as a
+single library, libcamera isn't monolithic. It exposes multiple components
+through its public API and is built around a set of separate helpers internally.
+Hardware abstractions are handled through the use of device-specific components
+where required and dynamically loadable plugins are used to separate image
+processing algorithms from the core libcamera codebase.
+
+::
+
+ --------------------------< libcamera Public API >---------------------------
+ ^ ^
+ | |
+ v v
+ +-------------+ +---------------------------------------------------+
+ | Camera | | Camera Device |
+ | Manager | | +-----------------------------------------------+ |
+ +-------------+ | | Device-Agnostic | |
+ ^ | | | |
+ | | | +--------------------------+ |
+ | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | | | { +-----------------+ } |
+ | | | | } | //// Image //// | { |
+ | | | | <-> | / Processing // | } |
+ | | | | } | / Algorithms // | { |
+ | | | | { +-----------------+ } |
+ | | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
+ | | | | ========================== |
+ | | | | +-----------------+ |
+ | | | | | // Pipeline /// | |
+ | | | | <-> | /// Handler /// | |
+ | | | | | /////////////// | |
+ | | +--------------------+ +-----------------+ |
+ | | Device-Specific |
+ | +---------------------------------------------------+
+ | ^ ^
+ | | |
+ v v v
+ +--------------------------------------------------------------------+
+ | Helpers and Support Classes |
+ | +-------------+ +-------------+ +-------------+ +-------------+ |
+ | | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
+ | | Support | | Allocator | | IPC | | Manager | |
+ | +-------------+ +-------------+ +-------------+ +-------------+ |
+ | +-------------+ +-------------+ |
+ | | Pipeline | | ... | |
+ | | Runner | | | |
+ | +-------------+ +-------------+ |
+ +--------------------------------------------------------------------+
+
+ /// Device-Specific Components
+ ~~~ Sandboxing
+
+
+Camera Manager
+ The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
+ manage each Camera that libcamera supports. The Camera Manager supports
+ hotplug detection and notification events when supported by the underlying
+ kernel devices.
+
+ There is only ever one instance of the Camera Manager running per application.
+ Each application's instance of the Camera Manager ensures that only a single
+ application can take control of a camera device at once.
+
+ Read the `Camera Manager API`_ documentation for more details.
+
+.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
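+
+Hotplug events are delivered through signals exposed by the Camera Manager. A
+minimal sketch (not part of libcamera) that connects a callback to the
+``cameraAdded`` signal could look like:
+
+.. code-block:: cpp
+
+   #include <iostream>
+   #include <memory>
+
+   #include <libcamera/libcamera.h>
+
+   static void onCameraAdded(std::shared_ptr<libcamera::Camera> camera)
+   {
+           std::cout << "Camera added: " << camera->id() << std::endl;
+   }
+
+   int main()
+   {
+           libcamera::CameraManager manager;
+
+           /* Connect to the hotplug signal before starting the manager. */
+           manager.cameraAdded.connect(&onCameraAdded);
+           manager.start();
+
+           /* ... run the application's event loop ... */
+
+           manager.stop();
+           return 0;
+   }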
+
+Camera Device
+ The Camera class represents a single item of camera hardware that is capable
+ of producing one or more image streams, and provides the API to interact with
+ the underlying device.
+
+ If a system has multiple instances of the same hardware attached, each has its
+ own instance of the camera class.
+
+ The API exposes full control of the device to upper layers of libcamera through
+ the public API, making it the highest level object libcamera exposes, and the
+ object that all other API operations interact with from configuration to
+ capture.
+
+ Read the `Camera API`_ documentation for more details.
+
+.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
+
+Pipeline Handler
+ The Pipeline Handler manages the complex pipelines exposed by the kernel
+ drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
+ handling to hide device-specific details from the rest of the library, and
+ implements both pipeline configuration based on stream configuration, and
+ pipeline runtime execution and scheduling when needed by the device.
+
+ The Pipeline Handler lives in the same process as the rest of the library, and
+ has access to all helpers and kernel camera-related devices.
+
+ Hardware abstraction is handled by device-specific Pipeline Handlers, which
+ are derived from the Pipeline Handler base class, allowing commonality to be
+ shared among the implementations.
+
+ Derived pipeline handlers create Camera device instances based on the devices
+ they detect and support on the running system, and are responsible for
+ managing the interactions with a camera device.
+
+ More details can be found in the `PipelineHandler API`_ documentation, and the
+ :doc:`Pipeline Handler Writers Guide <guides/pipeline-handler>`.
+
+.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
+
+Image Processing Algorithms
+ Together with the hardware image processing and hardware statistics
+ collection, the Image Processing Algorithms (IPA) implement 3A (Auto-Exposure,
+ Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
+ and control hardware image processing based on the parameters supplied by
+ upper layers, closing the control loop of the ISP.
+
+ IPAs are loaded as external plugins named IPA Modules. IPA Modules can be part
+ of the libcamera code base or provided externally by camera vendors as
+ open-source or closed-source components.
+
+ Open source IPA Modules built with libcamera are run in the same process space
+ as libcamera. External IPA Modules are run in a separate sandboxed process. In
+ either case, they can only interact with libcamera through the API provided by
+ the Pipeline Handler. They have a restricted view of the system, with no direct
+ access to kernel camera devices, no access to networking APIs, and limited
+ access to file systems. All their accesses to image and metadata are mediated
+ by dmabuf instances explicitly passed by the Pipeline Handler to the IPA
+ Module.
+
+ IPA Modules are only required for platforms and devices with an ISP controlled
+ by the host CPU. Camera sensors which have an integrated ISP are not
+ controlled through the IPA Module.
+
+Helpers and Support Classes
+ While Pipeline Handlers are device-specific, implementations are expected to
+ share code due to usage of identical APIs towards the kernel camera drivers
+ and the Image Processing Algorithms. This includes, without limitation, handling
+ of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
+ discovery, configuration and scheduling. Such code will be factored out to
+ helpers when applicable.
+
+ Other parts of libcamera will also benefit from factoring code out to
+ self-contained support classes, even if such code is present only once in the
+ code base, in order to keep the source code clean and easy to read. This
+ should be the case for instance for plugin management.
+
+Platform Support
+----------------
+
+The library currently supports the following hardware platforms specifically
+with dedicated pipeline handlers:
+
+ - Arm Mali-C55
+ - Intel IPU3 (ipu3)
+ - NXP i.MX8MP (imx8-isi and rkisp1)
+ - Raspberry Pi 3, 4 and Zero (rpi/vc4)
+ - Rockchip RK3399 (rkisp1)
+
+Furthermore, generic platform support is provided for the following:
+
+ - USB video device class cameras (uvcvideo)
+ - iMX7, IPU6, Allwinner Sun6i (simple)
+ - Virtual media controller driver for test use cases (vimc)
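+
+Which pipeline handlers are built is selected at configuration time through
+libcamera's ``pipelines`` meson option, for example (a sketch, run from a
+libcamera source checkout):
+
+.. code-block:: shell
+
+   meson setup build -Dpipelines=rkisp1,uvcvideo
+   ninja -C build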
diff --git a/Documentation/mainpage.dox b/Documentation/mainpage.dox
new file mode 100644
index 00000000..cbee9bab
--- /dev/null
+++ b/Documentation/mainpage.dox
@@ -0,0 +1,33 @@
+/**
+\mainpage libcamera API reference
+
+Welcome to the API reference for <a href="https://libcamera.org/">libcamera</a>,
+a complex camera support library for Linux, Android and ChromeOS. These pages
+are automatically generated from the libcamera source code and describe the API
+in detail - if this is your first interaction with libcamera then you may find
+it useful to visit the [documentation](../introduction.html) in
+the first instance, which can provide a more generic introduction to the
+library's concepts.
+
+\if internal
+
+As a follow-on to the developer's guide, to assist you in adding support for
+your platform, the [pipeline handler writer's guide](../guides/pipeline-handler.html)
+and the [IPA module writer's guide](../guides/ipa.html) should be helpful.
+
+The full libcamera API is documented here. If you wish to see only the public
+part of the API you can use [these pages](../api-html/index.html) instead.
+
+\else
+
+As a follow-on to the developer's guide, to assist you in using libcamera
+within your project, the [application developer's guide](../guides/application-developer.html)
+gives an overview of how to achieve that.
+
+Only the public part of the libcamera API is documented here; if you are a
+developer seeking to add support for your hardware to the library or make other
+improvements, you should switch to the internal API
+[reference pages](../internal-api-html/index.html) instead.
+
+\endif
+*/
diff --git a/Documentation/mali-c55.dot b/Documentation/mali-c55.dot
new file mode 100644
index 00000000..7bfc44c0
--- /dev/null
+++ b/Documentation/mali-c55.dot
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: CC-BY-SA-4.0 */
+
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{} | mali-c55 tpg\n/dev/v4l-subdev0 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port0 -> n00000003:port0 [style=dashed]
+ n00000003 [label="{{<port0> 0 | <port4> 4} | mali-c55 isp\n/dev/v4l-subdev1 | {<port1> 1 | <port2> 2 | <port3> 3}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000003:port1 -> n00000009:port0 [style=bold]
+ n00000003:port2 -> n00000009:port2 [style=bold]
+ n00000003:port1 -> n0000000d:port0 [style=bold]
+ n00000003:port3 -> n0000001c
+ n00000009 [label="{{<port0> 0 | <port2> 2} | mali-c55 resizer fr\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000009:port1 -> n00000010
+ n0000000d [label="{{<port0> 0} | mali-c55 resizer ds\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000d:port1 -> n00000014
+ n00000010 [label="mali-c55 fr\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n00000014 [label="mali-c55 ds\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n00000018 [label="mali-c55 3a params\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n00000018 -> n00000003:port4
+ n0000001c [label="mali-c55 3a stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n00000030 [label="{{<port0> 0} | lte-csi2-rx\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000030:port1 -> n00000003:port0
+ n00000035 [label="{{} | imx415 1-001a\n/dev/v4l-subdev5 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000035:port0 -> n00000030:port0 [style=bold]
+}
diff --git a/Documentation/meson.build b/Documentation/meson.build
index 7a58fec8..0fc5909d 100644
--- a/Documentation/meson.build
+++ b/Documentation/meson.build
@@ -15,6 +15,7 @@ if doxygen.found() and dot.found()
cdata.set('TOP_SRCDIR', meson.project_source_root())
cdata.set('TOP_BUILDDIR', meson.project_build_root())
cdata.set('OUTPUT_DIR', meson.current_build_dir())
+ cdata.set('WARN_AS_ERROR', get_option('doc_werror') ? 'YES' : 'NO')
doxygen_predefined = []
foreach key : config_h.keys()
@@ -23,44 +24,100 @@ if doxygen.found() and dot.found()
cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
- doxyfile = configure_file(input : 'Doxyfile.in',
- output : 'Doxyfile',
- configuration : cdata)
+ doxyfile_common = configure_file(input : 'Doxyfile-common.in',
+ output : 'Doxyfile-common',
+ configuration : cdata)
+
+ doxygen_public_input = [
+ libcamera_base_public_headers,
+ libcamera_base_public_sources,
+ libcamera_public_headers,
+ libcamera_public_sources,
+ ]
- doxygen_input = [
- doxyfile,
- libcamera_base_headers,
- libcamera_base_sources,
+ doxygen_internal_input = [
+ libcamera_base_private_headers,
+ libcamera_base_internal_sources,
libcamera_internal_headers,
+ libcamera_internal_sources,
libcamera_ipa_headers,
libcamera_ipa_interfaces,
- libcamera_public_headers,
- libcamera_sources,
libipa_headers,
libipa_sources,
]
if is_variable('ipu3_ipa_sources')
- doxygen_input += [ipu3_ipa_sources]
+ doxygen_internal_input += [ipu3_ipa_sources]
endif
- custom_target('doxygen',
- input : doxygen_input,
+ # We run doxygen twice - the first run excludes internal API objects as it
+ # is intended to document the public API only. A second run covers all of
+ # the library's objects for libcamera developers. Common configuration is
+ # set in an initially generated Doxyfile, which is then included by the two
+ # final Doxyfiles.
+
+ # This is the "public" run of doxygen generating an abridged version of the
+ # API's documentation.
+
+ doxyfile_tmpl = configure_file(input : 'Doxyfile-public.in',
+ output : 'Doxyfile-public.tmpl',
+ configuration : cdata)
+
+ # The set of public input files stored in the doxygen_public_input array
+ # needs to be set in Doxyfile-public. We can't pass them through cdata, as
+ # some of the array members are custom_tgt instances, which
+ # configuration_data.set() doesn't support. We therefore use a separate
+ # script invoked through custom_target(), which supports custom_tgt
+ # instances as inputs.
+
+ doxyfile = custom_target('doxyfile-public',
+ input : [
+ doxygen_public_input,
+ ],
+ output : 'Doxyfile-public',
+ command : [
+ 'gen-doxyfile.py',
+ '-o', '@OUTPUT@',
+ doxyfile_tmpl,
+ '@INPUT@',
+ ])
+
+ custom_target('doxygen-public',
+ input : [
+ doxyfile,
+ doxyfile_common,
+ ],
output : 'api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc')
+
+ # This is the internal documentation, which hard-codes a list of directories
+ # to parse in its doxyfile.
+
+ doxyfile = configure_file(input : 'Doxyfile-internal.in',
+ output : 'Doxyfile-internal',
+ configuration : cdata)
+
+ custom_target('doxygen-internal',
+ input : [
+ doxyfile,
+ doxyfile_common,
+ doxygen_internal_input,
+ ],
+ output : 'internal-api-html',
+ command : [doxygen, doxyfile],
+ install : true,
+ install_dir : doc_install_dir,
+ install_tag : 'doc-internal')
endif
#
# Sphinx
#
-sphinx = find_program('sphinx-build-3', required : false)
-if not sphinx.found()
- sphinx = find_program('sphinx-build', required : get_option('documentation'))
-endif
+sphinx = find_program('sphinx-build-3', 'sphinx-build',
+ required : get_option('documentation'))
if sphinx.found()
docs_sources = [
@@ -69,17 +126,22 @@ if sphinx.found()
'coding-style.rst',
'conf.py',
'contributing.rst',
- 'docs.rst',
+ 'design/ae.rst',
+ 'documentation-contents.rst',
'environment_variables.rst',
+ 'feature_requirements.rst',
'guides/application-developer.rst',
- 'guides/introduction.rst',
'guides/ipa.rst',
'guides/pipeline-handler.rst',
'guides/tracing.rst',
'index.rst',
+ 'introduction.rst',
'lens_driver_requirements.rst',
+ 'libcamera_architecture.rst',
+ 'mali-c55.dot',
'python-bindings.rst',
'sensor_driver_requirements.rst',
+ 'software-isp-benchmarking.rst',
'../README.rst',
]
diff --git a/Documentation/python-bindings.rst b/Documentation/python-bindings.rst
index ed9f686b..94712238 100644
--- a/Documentation/python-bindings.rst
+++ b/Documentation/python-bindings.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _python-bindings:
Python Bindings for libcamera
diff --git a/Documentation/sensor_driver_requirements.rst b/Documentation/sensor_driver_requirements.rst
index 0e516b34..fb4269d0 100644
--- a/Documentation/sensor_driver_requirements.rst
+++ b/Documentation/sensor_driver_requirements.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. include:: documentation-contents.rst
+
.. _sensor-driver-requirements:
Sensor Driver Requirements
diff --git a/Documentation/software-isp-benchmarking.rst b/Documentation/software-isp-benchmarking.rst
new file mode 100644
index 00000000..9c2a409b
--- /dev/null
+++ b/Documentation/software-isp-benchmarking.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+.. include:: documentation-contents.rst
+
+.. _software-isp-benchmarking:
+
+Software ISP benchmarking
+=========================
+
+The Software ISP is particularly sensitive to performance regressions. It is
+therefore a good idea to always benchmark the Software ISP before and after
+making changes to it, to ensure that there are no performance regressions.
+
+DebayerCpu class builtin benchmark
+----------------------------------
+
+The DebayerCpu class has a builtin benchmark. This benchmark measures the time
+spent on processing (collecting statistics and debayering) only; it does not
+measure the time spent on capturing or outputting the frames.
+
+The builtin benchmark always runs, so it can be used by simply running "cam"
+or "qcam" with a pipeline using the Software ISP.
+
+When it runs it will skip measuring the first 30 frames to allow the caches
+and the CPU temperature (turbo-ing) to warm up, and then it measures 30 frames
+and shows the total and per-frame processing time using an info level log
+message:
+
+.. code-block:: text
+
+ INFO Debayer debayer_cpu.cpp:907 Processed 30 frames in 244317us, 8143 us/frame
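+
+In this example the processing cost is 244317 µs / 30 frames ≈ 8143 µs per
+frame, meaning that the processing step alone would sustain roughly
+1000000 / 8143 ≈ 123 frames per second on this machine.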
+
+To get stable measurements it is advised to disable any other processes which
+may cause significant CPU usage (e.g. disable wifi, bluetooth and browsers).
+When possible it is also advisable to disable CPU turbo-ing and
+frequency-scaling.
+
+For example, when benchmarking on a Lenovo ThinkPad X1 Yoga Gen 8, with the
+charger plugged in, the CPU can be fixed to run at 2 GHz using:
+
+.. code-block:: shell
+
+ sudo x86_energy_perf_policy --turbo-enable 0
+ sudo cpupower frequency-set -d 2GHz -u 2GHz
+
+With these settings the builtin benchmark reports a processing time of
+~7.8 ms/frame on this laptop for FHD SGRBG10 (unpacked) Bayer data.
+
+Measuring power consumption
+---------------------------
+
+Since the Software ISP is often used on mobile devices, it is also important
+to measure power consumption and ensure that it does not regress.
+
+For example, to measure power consumption on a Lenovo ThinkPad X1 Yoga Gen 8,
+it needs to be running on battery and should be configured with its
+platform-profile (/sys/firmware/acpi/platform_profile) set to balanced and
+with its default turbo and frequency-scaling behavior, to match real world
+usage.
+
+Then start qcam to capture an FHD picture at 30 fps and position the qcam
+window so that it is fully visible. After this, run the following command to
+monitor the power consumption:
+
+.. code-block:: shell
+
+ watch -n 10 cat /sys/class/power_supply/BAT0/power_now /sys/class/hwmon/hwmon6/fan?_input
+
+Note that this not only measures the power consumption in µW, it also
+monitors the speed of this laptop's 2 fans. This is important because,
+depending on the ambient temperature, the 2 fans may spin up while testing,
+and this will cause an additional power consumption of approx. 0.5 W, skewing
+the measurement.
+
+After starting qcam and the watch command, let the laptop sit without using
+it for 2 minutes to let the readings stabilize. Then check that the fans have
+not turned on, manually take a couple of consecutive power readings, and
+average these.
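+
+A sketch of taking those consecutive readings, assuming the same sysfs path as
+above (``power_now`` reports µW):
+
+.. code-block:: shell
+
+   for i in $(seq 5); do
+           cat /sys/class/power_supply/BAT0/power_now
+           sleep 10
+   done | awk '{ sum += $1 } END { printf "average: %.1f W\n", sum / NR / 1e6 }'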
+
+On the example Lenovo ThinkPad X1 Yoga Gen 8 laptop this results in a measured
+power consumption of approx. 13 W while running qcam versus approx. 4-5 W
+while sitting idle with its OLED panel on.
diff --git a/Documentation/theme/static/css/theme.css b/Documentation/theme/static/css/theme.css
index d4274ea6..a6d43195 100644
--- a/Documentation/theme/static/css/theme.css
+++ b/Documentation/theme/static/css/theme.css
@@ -283,9 +283,13 @@ div#signature {
font-size: 12px;
}
-#libcamera div.toctree-wrapper {
+#licensing div.toctree-wrapper {
height: 0px;
margin: 0px;
padding: 0px;
visibility: hidden;
}
+
+.documentation-nav {
+ display: none;
+}
diff --git a/Documentation/thread-safety.dox b/Documentation/thread-safety.dox
new file mode 100644
index 00000000..df4c457c
--- /dev/null
+++ b/Documentation/thread-safety.dox
@@ -0,0 +1,44 @@
+/**
+ * \page thread-safety Reentrancy and Thread-Safety
+ *
+ * Through the documentation, several terms are used to define how classes and
+ * their member functions can be used from multiple threads.
+ *
+ * - A **reentrant** function may be called simultaneously from multiple
+ * threads if and only if each invocation uses a different instance of the
+ * class. This is the default for all member functions not explicitly marked
+ * otherwise.
+ *
+ * - \anchor thread-safe A **thread-safe** function may be called
+ * simultaneously from multiple threads on the same instance of a class. A
+ * thread-safe function is thus reentrant. Thread-safe functions may also be
+ * called simultaneously with any other reentrant function of the same class
+ * on the same instance.
+ *
+ * \internal
+ * - \anchor thread-bound A **thread-bound** function may be called only from
+ * the thread that the class instance lives in (see section \ref
+ * thread-objects). For instances of classes that do not derive from the
+ * Object class, this is the thread in which the instance was created. A
+ * thread-bound function is not thread-safe, and may or may not be reentrant.
+ * \endinternal
+ *
+ * Neither reentrancy nor thread-safety, in this context, means that a function
+ * may be called simultaneously from the same thread, for instance from a
+ * callback invoked by the function. This may deadlock and isn't allowed unless
+ * separately documented.
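+ *
+ * As an illustrative sketch (not a libcamera class), a counter that
+ * serializes all accesses to its state with a mutex is thread-safe, while
+ * the same class without the lock would only be reentrant:
+ *
+ * \code
+ * #include <mutex>
+ *
+ * class Counter
+ * {
+ * public:
+ *         void increment()
+ *         {
+ *                 std::lock_guard<std::mutex> lock(mutex_);
+ *                 count_++;
+ *         }
+ *
+ * private:
+ *         std::mutex mutex_;
+ *         int count_ = 0;
+ * };
+ * \endcode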
+ *
+ * \if internal
+ * A class is defined as reentrant, thread-safe or thread-bound if all its
+ * member functions are reentrant, thread-safe or thread-bound respectively.
+ * \else
+ * A class is defined as reentrant or thread-safe if all its member functions
+ * are reentrant or thread-safe respectively.
+ * \endif
+ * Some member functions may also be documented as having additional
+ * thread-related attributes.
+ *
+ * Most classes are reentrant but not thread-safe, as making them fully
+ * thread-safe would incur locking costs considered prohibitive for the
+ * expected use cases.
+ */