#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder
#
# Test the V4L2 compatibility layer

import argparse
import glob
import os
import re
import shutil
import signal
import subprocess
import sys

from packaging import version

# Minimum version of v4l-utils (v4l2-compliance and v4l2-ctl) that the
# test requires.
MIN_V4L_UTILS_VERSION = version.parse("1.21.0")

# Automake-style test exit codes.
TestPass = 0
TestFail = -1
TestSkip = 77

# Kernel drivers for which the libcamera pipeline handler is testable.
supported_pipelines = [
    'bcm2835-isp',
    'uvcvideo',
    'vimc',
]


def grep(exp, arr):
    """Return the elements of arr that match the regular expression exp."""
    return [s for s in arr if re.search(exp, s)]


def run_with_stdout(*args, env=None):
    """Run args as a subprocess, capturing its standard output.

    The process runs with env as its *entire* environment (an empty
    environment by default), and stderr discarded.

    Returns a tuple (returncode, lines) where lines is the decoded
    standard output split on newlines. A negative returncode indicates
    termination by signal (subprocess convention).
    """
    # A mutable default ({}) was replaced with None to avoid the shared
    # mutable-default pitfall; the empty-environment default behaviour
    # is preserved.
    if env is None:
        env = {}
    try:
        with open(os.devnull, 'w') as devnull:
            output = subprocess.check_output(args, env=env, stderr=devnull)
        ret = 0
    except subprocess.CalledProcessError as err:
        output = err.output
        ret = err.returncode
    return ret, output.decode('utf-8').split('\n')


def extract_result(result):
    """Parse the one-line v4l2-compliance summary into a dict.

    The summary line has the form:
    'v4l2-compliance ... device /dev/videoN: Total: T, Succeeded: S,
    Failed: F, Warnings: W' (driver and device positions per
    v4l2-compliance output). Returns a dict with keys 'total',
    'succeeded', 'failed', 'warnings', 'device' and 'driver'.
    """
    res = result.split(', ')
    ret = {}
    ret['total'] = int(res[0].split(': ')[-1])
    ret['succeeded'] = int(res[1].split(': ')[-1])
    ret['failed'] = int(res[2].split(': ')[-1])
    ret['warnings'] = int(res[3].split(': ')[-1])
    ret['device'] = res[0].split()[4].strip(':')
    ret['driver'] = res[0].split()[2]
    return ret


def test_v4l2_compliance(v4l2_compliance, ld_preload, device, base_driver):
    """Run v4l2-compliance on device through the compat layer.

    Returns a tuple (status, output_lines) where status is TestPass or
    TestFail.
    """
    ret, output = run_with_stdout(v4l2_compliance, '-s', '-d', device,
                                  env={'LD_PRELOAD': ld_preload})
    if ret < 0:
        output.append(f'Test for {device} terminated due to signal {signal.Signals(-ret).name}')
        return TestFail, output

    # The summary is the second-to-last line of the output.
    result = extract_result(output[-2])
    if result['failed'] == 0:
        return TestPass, output

    # vimc will fail s_fmt because it only supports framesizes that are
    # multiples of 3
    if base_driver == 'vimc' and result['failed'] == 1:
        failures = grep('fail', output)
        if re.search('S_FMT cannot handle an invalid format', failures[0]) is None:
            return TestFail, output
        return TestPass, output

    return TestFail, output


def main(argv):
    """Test the V4L2 compatibility layer on every eligible video node.

    Returns TestPass, TestFail, or TestSkip (when the required tools or
    devices are unavailable).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--all', action='store_true',
                        help='Test all available cameras')
    parser.add_argument('-s', '--sanitizer', type=str,
                        help='Path to the address sanitizer (ASan) runtime')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Make the output verbose')
    parser.add_argument('v4l2_compat', type=str,
                        help='Path to v4l2-compat.so')
    args = parser.parse_args(argv[1:])

    # Compute the LD_PRELOAD value by first loading ASan (if specified) and
    # then the V4L2 compat layer.
    ld_preload = []
    if args.sanitizer:
        ld_preload.append(args.sanitizer)
    ld_preload.append(args.v4l2_compat)
    ld_preload = ':'.join(ld_preload)

    v4l2_compliance = shutil.which('v4l2-compliance')
    if v4l2_compliance is None:
        print('v4l2-compliance is not available')
        return TestSkip

    ret, out = run_with_stdout(v4l2_compliance, '--version')
    if ret != 0 or version.parse(out[0].split()[1].replace(',', '')) < MIN_V4L_UTILS_VERSION:
        print('v4l2-compliance version >= 1.21.0 required')
        return TestSkip

    v4l2_ctl = shutil.which('v4l2-ctl')
    if v4l2_ctl is None:
        print('v4l2-ctl is not available')
        return TestSkip

    ret, out = run_with_stdout(v4l2_ctl, '--version')
    if ret != 0 or version.parse(out[0].split()[-1]) < MIN_V4L_UTILS_VERSION:
        print('v4l2-ctl version >= 1.21.0 required')
        return TestSkip

    dev_nodes = glob.glob('/dev/video*')
    if len(dev_nodes) == 0:
        print('no video nodes available to test with')
        return TestSkip

    failed = []
    drivers_tested = {}
    for device in dev_nodes:
        # Check that the device node is handled by libcamera when the
        # compat layer is preloaded.
        ret, out = run_with_stdout(v4l2_ctl, '-D', '-d', device,
                                   env={'LD_PRELOAD': ld_preload})
        if ret < 0:
            failed.append(device)
            print(f'v4l2-ctl failed on {device} with v4l2-compat')
            continue

        driver = grep('Driver name', out)[0].split(':')[-1].strip()
        if driver != "libcamera":
            continue

        # Identify the underlying kernel driver, without the compat
        # layer, to decide whether this pipeline is testable.
        ret, out = run_with_stdout(v4l2_ctl, '-D', '-d', device)
        if ret < 0:
            failed.append(device)
            print(f'v4l2-ctl failed on {device} without v4l2-compat')
            continue

        driver = grep('Driver name', out)[0].split(':')[-1].strip()
        if driver not in supported_pipelines:
            continue

        # TODO: Add kernel version check when vimc supports scaling
        if driver == "vimc":
            continue

        # Unless --all was given, test each driver only once.
        if not args.all and driver in drivers_tested:
            continue

        print(f'Testing {device} with {driver} driver... ', end='')
        ret, msg = test_v4l2_compliance(v4l2_compliance, ld_preload,
                                        device, driver)
        if ret == TestFail:
            failed.append(device)
            print('failed')
        else:
            print('success')
        if ret == TestFail or args.verbose:
            print('\n'.join(msg))

        drivers_tested[driver] = True

    if len(drivers_tested) == 0:
        print('No compatible drivers found')
        return TestSkip

    if len(failed) > 0:
        print(f'Failed {len(failed)} tests:')
        for device in failed:
            print(f'- {device}')

    return TestPass if not failed else TestFail


if __name__ == '__main__':
    sys.exit(main(sys.argv))
.. SPDX-License-Identifier: CC-BY-SA-4.0

IPU3 IPA Architecture Design and Overview
=========================================

The IPU3 IPA is built as a modular and extensible framework with an
upper layer to manage the interactions with the pipeline handler, and
the image processing algorithms split to compartmentalise the processing
required for each processing block, making use of the fixed-function
accelerators provided by the ImgU ISP.

The core IPU3 class is responsible for initialisation and construction
of the algorithm components, processing controls set by the requests
from applications, and managing events from the pipeline handler.

::

      ┌───────────────────────────────────────────┐
      │      IPU3 Pipeline Handler                │
      │   ┌────────┐    ┌────────┐    ┌────────┐  │
      │   │        │    │        │    │        │  │
      │   │ Sensor ├───►│  CIO2  ├───►│  ImgU  ├──►
      │   │        │    │        │    │        │  │
      │   └────────┘    └────────┘    └─▲────┬─┘  │    P: Parameter Buffer
      │                                 │P   │    │    S: Statistics Buffer
      │                                 │    │S   │
      └─┬───┬───┬──────┬────┬────┬────┬─┴────▼─┬──┘    1: init()
        │   │   │      │ ▲  │ ▲  │ ▲  │ ▲      │       2: configure()
        │1  │2  │3     │4│  │4│  │4│  │4│      │5      3: mapBuffers(), start()
        │   │   │      │ │  │ │  │ │  │ │      │       4: (▼) queueRequest(), fillParamsBuffer(), processStatsBuffer()
        ▼   ▼   ▼      ▼ │  ▼ │  ▼ │  ▼ │      ▼          (▲) setSensorControls, paramsBufferReady, metadataReady Signals
      ┌──────────────────┴────┴────┴────┴─────────┐    5: stop(), unmapBuffers()
      │ IPU3 IPA                                  │
      │                 ┌───────────────────────┐ │
      │ ┌───────────┐   │ Algorithms            │ │
      │ │IPAContext │   │          ┌─────────┐  │ │
      │ │ ┌───────┐ │   │          │ ...     │  │ │
      │ │ │       │ │   │        ┌─┴───────┐ │  │ │
      │ │ │  SC   │ │   │        │ Tonemap ├─┘  │ │
      │ │ │       │ ◄───►      ┌─┴───────┐ │    │ │
      │ │ ├───────┤ │   │      │ AWB     ├─┘    │ │
      │ │ │       │ │   │    ┌─┴───────┐ │      │ │
      │ │ │  FC   │ │   │    │ AGC     ├─┘      │ │
      │ │ │       │ │   │    │         │        │ │
      │ │ └───────┘ │   │    └─────────┘        │ │
      │ └───────────┘   └───────────────────────┘ │
      └───────────────────────────────────────────┘
        SC: IPASessionConfiguration
        FC: IPAFrameContext(s)

The IPA instance is constructed and initialised at the point a Camera is
created by the IPU3 pipeline handler. The initialisation call provides
details about which camera sensor is being used, and the controls that
it has available, along with their default values and ranges.

Buffers
~~~~~~~

The IPA will have Parameter and Statistics buffers shared with it from
the IPU3 Pipeline handler. These buffers will be passed to the IPA using
the ``mapBuffers()`` call before the ``start()`` operation occurs.

The IPA will map the buffers into CPU-accessible memory, associated with
a buffer ID, and further events for sending or receiving parameter and
statistics buffers will reference the ID to avoid expensive memory
mapping operations, or the passing of file handles during streaming.

After the ``stop()`` operation occurs, these buffers will be unmapped
when requested by the pipeline handler using the ``unmapBuffers()`` call
and no further access to the buffers is permitted.

Context
~~~~~~~

Algorithm calls will always have the ``IPAContext`` available to them.
This context is composed of two parts:

-  IPA Session Configuration
-  IPA Frame Context

The session configuration structure ``IPASessionConfiguration``
represents the constant parameters determined during ``configure()``,
before streaming commences.

The IPA Frame Context provides the storage for algorithms for a single
frame operation.

The ``IPAFrameContext`` structure may be extended to an array, list, or
queue to store historical state for each frame, allowing algorithms to
obtain and reference results of calculations which are deeply pipelined.
This may only be done if an algorithm needs to know the context that was
applied at the frame the statistics were produced for, rather than the
previous or current frame.

Presently there is a single ``IPAFrameContext`` without historical data,
and the context is maintained and updated through successive processing
operations.

Operating
~~~~~~~~~

There are four main interactions with the algorithms for the IPU3 IPA
to operate when running:

-  configure()
-  queueRequest()
-  fillParamsBuffer()
-  processStatsBuffer()

The configuration phase allows the pipeline handler to inform the IPA of
the current stream configurations, which is then passed into each
algorithm to provide an opportunity to identify and track state of the
hardware, such as image size or ImgU pipeline configurations.

Pre-frame preparation
~~~~~~~~~~~~~~~~~~~~~

When configured, the IPA is notified by the pipeline handler of the
Camera ``start()`` event, after which incoming requests will be queued
for processing, requiring a parameter buffer (``ipu3_uapi_params``) to
be populated for the ImgU. This is given to the IPA through
``fillParamsBuffer()``, and then passed directly to each algorithm
through the ``prepare()`` call allowing the ISP configuration to be
updated for the needs of each component that the algorithm is
responsible for.

The algorithm should set the use flag (``ipu3_uapi_flags``) for any
structure that it modifies, and it should take care to ensure that any
structure set by a use flag is fully initialised to suitable values.

The parameter buffer is returned to the pipeline handler through the
``paramsBufferReady`` signal, and from there queued to the ImgU along
with a raw frame captured with the CIO2.

Post-frame completion
~~~~~~~~~~~~~~~~~~~~~

When the capture of an image is completed, and successfully processed
through the ImgU, the generated statistics buffer
(``ipu3_uapi_stats_3a``) is given to the IPA through
``processStatsBuffer()``. This provides the IPA with an opportunity to
examine the results of the ISP and run the calculations required by each
algorithm on the new data. The algorithms may require context from the
operations of other algorithms, for example, the AWB might choose to use
a scene brightness determined by the AGC. It is important that the
algorithms are ordered to ensure that required results are determined
before they are needed.

The ordering of the algorithm processing is determined by their
placement in the ``IPU3::algorithms_`` ordered list.

Finally, the IPA metadata for the completed frame is returned back via
the ``metadataReady`` signal.

Sensor Controls
~~~~~~~~~~~~~~~

The AutoExposure and AutoGain (AGC) algorithm differs slightly from the
others as it requires operating directly on the sensor, as opposed to
through the ImgU ISP. To support this, there is a ``setSensorControls``
signal to allow the IPA to request controls to be set on the camera
sensor through the pipeline handler.