/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2021, Ideas On Board
 *
 * algorithm.cpp - IPA control algorithm interface
 */

#include "algorithm.h"

/**
 * \file algorithm.h
 * \brief Algorithm common interface
 */

namespace libcamera {

namespace ipa {

/**
 * \class Algorithm
 * \brief The base class for all IPA algorithms
 * \tparam Module The IPA module type for this class of algorithms
 *
 * The Algorithm class defines a standard interface for IPA algorithms
 * compatible with the \a Module. By abstracting algorithms, it makes it
 * possible to implement generic code to manage algorithms regardless of
 * their specific type.
 *
 * To specialize the Algorithm class template, an IPA module shall specialize
 * the Module class template with module-specific context and configuration
 * types, and pass the specialized Module class as the \a Module template
 * argument.
 */

/**
 * \typedef Algorithm::Module
 * \brief The IPA module type for this class of algorithms
 */

/**
 * \fn Algorithm::init()
 * \brief Initialize the Algorithm with tuning data
 * \param[in] context The shared IPA context
 * \param[in] tuningData The tuning data for the algorithm
 *
 * This function is called once, when the IPA module is initialized, to
 * initialize the algorithm. The \a tuningData YamlObject contains the tuning
 * data for the algorithm.
 *
 * \return 0 if successful, an error code otherwise
 */

/**
 * \fn Algorithm::configure()
 * \brief Configure the Algorithm given an IPAConfigInfo
 * \param[in] context The shared IPA context
 * \param[in] configInfo The IPA configuration data, received from the pipeline
 * handler
 *
 * Algorithms may implement a configure operation to pre-calculate
 * parameters prior to commencing streaming.
 *
 * Configuration state may be stored in the IPASessionConfiguration structure
 * of the IPAContext.
 *
 * \return 0 if successful, an error code otherwise
 */

/**
 * \fn Algorithm::queueRequest()
 * \brief Provide control values to the algorithm
 * \param[in] context The shared IPA context
 * \param[in] frame The frame number to which the control values apply
 * \param[in] frameContext The current frame's context
 * \param[in] controls The list of user controls
 *
 * This function is called for each request queued to the camera. It provides
 * the controls stored in the request to the algorithm. The \a frame number
 * is the Request sequence number and identifies the frame in which the
 * controls shall take effect.
 *
 * Algorithms shall read the applicable controls and store their values for
 * later use during frame processing.
 */

/**
 * \fn Algorithm::prepare()
 * \brief Fill the \a params buffer with ISP processing parameters for a frame
 * \param[in] context The shared IPA context
 * \param[in] frame The frame context sequence number
 * \param[in] frameContext The FrameContext for this frame
 * \param[out] params The ISP specific parameters
 *
 * This function is called for every frame while the camera is running, before
 * the frame is processed by the ISP, to prepare the ISP processing parameters
 * for that frame.
 *
 * Algorithms shall fill in the parameter structure fields appropriately to
 * configure the ISP processing blocks that they are responsible for. This
 * includes setting fields and flags that enable those processing blocks.
 */
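
/*
 * The following is a minimal, illustrative sketch of an Algorithm subclass
 * implementing the operations documented above. It is not part of this
 * interface: the IPAExampleModule specialization and its IPAContext,
 * IPAFrameContext, IPAConfigInfo and example_params types are hypothetical
 * placeholders, and real modules substitute their own types in the override
 * signatures.
 *
 * \code
 * class ExampleBlackLevel : public Algorithm<IPAExampleModule>
 * {
 * public:
 *	int init(IPAContext &context, const YamlObject &tuningData) override
 *	{
 *		// Parse the algorithm's tuning data once, at IPA module
 *		// initialization time.
 *		return 0;
 *	}
 *
 *	int configure(IPAContext &context,
 *		      const IPAConfigInfo &configInfo) override
 *	{
 *		// Pre-calculate per-session parameters before streaming
 *		// starts, storing them in context.configuration as needed.
 *		return 0;
 *	}
 *
 *	void queueRequest(IPAContext &context, const uint32_t frame,
 *			  IPAFrameContext &frameContext,
 *			  const ControlList &controls) override
 *	{
 *		// Record the user controls that shall take effect in the
 *		// given frame.
 *	}
 *
 *	void prepare(IPAContext &context, const uint32_t frame,
 *		     IPAFrameContext &frameContext,
 *		     example_params *params) override
 *	{
 *		// Fill the ISP parameter buffer for this frame and enable
 *		// the processing blocks this algorithm is responsible for.
 *	}
 * };
 * \endcode
 */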
/**
 * \fn Algorithm::process()
 * \brief Process ISP statistics, and run algorithm operations
 * \param[in] context The shared IPA context
 * \param[in] frame The frame context sequence number
 * \param[in] frameContext The current frame's context
 * \param[in] stats The IPA statistics and ISP results
 * \param[out] metadata Metadata for the frame, to be filled by the algorithm
 *
 * This function is called while the camera is running for every frame
 * processed by the ISP, to process statistics generated from that frame by
 * the ISP. Algorithms shall use this data to run calculations, update their
 * state accordingly, and fill the frame metadata.
 *
 * Processing shall not take an undue amount of time, and any extended or
 * computationally expensive calculations or operations must be handled
 * asynchronously in a separate thread.
 *
 * Algorithms can store state in their respective IPAFrameContext structures,
 * and reference state from the IPAFrameContext of other algorithms.
 *
 * \todo Historical data may be required as part of the processing.
 * Either the previous frame, or the IPAFrameContext state of the frame
 * that generated the statistics for this operation may be required for
 * some advanced algorithms to prevent oscillations or support control
 * loops correctly. Only a single IPAFrameContext is available currently,
 * and so any data stored may represent the results of the previously
 * completed operations.
 *
 * Care shall be taken to ensure the ordering of access to the information
 * such that the algorithms use up-to-date state as required.
 */

/**
 * \class AlgorithmFactory
 * \brief Registration of Algorithm classes and creation of instances
 * \tparam _Algorithm The algorithm class type for this factory
 *
 * To facilitate instantiation of Algorithm classes, the AlgorithmFactory class
 * implements auto-registration of algorithms with the IPA Module class. Each
 * Algorithm subclass shall register itself using the REGISTER_IPA_ALGORITHM()
 * macro, which will create a corresponding instance of an AlgorithmFactory and
 * register it with the IPA Module.
 */

/**
 * \fn AlgorithmFactory::AlgorithmFactory()
 * \brief Construct an algorithm factory
 * \param[in] name Name of the algorithm class
 *
 * Creating an instance of the factory automatically registers it with the IPA
 * Module class, enabling creation of algorithm instances through
 * Module::createAlgorithm().
 *
 * The factory \a name identifies the algorithm and shall be unique.
 */

/**
 * \fn AlgorithmFactory::create()
 * \brief Create an instance of the Algorithm corresponding to the factory
 * \return A pointer to a newly constructed instance of the Algorithm subclass
 * corresponding to the factory
 */

/**
 * \def REGISTER_IPA_ALGORITHM
 * \brief Register an algorithm with the IPA module
 * \param[in] algorithm Class name of Algorithm derived class to register
 * \param[in] name Name of the algorithm
 *
 * Register an Algorithm subclass with the IPA module to make it available for
 * instantiation through Module::createAlgorithm(). The \a name identifies the
 * algorithm and must be unique across all algorithms registered for the IPA
 * module.
 */
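
/*
 * Continuing the illustrative sketch above (names are hypothetical, not part
 * of this module), an algorithm makes itself available to its IPA module by
 * registering a factory with the REGISTER_IPA_ALGORITHM() macro, typically at
 * the end of the algorithm's implementation file:
 *
 * \code
 * REGISTER_IPA_ALGORITHM(ExampleBlackLevel, "BlackLevel")
 * \endcode
 *
 * The "BlackLevel" string is the unique name used to identify the algorithm,
 * for instance when the module instantiates the algorithms listed in its
 * tuning file through Module::createAlgorithm().
 */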

} /* namespace ipa */

} /* namespace libcamera */
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Collabora Ltd.
* Author: Nicolas Dufresne <nicolas.dufresne@collabora.com>
*
* gstlibcamera-utils.cpp - GStreamer libcamera Utility Functions
*/
#include "gstlibcamera-utils.h"
#include <libcamera/formats.h>
using namespace libcamera;
static struct {
GstVideoFormat gst_format;
PixelFormat format;
} format_map[] = {
/* Compressed */
{ GST_VIDEO_FORMAT_ENCODED, formats::MJPEG },
/* RGB */
{ GST_VIDEO_FORMAT_RGB, formats::BGR888 },
{ GST_VIDEO_FORMAT_BGR, formats::RGB888 },
{ GST_VIDEO_FORMAT_ARGB, formats::BGRA8888 },
/* YUV Semiplanar */
{ GST_VIDEO_FORMAT_NV12, formats::NV12 },
{ GST_VIDEO_FORMAT_NV21, formats::NV21 },
{ GST_VIDEO_FORMAT_NV16, formats::NV16 },
{ GST_VIDEO_FORMAT_NV61, formats::NV61 },
{ GST_VIDEO_FORMAT_NV24, formats::NV24 },
/* YUV Planar */
{ GST_VIDEO_FORMAT_I420, formats::YUV420 },
{ GST_VIDEO_FORMAT_YV12, formats::YVU420 },
{ GST_VIDEO_FORMAT_Y42B, formats::YUV422 },
/* YUV Packed */
{ GST_VIDEO_FORMAT_UYVY, formats::UYVY },
{ GST_VIDEO_FORMAT_VYUY, formats::VYUY },
{ GST_VIDEO_FORMAT_YUY2, formats::YUYV },
{ GST_VIDEO_FORMAT_YVYU, formats::YVYU },
/* \todo NV42 is used in libcamera but is not mapped in GStreamer yet. */
};
static GstVideoFormat
pixel_format_to_gst_format(const PixelFormat &format)
{
for (const auto &item : format_map) {
if (item.format == format)
return item.gst_format;
}
return GST_VIDEO_FORMAT_UNKNOWN;
}
static PixelFormat
gst_format_to_pixel_format(GstVideoFormat gst_format)
{
if (gst_format == GST_VIDEO_FORMAT_ENCODED)
return PixelFormat{};
for (const auto &item : format_map)
if (item.gst_format == gst_format)
return item.format;
return PixelFormat{};
}
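/*
 * Illustrative mapping behaviour of the two helpers above: the table lookup
 * is symmetric for raw formats, but deliberately asymmetric for compressed
 * ones, as GST_VIDEO_FORMAT_ENCODED alone does not identify a specific
 * compressed format.
 *
 *   pixel_format_to_gst_format(formats::NV12)  -> GST_VIDEO_FORMAT_NV12
 *   gst_format_to_pixel_format(GST_VIDEO_FORMAT_NV12) -> formats::NV12
 *   pixel_format_to_gst_format(formats::MJPEG) -> GST_VIDEO_FORMAT_ENCODED
 *   gst_format_to_pixel_format(GST_VIDEO_FORMAT_ENCODED) -> PixelFormat{} (invalid)
 */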
static GstStructure *
bare_structure_from_format(const PixelFormat &format)
{
GstVideoFormat gst_format = pixel_format_to_gst_format(format);
if (gst_format == GST_VIDEO_FORMAT_UNKNOWN)
return nullptr;
if (gst_format != GST_VIDEO_FORMAT_ENCODED)
return gst_structure_new("video/x-raw", "format", G_TYPE_STRING,
gst_video_format_to_string(gst_format), nullptr);
switch (format) {
case formats::MJPEG:
return gst_structure_new_empty("image/jpeg");
default:
return nullptr;
}
}
GstCaps *
gst_libcamera_stream_formats_to_caps(const StreamFormats &formats)
{
GstCaps *caps = gst_caps_new_empty();
for (PixelFormat pixelformat : formats.pixelformats()) {
g_autoptr(GstStructure) bare_s = bare_structure_from_format(pixelformat);
if (!bare_s) {
GST_WARNING("Unsupported DRM format %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS(pixelformat));
continue;
}
for (const Size &size : formats.sizes(pixelformat)) {
GstStructure *s = gst_structure_copy(bare_s);
gst_structure_set(s,
"width", G_TYPE_INT, size.width,
"height", G_TYPE_INT, size.height,
nullptr);
gst_caps_append_structure(caps, s);
}
const SizeRange &range = formats.range(pixelformat);
if (range.hStep && range.vStep) {
GstStructure *s = gst_structure_copy(bare_s);
GValue val = G_VALUE_INIT;
g_value_init(&val, GST_TYPE_INT_RANGE);
gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep);
gst_structure_set_value(s, "width", &val);
gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep);
gst_structure_set_value(s, "height", &val);
g_value_unset(&val);
gst_caps_append_structure(caps, s);
}
}
return caps;
}
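/*
 * As an illustration (sizes and format are hypothetical), a stream reporting
 * NV12 at discrete sizes 1920x1080 and 1280x720 plus a stepped size range
 * would yield caps along the lines of:
 *
 *   video/x-raw, format=NV12, width=1920, height=1080;
 *   video/x-raw, format=NV12, width=1280, height=720;
 *   video/x-raw, format=NV12, width=[ 320, 1920, 2 ], height=[ 240, 1080, 2 ]
 *
 * One structure is appended per discrete size, plus one structure with
 * GST_TYPE_INT_RANGE width and height when the size range has non-zero steps.
 */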
GstCaps *
gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg)
{
GstCaps *caps = gst_caps_new_empty();
GstStructure *s = bare_structure_from_format(stream_cfg.pixelFormat);
gst_structure_set(s,
"width", G_TYPE_INT, stream_cfg.size.width,
"height", G_TYPE_INT, stream_cfg.size.height,
nullptr);
gst_caps_append_structure(caps, s);
return caps;
}
void
gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg,
GstCaps *caps)
{
GstVideoFormat gst_format = pixel_format_to_gst_format(stream_cfg.pixelFormat);
guint i;
gint best_fixed = -1, best_in_range = -1;
GstStructure *s;
/*
* These are delta weights computed from:
* ABS(width - stream_cfg.size.width) * ABS(height - stream_cfg.size.height)
*/
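/*
 * For instance, with a 1920x1080 default size, a 1280x720 candidate scores
 * |1280 - 1920| * |720 - 1080| = 640 * 360 = 230400, while an exact
 * 1920x1080 candidate scores 0 and is therefore preferred.
 */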
guint best_fixed_delta = G_MAXUINT;
guint best_in_range_delta = G_MAXUINT;
/* First fixate the caps using default configuration value. */
g_assert(gst_caps_is_writable(caps));
/* Lookup the structure for a close match to the stream_cfg.size */
for (i = 0; i < gst_caps_get_size(caps); i++) {
s = gst_caps_get_structure(caps, i);
gint width, height;
guint delta;
if (gst_structure_has_field_typed(s, "width", G_TYPE_INT) &&
gst_structure_has_field_typed(s, "height", G_TYPE_INT)) {
gst_structure_get_int(s, "width", &width);
gst_structure_get_int(s, "height", &height);
delta = ABS(width - (gint)stream_cfg.size.width) * ABS(height - (gint)stream_cfg.size.height);
if (delta < best_fixed_delta) {
best_fixed_delta = delta;
best_fixed = i;
}
} else {
gst_structure_fixate_field_nearest_int(s, "width", stream_cfg.size.width);
gst_structure_fixate_field_nearest_int(s, "height", stream_cfg.size.height);
gst_structure_get_int(s, "width", &width);
gst_structure_get_int(s, "height", &height);
delta = ABS(width - (gint)stream_cfg.size.width) * ABS(height - (gint)stream_cfg.size.height);
if (delta < best_in_range_delta) {
best_in_range_delta = delta;
best_in_range = i;
}
}
}
/* Prefer reliable fixed value over ranges */
if (best_fixed >= 0)
s = gst_caps_get_structure(caps, best_fixed);
else
s = gst_caps_get_structure(caps, best_in_range);
if (gst_structure_has_name(s, "video/x-raw")) {
const gchar *format = gst_video_format_to_string(gst_format);
gst_structure_fixate_field_string(s, "format", format);
}
/* Then configure the stream with the result. */
if (gst_structure_has_name(s, "video/x-raw")) {