/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2018, Google Inc.
 *
 * pipeline_handler.cpp - Pipeline handler infrastructure
 */

#include "pipeline_handler.h"

#include <libcamera/buffer.h>
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>

#include "device_enumerator.h"
#include "log.h"
#include "media_device.h"
#include "utils.h"

/**
 * \file pipeline_handler.h
 * \brief Create pipelines and cameras from a set of media devices
 *
 * Each pipeline supported by libcamera needs to be backed by a pipeline
 * handler implementation that operates on a set of media devices. The
 * pipeline handler is responsible for matching the media devices it requires
 * with the devices present in the system, and, once all those devices have
 * been acquired, for creating the corresponding Camera instances.
 *
 * Every subclass of PipelineHandler shall be registered with libcamera using
 * the REGISTER_PIPELINE_HANDLER() macro.
 */

namespace libcamera {

LOG_DEFINE_CATEGORY(Pipeline)

/**
 * \class CameraData
 * \brief Base class for platform-specific data associated with a camera
 *
 * The CameraData base abstract class represents platform-specific data a
 * pipeline handler might want to associate with a Camera, to access it at a
 * later time.
 *
 * Pipeline handlers are expected to extend this base class with
 * platform-specific implementations, associate instances of the derived
 * classes with the cameras they register using the registerCamera() method,
 * and access them at a later time with cameraData().
 */
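
/*
 * As an illustration, a minimal subclass for a hypothetical single-stream
 * pipeline (the MyCameraData name and its members are made up for the
 * example) could look like:
 *
 * \code
 * class MyCameraData : public CameraData
 * {
 * public:
 *     MyCameraData(PipelineHandler *pipe)
 *         : CameraData(pipe)
 *     {
 *     }
 *
 *     Stream stream_;
 * };
 * \endcode
 */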

/**
 * \fn CameraData::CameraData(PipelineHandler *pipe)
 * \brief Construct a CameraData instance for the given pipeline handler
 * \param[in] pipe The pipeline handler
 *
 * The reference to the pipeline handler is stored internally, the caller shall
 * guarantee that the pointer remains valid as long as the CameraData instance
 * exists.
 */

/**
 * \var CameraData::camera_
 * \brief The camera related to this CameraData instance
 *
 * The camera_ pointer provides access to the Camera object that this instance
 * is related to. It is set when the Camera is registered with
 * PipelineHandler::registerCamera() and remains valid until the CameraData
 * instance is destroyed.
 */

/**
 * \var CameraData::pipe_
 * \brief The pipeline handler related to this CameraData instance
 *
 * The pipe_ pointer provides access to the PipelineHandler object that this
 * instance is related to. It is set when the CameraData instance is created
 * and remains valid until the instance is destroyed.
 */

/**
 * \var CameraData::queuedRequests_
 * \brief The list of queued and not yet completed requests
 *
 * The list of queued requests is used to track in-flight requests, in order
 * to ensure completion of all requests when the pipeline handler is stopped.
 *
 * \sa PipelineHandler::queueRequest(), PipelineHandler::stop(),
 * PipelineHandler::completeRequest()
 */

/**
 * \var CameraData::controlInfo_
 * \brief The set of controls supported by the camera
 *
 * The control information shall be initialised by the pipeline handler when
 * creating the camera, and shall not be modified afterwards.
 */

/**
 * \var CameraData::ipa_
 * \brief The IPA module used by the camera
 *
 * Reference to the Image Processing Algorithms (IPA) operating on the camera's
 * stream(s). If no IPA exists for the camera, this field is set to nullptr.
 */

/**
 * \class PipelineHandler
 * \brief Create and manage cameras based on a set of media devices
 *
 * The PipelineHandler matches the media devices provided by a DeviceEnumerator
 * with the pipelines it supports and creates corresponding Camera devices.
 *
 * Pipeline handler instances are reference-counted through std::shared_ptr<>.
 * They implement std::enable_shared_from_this<> in order to create new
 * std::shared_ptr<> in code paths originating from member functions of the
 * PipelineHandler class where only the 'this' pointer is available.
 */

/**
 * \brief Construct a PipelineHandler instance
 * \param[in] manager The camera manager
 *
 * In order to honour the std::enable_shared_from_this<> contract,
 * PipelineHandler instances shall never be constructed manually, but always
 * through the PipelineHandlerFactory::create() method implemented by the
 * respective factories.
 */
PipelineHandler::PipelineHandler(CameraManager *manager)
	: manager_(manager)
{
}

/*
 * Destroying a pipeline handler releases all the media devices it has
 * acquired through acquireMediaDevice().
 */
PipelineHandler::~PipelineHandler()
{
	for (std::shared_ptr<MediaDevice> media : mediaDevices_)
		media->release();
}

/**
 * \fn PipelineHandler::match(DeviceEnumerator *enumerator)
 * \brief Match media devices and create camera instances
 * \param[in] enumerator The enumerator providing all media devices found in the
 * system
 *
 * This function is the main entry point of the pipeline handler. It is called
 * by the camera manager with the \a enumerator passed as an argument. It shall
 * acquire from the \a enumerator all the media devices it needs for a single
 * pipeline, create one or multiple Camera instances and register them with the
 * camera manager.
 *
 * If all media devices needed by the pipeline handler are found, they must all
 * be acquired by a call to MediaDevice::acquire(). This function shall then
 * create the corresponding Camera instances, store them internally, and return
 * true. Otherwise it shall not acquire any media device (or shall release all
 * the media devices it has acquired by calling MediaDevice::release()) and
 * return false.
 *
 * If multiple instances of a pipeline are available in the system, the
 * PipelineHandler class will be instantiated once per instance, and its match()
 * function called for every instance. Each call shall acquire media devices for
 * one pipeline instance, until all compatible media devices are exhausted.
 *
 * If this function returns true, a new instance of the pipeline handler will
 * be created and its match() function called.
 *
 * \return true if media devices have been acquired and camera instances
 * created, or false otherwise
 */
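
/*
 * As a sketch, a match() implementation for a hypothetical pipeline (the
 * "my-driver" and "my-entity" names, the MyCameraData class and the
 * PipelineHandlerMyDevice class are all made up for the example) could look
 * like:
 *
 * \code
 * bool PipelineHandlerMyDevice::match(DeviceEnumerator *enumerator)
 * {
 *     DeviceMatch dm("my-driver");
 *     dm.add("my-entity");
 *
 *     MediaDevice *media = acquireMediaDevice(enumerator, dm);
 *     if (!media)
 *         return false;
 *
 *     std::unique_ptr<MyCameraData> data = std::make_unique<MyCameraData>(this);
 *
 *     // Open the video device(s) and initialize data->stream_ here.
 *
 *     std::set<Stream *> streams{ &data->stream_ };
 *     std::shared_ptr<Camera> camera = Camera::create(this, media->model(),
 *                                                     streams);
 *     registerCamera(std::move(camera), std::move(data));
 *
 *     return true;
 * }
 * \endcode
 */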

/**
 * \brief Search and acquire a MediaDevice matching a device pattern
 * \param[in] enumerator Enumerator containing all media devices in the system
 * \param[in] dm Device match pattern
 *
 * Search the device \a enumerator for an available media device matching the
 * device match pattern \a dm. Matching media devices that have previously been
 * acquired by MediaDevice::acquire() are not considered. If a match is found,
 * the media device is acquired and returned. The caller shall not release the
 * device explicitly, it will be automatically released when the pipeline
 * handler is destroyed.
 *
 * \return A pointer to the matching MediaDevice, or nullptr if no match is found
 */
MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator,
						 const DeviceMatch &dm)
{
	std::shared_ptr<MediaDevice> media = enumerator->search(dm);
	if (!media)
		return nullptr;

	if (!media->acquire())
		return nullptr;

	mediaDevices_.push_back(media);

	return media.get();
}

/**
 * \brief Lock all media devices acquired by the pipeline
 *
 * This method shall not be called from pipeline handler implementations, as the
 * Camera class handles locking directly.
 *
 * \return True if the devices could be locked, false otherwise
 * \sa unlock()
 * \sa MediaDevice::lock()
 */
bool PipelineHandler::lock()
{
	for (std::shared_ptr<MediaDevice> &media : mediaDevices_) {
		if (!media->lock()) {
			unlock();
			return false;
		}
	}

	return true;
}

/**
 * \brief Unlock all media devices acquired by the pipeline
 *
 * This method shall not be called from pipeline handler implementations, as the
 * Camera class handles locking directly.
 *
 * \sa lock()
 */
void PipelineHandler::unlock()
{
	for (std::shared_ptr<MediaDevice> &media : mediaDevices_)
		media->unlock();
}

/**
 * \brief Retrieve the list of controls for a camera
 * \param[in] camera The camera
 * \return A ControlInfoMap listing the controls supported by \a camera
 */
const ControlInfoMap &PipelineHandler::controls(Camera *camera)
{
	CameraData *data = cameraData(camera);
	return data->controlInfo_;
}

/**
 * \fn PipelineHandler::generateConfiguration()
 * \brief Generate a camera configuration for a specified camera
 * \param[in] camera The camera to generate a default configuration for
 * \param[in] roles A list of stream roles
 *
 * Generate a default configuration for the \a camera for a specified list of
 * stream roles. The caller shall populate the \a roles with the use-cases it
 * wishes to fetch the default configuration for. The returned configuration
 * can then be examined by the caller to learn about the selected streams and
 * their default parameters.
 *
 * The intended companion to this is \a configure() which can be used to
 * change the parameters of the group of streams.
 *
 * \return A valid CameraConfiguration if the requested roles can be satisfied,
 * or a null pointer otherwise. The ownership of the returned configuration is
 * passed to the caller.
 */
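
/*
 * A minimal sketch of an implementation, with the MyCameraConfiguration
 * derived class and the default values left as placeholders, could generate
 * one configuration entry per requested role and let the derived class
 * validate the result:
 *
 * \code
 * CameraConfiguration *
 * PipelineHandlerMyDevice::generateConfiguration(Camera *camera,
 *                                                const StreamRoles &roles)
 * {
 *     CameraConfiguration *config = new MyCameraConfiguration();
 *
 *     if (roles.empty())
 *         return config;
 *
 *     StreamConfiguration cfg{};
 *     // Fill cfg with a default pixel format, size and buffer count
 *     // appropriate for the requested role.
 *     config->addConfiguration(cfg);
 *
 *     config->validate();
 *
 *     return config;
 * }
 * \endcode
 */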

/**
 * \fn PipelineHandler::configure()
 * \brief Configure a group of streams for capture
 * \param[in] camera The camera to configure
 * \param[in] config The camera configurations to setup
 *
 * Configure the specified group of streams for \a camera according to the
 * configuration specified in \a config. The intended caller of this interface
 * is the Camera class which will receive configuration to apply from the
 * application.
 *
 * The configuration is guaranteed to have been validated with
 * CameraConfiguration::validate(). The pipeline handler implementation shall not
 * perform further validation and may rely on any custom field stored in its
 * custom CameraConfiguration derived class.
 *
 * When configuring the camera the pipeline handler shall associate a Stream
 * instance to each StreamConfiguration entry in the CameraConfiguration using
 * the StreamConfiguration::setStream() method.
 *
 * \return 0 on success or a negative error code otherwise
 */
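
/*
 * For a hypothetical single-stream pipeline, the association between
 * StreamConfiguration entries and Stream instances could be expressed as
 * follows (MyCameraData and the device programming are placeholders):
 *
 * \code
 * int PipelineHandlerMyDevice::configure(Camera *camera,
 *                                        CameraConfiguration *config)
 * {
 *     MyCameraData *data =
 *         static_cast<MyCameraData *>(cameraData(camera));
 *     StreamConfiguration &cfg = config->at(0);
 *
 *     // Apply cfg.size and cfg.pixelFormat to the capture device here.
 *
 *     cfg.setStream(&data->stream_);
 *
 *     return 0;
 * }
 * \endcode
 */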

/**
 * \fn PipelineHandler::allocateBuffers()
 * \brief Allocate buffers for a stream
 * \param[in] camera The camera the \a streams belong to
 * \param[in] streams The set of streams to allocate buffers for
 *
 * This method allocates buffers internally in the pipeline handler for each
 * stream in the \a streams set, and associates them with the stream's
 * buffer pool.
 *
 * The intended caller of this method is the Camera class.
 *
 * \return 0 on success or a negative error code otherwise
 */

/**
 * \fn PipelineHandler::freeBuffers()
 * \brief Free all buffers associated with a stream
 * \param[in] camera The camera the \a streams belong to
 * \param[in] streams The set of streams to free buffers from
 *
 * After a capture session has been stopped, all buffers associated with each
 * stream shall be freed.
 *
 * The intended caller of this method is the Camera class.
 *
 * \return 0 on success or a negative error code otherwise
 */

/**
 * \fn PipelineHandler::start()
 * \brief Start capturing from a group of streams
 * \param[in] camera The camera to start
 *
 * Start the group of streams that have been configured for capture by
 * \a configure(). The intended caller of this method is the Camera class which
 * will in turn be called from the application to indicate that it has
 * configured the streams and is ready to capture.
 *
 * \return 0 on success or a negative error code otherwise
 */

/**
 * \fn PipelineHandler::stop()
 * \brief Stop capturing from all running streams
 * \param[in] camera The camera to stop
 *
 * This method stops capturing and processing requests immediately. All pending
 * requests are cancelled and complete immediately in an error state.
 */

/**
 * \fn PipelineHandler::queueRequest()
 * \brief Queue a request to the camera
 * \param[in] camera The camera to queue the request to
 * \param[in] request The request to queue
 *
 * This method queues a capture request to the pipeline handler for processing.
 * The request contains a set of buffers associated with streams and a set of
 * parameters. The pipeline handler shall program the device to ensure that the
 * parameters will be applied to the frames captured in the buffers provided in
 * the request.
 *
 * Pipeline handlers shall override this method. The base implementation in the
 * PipelineHandler class keeps track of queued requests in order to ensure
 * completion of all requests when the pipeline handler is stopped with stop().
 * Request completion shall be signalled by the pipeline handler using the
 * completeRequest() method.
 *
 * \return 0 on success or a negative error code otherwise
 */
int PipelineHandler::queueRequest(Camera *camera, Request *request)
{
	CameraData *data = cameraData(camera);
	data->queuedRequests_.push_back(request);

	return 0;
}
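
/*
 * A typical override, sketched here for a hypothetical single-stream pipeline
 * (the video_ member and the error handling are placeholders), queues the
 * request's buffer to the capture device and calls the base implementation to
 * track the request:
 *
 * \code
 * int PipelineHandlerMyDevice::queueRequest(Camera *camera, Request *request)
 * {
 *     MyCameraData *data =
 *         static_cast<MyCameraData *>(cameraData(camera));
 *     Buffer *buffer = request->findBuffer(&data->stream_);
 *     if (!buffer)
 *         return -ENOENT;
 *
 *     int ret = data->video_->queueBuffer(buffer);
 *     if (ret < 0)
 *         return ret;
 *
 *     PipelineHandler::queueRequest(camera, request);
 *
 *     return 0;
 * }
 * \endcode
 */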

/**
 * \brief Complete a buffer for a request
 * \param[in] camera The camera the request belongs to
 * \param[in] request The request the buffer belongs to
 * \param[in] buffer The buffer that has completed
 *
 * This method shall be called by pipeline handlers to signal completion of the
 * \a buffer part of the \a request. It notifies applications of buffer
 * completion and updates the request's internal buffer tracking. The request
 * is not completed automatically when the last buffer completes to give
 * pipeline handlers a chance to perform any operation that may still be
 * needed. They shall complete requests explicitly with completeRequest().
 *
 * \return True if all buffers contained in the request have completed, false
 * otherwise
 */
bool PipelineHandler::completeBuffer(Camera *camera, Request *request,
				     Buffer *buffer)
{
	camera->bufferCompleted.emit(request, buffer);
	return request->completeBuffer(buffer);
}

/**
 * \brief Signal request completion
 * \param[in] camera The camera that the request belongs to
 * \param[in] request The request that has completed
 *
 * The pipeline handler shall call this method to notify the \a camera that the
 * request has completed. The request is deleted and shall not be accessed once
 * this method returns.
 *
 * This method ensures that requests will be returned to the application in
 * submission order; the pipeline handler may call it on any complete request
 * without any ordering constraint.
 */
void PipelineHandler::completeRequest(Camera *camera, Request *request)
{
	request->complete();

	CameraData *data = cameraData(camera);

	while (!data->queuedRequests_.empty()) {
		request = data->queuedRequests_.front();
		if (request->status() == Request::RequestPending)
			break;

		ASSERT(!request->hasPendingBuffers());
		data->queuedRequests_.pop_front();
		camera->requestComplete(request);
	}
}
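
/*
 * Taken together, completeBuffer() and completeRequest() are typically called
 * from a buffer-ready slot in the platform-specific camera data, for instance
 * (member names hypothetical):
 *
 * \code
 * void MyCameraData::bufferReady(Buffer *buffer)
 * {
 *     Request *request = buffer->request();
 *
 *     pipe_->completeBuffer(camera_, request, buffer);
 *     pipe_->completeRequest(camera_, request);
 * }
 * \endcode
 */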

/**
 * \brief Register a camera to the camera manager and pipeline handler
 * \param[in] camera The camera to be added
 * \param[in] data Pipeline-specific data for the camera
 *
 * This method is called by pipeline handlers to register the cameras they
 * handle with the camera manager. It associates the pipeline-specific \a data
 * with the camera, for later retrieval with cameraData(). Ownership of \a data
 * is transferred to the PipelineHandler.
 */
void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera,
				     std::unique_ptr<CameraData> data)
{
	data->camera_ = camera.get();
	cameraData_[camera.get()] = std::move(data);
	cameras_.push_back(camera);
	manager_->addCamera(std::move(camera));
}

/**
 * \brief Enable hotplug handling for a media device
 * \param[in] media The media device
 *
 * This function enables hotplug handling, and especially hot-unplug handling,
 * of the \a media device. It shall be called by pipeline handlers for all the
 * media devices that can be disconnected.
 *
 * When a media device passed to this function is later unplugged, the pipeline
 * handler gets notified and automatically disconnects all the cameras it has
 * registered without requiring any manual intervention.
 */
void PipelineHandler::hotplugMediaDevice(MediaDevice *media)
{
	media->disconnected.connect(this, &PipelineHandler::mediaDeviceDisconnected);
}

/**
 * \brief Slot for the MediaDevice disconnected signal
 */
void PipelineHandler::mediaDeviceDisconnected(MediaDevice *media)
{
	media->disconnected.disconnect(this);

	if (cameras_.empty())
		return;

	disconnect();
}

/**
 * \brief Device disconnection handler
 *
 * This virtual function is called to notify the pipeline handler that the
 * device it handles has been disconnected. It notifies all cameras created by
 * the pipeline handler that they have been disconnected, and unregisters them
 * from the camera manager.
 *
 * The function can be overridden by pipeline handlers to perform custom
 * operations at disconnection time. Any overriding implementation shall call
 * the PipelineHandler::disconnect() base function for proper hot-unplug
 * operation.
 */
void PipelineHandler::disconnect()
{
	for (std::weak_ptr<Camera> ptr : cameras_) {
		std::shared_ptr<Camera> camera = ptr.lock();
		if (!camera)
			continue;

		camera->disconnect();
		manager_->removeCamera(camera.get());
	}

	cameras_.clear();
}

/**
 * \brief Retrieve the pipeline-specific data associated with a Camera
 * \param[in] camera The camera whose data to retrieve
 * \return A pointer to the pipeline-specific data passed to registerCamera().
 * The returned pointer is a borrowed reference and is guaranteed to remain
 * valid until the pipeline handler is destroyed. It shall not be deleted
 * manually by the caller.
 */
CameraData *PipelineHandler::cameraData(const Camera *camera)
{
	ASSERT(cameraData_.count(camera));
	return cameraData_[camera].get();
}
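
/*
 * As this accessor returns a pointer to the CameraData base class, pipeline
 * handlers commonly wrap it in a private helper that casts the result to
 * their derived type (MyCameraData is again hypothetical):
 *
 * \code
 * MyCameraData *PipelineHandlerMyDevice::cameraData(const Camera *camera)
 * {
 *     return static_cast<MyCameraData *>(
 *         PipelineHandler::cameraData(camera));
 * }
 * \endcode
 */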

/**
 * \var PipelineHandler::manager_
 * \brief The Camera manager associated with the pipeline handler
 *
 * The camera manager pointer is stored in the pipeline handler for the
 * convenience of pipeline handler implementations. It remains valid and
 * constant for the whole lifetime of the pipeline handler.
 */

/**
 * \fn PipelineHandler::name()
 * \brief Retrieve the pipeline handler name
 * \return The pipeline handler name
 */

/**
 * \class PipelineHandlerFactory
 * \brief Registration of PipelineHandler classes and creation of instances
 *
 * To facilitate discovery and instantiation of PipelineHandler classes, the
 * PipelineHandlerFactory class maintains a registry of pipeline handler
 * classes. Each PipelineHandler subclass shall register itself using the
 * REGISTER_PIPELINE_HANDLER() macro, which will create a corresponding
 * instance of a PipelineHandlerFactory subclass and register it with the
 * static list of factories.
 */

/**
 * \brief Construct a pipeline handler factory
 * \param[in] name Name of the pipeline handler class
 *
 * Creating an instance of the factory registers it with the global list of
 * factories, accessible through the factories() function.
 *
 * The factory \a name is used for debugging purposes and shall be unique.
 */
PipelineHandlerFactory::PipelineHandlerFactory(const char *name)
	: name_(name)
{
	registerType(this);
}

/**
 * \brief Create an instance of the PipelineHandler corresponding to the factory
 * \param[in] manager The camera manager
 *
 * \return A shared pointer to a new instance of the PipelineHandler subclass
 * corresponding to the factory
 */
std::shared_ptr<PipelineHandler> PipelineHandlerFactory::create(CameraManager *manager)
{
	PipelineHandler *handler = createInstance(manager);
	handler->name_ = name_.c_str();
	return std::shared_ptr<PipelineHandler>(handler);
}

/**
 * \fn PipelineHandlerFactory::name()
 * \brief Retrieve the factory name
 * \return The factory name
 */

/**
 * \brief Add a pipeline handler class to the registry
 * \param[in] factory Factory to use to construct the pipeline handler
 *
 * The caller is responsible for guaranteeing the uniqueness of the pipeline handler
 * name.
 */
void PipelineHandlerFactory::registerType(PipelineHandlerFactory *factory)
{
	std::vector<PipelineHandlerFactory *> &factories = PipelineHandlerFactory::factories();

	factories.push_back(factory);

	LOG(Pipeline, Debug)
		<< "Registered pipeline handler \"" << factory->name() << "\"";
}

/**
 * \brief Retrieve the list of all pipeline handler factories
 *
 * The static factories list is defined inside the function to ensure it gets
 * initialized on first use, without any dependency on link order.
 *
 * \return the list of pipeline handler factories
 */
std::vector<PipelineHandlerFactory *> &PipelineHandlerFactory::factories()
{
	static std::vector<PipelineHandlerFactory *> factories;
	return factories;
}

/**
 * \fn PipelineHandlerFactory::createInstance()
 * \brief Create an instance of the PipelineHandler corresponding to the factory
 * \param[in] manager The camera manager
 *
 * This virtual function is implemented by the REGISTER_PIPELINE_HANDLER()
 * macro. It creates a pipeline handler instance associated with the camera
 * \a manager.
 *
 * \return a pointer to a newly constructed instance of the PipelineHandler
 * subclass corresponding to the factory
 */

/**
 * \def REGISTER_PIPELINE_HANDLER
 * \brief Register a pipeline handler with the pipeline handler factory
 * \param[in] handler Class name of PipelineHandler derived class to register
 *
 * Register a PipelineHandler subclass with the factory and make it available to
 * try and match devices.
 */
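
/*
 * For example, a pipeline handler class named PipelineHandlerMyDevice (a
 * hypothetical name) would register itself at the end of its source file
 * with:
 *
 * \code
 * REGISTER_PIPELINE_HANDLER(PipelineHandlerMyDevice);
 * \endcode
 */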

} /* namespace libcamera */