summaryrefslogtreecommitdiff
path: root/src/cam/drm.cpp
blob: 46e34eb546fb1226cedae3562db101a0636c1f2b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2021, Ideas on Board Oy
 *
 * drm.cpp - DRM/KMS Helpers
 */

#include "drm.h"

#include <algorithm>
#include <errno.h>
#include <fcntl.h>
#include <iostream>
#include <set>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
#include <libcamera/pixel_format.h>

#include <libdrm/drm_mode.h>

#include "event_loop.h"

namespace DRM {

Object::Object(Device *dev, uint32_t id, Type type)
	: id_(id), dev_(dev), type_(type)
{
	/* Only connectors, CRTCs, encoders and planes carry properties. */
	switch (type) {
	case TypeConnector:
	case TypeCrtc:
	case TypeEncoder:
	case TypePlane:
		break;
	default:
		return;
	}

	/*
	 * We can't distinguish between failures due to the object having no
	 * property and failures due to other conditions. Assume we use the API
	 * correctly and consider the object has no property.
	 */
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(dev->fd(), id, type);
	if (!props)
		return;

	properties_.reserve(props->count_props);
	for (uint32_t i = 0; i < props->count_props; ++i)
		properties_.emplace_back(props->props[i],
					 props->prop_values[i]);

	drmModeFreeObjectProperties(props);
}

/* Out-of-line defaulted destructor, kept here to anchor the vtable. */
Object::~Object() = default;

/*
 * Look up one of this object's properties by name. Each stored PropertyValue
 * references a property object by ID; resolve it through the Device and
 * compare names. Returns nullptr when no property matches.
 */
const Property *Object::property(const std::string &name) const
{
	for (const PropertyValue &value : properties_) {
		const auto *prop =
			static_cast<const Property *>(dev_->object(value.id()));
		if (prop && prop->name() == name)
			return prop;
	}

	return nullptr;
}

/*
 * Retrieve the value of one of this object's properties by name. Returns a
 * pointer to the stored PropertyValue, or nullptr when the object has no
 * property with that name.
 */
const PropertyValue *Object::propertyValue(const std::string &name) const
{
	auto matchesName = [&](const PropertyValue &pv) {
		const auto *prop =
			static_cast<const Property *>(dev_->object(pv.id()));
		return prop && prop->name() == name;
	};

	auto it = std::find_if(properties_.begin(), properties_.end(),
			       matchesName);

	return it != properties_.end() ? &*it : nullptr;
}

/*
 * Wrap a drmModePropertyRes: copy the name, flags, value list and blob ID
 * list, and decode the property type flags into the Type enum.
 */
Property::Property(Device *dev, drmModePropertyRes *property)
	: Object(dev, property->prop_id, TypeProperty),
	  name_(property->name), flags_(property->flags),
	  values_(property->values, property->values + property->count_values),
	  blobs_(property->blob_ids, property->blob_ids + property->count_blobs)
{
	/*
	 * drm_property_type_is() copes with both legacy and extended property
	 * type encodings, so probe each supported type in turn rather than
	 * comparing the flags directly.
	 */
	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE))
		type_ = TypeRange;
	else if (drm_property_type_is(property, DRM_MODE_PROP_ENUM))
		type_ = TypeEnum;
	else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
		type_ = TypeBlob;
	else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK))
		type_ = TypeBitmask;
	else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT))
		type_ = TypeObject;
	else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE))
		type_ = TypeSignedRange;
	else
		type_ = TypeUnknown;

	/* Record the value-to-name mapping for enum entries. */
	for (int i = 0; i < property->count_enums; ++i)
		enums_[property->enums[i].value] = property->enums[i].name;
}

/*
 * Create a DRM property blob from raw data. The blob ID is written into id_
 * by the kernel on success; id_ starts at 0 (set by the Object base
 * constructor), and on failure it presumably stays 0 so that isValid()
 * reports the blob as invalid — the return value of
 * drmModeCreatePropertyBlob() is deliberately not checked here.
 */
Blob::Blob(Device *dev, const libcamera::Span<const uint8_t> &data)
	: Object(dev, 0, Object::TypeBlob)
{
	drmModeCreatePropertyBlob(dev->fd(), data.data(), data.size(), &id_);
}

/* Destroy the kernel-side property blob, if creation succeeded. */
Blob::~Blob()
{
	if (!isValid())
		return;

	drmModeDestroyPropertyBlob(device()->fd(), id());
}

/* Construct a Mode as a plain copy of a raw drmModeModeInfo. */
Mode::Mode(const drmModeModeInfo &mode)
	: drmModeModeInfo(mode)
{
}

/*
 * Serialize this mode into a DRM property blob, suitable for use as the
 * value of a CRTC "MODE_ID" property. The blob contains the raw
 * drmModeModeInfo bytes.
 */
std::unique_ptr<Blob> Mode::toBlob(Device *dev) const
{
	const uint8_t *raw = reinterpret_cast<const uint8_t *>(this);
	libcamera::Span<const uint8_t> data{ raw, sizeof(*this) };

	return std::make_unique<Blob>(dev, data);
}

/*
 * Construct a Crtc from its DRM representation. \a index is the position of
 * the CRTC in the resources list, used to match against possible_crtcs
 * bitmasks of encoders and planes.
 */
Crtc::Crtc(Device *dev, const drmModeCrtc *crtc, unsigned int index)
	: Object(dev, crtc->crtc_id, Object::TypeCrtc), index_(index)
{
}

/*
 * Construct an Encoder from its DRM representation, resolving the
 * possible_crtcs bitmask into pointers to the Device's Crtc instances. The
 * Device must have enumerated its CRTCs before encoders are constructed.
 */
Encoder::Encoder(Device *dev, const drmModeEncoder *encoder)
	: Object(dev, encoder->encoder_id, Object::TypeEncoder),
	  type_(encoder->encoder_type)
{
	const std::list<Crtc> &crtcs = dev->crtcs();
	possibleCrtcs_.reserve(crtcs.size());

	for (const Crtc &crtc : crtcs) {
		/*
		 * Shift an unsigned constant: shifting a signed 1 into bit 31
		 * (CRTC index 31) would be undefined behaviour.
		 */
		if (encoder->possible_crtcs & (1u << crtc.index()))
			possibleCrtcs_.push_back(&crtc);
	}

	possibleCrtcs_.shrink_to_fit();
}

namespace {

/*
 * Human-readable names for the DRM connector types, used by the Connector
 * constructor to build connector names such as "HDMI-A-1".
 */
const std::map<uint32_t, const char *> connectorTypeNames{
	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
	{ DRM_MODE_CONNECTOR_SVIDEO, "S-Video" },
	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
	{ DRM_MODE_CONNECTOR_Component, "Component" },
	{ DRM_MODE_CONNECTOR_9PinDIN, "9-Pin-DIN" },
	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
	{ DRM_MODE_CONNECTOR_TV, "TV" },
	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
	{ DRM_MODE_CONNECTOR_DPI, "DPI" },
};

} /* namespace */

/*
 * Construct a Connector from its DRM representation: build a "<type>-<id>"
 * name, record the connection status, resolve the encoder IDs into pointers
 * to the Device's Encoder instances, and copy the mode list. The Device must
 * have enumerated its encoders before connectors are constructed.
 */
Connector::Connector(Device *dev, const drmModeConnector *connector)
	: Object(dev, connector->connector_id, Object::TypeConnector),
	  type_(connector->connector_type)
{
	/* Fall back to the "Unknown" name for unrecognized connector types. */
	auto typeName = connectorTypeNames.find(connector->connector_type);
	if (typeName == connectorTypeNames.end()) {
		std::cerr
			<< "Invalid connector type "
			<< connector->connector_type << std::endl;
		typeName = connectorTypeNames.find(DRM_MODE_CONNECTOR_Unknown);
	}

	name_ = std::string(typeName->second) + "-"
	      + std::to_string(connector->connector_type_id);

	switch (connector->connection) {
	case DRM_MODE_CONNECTED:
		status_ = Status::Connected;
		break;

	case DRM_MODE_DISCONNECTED:
		status_ = Status::Disconnected;
		break;

	case DRM_MODE_UNKNOWNCONNECTION:
	default:
		status_ = Status::Unknown;
		break;
	}

	const std::list<Encoder> &encoders = dev->encoders();

	encoders_.reserve(connector->count_encoders);

	for (int i = 0; i < connector->count_encoders; ++i) {
		uint32_t encoderId = connector->encoders[i];
		auto encoder = std::find_if(encoders.begin(), encoders.end(),
					    [=](const Encoder &e) {
						    return e.id() == encoderId;
					    });
		/* An unknown encoder ID is reported but not fatal. */
		if (encoder == encoders.end()) {
			std::cerr
				<< "Encoder " << encoderId << " not found"
				<< std::endl;
			continue;
		}

		encoders_.push_back(&*encoder);
	}

	encoders_.shrink_to_fit();

	modes_ = { connector->modes, connector->modes + connector->count_modes };
}

/*
 * Construct a Plane from its DRM representation: copy the list of supported
 * FourCC formats and resolve the possible_crtcs bitmask into pointers to the
 * Device's Crtc instances. The Device must have enumerated its CRTCs before
 * planes are constructed.
 */
Plane::Plane(Device *dev, const drmModePlane *plane)
	: Object(dev, plane->plane_id, Object::TypePlane),
	  possibleCrtcsMask_(plane->possible_crtcs)
{
	formats_ = { plane->formats, plane->formats + plane->count_formats };

	const std::list<Crtc> &crtcs = dev->crtcs();
	possibleCrtcs_.reserve(crtcs.size());

	for (const Crtc &crtc : crtcs) {
		/*
		 * Shift an unsigned constant: shifting a signed 1 into bit 31
		 * (CRTC index 31) would be undefined behaviour.
		 */
		if (plane->possible_crtcs & (1u << crtc.index()))
			possibleCrtcs_.push_back(&crtc);
	}

	possibleCrtcs_.shrink_to_fit();
}

/*
 * Check whether this plane supports the given pixel format. The plane's
 * format list stores raw FourCC values, so compare against the format's
 * FourCC.
 */
bool Plane::supportsFormat(const libcamera::PixelFormat &format) const
{
	const uint32_t fourcc = format.fourcc();

	for (uint32_t supported : formats_) {
		if (supported == fourcc)
			return true;
	}

	return false;
}

int Plane::setup()
{
	const PropertyValue *pv = propertyValue("type");
	if (!pv)
		return -EINVAL;

	switch (pv->value()) {
	case DRM_PLANE_TYPE_OVERLAY:
		type_ = TypeOverlay;
		break;

	case DRM_PLANE_TYPE_PRIMARY:
		type_ = TypePrimary;
		break;

	case DRM_PLANE_TYPE_CURSOR:
		type_ = TypeCursor;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an empty FrameBuffer. The DRM framebuffer ID (id_) and the GEM
 * handles (planes_) are filled in later by Device::createFrameBuffer().
 */
FrameBuffer::FrameBuffer(Device *dev)
	: Object(dev, 0, Object::TypeFb)
{
}

/*
 * Release the framebuffer resources: close every GEM handle imported for the
 * buffer's dma-buf fds, then remove the DRM framebuffer object itself.
 */
FrameBuffer::~FrameBuffer()
{
	for (const auto &plane : planes_) {
		struct drm_gem_close gem_close = {
			.handle = plane.second.handle,
			.pad = 0,
		};
		int ret;

		/* Retry the ioctl when interrupted by a signal. */
		do {
			ret = ioctl(device()->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

		/* Report but otherwise ignore failures, this is a destructor. */
		if (ret == -1) {
			ret = -errno;
			std::cerr
				<< "Failed to close GEM object: "
				<< strerror(-ret) << std::endl;
		}
	}

	drmModeRmFB(device()->fd(), id());
}

/*
 * Construct an atomic request for \a dev. The request is marked invalid if
 * the underlying drmModeAtomicReq allocation fails; all subsequent
 * operations on an invalid request fail with -EINVAL.
 */
AtomicRequest::AtomicRequest(Device *dev)
	: dev_(dev), valid_(true)
{
	request_ = drmModeAtomicAlloc();
	if (request_ == nullptr)
		valid_ = false;
}

/* Free the underlying drmModeAtomicReq, if allocation had succeeded. */
AtomicRequest::~AtomicRequest()
{
	if (!request_)
		return;

	drmModeAtomicFree(request_);
}

/*
 * Add a property assignment to the request, looking the property up by name
 * on \a object. Returns 0 on success or -EINVAL if the request is already
 * invalid or the property doesn't exist; a missing property also marks the
 * whole request invalid.
 */
int AtomicRequest::addProperty(const Object *object, const std::string &property,
			       uint64_t value)
{
	if (!valid_)
		return -EINVAL;

	const Property *prop = object->property(property);
	if (prop == nullptr) {
		valid_ = false;
		return -EINVAL;
	}

	return addProperty(object->id(), prop->id(), value);
}

/*
 * Add a blob-valued property assignment to the request. On success the
 * request takes ownership of \a blob, keeping it alive until the request is
 * destroyed. Returns 0 on success or a negative error code; lookup failures
 * mark the request invalid.
 */
int AtomicRequest::addProperty(const Object *object, const std::string &property,
			       std::unique_ptr<Blob> blob)
{
	if (!valid_)
		return -EINVAL;

	const Property *prop = object->property(property);
	if (prop == nullptr) {
		valid_ = false;
		return -EINVAL;
	}

	int ret = addProperty(object->id(), prop->id(), blob->id());
	if (ret < 0)
		return ret;

	/* Keep the blob alive for the lifetime of the request. */
	blobs_.push_back(std::move(blob));

	return 0;
}

/*
 * Add a property assignment by raw object and property IDs. A failure from
 * libdrm marks the request invalid and the error is returned as-is.
 */
int AtomicRequest::addProperty(uint32_t object, uint32_t property, uint64_t value)
{
	int ret = drmModeAtomicAddProperty(request_, object, property, value);
	if (ret >= 0)
		return 0;

	valid_ = false;
	return ret;
}

/*
 * Commit the atomic request. FlagAllowModeset permits full mode sets;
 * FlagAsync makes the commit non-blocking and requests a page flip event,
 * delivered through Device::drmEvent() with this request as user data.
 */
int AtomicRequest::commit(unsigned int flags)
{
	if (!valid_)
		return -EINVAL;

	uint32_t drmFlags = 0;
	if (flags & FlagAllowModeset)
		drmFlags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	if (flags & FlagAsync) {
		drmFlags |= DRM_MODE_PAGE_FLIP_EVENT;
		drmFlags |= DRM_MODE_ATOMIC_NONBLOCK;
	}

	return drmModeAtomicCommit(dev_->fd(), request_, drmFlags, this);
}

/* Construct a closed Device; call init() to open and enumerate it. */
Device::Device()
	: fd_(-1)
{
}

/*
 * Close the DRM device if it was opened.
 *
 * NOTE(review): init() registers fd_ with the EventLoop but nothing here
 * unregisters it — presumably the Device outlives the event loop in this
 * application; verify against the callers.
 */
Device::~Device()
{
	if (fd_ != -1)
		drmClose(fd_);
}

/*
 * Open the first DRM/KMS device (/dev/dri/card0), enable the atomic API,
 * enumerate all resources and register the device fd with the event loop so
 * DRM events get dispatched through drmEvent().
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int Device::init()
{
	constexpr size_t NODE_NAME_MAX = sizeof("/dev/dri/card255");
	char name[NODE_NAME_MAX];
	int ret;

	/*
	 * Open the first DRM/KMS device. The libdrm drmOpen*() functions
	 * require either a module name or a bus ID, which we don't have, so
	 * bypass them. The automatic module loading and device node creation
	 * from drmOpen() is of no practical use as any modern system will
	 * handle that through udev or an equivalent component.
	 */
	snprintf(name, sizeof(name), "/dev/dri/card%u", 0);
	fd_ = open(name, O_RDWR | O_CLOEXEC);
	if (fd_ < 0) {
		ret = -errno;
		std::cerr
			<< "Failed to open DRM/KMS device " << name << ": "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	/*
	 * Enable the atomic APIs. This also automatically enables the
	 * universal planes API.
	 */
	ret = drmSetClientCap(fd_, DRM_CLIENT_CAP_ATOMIC, 1);
	if (ret < 0) {
		ret = -errno;
		std::cerr
			<< "Failed to enable atomic capability: "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	/* List all the resources. */
	ret = getResources();
	if (ret < 0)
		return ret;

	/* Dispatch DRM events (e.g. page flip completion) via the loop. */
	EventLoop::instance()->addEvent(fd_, EventLoop::Read,
					std::bind(&Device::drmEvent, this));

	return 0;
}

/*
 * Enumerate all DRM/KMS resources exposed by the device: CRTCs, encoders,
 * connectors, planes and properties. Objects are stored in std::list members
 * (stable addresses) and indexed by ID in objects_. Enumeration order
 * matters: the Encoder, Connector and Plane constructors resolve references
 * to previously-enumerated objects. Returns 0 on success or a negative error
 * code otherwise.
 */
int Device::getResources()
{
	int ret;

	/* RAII wrapper releases the resources on every return path. */
	std::unique_ptr<drmModeRes, decltype(&drmModeFreeResources)> resources{
		drmModeGetResources(fd_),
		&drmModeFreeResources
	};
	if (!resources) {
		ret = -errno;
		std::cerr
			<< "Failed to get DRM/KMS resources: "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	for (int i = 0; i < resources->count_crtcs; ++i) {
		drmModeCrtc *crtc = drmModeGetCrtc(fd_, resources->crtcs[i]);
		if (!crtc) {
			ret = -errno;
			std::cerr
				<< "Failed to get CRTC: " << strerror(-ret)
				<< std::endl;
			return ret;
		}

		crtcs_.emplace_back(this, crtc, i);
		drmModeFreeCrtc(crtc);

		/* List addresses are stable, so the pointer stays valid. */
		Crtc &obj = crtcs_.back();
		objects_[obj.id()] = &obj;
	}

	for (int i = 0; i < resources->count_encoders; ++i) {
		drmModeEncoder *encoder =
			drmModeGetEncoder(fd_, resources->encoders[i]);
		if (!encoder) {
			ret = -errno;
			std::cerr
				<< "Failed to get encoder: " << strerror(-ret)
				<< std::endl;
			return ret;
		}

		encoders_.emplace_back(this, encoder);
		drmModeFreeEncoder(encoder);

		Encoder &obj = encoders_.back();
		objects_[obj.id()] = &obj;
	}

	for (int i = 0; i < resources->count_connectors; ++i) {
		drmModeConnector *connector =
			drmModeGetConnector(fd_, resources->connectors[i]);
		if (!connector) {
			ret = -errno;
			std::cerr
				<< "Failed to get connector: " << strerror(-ret)
				<< std::endl;
			return ret;
		}

		connectors_.emplace_back(this, connector);
		drmModeFreeConnector(connector);

		Connector &obj = connectors_.back();
		objects_[obj.id()] = &obj;
	}

	std::unique_ptr<drmModePlaneRes, decltype(&drmModeFreePlaneResources)> planes{
		drmModeGetPlaneResources(fd_),
		&drmModeFreePlaneResources
	};
	if (!planes) {
		ret = -errno;
		std::cerr
			<< "Failed to get DRM/KMS planes: "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	for (uint32_t i = 0; i < planes->count_planes; ++i) {
		drmModePlane *plane =
			drmModeGetPlane(fd_, planes->planes[i]);
		if (!plane) {
			ret = -errno;
			std::cerr
				<< "Failed to get plane: " << strerror(-ret)
				<< std::endl;
			return ret;
		}

		planes_.emplace_back(this, plane);
		drmModeFreePlane(plane);

		Plane &obj = planes_.back();
		objects_[obj.id()] = &obj;
	}

	/* Set the possible planes for each CRTC. */
	for (Crtc &crtc : crtcs_) {
		for (const Plane &plane : planes_) {
			if (plane.possibleCrtcsMask_ & (1 << crtc.index()))
				crtc.planes_.push_back(&plane);
		}
	}

	/* Collect all property IDs and create Property instances. */
	std::set<uint32_t> properties;
	for (const auto &object : objects_) {
		for (const PropertyValue &value : object.second->properties())
			properties.insert(value.id());
	}

	for (uint32_t id : properties) {
		drmModePropertyRes *property = drmModeGetProperty(fd_, id);
		/* A property fetch failure is reported but not fatal. */
		if (!property) {
			ret = -errno;
			std::cerr
				<< "Failed to get property: " << strerror(-ret)
				<< std::endl;
			continue;
		}

		properties_.emplace_back(this, property);
		drmModeFreeProperty(property);

		Property &obj = properties_.back();
		objects_[obj.id()] = &obj;
	}

	/* Finally, perform all delayed setup of mode objects. */
	for (auto &object : objects_) {
		ret = object.second->setup();
		if (ret < 0) {
			std::cerr
				<< "Failed to setup object " << object.second->id()
				<< ": " << strerror(-ret) << std::endl;
			return ret;
		}
	}

	return 0;
}

/* Retrieve the mode object with the given ID, or nullptr if unknown. */
const Object *Device::object(uint32_t id)
{
	auto it = objects_.find(id);

	return it != objects_.end() ? it->second : nullptr;
}

/*
 * Create a DRM framebuffer wrapping a libcamera FrameBuffer: import each
 * plane's dma-buf fd as a GEM handle (reusing the handle when multiple
 * planes share one fd) and register the framebuffer with drmModeAddFB2().
 * Returns the new FrameBuffer, or nullptr on failure.
 */
std::unique_ptr<FrameBuffer> Device::createFrameBuffer(
	const libcamera::FrameBuffer &buffer,
	const libcamera::PixelFormat &format,
	const libcamera::Size &size,
	const std::array<uint32_t, 4> &strides)
{
	std::unique_ptr<FrameBuffer> fb{ new FrameBuffer(this) };

	uint32_t handles[4] = {};
	uint32_t offsets[4] = {};
	int ret;

	const std::vector<libcamera::FrameBuffer::Plane> &planes = buffer.planes();

	unsigned int i = 0;
	for (const libcamera::FrameBuffer::Plane &plane : planes) {
		int fd = plane.fd.get();
		uint32_t handle;

		/* Import each dma-buf fd only once, reuse the GEM handle. */
		auto iter = fb->planes_.find(fd);
		if (iter == fb->planes_.end()) {
			ret = drmPrimeFDToHandle(fd_, plane.fd.get(), &handle);
			if (ret < 0) {
				ret = -errno;
				std::cerr
					<< "Unable to import framebuffer dmabuf: "
					<< strerror(-ret) << std::endl;
				return nullptr;
			}

			/*
			 * Record the handle so ~FrameBuffer() can close it
			 * and later planes with the same fd can reuse it.
			 */
			fb->planes_[fd] = { handle };
		} else {
			handle = iter->second.handle;
		}

		handles[i] = handle;
		offsets[i] = plane.offset;
		++i;
	}

	ret = drmModeAddFB2(fd_, size.width, size.height, format.fourcc(), handles,
			    strides.data(), offsets, &fb->id_, 0);
	if (ret < 0) {
		ret = -errno;
		std::cerr
			<< "Failed to add framebuffer: "
			<< strerror(-ret) << std::endl;
		return nullptr;
	}

	return fb;
}

/*
 * Read and dispatch pending DRM events from the device fd. Invoked by the
 * event loop when fd_ becomes readable; page flip events are routed to
 * pageFlipComplete().
 */
void Device::drmEvent()
{
	drmEventContext ctx{};
	ctx.version = DRM_EVENT_CONTEXT_VERSION;
	ctx.page_flip_handler = &Device::pageFlipComplete;

	drmHandleEvent(fd_, &ctx);
}

void Device::pageFlipComplete([[maybe_unused]] int fd,
			      [[maybe_unused]] unsigned int sequence,
			      [[maybe_unused]] unsigned int tv_sec,
			      [[maybe_unused]] unsigned int tv_usec,
			      void *user_data)
{
	AtomicRequest *request = static_cast<AtomicRequest *>(user_data);
	request->device()->requestComplete.emit(request);
}

} /* namespace DRM */
> */ /** * \typedef V4L2VideoDevice::Formats * \brief A map of supported V4L2 pixel formats to frame sizes */ /** * \brief Construct a V4L2VideoDevice * \param[in] deviceNode The file-system path to the video device node */ V4L2VideoDevice::V4L2VideoDevice(const std::string &deviceNode) : V4L2Device(deviceNode), cache_(nullptr), fdBufferNotifier_(nullptr), streaming_(false) { /* * We default to an MMAP based CAPTURE video device, however this will * be updated based upon the device capabilities. */ bufferType_ = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; memoryType_ = V4L2_MEMORY_MMAP; } /** * \brief Construct a V4L2VideoDevice from a MediaEntity * \param[in] entity The MediaEntity to build the video device from * * Construct a V4L2VideoDevice from a MediaEntity's device node path. */ V4L2VideoDevice::V4L2VideoDevice(const MediaEntity *entity) : V4L2VideoDevice(entity->deviceNode()) { } V4L2VideoDevice::~V4L2VideoDevice() { close(); } /** * \brief Open the V4L2 video device node and query its capabilities * * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::open() { int ret; ret = V4L2Device::open(O_RDWR | O_NONBLOCK); if (ret < 0) return ret; ret = ioctl(VIDIOC_QUERYCAP, &caps_); if (ret < 0) { LOG(V4L2, Error) << "Failed to query device capabilities: " << strerror(-ret); return ret; } if (caps_.version < KERNEL_VERSION(5, 0, 0)) { LOG(V4L2, Error) << "V4L2 API v" << (caps_.version >> 16) << "." << ((caps_.version >> 8) & 0xff) << "." << (caps_.version & 0xff) << " too old, v5.0.0 or later is required"; return -EINVAL; } if (!caps_.hasStreaming()) { LOG(V4L2, Error) << "Device does not support streaming I/O"; return -EINVAL; } /* * Set buffer type and wait for read notifications on CAPTURE video * devices (POLLIN), and write notifications for OUTPUT video devices * (POLLOUT). */ EventNotifier::Type notifierType; if (caps_.isVideoCapture()) { notifierType = EventNotifier::Read; bufferType_ = caps_.isMultiplanar() ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; } else if (caps_.isVideoOutput()) { notifierType = EventNotifier::Write; bufferType_ = caps_.isMultiplanar() ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT; } else if (caps_.isMetaCapture()) { notifierType = EventNotifier::Read; bufferType_ = V4L2_BUF_TYPE_META_CAPTURE; } else if (caps_.isMetaOutput()) { notifierType = EventNotifier::Write; bufferType_ = V4L2_BUF_TYPE_META_OUTPUT; } else { LOG(V4L2, Error) << "Device is not a supported type"; return -EINVAL; } fdBufferNotifier_ = new EventNotifier(fd(), notifierType); fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable); fdBufferNotifier_->setEnabled(false); LOG(V4L2, Debug) << "Opened device " << caps_.bus_info() << ": " << caps_.driver() << ": " << caps_.card(); return 0; } /** * \brief Open a V4L2 video device from an opened file handle and query its * capabilities * \param[in] handle The file descriptor to set * \param[in] type The device type to operate on * * This methods opens a video device from the existing file descriptor \a * handle. Like open(), this method queries the capabilities of the device, but * handles it according to the given device \a type instead of determining its * type from the capabilities. This can be used to force a given device type for * memory-to-memory devices. * * The file descriptor \a handle is duplicated, and the caller is responsible * for closing the \a handle when it has no further use for it. The close() * method will close the duplicated file descriptor, leaving \a handle * untouched. 
* * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::open(int handle, enum v4l2_buf_type type) { int ret; int newFd; newFd = dup(handle); if (newFd < 0) { ret = -errno; LOG(V4L2, Error) << "Failed to duplicate file handle: " << strerror(-ret); return ret; } ret = V4L2Device::setFd(newFd); if (ret < 0) { LOG(V4L2, Error) << "Failed to set file handle: " << strerror(-ret); ::close(newFd); return ret; } ret = ioctl(VIDIOC_QUERYCAP, &caps_); if (ret < 0) { LOG(V4L2, Error) << "Failed to query device capabilities: " << strerror(-ret); return ret; } if (!caps_.hasStreaming()) { LOG(V4L2, Error) << "Device does not support streaming I/O"; return -EINVAL; } /* * Set buffer type and wait for read notifications on CAPTURE video * devices (POLLIN), and write notifications for OUTPUT video devices * (POLLOUT). */ EventNotifier::Type notifierType; switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: notifierType = EventNotifier::Write; bufferType_ = caps_.isMultiplanar() ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE: notifierType = EventNotifier::Read; bufferType_ = caps_.isMultiplanar() ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; break; default: LOG(V4L2, Error) << "Unsupported buffer type"; return -EINVAL; } fdBufferNotifier_ = new EventNotifier(fd(), notifierType); fdBufferNotifier_->activated.connect(this, &V4L2VideoDevice::bufferAvailable); fdBufferNotifier_->setEnabled(false); LOG(V4L2, Debug) << "Opened device " << caps_.bus_info() << ": " << caps_.driver() << ": " << caps_.card(); return 0; } /** * \brief Close the video device, releasing any resources acquired by open() */ void V4L2VideoDevice::close() { if (!isOpen()) return; releaseBuffers(); delete fdBufferNotifier_; V4L2Device::close(); } /** * \fn V4L2VideoDevice::driverName() * \brief Retrieve the name of the V4L2 device driver * \return The string containing the driver name */ /** * \fn V4L2VideoDevice::deviceName() * \brief Retrieve the name of the V4L2 video device * \return The string containing the device name */ /** * \fn V4L2VideoDevice::busName() * \brief Retrieve the location of the device in the system * \return The string containing the device location */ /** * \fn V4L2VideoDevice::caps() * \brief Retrieve the device V4L2 capabilities * \return The device V4L2 capabilities */ std::string V4L2VideoDevice::logPrefix() const { return deviceNode() + "[" + std::to_string(fd()) + (V4L2_TYPE_IS_OUTPUT(bufferType_) ? 
":out]" : ":cap]"); } /** * \brief Retrieve the image format set on the V4L2 video device * \param[out] format The image format applied on the video device * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format) { if (caps_.isMeta()) return getFormatMeta(format); else if (caps_.isMultiplanar()) return getFormatMultiplane(format); else return getFormatSingleplane(format); } /** * \brief Try an image format on the V4L2 video device * \param[inout] format The image format to test applicability to the video device * * Try the supplied \a format on the video device without applying it, returning * the format that would be applied. This is equivalent to setFormat(), except * that the device configuration is not changed. * * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format) { if (caps_.isMeta()) return trySetFormatMeta(format, false); else if (caps_.isMultiplanar()) return trySetFormatMultiplane(format, false); else return trySetFormatSingleplane(format, false); } /** * \brief Configure an image format on the V4L2 video device * \param[inout] format The image format to apply to the video device * * Apply the supplied \a format to the video device, and return the actually * applied format parameters, as \ref V4L2VideoDevice::getFormat would do. 
* * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format) { if (caps_.isMeta()) return trySetFormatMeta(format, true); else if (caps_.isMultiplanar()) return trySetFormatMultiplane(format, true); else return trySetFormatSingleplane(format, true); } int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format) { struct v4l2_format v4l2Format = {}; struct v4l2_meta_format *pix = &v4l2Format.fmt.meta; int ret; v4l2Format.type = bufferType_; ret = ioctl(VIDIOC_G_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to get format: " << strerror(-ret); return ret; } format->size.width = 0; format->size.height = 0; format->fourcc = V4L2PixelFormat(pix->dataformat); format->planesCount = 1; format->planes[0].bpl = pix->buffersize; format->planes[0].size = pix->buffersize; return 0; } int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set) { struct v4l2_format v4l2Format = {}; struct v4l2_meta_format *pix = &v4l2Format.fmt.meta; int ret; v4l2Format.type = bufferType_; pix->dataformat = format->fourcc; pix->buffersize = format->planes[0].size; ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to " << (set ? "set" : "try") << " format: " << strerror(-ret); return ret; } /* * Return to caller the format actually applied on the video device, * which might differ from the requested one. 
*/ format->size.width = 0; format->size.height = 0; format->fourcc = V4L2PixelFormat(pix->dataformat); format->planesCount = 1; format->planes[0].bpl = pix->buffersize; format->planes[0].size = pix->buffersize; return 0; } int V4L2VideoDevice::getFormatMultiplane(V4L2DeviceFormat *format) { struct v4l2_format v4l2Format = {}; struct v4l2_pix_format_mplane *pix = &v4l2Format.fmt.pix_mp; int ret; v4l2Format.type = bufferType_; ret = ioctl(VIDIOC_G_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to get format: " << strerror(-ret); return ret; } format->size.width = pix->width; format->size.height = pix->height; format->fourcc = V4L2PixelFormat(pix->pixelformat); format->planesCount = pix->num_planes; for (unsigned int i = 0; i < format->planesCount; ++i) { format->planes[i].bpl = pix->plane_fmt[i].bytesperline; format->planes[i].size = pix->plane_fmt[i].sizeimage; } return 0; } int V4L2VideoDevice::trySetFormatMultiplane(V4L2DeviceFormat *format, bool set) { struct v4l2_format v4l2Format = {}; struct v4l2_pix_format_mplane *pix = &v4l2Format.fmt.pix_mp; int ret; v4l2Format.type = bufferType_; pix->width = format->size.width; pix->height = format->size.height; pix->pixelformat = format->fourcc; pix->num_planes = format->planesCount; pix->field = V4L2_FIELD_NONE; ASSERT(pix->num_planes <= std::size(pix->plane_fmt)); for (unsigned int i = 0; i < pix->num_planes; ++i) { pix->plane_fmt[i].bytesperline = format->planes[i].bpl; pix->plane_fmt[i].sizeimage = format->planes[i].size; } ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to " << (set ? "set" : "try") << " format: " << strerror(-ret); return ret; } /* * Return to caller the format actually applied on the video device, * which might differ from the requested one. 
*/ format->size.width = pix->width; format->size.height = pix->height; format->fourcc = V4L2PixelFormat(pix->pixelformat); format->planesCount = pix->num_planes; for (unsigned int i = 0; i < format->planesCount; ++i) { format->planes[i].bpl = pix->plane_fmt[i].bytesperline; format->planes[i].size = pix->plane_fmt[i].sizeimage; } return 0; } int V4L2VideoDevice::getFormatSingleplane(V4L2DeviceFormat *format) { struct v4l2_format v4l2Format = {}; struct v4l2_pix_format *pix = &v4l2Format.fmt.pix; int ret; v4l2Format.type = bufferType_; ret = ioctl(VIDIOC_G_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to get format: " << strerror(-ret); return ret; } format->size.width = pix->width; format->size.height = pix->height; format->fourcc = V4L2PixelFormat(pix->pixelformat); format->planesCount = 1; format->planes[0].bpl = pix->bytesperline; format->planes[0].size = pix->sizeimage; return 0; } int V4L2VideoDevice::trySetFormatSingleplane(V4L2DeviceFormat *format, bool set) { struct v4l2_format v4l2Format = {}; struct v4l2_pix_format *pix = &v4l2Format.fmt.pix; int ret; v4l2Format.type = bufferType_; pix->width = format->size.width; pix->height = format->size.height; pix->pixelformat = format->fourcc; pix->bytesperline = format->planes[0].bpl; pix->field = V4L2_FIELD_NONE; ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) << "Unable to " << (set ? "set" : "try") << " format: " << strerror(-ret); return ret; } /* * Return to caller the format actually applied on the device, * which might differ from the requested one. */ format->size.width = pix->width; format->size.height = pix->height; format->fourcc = V4L2PixelFormat(pix->pixelformat); format->planesCount = 1; format->planes[0].bpl = pix->bytesperline; format->planes[0].size = pix->sizeimage; return 0; } /** * \brief Enumerate all pixel formats and frame sizes * \param[in] code Restrict formats to this media bus code. 
* * Enumerate all pixel formats and frame sizes supported by the video device. * If the \a code argument is not zero, only formats compatible with that media * bus code will be enumerated. * * \return A list of the supported video device formats */ V4L2VideoDevice::Formats V4L2VideoDevice::formats(uint32_t code) { Formats formats; for (V4L2PixelFormat pixelFormat : enumPixelformats(code)) { std::vector<SizeRange> sizes = enumSizes(pixelFormat); if (sizes.empty()) return {}; if (formats.find(pixelFormat) != formats.end()) { LOG(V4L2, Error) << "Could not add sizes for pixel format " << pixelFormat; return {}; } formats.emplace(pixelFormat, sizes); } return formats; } std::vector<V4L2PixelFormat> V4L2VideoDevice::enumPixelformats(uint32_t code) { std::vector<V4L2PixelFormat> formats; int ret; if (code && !(caps_.device_caps() & V4L2_CAP_IO_MC)) { LOG(V4L2, Error) << "Media bus code filtering not supported by the device"; return {}; } for (unsigned int index = 0; ; index++) { struct v4l2_fmtdesc pixelformatEnum = {}; pixelformatEnum.index = index; pixelformatEnum.type = bufferType_; pixelformatEnum.mbus_code = code; ret = ioctl(VIDIOC_ENUM_FMT, &pixelformatEnum); if (ret) break; formats.push_back(V4L2PixelFormat(pixelformatEnum.pixelformat)); } if (ret && ret != -EINVAL) { LOG(V4L2, Error) << "Unable to enumerate pixel formats: " << strerror(-ret); return {}; } return formats; } std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat) { std::vector<SizeRange> sizes; int ret; for (unsigned int index = 0;; index++) { struct v4l2_frmsizeenum frameSize = {}; frameSize.index = index; frameSize.pixel_format = pixelFormat; ret = ioctl(VIDIOC_ENUM_FRAMESIZES, &frameSize); if (ret) break; if (index != 0 && frameSize.type != V4L2_FRMSIZE_TYPE_DISCRETE) { LOG(V4L2, Error) << "Non-zero index for non discrete type"; return {}; } switch (frameSize.type) { case V4L2_FRMSIZE_TYPE_DISCRETE: sizes.emplace_back(Size{ frameSize.discrete.width, 
frameSize.discrete.height }); break; case V4L2_FRMSIZE_TYPE_CONTINUOUS: sizes.emplace_back(Size{ frameSize.stepwise.min_width, frameSize.stepwise.min_height }, Size{ frameSize.stepwise.max_width, frameSize.stepwise.max_height }); break; case V4L2_FRMSIZE_TYPE_STEPWISE: sizes.emplace_back(Size{ frameSize.stepwise.min_width, frameSize.stepwise.min_height }, Size{ frameSize.stepwise.max_width, frameSize.stepwise.max_height }, frameSize.stepwise.step_width, frameSize.stepwise.step_height); break; default: LOG(V4L2, Error) << "Unknown VIDIOC_ENUM_FRAMESIZES type " << frameSize.type; return {}; } } if (ret && ret != -EINVAL) { LOG(V4L2, Error) << "Unable to enumerate frame sizes: " << strerror(-ret); return {}; } return sizes; } /** * \brief Set a selection rectangle \a rect for \a target * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags * \param[inout] rect The selection rectangle to be applied * * \todo Define a V4L2SelectionTarget enum for the selection target * * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::setSelection(unsigned int target, Rectangle *rect) { struct v4l2_selection sel = {}; sel.type = bufferType_; sel.target = target; sel.flags = 0; sel.r.left = rect->x; sel.r.top = rect->y; sel.r.width = rect->width; sel.r.height = rect->height; int ret = ioctl(VIDIOC_S_SELECTION, &sel); if (ret < 0) { LOG(V4L2, Error) << "Unable to set rectangle " << target << ": " << strerror(-ret); return ret; } rect->x = sel.r.left; rect->y = sel.r.top; rect->width = sel.r.width; rect->height = sel.r.height; return 0; } int V4L2VideoDevice::requestBuffers(unsigned int count, enum v4l2_memory memoryType) { struct v4l2_requestbuffers rb = {}; int ret; rb.count = count; rb.type = bufferType_; rb.memory = memoryType; ret = ioctl(VIDIOC_REQBUFS, &rb); if (ret < 0) { LOG(V4L2, Error) << "Unable to request " << count << " buffers: " << strerror(-ret); return ret; } if (rb.count < count) { LOG(V4L2, Error) << "Not enough 
buffers provided by V4L2VideoDevice"; requestBuffers(0, memoryType); return -ENOMEM; } LOG(V4L2, Debug) << rb.count << " buffers requested."; return 0; } /** * \brief Allocate and export buffers from the video device * \param[in] count Number of buffers to allocate * \param[out] buffers Vector to store allocated buffers * * This function wraps buffer allocation with the V4L2 MMAP memory type. It * requests \a count buffers from the driver, allocating the corresponding * memory, and exports them as a set of FrameBuffer objects in \a buffers. Upon * successful return the driver's internal buffer management is initialized in * MMAP mode, and the video device is ready to accept queueBuffer() calls. * * The number of planes and the plane sizes for the allocation are determined * by the currently active format on the device as set by setFormat(). * * Buffers allocated with this function shall later be free with * releaseBuffers(). If buffers have already been allocated with * allocateBuffers() or imported with importBuffers(), this function returns * -EBUSY. * * \return The number of allocated buffers on success or a negative error code * otherwise * \retval -EBUSY buffers have already been allocated or imported */ int V4L2VideoDevice::allocateBuffers(unsigned int count, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { int ret = createBuffers(count, buffers); if (ret < 0) return ret; cache_ = new V4L2BufferCache(*buffers); memoryType_ = V4L2_MEMORY_MMAP; return ret; } /** * \brief Export buffers from the video device * \param[in] count Number of buffers to allocate * \param[out] buffers Vector to store allocated buffers * * This function allocates \a count buffer from the video device and exports * them as dmabuf objects, stored in \a buffers. Unlike allocateBuffers(), this * function leaves the driver's internal buffer management uninitialized. 
The * video device shall be initialized with importBuffers() or allocateBuffers() * before it can accept queueBuffer() calls. The exported buffers are directly * usable with any V4L2 video device in DMABUF mode, or with other dmabuf * importers. * * The number of planes and the plane sizes for the allocation are determined * by the currently active format on the device as set by setFormat(). * * Multiple independent sets of buffers can be allocated with multiple calls to * this function. Device-specific limitations may apply regarding the minimum * and maximum number of buffers per set, or to total amount of allocated * memory. The exported dmabuf lifetime is tied to the returned \a buffers. To * free a buffer, the caller shall delete the corresponding FrameBuffer * instance. No bookkeeping and automatic free is performed by the * V4L2VideoDevice class. * * If buffers have already been allocated with allocateBuffers() or imported * with importBuffers(), this function returns -EBUSY. * * \return The number of allocated buffers on success or a negative error code * otherwise * \retval -EBUSY buffers have already been allocated or imported */ int V4L2VideoDevice::exportBuffers(unsigned int count, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { int ret = createBuffers(count, buffers); if (ret < 0) return ret; requestBuffers(0, V4L2_MEMORY_MMAP); return ret; } int V4L2VideoDevice::createBuffers(unsigned int count, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { if (cache_) { LOG(V4L2, Error) << "Buffers already allocated"; return -EINVAL; } int ret = requestBuffers(count, V4L2_MEMORY_MMAP); if (ret < 0) return ret; for (unsigned i = 0; i < count; ++i) { std::unique_ptr<FrameBuffer> buffer = createBuffer(i); if (!buffer) { LOG(V4L2, Error) << "Unable to create buffer"; requestBuffers(0, V4L2_MEMORY_MMAP); buffers->clear(); return -EINVAL; } buffers->push_back(std::move(buffer)); } return count; } std::unique_ptr<FrameBuffer> 
V4L2VideoDevice::createBuffer(unsigned int index) { struct v4l2_plane v4l2Planes[VIDEO_MAX_PLANES] = {}; struct v4l2_buffer buf = {}; buf.index = index; buf.type = bufferType_; buf.length = std::size(v4l2Planes); buf.m.planes = v4l2Planes; int ret = ioctl(VIDIOC_QUERYBUF, &buf); if (ret < 0) { LOG(V4L2, Error) << "Unable to query buffer " << index << ": " << strerror(-ret); return nullptr; } const bool multiPlanar = V4L2_TYPE_IS_MULTIPLANAR(buf.type); const unsigned int numPlanes = multiPlanar ? buf.length : 1; if (numPlanes == 0 || numPlanes > VIDEO_MAX_PLANES) { LOG(V4L2, Error) << "Invalid number of planes"; return nullptr; } std::vector<FrameBuffer::Plane> planes; for (unsigned int nplane = 0; nplane < numPlanes; nplane++) { FileDescriptor fd = exportDmabufFd(buf.index, nplane); if (!fd.isValid()) return nullptr; FrameBuffer::Plane plane; plane.fd = std::move(fd); plane.length = multiPlanar ? buf.m.planes[nplane].length : buf.length; planes.push_back(std::move(plane)); } return std::make_unique<FrameBuffer>(std::move(planes)); } FileDescriptor V4L2VideoDevice::exportDmabufFd(unsigned int index, unsigned int plane) { struct v4l2_exportbuffer expbuf = {}; int ret; expbuf.type = bufferType_; expbuf.index = index; expbuf.plane = plane; expbuf.flags = O_RDWR; ret = ioctl(VIDIOC_EXPBUF, &expbuf); if (ret < 0) { LOG(V4L2, Error) << "Failed to export buffer: " << strerror(-ret); return FileDescriptor(); } return FileDescriptor(std::move(expbuf.fd)); } /** * \brief Prepare the device to import \a count buffers * \param[in] count Number of buffers to prepare to import * * This function initializes the driver's buffer management to import buffers * in DMABUF mode. It requests buffers from the driver, but doesn't allocate * memory. * * Upon successful return, the video device is ready to accept queueBuffer() * calls. The buffers to be imported are provided to queueBuffer(), and may be * supplied externally, or come from a previous exportBuffers() call. 
* * Device initialization performed by this function shall later be cleaned up * with releaseBuffers(). If buffers have already been allocated with * allocateBuffers() or imported with importBuffers(), this function returns * -EBUSY. * * \return 0 on success or a negative error code otherwise * \retval -EBUSY buffers have already been allocated or imported */ int V4L2VideoDevice::importBuffers(unsigned int count) { if (cache_) { LOG(V4L2, Error) << "Buffers already allocated"; return -EINVAL; } memoryType_ = V4L2_MEMORY_DMABUF; int ret = requestBuffers(count, V4L2_MEMORY_DMABUF); if (ret) return ret; cache_ = new V4L2BufferCache(count); LOG(V4L2, Debug) << "Prepared to import " << count << " buffers"; return 0; } /** * \brief Release resources allocated by allocateBuffers() or importBuffers() * * This function resets the driver's internal buffer management that was * initialized by a previous call to allocateBuffers() or importBuffers(). Any * memory allocated by allocateBuffers() is freed. Buffer exported by * exportBuffers(), if any, are not affected. */ int V4L2VideoDevice::releaseBuffers() { LOG(V4L2, Debug) << "Releasing buffers"; delete cache_; cache_ = nullptr; return requestBuffers(0, memoryType_); } /** * \brief Queue a buffer to the video device * \param[in] buffer The buffer to be queued * * For capture video devices the \a buffer will be filled with data by the * device. For output video devices the \a buffer shall contain valid data and * will be processed by the device. Once the device has finished processing the * buffer, it will be available for dequeue. * * The best available V4L2 buffer is picked for \a buffer using the V4L2 buffer * cache. * * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::queueBuffer(FrameBuffer *buffer) { struct v4l2_plane v4l2Planes[VIDEO_MAX_PLANES] = {}; struct v4l2_buffer buf = {}; int ret; /* * Pipeline handlers should not requeue buffers after releasing the * buffers on the device. 
Any occurence of this error should be fixed * in the pipeline handler directly. */ if (!cache_) { LOG(V4L2, Fatal) << "No BufferCache available to queue."; return -ENOENT; } ret = cache_->get(*buffer); if (ret < 0) return ret; buf.index = ret; buf.type = bufferType_; buf.memory = memoryType_; buf.field = V4L2_FIELD_NONE; bool multiPlanar = V4L2_TYPE_IS_MULTIPLANAR(buf.type); const std::vector<FrameBuffer::Plane> &planes = buffer->planes(); if (buf.memory == V4L2_MEMORY_DMABUF) { if (multiPlanar) { for (unsigned int p = 0; p < planes.size(); ++p) v4l2Planes[p].m.fd = planes[p].fd.fd(); } else { buf.m.fd = planes[0].fd.fd(); } } if (multiPlanar) { buf.length = planes.size(); buf.m.planes = v4l2Planes; } if (V4L2_TYPE_IS_OUTPUT(buf.type)) { const FrameMetadata &metadata = buffer->metadata(); if (multiPlanar) { unsigned int nplane = 0; for (const FrameMetadata::Plane &plane : metadata.planes) { v4l2Planes[nplane].bytesused = plane.bytesused; v4l2Planes[nplane].length = buffer->planes()[nplane].length; nplane++; } } else { if (metadata.planes.size()) buf.bytesused = metadata.planes[0].bytesused; } buf.sequence = metadata.sequence; buf.timestamp.tv_sec = metadata.timestamp / 1000000000; buf.timestamp.tv_usec = (metadata.timestamp / 1000) % 1000000; } LOG(V4L2, Debug) << "Queueing buffer " << buf.index; ret = ioctl(VIDIOC_QBUF, &buf); if (ret < 0) { LOG(V4L2, Error) << "Failed to queue buffer " << buf.index << ": " << strerror(-ret); return ret; } if (queuedBuffers_.empty()) fdBufferNotifier_->setEnabled(true); queuedBuffers_[buf.index] = buffer; return 0; } /** * \brief Slot to handle completed buffer events from the V4L2 video device * \param[in] notifier The event notifier * * When this slot is called, a Buffer has become available from the device, and * will be emitted through the bufferReady Signal. * * For Capture video devices the FrameBuffer will contain valid data. * For Output video devices the FrameBuffer can be considered empty. 
*/ void V4L2VideoDevice::bufferAvailable([[maybe_unused]] EventNotifier *notifier) { FrameBuffer *buffer = dequeueBuffer(); if (!buffer) return; /* Notify anyone listening to the device. */ bufferReady.emit(buffer); } /** * \brief Dequeue the next available buffer from the video device * * This method dequeues the next available buffer from the device. If no buffer * is available to be dequeued it will return nullptr immediately. * * \return A pointer to the dequeued buffer on success, or nullptr otherwise */ FrameBuffer *V4L2VideoDevice::dequeueBuffer() { struct v4l2_buffer buf = {}; struct v4l2_plane planes[VIDEO_MAX_PLANES] = {}; int ret; buf.type = bufferType_; buf.memory = memoryType_; bool multiPlanar = V4L2_TYPE_IS_MULTIPLANAR(buf.type); if (multiPlanar) { buf.length = VIDEO_MAX_PLANES; buf.m.planes = planes; } ret = ioctl(VIDIOC_DQBUF, &buf); if (ret < 0) { LOG(V4L2, Error) << "Failed to dequeue buffer: " << strerror(-ret); return nullptr; } LOG(V4L2, Debug) << "Dequeuing buffer " << buf.index; cache_->put(buf.index); auto it = queuedBuffers_.find(buf.index); FrameBuffer *buffer = it->second; queuedBuffers_.erase(it); if (queuedBuffers_.empty()) fdBufferNotifier_->setEnabled(false); buffer->metadata_.status = buf.flags & V4L2_BUF_FLAG_ERROR ? 
FrameMetadata::FrameError : FrameMetadata::FrameSuccess; buffer->metadata_.sequence = buf.sequence; buffer->metadata_.timestamp = buf.timestamp.tv_sec * 1000000000ULL + buf.timestamp.tv_usec * 1000ULL; buffer->metadata_.planes.clear(); if (multiPlanar) { for (unsigned int nplane = 0; nplane < buf.length; nplane++) buffer->metadata_.planes.push_back({ planes[nplane].bytesused }); } else { buffer->metadata_.planes.push_back({ buf.bytesused }); } return buffer; } /** * \var V4L2VideoDevice::bufferReady * \brief A Signal emitted when a framebuffer completes */ /** * \brief Start the video stream * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::streamOn() { int ret; ret = ioctl(VIDIOC_STREAMON, &bufferType_); if (ret < 0) { LOG(V4L2, Error) << "Failed to start streaming: " << strerror(-ret); return ret; } streaming_ = true; return 0; } /** * \brief Stop the video stream * * Buffers that are still queued when the video stream is stopped are * immediately dequeued with their status set to FrameMetadata::FrameCancelled, * and the bufferReady signal is emitted for them. The order in which those * buffers are dequeued is not specified. * * This will be a no-op if the stream is not started in the first place and * has no queued buffers. * * \return 0 on success or a negative error code otherwise */ int V4L2VideoDevice::streamOff() { int ret; if (!streaming_ && queuedBuffers_.empty()) return 0; ret = ioctl(VIDIOC_STREAMOFF, &bufferType_); if (ret < 0) { LOG(V4L2, Error) << "Failed to stop streaming: " << strerror(-ret); return ret; } /* Send back all queued buffers. 
*/ for (auto it : queuedBuffers_) { FrameBuffer *buffer = it.second; buffer->metadata_.status = FrameMetadata::FrameCancelled; bufferReady.emit(buffer); } queuedBuffers_.clear(); fdBufferNotifier_->setEnabled(false); streaming_ = false; return 0; } /** * \brief Create a new video device instance from \a entity in media device * \a media * \param[in] media The media device where the entity is registered * \param[in] entity The media entity name * * \return A newly created V4L2VideoDevice on success, nullptr otherwise */ std::unique_ptr<V4L2VideoDevice> V4L2VideoDevice::fromEntityName(const MediaDevice *media, const std::string &entity) { MediaEntity *mediaEntity = media->getEntityByName(entity); if (!mediaEntity) return nullptr; return std::make_unique<V4L2VideoDevice>(mediaEntity); } /** * \brief Convert \a PixelFormat to its corresponding V4L2 FourCC * \param[in] pixelFormat The PixelFormat to convert * * For multiplanar formats, the V4L2 format variant (contiguous or * non-contiguous planes) is selected automatically based on the capabilities * of the video device. If the video device supports the V4L2 multiplanar API, * non-contiguous formats are preferred. * * \return The V4L2_PIX_FMT_* pixel format code corresponding to \a pixelFormat */ V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelFormat) { return V4L2PixelFormat::fromPixelFormat(pixelFormat, caps_.isMultiplanar()); } /** * \class V4L2M2MDevice * \brief Memory-to-Memory video device * * The V4L2M2MDevice manages two V4L2VideoDevice instances on the same * deviceNode which operate together using two queues to implement the V4L2 * Memory to Memory API. * * The two devices should be opened by calling open() on the V4L2M2MDevice, and * can be closed by calling close on the V4L2M2MDevice. * * Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture * or output V4L2VideoDevice is not permitted. 
*/ /** * \fn V4L2M2MDevice::output * \brief Retrieve the output V4L2VideoDevice instance * \return The output V4L2VideoDevice instance */ /** * \fn V4L2M2MDevice::capture * \brief Retrieve the capture V4L2VideoDevice instance * \return The capture V4L2VideoDevice instance */ /** * \brief Create a new V4L2M2MDevice from the \a deviceNode * \param[in] deviceNode The file-system path to the video device node */ V4L2M2MDevice::V4L2M2MDevice(const std::string &deviceNode) : deviceNode_(deviceNode) { output_ = new V4L2VideoDevice(deviceNode); capture_ = new V4L2VideoDevice(deviceNode); } V4L2M2MDevice::~V4L2M2MDevice() { delete capture_; delete output_; } /** * \brief Open a V4L2 Memory to Memory device * * Open the device node and prepare the two V4L2VideoDevice instances to handle * their respective buffer queues. * * \return 0 on success or a negative error code otherwise */ int V4L2M2MDevice::open() { int fd; int ret; /* * The output and capture V4L2VideoDevice instances use the same file * handle for the same device node. The local file handle can be closed * as the V4L2VideoDevice::open() retains a handle by duplicating the * fd passed in. */ fd = syscall(SYS_openat, AT_FDCWD, deviceNode_.c_str(), O_RDWR | O_NONBLOCK); if (fd < 0) { ret = -errno; LOG(V4L2, Error) << "Failed to open V4L2 M2M device: " << strerror(-ret); return ret; } ret = output_->open(fd, V4L2_BUF_TYPE_VIDEO_OUTPUT); if (ret) goto err; ret = capture_->open(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (ret) goto err; ::close(fd); return 0; err: close(); ::close(fd); return ret; } /** * \brief Close the memory-to-memory device, releasing any resources acquired by * open() */ void V4L2M2MDevice::close() { capture_->close(); output_->close(); } } /* namespace libcamera */