Diffstat (limited to 'src')
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp  118
1 file changed, 67 insertions(+), 51 deletions(-)
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index b09368ae..3c90bdec 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -250,6 +250,8 @@ public:
std::queue<std::map<unsigned int, FrameBuffer *>> converterQueue_;
private:
+ void tryPipeline(unsigned int code, const Size &size);
+
void converterInputDone(FrameBuffer *buffer);
void converterOutputDone(FrameBuffer *buffer);
};
@@ -466,58 +468,11 @@ int SimpleCameraData::init()
return ret;
/*
- * Enumerate the possible pipeline configurations. For each media bus
- * format supported by the sensor, propagate the formats through the
- * pipeline, and enumerate the corresponding possible V4L2 pixel
- * formats on the video node.
+ * Generate the list of possible pipeline configurations by trying each
+ * media bus format supported by the sensor.
*/
- for (unsigned int code : sensor_->mbusCodes()) {
- V4L2SubdeviceFormat format{};
- format.mbus_code = code;
- format.size = sensor_->resolution();
-
- ret = setupFormats(&format, V4L2Subdevice::TryFormat);
- if (ret < 0) {
- LOG(SimplePipeline, Debug)
- << "Media bus code " << utils::hex(code, 4)
- << " not supported for this pipeline";
- /* Try next mbus_code supported by the sensor */
- continue;
- }
-
- V4L2VideoDevice::Formats videoFormats =
- video_->formats(format.mbus_code);
-
- LOG(SimplePipeline, Debug)
- << "Adding configuration for " << format.size
- << " in pixel formats [ "
- << utils::join(videoFormats, ", ",
- [](const auto &f) {
- return f.first.toString();
- })
- << " ]";
-
- for (const auto &videoFormat : videoFormats) {
- PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
- if (!pixelFormat)
- continue;
-
- Configuration config;
- config.code = code;
- config.captureFormat = pixelFormat;
- config.captureSize = format.size;
-
- if (!converter_) {
- config.outputFormats = { pixelFormat };
- config.outputSizes = config.captureSize;
- } else {
- config.outputFormats = converter_->formats(pixelFormat);
- config.outputSizes = converter_->sizes(format.size);
- }
-
- configs_.push_back(config);
- }
- }
+ for (unsigned int code : sensor_->mbusCodes())
+ tryPipeline(code, sensor_->resolution());
if (configs_.empty()) {
LOG(SimplePipeline, Error) << "No valid configuration found";
@@ -541,6 +496,67 @@ int SimpleCameraData::init()
return 0;
}
+/*
+ * Generate a list of supported pipeline configurations for a sensor media bus
+ * code and size.
+ *
+ * First propagate the media bus code and size through the pipeline from the
+ * camera sensor to the video node. Then, query the video node for all supported
+ * pixel formats compatible with the media bus code. For each pixel format, store
+ * a full pipeline configuration in the configs_ vector.
+ */
+void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
+{
+ /*
+ * Propagate the format through the pipeline, and enumerate the
+ * corresponding possible V4L2 pixel formats on the video node.
+ */
+ V4L2SubdeviceFormat format{};
+ format.mbus_code = code;
+ format.size = size;
+
+ int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
+ if (ret < 0) {
+ /* Pipeline configuration failed, skip this configuration. */
+ LOG(SimplePipeline, Debug)
+ << "Media bus code " << utils::hex(code, 4)
+ << " not supported for this pipeline";
+ return;
+ }
+
+ V4L2VideoDevice::Formats videoFormats = video_->formats(format.mbus_code);
+
+ LOG(SimplePipeline, Debug)
+ << "Adding configuration for " << format.size
+ << " in pixel formats [ "
+ << utils::join(videoFormats, ", ",
+ [](const auto &f) {
+ return f.first.toString();
+ })
+ << " ]";
+
+ for (const auto &videoFormat : videoFormats) {
+ PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
+ if (!pixelFormat)
+ continue;
+
+ Configuration config;
+ config.code = code;
+ config.captureFormat = pixelFormat;
+ config.captureSize = format.size;
+
+ if (!converter_) {
+ config.outputFormats = { pixelFormat };
+ config.outputSizes = config.captureSize;
+ } else {
+ config.outputFormats = converter_->formats(pixelFormat);
+ config.outputSizes = converter_->sizes(format.size);
+ }
+
+ configs_.push_back(config);
+ }
+}
+
int SimpleCameraData::setupLinks()
{
int ret;
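
The stream size is passed to tryPipeline() as a parameter rather than being hard-coded to the sensor resolution, so the same helper can try several sizes per media bus code. A minimal sketch of such a caller, assuming a CameraSensor::sizes(code) accessor that lists the discrete frame sizes supported for a given media bus code:

	for (unsigned int code : sensor_->mbusCodes()) {
		/* Try every discrete sensor size advertised for this code. */
		for (const Size &size : sensor_->sizes(code))
			tryPipeline(code, size);
	}
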
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2021, Ideas on Board Oy
 *
 * kms_sink.cpp - KMS Sink
 */

#include "kms_sink.h"

#include <array>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <limits.h>
#include <memory>
#include <stdint.h>
#include <string.h>

#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
#include <libcamera/stream.h>

#include "drm.h"

KMSSink::KMSSink(const std::string &connectorName)
	: connector_(nullptr), crtc_(nullptr), plane_(nullptr), mode_(nullptr)
{
	int ret = dev_.init();
	if (ret < 0)
		return;

	/*
	 * Find the requested connector. If no specific connector is requested,
	 * pick the first connected connector or, if no connector is connected,
	 * the first connector with unknown status.
	 */
	for (const DRM::Connector &conn : dev_.connectors()) {
		if (!connectorName.empty()) {
			if (conn.name() != connectorName)
				continue;

			connector_ = &conn;
			break;
		}

		if (conn.status() == DRM::Connector::Connected) {
			connector_ = &conn;
			break;
		}

		if (!connector_ && conn.status() == DRM::Connector::Unknown)
			connector_ = &conn;
	}

	if (!connector_) {
		if (!connectorName.empty())
			std::cerr
				<< "Connector " << connectorName << " not found"
				<< std::endl;
		else
			std::cerr << "No connected connector found" << std::endl;
		return;
	}

	dev_.requestComplete.connect(this, &KMSSink::requestComplete);
}

void KMSSink::mapBuffer(libcamera::FrameBuffer *buffer)
{
	std::array<uint32_t, 4> strides = {};

	/* \todo Should libcamera report per-plane strides? */
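	/*
	 * The chroma stride is derived from the luma stride as
	 * stride_ * uvStrideMultiplier / 2: fully planar 4:2:0 and 4:2:2
	 * formats use half the luma stride per chroma plane, semi-planar
	 * 4:2:0 and 4:2:2 formats use the same stride, and semi-planar 4:4:4
	 * formats (NV24, NV42) use twice the luma stride.
	 */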
	unsigned int uvStrideMultiplier;

	switch (format_) {
	case libcamera::formats::NV24:
	case libcamera::formats::NV42:
		uvStrideMultiplier = 4;
		break;
	case libcamera::formats::YUV420:
	case libcamera::formats::YVU420:
	case libcamera::formats::YUV422:
		uvStrideMultiplier = 1;
		break;
	default:
		uvStrideMultiplier = 2;
		break;
	}

	strides[0] = stride_;
	for (unsigned int i = 1; i < buffer->planes().size(); ++i)
		strides[i] = stride_ * uvStrideMultiplier / 2;

	std::unique_ptr<DRM::FrameBuffer> drmBuffer =
		dev_.createFrameBuffer(*buffer, format_, size_, strides);
	if (!drmBuffer)
		return;

	buffers_.emplace(std::piecewise_construct,
			 std::forward_as_tuple(buffer),
			 std::forward_as_tuple(std::move(drmBuffer)));
}

int KMSSink::configure(const libcamera::CameraConfiguration &config)
{
	if (!connector_)
		return -EINVAL;

	crtc_ = nullptr;
	plane_ = nullptr;
	mode_ = nullptr;

	const libcamera::StreamConfiguration &cfg = config.at(0);

	/* Find the mode whose size most closely matches the stream size. */
	const std::vector<DRM::Mode> &modes = connector_->modes();

	unsigned int cfgArea = cfg.size.width * cfg.size.height;
	unsigned int bestDistance = UINT_MAX;

	for (const DRM::Mode &mode : modes) {
		unsigned int modeArea = mode.hdisplay * mode.vdisplay;
		unsigned int distance = modeArea > cfgArea ? modeArea - cfgArea
				      : cfgArea - modeArea;

		if (distance < bestDistance) {
			mode_ = &mode;
			bestDistance = distance;

			/*
			 * If the sizes match exactly, there will be no better
			 * match.
			 */
			if (distance == 0)
				break;
		}
	}

	if (!mode_) {
		std::cerr << "No modes available on the connector" << std::endl;
		return -EINVAL;
	}

	int ret = configurePipeline(cfg.pixelFormat);
	if (ret < 0)
		return ret;

	size_ = cfg.size;
	stride_ = cfg.stride;

	/* Configure color space. */
	colorEncoding_ = std::nullopt;
	colorRange_ = std::nullopt;

	if (cfg.colorSpace->ycbcrEncoding == libcamera::ColorSpace::YcbcrEncoding::None)
		return 0;

	/*
	 * The encoding and range enums are defined in the kernel but not
	 * exposed in public headers.
	 */
	enum drm_color_encoding {
		DRM_COLOR_YCBCR_BT601,
		DRM_COLOR_YCBCR_BT709,
		DRM_COLOR_YCBCR_BT2020,
	};

	enum drm_color_range {
		DRM_COLOR_YCBCR_LIMITED_RANGE,
		DRM_COLOR_YCBCR_FULL_RANGE,
	};
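	/*
	 * The enumerator values must match the kernel definitions, as they
	 * are compared directly against the enum values that the driver
	 * reports for the COLOR_ENCODING and COLOR_RANGE plane properties.
	 */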

	const DRM::Property *colorEncoding = plane_->property("COLOR_ENCODING");
	const DRM::Property *colorRange = plane_->property("COLOR_RANGE");

	if (colorEncoding) {
		drm_color_encoding encoding;

		switch (cfg.colorSpace->ycbcrEncoding) {
		case libcamera::ColorSpace::YcbcrEncoding::Rec601:
		default:
			encoding = DRM_COLOR_YCBCR_BT601;
			break;
		case libcamera::ColorSpace::YcbcrEncoding::Rec709:
			encoding = DRM_COLOR_YCBCR_BT709;
			break;
		case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
			encoding = DRM_COLOR_YCBCR_BT2020;
			break;
		}

		for (const auto &[id, name] : colorEncoding->enums()) {
			if (id == encoding) {
				colorEncoding_ = encoding;
				break;
			}
		}
	}

	if (colorRange) {
		drm_color_range range;

		switch (cfg.colorSpace->range) {
		case libcamera::ColorSpace::Range::Limited:
		default:
			range = DRM_COLOR_YCBCR_LIMITED_RANGE;
			break;
		case libcamera::ColorSpace::Range::Full:
			range = DRM_COLOR_YCBCR_FULL_RANGE;
			break;
		}

		for (const auto &[id, name] : colorRange->enums()) {
			if (id == range) {
				colorRange_ = range;
				break;
			}
		}
	}

	if (!colorEncoding_ || !colorRange_)
		std::cerr << "Color space " << cfg.colorSpace->toString()
			  << " not supported by the display device."
			  << " Colors may be wrong." << std::endl;

	return 0;
}

int KMSSink::selectPipeline(const libcamera::PixelFormat &format)
{
	/*
	 * If the requested format has an alpha channel, also consider the X
	 * variant.
	 */
	libcamera::PixelFormat xFormat;

	switch (format) {
	case libcamera::formats::ABGR8888:
		xFormat = libcamera::formats::XBGR8888;
		break;
	case libcamera::formats::ARGB8888:
		xFormat = libcamera::formats::XRGB8888;
		break;
	case libcamera::formats::BGRA8888:
		xFormat = libcamera::formats::BGRX8888;
		break;
	case libcamera::formats::RGBA8888:
		xFormat = libcamera::formats::RGBX8888;
		break;
	}
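	/*
	 * For formats without an alpha variant, xFormat is left default
	 * constructed (invalid) and will not match any plane format below.
	 */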

	/*
	 * Find a CRTC and plane suitable for the requested format and the
	 * connector at the end of the pipeline. Restrict the search to primary
	 * planes for now.
	 */
	for (const DRM::Encoder *encoder : connector_->encoders()) {
		for (const DRM::Crtc *crtc : encoder->possibleCrtcs()) {
			for (const DRM::Plane *plane : crtc->planes()) {
				if (plane->type() != DRM::Plane::TypePrimary)
					continue;

				if (plane->supportsFormat(format)) {
					crtc_ = crtc;
					plane_ = plane;
					format_ = format;
					return 0;
				}

				if (plane->supportsFormat(xFormat)) {
					crtc_ = crtc;
					plane_ = plane;
					format_ = xFormat;
					return 0;
				}
			}
		}
	}

	return -EPIPE;
}

int KMSSink::configurePipeline(const libcamera::PixelFormat &format)
{
	const int ret = selectPipeline(format);
	if (ret) {
		std::cerr
			<< "Unable to find display pipeline for format "
			<< format << std::endl;

		return ret;
	}

	std::cout
		<< "Using KMS plane " << plane_->id() << ", CRTC " << crtc_->id()
		<< ", connector " << connector_->name()
		<< " (" << connector_->id() << "), mode " << mode_->hdisplay
		<< "x" << mode_->vdisplay << "@" << mode_->vrefresh << std::endl;

	return 0;
}

int KMSSink::start()
{
	int ret = FrameSink::start();
	if (ret < 0)
		return ret;

	/* Disable all CRTCs and planes to start from a known valid state. */
	DRM::AtomicRequest request(&dev_);

	for (const DRM::Crtc &crtc : dev_.crtcs())
		request.addProperty(&crtc, "ACTIVE", 0);

	for (const DRM::Plane &plane : dev_.planes()) {
		request.addProperty(&plane, "CRTC_ID", 0);
		request.addProperty(&plane, "FB_ID", 0);
	}

	ret = request.commit(DRM::AtomicRequest::FlagAllowModeset);
	if (ret < 0) {
		std::cerr
			<< "Failed to disable CRTCs and planes: "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	return 0;
}

int KMSSink::stop()
{
	/* Display pipeline. */
	DRM::AtomicRequest request(&dev_);

	request.addProperty(connector_, "CRTC_ID", 0);
	request.addProperty(crtc_, "ACTIVE", 0);
	request.addProperty(crtc_, "MODE_ID", 0);
	request.addProperty(plane_, "CRTC_ID", 0);
	request.addProperty(plane_, "FB_ID", 0);

	int ret = request.commit(DRM::AtomicRequest::FlagAllowModeset);
	if (ret < 0) {
		std::cerr
			<< "Failed to stop display pipeline: "
			<< strerror(-ret) << std::endl;
		return ret;
	}

	/* Free all buffers. */
	pending_.reset();
	queued_.reset();
	active_.reset();
	buffers_.clear();

	return FrameSink::stop();
}

bool KMSSink::testModeSet(DRM::FrameBuffer *drmBuffer,
			  const libcamera::Rectangle &src,
			  const libcamera::Rectangle &dst)
{
	DRM::AtomicRequest drmRequest{ &dev_ };

	drmRequest.addProperty(connector_, "CRTC_ID", crtc_->id());

	drmRequest.addProperty(crtc_, "ACTIVE", 1);
	drmRequest.addProperty(crtc_, "MODE_ID", mode_->toBlob(&dev_));

	drmRequest.addProperty(plane_, "CRTC_ID", crtc_->id());
	drmRequest.addProperty(plane_, "FB_ID", drmBuffer->id());
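	/* The SRC_* properties are in 16.16 fixed-point pixels. */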
	drmRequest.addProperty(plane_, "SRC_X", src.x << 16);
	drmRequest.addProperty(plane_, "SRC_Y", src.y << 16);
	drmRequest.addProperty(plane_, "SRC_W", src.width << 16);
	drmRequest.addProperty(plane_, "SRC_H", src.height << 16);
	drmRequest.addProperty(plane_, "CRTC_X", dst.x);
	drmRequest.addProperty(plane_, "CRTC_Y", dst.y);
	drmRequest.addProperty(plane_, "CRTC_W", dst.width);
	drmRequest.addProperty(plane_, "CRTC_H", dst.height);

	/* commit() returns 0 on success; negate it to report the test result. */
	return !drmRequest.commit(DRM::AtomicRequest::FlagAllowModeset |
				  DRM::AtomicRequest::FlagTestOnly);
}

bool KMSSink::setupComposition(DRM::FrameBuffer *drmBuffer)
{
	/*
	 * Test composition options, from most to least desirable, to select the
	 * best one.
	 */
	const libcamera::Rectangle framebuffer{ size_ };
	const libcamera::Rectangle display{ 0, 0, mode_->hdisplay, mode_->vdisplay };

	/* 1. Scale the frame buffer to full screen, preserving aspect ratio. */