/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2021, Red Hat
 *
 * af.cpp - IPU3 auto focus algorithm
 */

#include "af.h"

#include <algorithm>
#include <chrono>
#include <cmath>
#include <fcntl.h>
#include <numeric>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <vector>

#include <linux/videodev2.h>

#include <libcamera/base/log.h>

#include <libcamera/ipa/core_ipa_interface.h>

#include "libipa/histogram.h"

/**
 * \file af.h
 */

/*
 * Static variables from ChromiumOS Intel Camera HAL and ia_imaging library:
 * - https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/hal/intel/psl/ipu3/statsConverter/ipu3-stats.h
 * - https://chromium.googlesource.com/chromiumos/platform/camera/+/refs/heads/main/hal/intel/ipu3/include/ia_imaging/af_public.h
 */

/** The minimum horizontal grid dimension. */
static constexpr uint8_t kAfMinGridWidth = 16;
/** The minimum vertical grid dimension. */
static constexpr uint8_t kAfMinGridHeight = 16;
/** The maximum horizontal grid dimension. */
static constexpr uint8_t kAfMaxGridWidth = 32;
/** The maximum vertical grid dimension. */
static constexpr uint8_t kAfMaxGridHeight = 24;
/** The minimum value of Log2 of the width of the grid cell. */
static constexpr uint16_t kAfMinGridBlockWidth = 4;
/** The minimum value of Log2 of the height of the grid cell. */
static constexpr uint16_t kAfMinGridBlockHeight = 3;
/** The maximum value of Log2 of the width of the grid cell. */
static constexpr uint16_t kAfMaxGridBlockWidth = 6;
/** The maximum value of Log2 of the height of the grid cell. */
static constexpr uint16_t kAfMaxGridBlockHeight = 6;
/** The number of blocks in vertical axis per slice. */
static constexpr uint16_t kAfDefaultHeightPerSlice = 2;

namespace libcamera {

using namespace std::literals::chrono_literals;

namespace ipa::ipu3::algorithms {

LOG_DEFINE_CATEGORY(IPU3Af)

/**
 * Maximum focus steps of the VCM control
 * \todo should be obtained from the VCM driver
 */
static constexpr uint32_t kMaxFocusSteps = 1023;

/* Minimum focus step for searching the appropriate focus */
static constexpr uint32_t kCoarseSearchStep = 30;
static constexpr uint32_t kFineSearchStep = 1;

/* Max ratio of variance change, 0.0 < kMaxChange < 1.0 */
static constexpr double kMaxChange = 0.5;

/* The number of frames to be ignored before performing the focus scan. */
static constexpr uint32_t kIgnoreFrame = 10;

/* Fine scan range 0 < kFineRange < 1 */
static constexpr double kFineRange = 0.05;

/* Settings for the IPU3 AF filter */
static struct ipu3_uapi_af_filter_config afFilterConfigDefault = {
	.y1_coeff_0 = { 0, 1, 3, 7 },
	.y1_coeff_1 = { 11, 13, 1, 2 },
	.y1_coeff_2 = { 8, 19, 34, 242 },
	.y1_sign_vec = 0x7fdffbfe,
	.y2_coeff_0 = { 0, 1, 6, 6 },
	.y2_coeff_1 = { 13, 25, 3, 0 },
	.y2_coeff_2 = { 25, 3, 177, 254 },
	.y2_sign_vec = 0x4e53ca72,
	.y_calc = { 8, 8, 8, 8 },
	.nf = { 0, 9, 0, 9, 0 },
};

/**
 * \class Af
 * \brief An auto-focus algorithm based on IPU3 statistics
 *
 * This algorithm is used to determine the position of the lens that produces
 * a focused image. The IPU3 AF processing block computes statistics that are
 * composed of two types of filtered values and stores them in an AF buffer.
 * Typically, a sharp image has a relatively higher contrast than a blurred
 * one. Therefore, if the image with the highest contrast can be found through
 * a scan, the corresponding lens position produces the sharpest image.
 */
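
/*
 * A minimal sketch of the contrast-maximisation loop implemented below
 * (illustration only: lensMove() and readVariance() are hypothetical
 * placeholders for the VCM control and the statistics readout):
 *
 *   uint32_t step = 0, best = 0;
 *   double maxVar = 0.0;
 *   while (step <= kMaxFocusSteps) {
 *       lensMove(step);
 *       double var = readVariance();
 *       if (var - maxVar < -(maxVar * 0.1))
 *           break;                  // negative derivative: past the peak
 *       best = step;
 *       maxVar = var;
 *       step += kCoarseSearchStep;
 *   }
 *   lensMove(best);                 // move back to the sharpest position
 */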

Af::Af()
	: focus_(0), bestFocus_(0), currentVariance_(0.0),
	  previousVariance_(0.0), coarseCompleted_(false), fineCompleted_(false)
{
}

/**
 * \brief Configure the Af given a configInfo
 * \param[in] context The shared IPA context
 * \param[in] configInfo The IPA configuration data
 * \return 0 on success, a negative error code otherwise
 */
int Af::configure(IPAContext &context, const IPAConfigInfo &configInfo)
{
	struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
	grid.width = kAfMinGridWidth;
	grid.height = kAfMinGridHeight;
	grid.block_width_log2 = kAfMinGridBlockWidth;
	grid.block_height_log2 = kAfMinGridBlockHeight;

	/*
	 * \todo - while this clamping code is effectively a no-op, it satisfies
	 * the compiler that the constant definitions of the hardware limits
	 * are used, and paves the way to support dynamic grid sizing in the
	 * future. While the block_{width,height}_log2 remain assigned to the
	 * minimum, this code should be optimized out by the compiler.
	 */
	grid.width = std::clamp(grid.width, kAfMinGridWidth, kAfMaxGridWidth);
	grid.height = std::clamp(grid.height, kAfMinGridHeight, kAfMaxGridHeight);
	grid.block_width_log2 = std::clamp(grid.block_width_log2,
					   kAfMinGridBlockWidth,
					   kAfMaxGridBlockWidth);
	grid.block_height_log2 = std::clamp(grid.block_height_log2,
					    kAfMinGridBlockHeight,
					    kAfMaxGridBlockHeight);

	grid.height_per_slice = kAfDefaultHeightPerSlice;

	/* Position the AF grid in the center of the BDS output. */
	Rectangle bds(configInfo.bdsOutputSize);
	Size gridSize(grid.width << grid.block_width_log2,
		      grid.height << grid.block_height_log2);

	/*
	 * \todo - Support request metadata
	 * - Set the ROI based on any input controls in the request
	 * - Return the AF ROI as metadata in the Request
	 */
	Rectangle roi = gridSize.centeredTo(bds.center());
	Point start = roi.topLeft();

	/* x_start and y_start should be even */
	grid.x_start = utils::alignDown(start.x, 2);
	grid.y_start = utils::alignDown(start.y, 2);
	grid.y_start |= IPU3_UAPI_GRID_Y_START_EN;

	/* Initial max focus step */
	maxStep_ = kMaxFocusSteps;

	/* Initial frame ignore counter */
	afIgnoreFrameReset();

	/* Initial focus value */
	context.activeState.af.focus = 0;
	/* Maximum variance of the AF statistics */
	context.activeState.af.maxVariance = 0;
	/* The stable AF flag. If true, the AF is in a stable state. */
	context.activeState.af.stable = false;

	return 0;
}
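
/*
 * Worked example for the grid placement in configure() (the BDS output size
 * is hypothetical): the default 16x16 grid of 2^4 x 2^3 pixel blocks covers
 * 256x128 pixels. Centered in a 1280x720 BDS output, the ROI starts at
 * (512, 296); both coordinates are already even, so utils::alignDown()
 * leaves them unchanged.
 */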

/**
 * \copydoc libcamera::ipa::Algorithm::prepare
 */
void Af::prepare(IPAContext &context,
		 [[maybe_unused]] const uint32_t frame,
		 [[maybe_unused]] IPAFrameContext &frameContext,
		 ipu3_uapi_params *params)
{
	const struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid;
	params->acc_param.af.grid_cfg = grid;
	params->acc_param.af.filter_config = afFilterConfigDefault;

	/* Enable the AF processing block */
	params->use.acc_af = 1;
}

/**
 * \brief AF coarse scan
 * \param[in] context The shared IPA context
 *
 * Find a near-focused image using a coarse step. The step is determined by
 * kCoarseSearchStep.
 */
void Af::afCoarseScan(IPAContext &context)
{
	if (coarseCompleted_)
		return;

	if (afNeedIgnoreFrame())
		return;

	if (afScan(context, kCoarseSearchStep)) {
		coarseCompleted_ = true;
		context.activeState.af.maxVariance = 0;
		focus_ = context.activeState.af.focus -
			 (context.activeState.af.focus * kFineRange);
		context.activeState.af.focus = focus_;
		previousVariance_ = 0;
		maxStep_ = std::clamp(focus_ + static_cast<uint32_t>(focus_ * kFineRange),
				      0U, kMaxFocusSteps);
	}
}

/**
 * \brief AF fine scan
 * \param[in] context The shared IPA context
 *
 * Find an optimum lens position by moving one step for each search.
 */
void Af::afFineScan(IPAContext &context)
{
	if (!coarseCompleted_)
		return;

	if (afNeedIgnoreFrame())
		return;

	if (afScan(context, kFineSearchStep)) {
		context.activeState.af.stable = true;
		fineCompleted_ = true;
	}
}

/**
 * \brief AF reset
 * \param[in] context The shared IPA context
 *
 * Reset all the parameters to start the AF process over.
 */
void Af::afReset(IPAContext &context)
{
	if (afNeedIgnoreFrame())
		return;

	context.activeState.af.maxVariance = 0;
	context.activeState.af.focus = 0;
	focus_ = 0;
	context.activeState.af.stable = false;
	ignoreCounter_ = kIgnoreFrame;
	previousVariance_ = 0.0;
	coarseCompleted_ = false;
	fineCompleted_ = false;
	maxStep_ = kMaxFocusSteps;
}

/**
 * \brief AF variance comparison
 * \param[in] context The IPA context
 * \param[in] min_step The VCM movement step
 *
 * We always pick the largest variance to replace the previous one. An image
 * with a larger variance also indicates that it is sharper than the previous
 * one. If we find a negative derivative, we return immediately.
 *
 * \return True if it finds an AF value
 */
bool Af::afScan(IPAContext &context, int min_step)
{
	if (focus_ > maxStep_) {
		/* If the max step is reached, move the lens to the best position. */
		context.activeState.af.focus = bestFocus_;
		return true;
	} else {
		/*
		 * Find the maximum of the variance by estimating its
		 * derivative. If the direction changes, it means we have
		 * passed a maximum one step before.
		 */
		if ((currentVariance_ - context.activeState.af.maxVariance) >=
		    -(context.activeState.af.maxVariance * 0.1)) {
			/*
			 * Positive or zero derivative:
			 * The variance is still increasing. The focus could be
			 * increased for the next comparison. Also, the max
			 * variance and previous focus value are updated.
			 */
			bestFocus_ = focus_;
			focus_ += min_step;
			context.activeState.af.focus = focus_;
			context.activeState.af.maxVariance = currentVariance_;
		} else {
			/*
			 * Negative derivative:
			 * The variance starts to decrease, which means the
			 * maximum variance is found. Set the focus step to the
			 * previous good one, then return immediately.
			 */
			context.activeState.af.focus = bestFocus_;
			return true;
		}
	}
	previousVariance_ = currentVariance_;
	LOG(IPU3Af, Debug) << " Previous step is " << bestFocus_
			   << " Current step is " << focus_;
	return false;
}
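
/*
 * Numeric example for the derivative test in afScan() (values are
 * illustrative): with maxVariance = 1000, the tolerance term is
 * -(1000 * 0.1) = -100. A new variance of 920 gives 920 - 1000 = -80 >= -100,
 * so the scan continues; a drop to 880 gives -120 < -100, which terminates
 * the scan at the previous best focus step.
 */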

/**
 * \brief Determine if the frame should be ignored
 * \return True if the frame should be ignored, false otherwise
 */
bool Af::afNeedIgnoreFrame()
{
	if (ignoreCounter_ == 0)
		return false;
	else
		ignoreCounter_--;
	return true;
}

/**
 * \brief Reset the frame ignore counter
 */
void Af::afIgnoreFrameReset()
{
	ignoreCounter_ = kIgnoreFrame;
}

/**
 * \brief Estimate the variance
 * \param[in] y_items The AF filter data set from the IPU3 statistics buffer
 * \param[in] isY1 Selects between filter Y1 or Y2 to calculate the variance
 *
 * Calculate the mean of the data set provided by \a y_items, and then
 * calculate the variance of that data set from the mean.
 *
 * The operation can work on one of two sets of values contained within the
 * \a y_items data set supplied by the IPU3. The two data sets are the results
 * of both the Y1 and Y2 filters, which are used to support coarse (Y1) and
 * fine (Y2) calculations of the contrast.
 *
 * \return The variance of the values in the data set \a y_items selected by \a isY1
 */
double Af::afEstimateVariance(Span<const y_table_item_t> y_items, bool isY1)
{
	uint32_t total = 0;
	double mean;
	double var_sum = 0;

	for (auto y : y_items)
		total += isY1 ? y.y1_avg : y.y2_avg;

	/* Divide in floating point to avoid truncating the mean. */
	mean = static_cast<double>(total) / y_items.size();

	for (auto y : y_items) {
		double avg = isY1 ? y.y1_avg : y.y2_avg;
		var_sum += pow(avg - mean, 2);
	}

	return var_sum / y_items.size();
}
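
/*
 * Worked example for afEstimateVariance() (the filter averages are
 * illustrative): for the values { 4, 6, 10 }, the mean is 20 / 3 ~= 6.67 and
 * the variance is ((4 - 6.67)^2 + (6 - 6.67)^2 + (10 - 6.67)^2) / 3 ~= 6.22.
 */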

/**
 * \brief Determine the out-of-focus situation
 * \param[in] context The IPA context
 *
 * Out-of-focus means that the rate of change between the focused variance
 * and a new variance is greater than a threshold.
 *
 * \return True if the variance threshold is crossed, indicating lost focus,
 * false otherwise
 */
bool Af::afIsOutOfFocus(IPAContext &context)
{
	const double diff_var = std::abs(currentVariance_ -
					 context.activeState.af.maxVariance);
	const double var_ratio = diff_var / context.activeState.af.maxVariance;

	LOG(IPU3Af, Debug) << "Variance change rate: " << var_ratio
			   << " Current VCM step: "
			   << context.activeState.af.focus;

	return var_ratio > kMaxChange;
}

/**
 * \brief Determine the max contrast image and lens position
 * \param[in] context The IPA context
 * \param[in] frame The frame context sequence number
 * \param[in] frameContext The current frame context
 * \param[in] stats The statistics buffer of the IPU3
 * \param[out] metadata Metadata for the frame, to be filled by the algorithm
 *
 * Ideally, a clear image also has a relatively higher contrast. So, the image
 * at each focus step is tested to find an optimal focus step.
 *
 * The Hill Climbing Algorithm [1] is used to find the maximum variance of the
 * AF statistics, which are the AF output of the IPU3. The focus step is
 * increased, then the variance of the AF statistics is estimated. If a
 * negative derivative is found, we have just passed the peak, and we infer
 * that the best focus is found.
 *
 * [1] Hill Climbing Algorithm, https://en.wikipedia.org/wiki/Hill_climbing
 */
void Af::process(IPAContext &context, [[maybe_unused]] const uint32_t frame,
		 [[maybe_unused]] IPAFrameContext &frameContext,
		 const ipu3_uapi_stats_3a *stats,
		 [[maybe_unused]] ControlList &metadata)
{
	/* Evaluate the AF buffer length */
	uint32_t afRawBufferLen = context.configuration.af.afGrid.width *
				  context.configuration.af.afGrid.height;

	ASSERT(afRawBufferLen < IPU3_UAPI_AF_Y_TABLE_MAX_SIZE);

	Span<const y_table_item_t> y_items(reinterpret_cast<const y_table_item_t *>(
						   &stats->af_raw_buffer.y_table),
					   afRawBufferLen);

	/*
	 * Calculate the mean and the variance of the AF statistics for a
	 * given grid.
	 * For the coarse scan, the y1 values are used.
	 * For the fine scan, the y2 values are used.
	 */
	currentVariance_ = afEstimateVariance(y_items, !coarseCompleted_);

	if (!context.activeState.af.stable) {
		afCoarseScan(context);
		afFineScan(context);
	} else {
		if (afIsOutOfFocus(context))
			afReset(context);
		else
			afIgnoreFrameReset();
	}
}

REGISTER_IPA_ALGORITHM(Af, "Af")

} /* namespace ipa::ipu3::algorithms */

} /* namespace libcamera */
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2019, Google Inc.
 *
 * ipu3.cpp - Pipeline handler for Intel IPU3
 */

#include <algorithm>
#include <iomanip>
#include <memory>
#include <queue>
#include <vector>

#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/ipa/ipu3.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>

#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/log.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/utils.h"
#include "libcamera/internal/v4l2_controls.h"

#include "cio2.h"
#include "frames.h"
#include "imgu.h"

namespace libcamera {

LOG_DEFINE_CATEGORY(IPU3)

static constexpr unsigned int IPU3_BUFFER_COUNT = 4;
static constexpr unsigned int IPU3_MAX_STREAMS = 3;
static const Size IMGU_OUTPUT_MIN_SIZE = { 2, 2 };
static const Size IMGU_OUTPUT_MAX_SIZE = { 4480, 34004 };
static constexpr unsigned int IMGU_OUTPUT_WIDTH_ALIGN = 64;
static constexpr unsigned int IMGU_OUTPUT_HEIGHT_ALIGN = 4;
static constexpr unsigned int IMGU_OUTPUT_WIDTH_MARGIN = 64;
static constexpr unsigned int IMGU_OUTPUT_HEIGHT_MARGIN = 32;
static constexpr Size IPU3ViewfinderSize(1280, 720);

static const ControlInfoMap::Map IPU3Controls = {
	{ &controls::draft::PipelineDepth, ControlInfo(2, 3) },
};

class IPU3CameraData : public CameraData
{
public:
	IPU3CameraData(PipelineHandler *pipe)
		: CameraData(pipe), exposureTime_(0)
	{
	}

	int loadIPA();

	void imguOutputBufferReady(FrameBuffer *buffer);
	void cio2BufferReady(FrameBuffer *buffer);
	void paramBufferReady(FrameBuffer *buffer);
	void statBufferReady(FrameBuffer *buffer);

	CIO2Device cio2_;
	ImgUDevice *imgu_;

	Stream outStream_;
	Stream vfStream_;
	Stream rawStream_;

	uint32_t exposureTime_;
	Rectangle cropRegion_;
	std::unique_ptr<DelayedControls> delayedCtrls_;
	IPU3Frames frameInfos_;

private:
	void queueFrameAction(unsigned int id, const IPAOperationData &op);
};

class IPU3CameraConfiguration : public CameraConfiguration
{
public:
	IPU3CameraConfiguration(IPU3CameraData *data);

	Status validate() override;

	const StreamConfiguration &cio2Format() const { return cio2Configuration_; }
	const ImgUDevice::PipeConfig imguConfig() const { return pipeConfig_; }

private:
	/*
	 * The IPU3CameraData instance is guaranteed to be valid as long as the
	 * corresponding Camera instance is valid. In order to borrow a
	 * reference to the camera data, store a new reference to the camera.
	 */
	const IPU3CameraData *data_;

	StreamConfiguration cio2Configuration_;
	ImgUDevice::PipeConfig pipeConfig_;
};

class PipelineHandlerIPU3 : public PipelineHandler
{
public:
	static constexpr unsigned int V4L2_CID_IPU3_PIPE_MODE = 0x009819c1;

	enum IPU3PipeModes {
		IPU3PipeModeVideo = 0,
		IPU3PipeModeStillCapture = 1,
	};

	PipelineHandlerIPU3(CameraManager *manager);

	CameraConfiguration *generateConfiguration(Camera *camera,
		const StreamRoles &roles) override;
	int configure(Camera *camera, CameraConfiguration *config) override;

	int exportFrameBuffers(Camera *camera, Stream *stream,
			       std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;

	int start(Camera *camera, ControlList *controls) override;
	void stop(Camera *camera) override;

	int queueRequestDevice(Camera *camera, Request *request) override;

	bool match(DeviceEnumerator *enumerator) override;

private:
	IPU3CameraData *cameraData(const Camera *camera)
	{
		return static_cast<IPU3CameraData *>(
			PipelineHandler::cameraData(camera));
	}

	int initControls(IPU3CameraData *data);
	int registerCameras();

	int allocateBuffers(Camera *camera);
	int freeBuffers(Camera *camera);

	ImgUDevice imgu0_;
	ImgUDevice imgu1_;
	MediaDevice *cio2MediaDev_;
	MediaDevice *imguMediaDev_;

	std::vector<IPABuffer> ipaBuffers_;
};

IPU3CameraConfiguration::IPU3CameraConfiguration(IPU3CameraData *data)
	: CameraConfiguration()
{
	data_ = data;
}

CameraConfiguration::Status IPU3CameraConfiguration::validate()
{
	Status status = Valid;

	if (config_.empty())
		return Invalid;

	if (transform != Transform::Identity) {
		transform = Transform::Identity;
		status = Adjusted;
	}

	/* Cap the number of entries to the available streams. */
	if (config_.size() > IPU3_MAX_STREAMS) {
		config_.resize(IPU3_MAX_STREAMS);
		status = Adjusted;
	}

	/* Validate the requested stream configuration */
	unsigned int rawCount = 0;
	unsigned int yuvCount = 0;
	Size maxYuvSize;
	Size rawSize;

	for (const StreamConfiguration &cfg : config_) {
		const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);

		if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
			rawCount++;
			rawSize = cfg.size;
		} else {
			yuvCount++;
			maxYuvSize.expandTo(cfg.size);
		}
	}

	if (rawCount > 1 || yuvCount > 2) {
		LOG(IPU3, Debug) << "Camera configuration not supported";
		return Invalid;
	}

	/*
	 * Generate raw configuration from CIO2.
	 *
	 * \todo The image sensor frame size should be selected to optimize
	 * operations based on the sizes of the requested streams. However such
	 * a selection makes the pipeline configuration procedure fail for small
	 * resolutions (for example: 640x480 with OV5670) and causes the capture
	 * operations to stall for some stream size combinations (see the
	 * commit message of the patch that introduced this comment for more
	 * failure examples).
	 *
	 * Until the sensor frame size calculation criteria are clarified, when
	 * capturing from ImgU always use the largest possible size which
	 * guarantees better results at the expense of the frame rate and CSI-2
	 * bus bandwidth. When only a raw stream is requested, use the requested
	 * size instead, as the ImgU is not involved.
	 */
	if (!yuvCount)
		cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
	else
		cio2Configuration_ = data_->cio2_.generateConfiguration({});
	if (!cio2Configuration_.pixelFormat.isValid())
		return Invalid;

	LOG(IPU3, Debug) << "CIO2 configuration: " << cio2Configuration_.toString();

	ImgUDevice::Pipe pipe{};
	pipe.input = cio2Configuration_.size;

	/*
	 * Adjust the configurations if needed and assign streams while
	 * iterating them.
	 */
	bool mainOutputAvailable = true;
	for (unsigned int i = 0; i < config_.size(); ++i) {
		const PixelFormatInfo &info = PixelFormatInfo::info(config_[i].pixelFormat);
		const StreamConfiguration originalCfg = config_[i];
		StreamConfiguration *cfg = &config_[i];

		LOG(IPU3, Debug) << "Validating stream: " << config_[i].toString();

		if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
			/* Initialize the RAW stream with the CIO2 configuration. */
			cfg->size = cio2Configuration_.size;
			cfg->pixelFormat = cio2Configuration_.pixelFormat;
			cfg->bufferCount = cio2Configuration_.bufferCount;
			cfg->stride = info.stride(cfg->size.width, 0, 64);
			cfg->frameSize = info.frameSize(cfg->size, 64);
			cfg->setStream(const_cast<Stream *>(&data_->rawStream_));

			LOG(IPU3, Debug) << "Assigned " << cfg->toString()
					 << " to the raw stream";
		} else {
			/* Assign and configure the main and viewfinder outputs. */

			/*
			 * Clamp the size to match the ImgU size limits and the
			 * margins from the CIO2 output frame size.
			 *
			 * The ImgU outputs need to be strictly smaller than
			 * the CIO2 output frame and rounded down to 64 pixels
			 * in width and 32 pixels in height. This assumption
			 * comes from inspecting the pipe configuration script
			 * results and the available suggested configurations in
			 * the ChromeOS BSP .xml camera tuning files and shall
			 * be validated.
			 *
			 * \todo Clarify what hardware constraints, if any,
			 * require these alignments. It might depend on the BDS
			 * scaling factor of 1/32, as the main output has no
			 * YUV scaler, unlike the viewfinder output.
			 */
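			/*
			 * Worked example (the CIO2 size is hypothetical): with
			 * a 2592x1944 CIO2 output, the width limit below is
			 * alignDown(2591, 64) = 2560 and the height limit is
			 * alignDown(1943, 32) = 1920. A requested 1920x1080
			 * stream is thus left unchanged, while a 2592x1944
			 * request is adjusted to 2560x1920.
			 */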
			unsigned int limit;
			limit = utils::alignDown(cio2Configuration_.size.width - 1,
						 IMGU_OUTPUT_WIDTH_MARGIN);
			cfg->size.width = std::clamp(cfg->size.width,
						     IMGU_OUTPUT_MIN_SIZE.width,
						     limit);

			limit = utils::alignDown(cio2Configuration_.size.height - 1,
						 IMGU_OUTPUT_HEIGHT_MARGIN);
			cfg->size.height = std::clamp(cfg->size.height,
						      IMGU_OUTPUT_MIN_SIZE.height,
						      limit);

			cfg->size.alignDownTo(IMGU_OUTPUT_WIDTH_ALIGN,
					      IMGU_OUTPUT_HEIGHT_ALIGN);

			cfg->pixelFormat = formats::NV12;
			cfg->bufferCount = IPU3_BUFFER_COUNT;
			cfg->stride = info.stride(cfg->size.width, 0, 1);
			cfg->frameSize = info.frameSize(cfg->size, 1);

			/*
			 * Use the main output stream in case only one stream is
			 * requested or if the current configuration is the one
			 * with the maximum YUV output size.
			 */
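			/*
			 * For example (sizes hypothetical), with 1920x1080 and
			 * 1280x720 YUV streams requested, the 1920x1080 stream
			 * matches maxYuvSize and is assigned to the main
			 * output; the 1280x720 stream then becomes the
			 * viewfinder output.
			 */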
			if (mainOutputAvailable &&
			    (originalCfg.size == maxYuvSize || yuvCount == 1)) {
				cfg->setStream(const_cast<Stream *>(&data_->outStream_));
				mainOutputAvailable = false;

				pipe.main = cfg->size;
				if (yuvCount == 1)
					pipe.viewfinder = pipe.main;

				LOG(IPU3, Debug) << "Assigned " << cfg->toString()
						 << " to the main output";
			} else {
				cfg->setStream(const_cast<Stream *>(&data_->vfStream_));
				pipe.viewfinder = cfg->size;

				LOG(IPU3, Debug) << "Assigned " << cfg->toString()
						 << " to the viewfinder output";
			}
		}

		if (cfg->pixelFormat != originalCfg.pixelFormat ||
		    cfg->size != originalCfg.size) {
			LOG(IPU3, Debug)
				<< "Stream " << i << " configuration adjusted to "
				<< cfg->toString();
			status = Adjusted;
		}
	}

	/* Only compute the ImgU configuration if a YUV stream has been requested. */
	if (yuvCount) {
		pipeConfig_ = data_->imgu_->calculatePipeConfig(&pipe);
		if (pipeConfig_.isNull()) {
			LOG(IPU3, Error) << "Failed to calculate pipe configuration: "
					 << "unsupported resolutions.";
			return Invalid;
		}
	}

	return status;
}

PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
	: PipelineHandler(manager), cio2MediaDev_(nullptr), imguMediaDev_(nullptr)
{
}

CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
								const StreamRoles &roles)
{
	IPU3CameraData *data = cameraData(camera);
	IPU3CameraConfiguration *config = new IPU3CameraConfiguration(data);

	if (roles.empty())
		return config;

	Size sensorResolution = data->cio2_.sensor()->resolution();
	for (const StreamRole role : roles) {
		std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
		unsigned int bufferCount;
		PixelFormat pixelFormat;
		Size size;

		switch (role) {
		case StreamRole::StillCapture:
			/*
			 * Use as the default full-frame configuration a size
			 * strictly smaller than the sensor resolution (limited
			 * to the ImgU maximum output size) and aligned down to
			 * the required frame margins.
			 *
			 * \todo Clarify the alignment constraints as explained
			 * in validate()
			 */
			size = sensorResolution.boundedTo(IMGU_OUTPUT_MAX_SIZE);
			size.width = utils::alignDown(size.width - 1,
						      IMGU_OUTPUT_WIDTH_MARGIN);
			size.height = utils::alignDown(size.height - 1,
						       IMGU_OUTPUT_HEIGHT_MARGIN);
			pixelFormat = formats::NV12;
			bufferCount = IPU3_BUFFER_COUNT;
			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };

			break;

		case StreamRole::Raw: {
			StreamConfiguration cio2Config =
				data->cio2_.generateConfiguration(sensorResolution);
			pixelFormat = cio2Config.pixelFormat;
			size = cio2Config.size;
			bufferCount = cio2Config.bufferCount;

			for (const PixelFormat &format : data->cio2_.formats())
				streamFormats[format] = data->cio2_.sizes();

			break;
		}

		case StreamRole::Viewfinder:
		case StreamRole::VideoRecording: {
			/*
			 * Default the viewfinder and video recording streams
			 * to 1280x720,
			 * capped to the maximum sensor resolution and aligned
			 * to the ImgU output constraints.
			 */
			size = sensorResolution.boundedTo(IPU3ViewfinderSize)
					       .alignedDownTo(IMGU_OUTPUT_WIDTH_ALIGN,
							      IMGU_OUTPUT_HEIGHT_ALIGN);
			pixelFormat = formats::NV12;
			bufferCount = IPU3_BUFFER_COUNT;
			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };

			break;
		}

		default:
			LOG(IPU3, Error)
				<< "Requested stream role not supported: " << role;
			delete config;
			return nullptr;
		}

		StreamFormats formats(streamFormats);
		StreamConfiguration cfg(formats);
		cfg.size = size;
		cfg.pixelFormat = pixelFormat;
		cfg.bufferCount = bufferCount;
		config->addConfiguration(cfg);
	}

	if (config->validate() == CameraConfiguration::Invalid) {
		delete config;
		return nullptr;
	}

	return config;
}
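
/*
 * A hypothetical usage sketch of the flow implemented above, as an
 * application would drive it through the public API (camera acquisition and
 * error handling omitted for brevity):
 *
 *   std::unique_ptr<CameraConfiguration> config =
 *           camera->generateConfiguration({ StreamRole::Viewfinder });
 *   StreamConfiguration &cfg = config->at(0);
 *   cfg.size = Size(1280, 720);
 *   if (config->validate() != CameraConfiguration::Invalid)
 *           camera->configure(config.get());
 */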

int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
{
	IPU3CameraConfiguration *config =
		static_cast<IPU3CameraConfiguration *>(c);
	IPU3CameraData *data = cameraData(camera);
	Stream *outStream = &data->outStream_;
	Stream *vfStream = &data->vfStream_;
	CIO2Device *cio2 = &data->cio2_;
	ImgUDevice *imgu = data->imgu_;
	V4L2DeviceFormat outputFormat;
	int ret;

	/*
	 * FIXME: enabled links in one ImgU pipe interfere with capture
	 * operations on the other one. This can be easily triggered by
	 * capturing from one camera and then trying to capture from the other
	 * one right after, without disabling media links on the first used
	 * pipe.
	 *
	 * The tricky part here is where to disable links on the ImgU instance
	 * which is currently not in use:
	 * 1) Link enable/disable cannot be done at start()/stop() time, as
	 * video devices need to be linked before formats can be configured on
	 * them.
	 * 2) As links have to be enabled at the latest in configure(), before
	 * formats are configured, the only place to disable links would be
	 * stop(), but the Camera class state machine allows start()<->stop()
	 * sequences without any configure() in between.
	 *
	 * As of now, disable all links in the ImgU media graph before
	 * configuring the device, to allow alternating between the two
	 * ImgU pipes.
	 *
	 * As a consequence, a Camera using an ImgU shall be configured before
	 * any start()/stop() sequence. An application that wants to
	 * pre-configure all the cameras and then start/stop them alternately
	 * without going through any re-configuration (a sequence that is
	 * allowed by the Camera state machine) would now fail on the IPU3.
	 */
	ret = imguMediaDev_->disableLinks();
	if (ret)
		return ret;

	/*
	 * \todo Enable links selectively based on the requested streams.
	 * As of now, enable all links unconditionally.
	 * \todo Don't configure the ImgU at all if we only have a single
	 * stream which is for raw capture, in which case no buffers will
	 * ever be queued to the ImgU.
	 */
	ret = data->imgu_->enableLinks(true);
	if (ret)
		return ret;

	/*
	 * Pass the requested stream size to the CIO2 unit and get back the
	 * adjusted format to be propagated to the ImgU output devices.
	 */
	const Size &sensorSize = config->cio2Format().size;
	V4L2DeviceFormat cio2Format;
	ret = cio2->configure(sensorSize, &cio2Format);
	if (ret)
		return ret;

	CameraSensorInfo sensorInfo;
	cio2->sensor()->sensorInfo(&sensorInfo);
	data->cropRegion_ = sensorInfo.analogCrop;

	/*
	 * If the ImgU gets configured, its driver seems to expect that
	 * buffers will be queued to its outputs, as otherwise the next
	 * capture session that uses the ImgU fails when queueing
	 * buffers to its input.
	 *
	 * If no ImgU configuration has been computed, it means only a RAW
	 * stream has been requested: return here to skip the ImgU configuration
	 * part.
	 */
	ImgUDevice::PipeConfig imguConfig = config->imguConfig();
	if (imguConfig.isNull())
		return 0;

	ret = imgu->configure(imguConfig, &cio2Format);
	if (ret)
		return ret;

	/* Apply the format to the configured streams output devices. */
	StreamConfiguration *mainCfg = nullptr;
	StreamConfiguration *vfCfg = nullptr;

	for (unsigned int i = 0; i < config->size(); ++i) {
		StreamConfiguration &cfg = (*config)[i];
		Stream *stream = cfg.stream();

		if (stream == outStream) {
			mainCfg = &cfg;
			ret = imgu->configureOutput(cfg, &outputFormat);
			if (ret)
				return ret;
		} else if (stream == vfStream) {
			vfCfg = &cfg;
			ret = imgu->configureViewfinder(cfg, &outputFormat);
			if (ret)
				return ret;
		}
	}

	/*
	 * As we need to set the format also on the non-active streams, use
	 * the configuration of the active one for that purpose (there should
	 * be at least one active stream in the configuration request).
	 */
	if (!vfCfg) {
		ret = imgu->configureViewfinder(*mainCfg, &outputFormat);
		if (ret)
			return ret;
	}

	/* Apply the "pipe_mode" control to the ImgU subdevice. */
	ControlList ctrls(imgu->imgu_->controls());
	ctrls.set(V4L2_CID_IPU3_PIPE_MODE,
		  static_cast<int32_t>(vfCfg ? IPU3PipeModeVideo :
				       IPU3PipeModeStillCapture));
	ret = imgu->imgu_->setControls(&ctrls);
	if (ret) {
		LOG(IPU3, Error) << "Unable to set pipe_mode control";
		return ret;
	}

	return 0;
}

int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
					    std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
	IPU3CameraData *data = cameraData(camera);
	unsigned int count = stream->configuration().bufferCount;

	if (stream == &data->outStream_)
		return data->imgu_->output_->exportBuffers(count, buffers);
	else if (stream == &data->vfStream_)
		return data->imgu_->viewfinder_->exportBuffers(count, buffers);
	else if (stream == &data->rawStream_)
		return data->cio2_.exportBuffers(count, buffers);

	return -EINVAL;
}

/**
 * \todo Clarify if 'viewfinder' and 'stat' nodes have to be set up and
 * started even if not in use. As of now, if not properly configured and
 * enabled, the ImgU processing pipeline stalls.
 *
 * In order to be able to start the 'viewfinder' and 'stat' nodes, we need
 * memory to be reserved.
 */
int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
{
	IPU3CameraData *data = cameraData(camera);
	ImgUDevice *imgu = data->imgu_;
	unsigned int bufferCount;
	int ret;

	bufferCount = std::max({
		data->outStream_.configuration().bufferCount,
		data->vfStream_.configuration().bufferCount,
		data->rawStream_.configuration().bufferCount,
	});

	ret = imgu->allocateBuffers(bufferCount);
	if (ret < 0)
		return ret;

	/* Map buffers to the IPA. */
	unsigned int ipaBufferId = 1;

	for (const std::unique_ptr<FrameBuffer> &buffer : imgu->paramBuffers_) {
		buffer->setCookie(ipaBufferId++);
		ipaBuffers_.push_back({
			.id = buffer->cookie(),
			.planes = buffer->planes()
		});
	}

	for (const std::unique_ptr<FrameBuffer> &buffer : imgu->statBuffers_) {
		buffer->setCookie(ipaBufferId++);
		ipaBuffers_.push_back({
			.id = buffer->cookie(),
			.planes = buffer->planes()
		});
	}

	data->ipa_->mapBuffers(ipaBuffers_);

	data->frameInfos_.init(imgu->paramBuffers_, imgu->statBuffers_);

	return 0;
}

int PipelineHandlerIPU3::freeBuffers(Camera *camera)
{
	IPU3CameraData *data = cameraData(camera);

	data->frameInfos_.clear();

	std::vector<unsigned int> ids;
	for (IPABuffer &ipabuf : ipaBuffers_)
		ids.push_back(ipabuf.id);

	data->ipa_->unmapBuffers(ids);
	ipaBuffers_.clear();

	data->imgu_->freeBuffers();

	return 0;
}

int PipelineHandlerIPU3::start(Camera *camera, [[maybe_unused]] ControlList *controls)
{
	IPU3CameraData *data = cameraData(camera);
	CIO2Device *cio2 = &data->cio2_;
	ImgUDevice *imgu = data->imgu_;

	CameraSensorInfo sensorInfo = {};
	std::map<unsigned int, IPAStream> streamConfig;
	std::map<unsigned int, const ControlInfoMap &> entityControls;
	IPAOperationData ipaConfig;
	IPAOperationData result = {};

	int ret;

	/* Allocate buffers for internal pipeline usage. */
	ret = allocateBuffers(camera);
	if (ret)
		return ret;

	IPAOperationData ipaData = {};
	ret = data->ipa_->start(ipaData, nullptr);
	if (ret)
		goto error;

	/*
	 * Start the ImgU video devices. Buffers will be queued to the
	 * ImgU output and viewfinder when requests are queued.
	 */