/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2020, Google Inc.
 *
 * ipa_data_serializer.cpp - Image Processing Algorithm data serializer
 */

#include "libcamera/internal/ipa_data_serializer.h"

#include <unistd.h>

#include <libcamera/base/log.h>

/**
 * \file ipa_data_serializer.h
 * \brief IPA Data Serializer
 */

namespace libcamera {

LOG_DEFINE_CATEGORY(IPADataSerializer)

/**
 * \class IPADataSerializer
 * \brief IPA Data Serializer
 *
 * Static template class that provides functions for serializing and
 * deserializing IPA data.
 *
 * \todo Switch to Span instead of byte and fd vector
 *
 * \todo Harden the vector and map deserializer
 *
 * \todo For SharedFDs, instead of storing a validity flag, store an
 * index into the fd array. This will allow us to use views instead of copying.
 */

namespace {

/**
 * \fn template<typename T> void appendPOD(std::vector<uint8_t> &vec, T val)
 * \brief Append POD to end of byte vector, in little-endian order
 * \tparam T Type of POD to append
 * \param[in] vec Byte vector to append to
 * \param[in] val Value to append
 *
 * This function is meant to be used by the IPA data serializer, and the
 * generated IPA proxies.
 */

/**
 * \fn template<typename T> T readPOD(std::vector<uint8_t>::iterator it, size_t pos,
 * 	std::vector<uint8_t>::iterator end)
 * \brief Read POD from byte vector, in little-endian order
 * \tparam T Type of POD to read
 * \param[in] it Iterator of byte vector to read from
 * \param[in] pos Index in byte vector to read from
 * \param[in] end Iterator marking end of byte vector
 *
 * This function is meant to be used by the IPA data serializer, and the
 * generated IPA proxies.
 *
 * If \a pos plus the byte-width of the desired POD is past \a end, a fatal
 * error will occur, as it means there is insufficient data for
 * deserialization, which should never happen.
 *
 * \return The POD read from \a it at index \a pos
 */

/**
 * \fn template<typename T> T readPOD(std::vector<uint8_t> &vec, size_t pos)
 * \brief Read POD from byte vector, in little-endian order
 * \tparam T Type of POD to read
 * \param[in] vec Byte vector to read from
 * \param[in] pos Index in vec to start reading from
 *
 * This function is meant to be used by the IPA data serializer, and the
 * generated IPA proxies.
 *
 * If \a pos plus the byte-width of the desired POD is past the end of
 * \a vec, a fatal error will occur, as it means there is insufficient data
 * for deserialization, which should never happen.
 *
 * \return The POD read from \a vec at index \a pos
 */

} /* namespace */

/**
 * \fn template<typename T> IPADataSerializer<T>::serialize(
 * 	T data,
 * 	ControlSerializer *cs = nullptr)
 * \brief Serialize an object into byte vector and fd vector
 * \tparam T Type of object to serialize
 * \param[in] data Object to serialize
 * \param[in] cs ControlSerializer
 *
 * \a cs is only necessary if the object type \a T or its members contain
 * ControlList or ControlInfoMap.
 *
 * \return Tuple of byte vector and fd vector, that is the serialized form
 * of \a data
 */

/**
 * \fn template<typename T> IPADataSerializer<T>::deserialize(
 * 	const std::vector<uint8_t> &data,
 * 	ControlSerializer *cs = nullptr)
 * \brief Deserialize byte vector into an object
 * \tparam T Type of object to deserialize to
 * \param[in] data Byte vector to deserialize from
 * \param[in] cs ControlSerializer
 *
 * This version of deserialize() can be used if the object type \a T and its
 * members don't have any SharedFD.
 *
 * \a cs is only necessary if the object type \a T or its members contain
 * ControlList or ControlInfoMap.
 *
 * \return The deserialized object
 */

/**
 * \fn template<typename T> IPADataSerializer<T>::deserialize(
 * 	std::vector<uint8_t>::const_iterator dataBegin,
 * 	std::vector<uint8_t>::const_iterator dataEnd,
 * 	ControlSerializer *cs = nullptr)
 * \brief Deserialize byte vector into an object
 * \tparam T Type of object to deserialize to
 * \param[in] dataBegin Begin iterator of byte vector to deserialize from
 * \param[in] dataEnd End iterator of byte vector to deserialize from
 * \param[in] cs ControlSerializer
 *
 * This version of deserialize() can be used if the object type \a T and its
 * members don't have any SharedFD.
 *
 * \a cs is only necessary if the object type \a T or its members contain
 * ControlList or ControlInfoMap.
 *
 * \return The deserialized object
 */

/**
 * \fn template<typename T> IPADataSerializer<T>::deserialize(
 * 	const std::vector<uint8_t> &data,
 * 	const std::vector<SharedFD> &fds,
 * 	ControlSerializer *cs = nullptr)
 * \brief Deserialize byte vector and fd vector into an object
 * \tparam T Type of object to deserialize to
 * \param[in] data Byte vector to deserialize from
 * \param[in] fds Fd vector to deserialize from
 * \param[in] cs ControlSerializer
 *
 * This version of deserialize() (or the iterator version) must be used if
 * the object type \a T or its members contain SharedFD.
 *
 * \a cs is only necessary if the object type \a T or its members contain
 * ControlList or ControlInfoMap.
 *
 * \return The deserialized object
 */

/**
 * \fn template<typename T> IPADataSerializer<T>::deserialize(
 * 	std::vector<uint8_t>::const_iterator dataBegin,
 * 	std::vector<uint8_t>::const_iterator dataEnd,
 * 	std::vector<SharedFD>::const_iterator fdsBegin,
 * 	std::vector<SharedFD>::const_iterator fdsEnd,
 * 	ControlSerializer *cs = nullptr)
 * \brief Deserialize byte vector and fd vector into an object
 * \tparam T Type of object to deserialize to
 * \param[in] dataBegin Begin iterator of byte vector to deserialize from
 * \param[in] dataEnd End iterator of byte vector to deserialize from
 * \param[in] fdsBegin Begin iterator of fd vector to deserialize from
 * \param[in] fdsEnd End iterator of fd vector to deserialize from
 * \param[in] cs ControlSerializer
 *
 * This version of deserialize() (or the vector version) must be used if
 * the object type \a T or its members contain SharedFD.
 *
 * \a cs is only necessary if the object type \a T or its members contain
 * ControlList or ControlInfoMap.
 *
 * \return The deserialized object
 */
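
/*
 * Example usage (a minimal sketch, not taken from the generated proxies):
 * round-trip a POD value through the serializer. Only the POD
 * specializations defined below are assumed.
 *
 *     std::vector<uint8_t> data;
 *     std::vector<SharedFD> fds;
 *     std::tie(data, fds) = IPADataSerializer<uint32_t>::serialize(42);
 *     uint32_t value = IPADataSerializer<uint32_t>::deserialize(data);
 */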

#ifndef __DOXYGEN__

#define DEFINE_POD_SERIALIZER(type) \
\
template<> \
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>> \
IPADataSerializer<type>::serialize(const type &data, \
				   [[maybe_unused]] ControlSerializer *cs) \
{ \
	std::vector<uint8_t> dataVec; \
	dataVec.reserve(sizeof(type)); \
	appendPOD<type>(dataVec, data); \
\
	return { dataVec, {} }; \
} \
\
template<> \
type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
					  std::vector<uint8_t>::const_iterator dataEnd, \
					  [[maybe_unused]] ControlSerializer *cs) \
{ \
	return readPOD<type>(dataBegin, 0, dataEnd); \
} \
\
template<> \
type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
					  ControlSerializer *cs) \
{ \
	return deserialize(data.cbegin(), data.end(), cs); \
} \
\
template<> \
type IPADataSerializer<type>::deserialize(const std::vector<uint8_t> &data, \
					  [[maybe_unused]] const std::vector<SharedFD> &fds, \
					  ControlSerializer *cs) \
{ \
	return deserialize(data.cbegin(), data.end(), cs); \
} \
\
template<> \
type IPADataSerializer<type>::deserialize(std::vector<uint8_t>::const_iterator dataBegin, \
					  std::vector<uint8_t>::const_iterator dataEnd, \
					  [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin, \
					  [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd, \
					  ControlSerializer *cs) \
{ \
	return deserialize(dataBegin, dataEnd, cs); \
}

DEFINE_POD_SERIALIZER(bool)
DEFINE_POD_SERIALIZER(uint8_t)
DEFINE_POD_SERIALIZER(uint16_t)
DEFINE_POD_SERIALIZER(uint32_t)
DEFINE_POD_SERIALIZER(uint64_t)
DEFINE_POD_SERIALIZER(int8_t)
DEFINE_POD_SERIALIZER(int16_t)
DEFINE_POD_SERIALIZER(int32_t)
DEFINE_POD_SERIALIZER(int64_t)
DEFINE_POD_SERIALIZER(float)
DEFINE_POD_SERIALIZER(double)

/*
 * Strings are serialized simply as {string.cbegin(), string.end()}. The
 * size of the string is recorded by the container (struct, vector, map, or
 * function parameter serdes).
 */
template<>
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
IPADataSerializer<std::string>::serialize(const std::string &data,
					  [[maybe_unused]] ControlSerializer *cs)
{
	return { { data.cbegin(), data.end() }, {} };
}

template<>
std::string
IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
					    [[maybe_unused]] ControlSerializer *cs)
{
	return { data.cbegin(), data.cend() };
}

template<>
std::string
IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					    std::vector<uint8_t>::const_iterator dataEnd,
					    [[maybe_unused]] ControlSerializer *cs)
{
	return { dataBegin, dataEnd };
}

template<>
std::string
IPADataSerializer<std::string>::deserialize(const std::vector<uint8_t> &data,
					    [[maybe_unused]] const std::vector<SharedFD> &fds,
					    [[maybe_unused]] ControlSerializer *cs)
{
	return { data.cbegin(), data.cend() };
}

template<>
std::string
IPADataSerializer<std::string>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					    std::vector<uint8_t>::const_iterator dataEnd,
					    [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
					    [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
					    [[maybe_unused]] ControlSerializer *cs)
{
	return { dataBegin, dataEnd };
}

/*
 * ControlList is serialized as:
 *
 * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
 * 4 bytes - uint32_t Size of serialized ControlList, in bytes
 * X bytes - Serialized ControlInfoMap (using ControlSerializer)
 * X bytes - Serialized ControlList (using ControlSerializer)
 *
 * If data.infoMap() is nullptr, then the default controls::controls will
 * be used. The serialized ControlInfoMap will have zero length.
 */
template<>
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
IPADataSerializer<ControlList>::serialize(const ControlList &data,
					  ControlSerializer *cs)
{
	if (!cs)
		LOG(IPADataSerializer, Fatal)
			<< "ControlSerializer not provided for serialization of ControlList";

	size_t size;
	std::vector<uint8_t> infoData;
	int ret;

	/*
	 * \todo Revisit this opportunistic serialization of the
	 * ControlInfoMap, as it could be fragile
	 */
	if (data.infoMap() && !cs->isCached(*data.infoMap())) {
		size = cs->binarySize(*data.infoMap());
		infoData.resize(size);
		ByteStreamBuffer buffer(infoData.data(), infoData.size());
		ret = cs->serialize(*data.infoMap(), buffer);

		if (ret < 0 || buffer.overflow()) {
			LOG(IPADataSerializer, Error)
				<< "Failed to serialize ControlList's ControlInfoMap";
			return { {}, {} };
		}
	}

	size = cs->binarySize(data);
	std::vector<uint8_t> listData(size);
	ByteStreamBuffer buffer(listData.data(), listData.size());
	ret = cs->serialize(data, buffer);

	if (ret < 0 || buffer.overflow()) {
		LOG(IPADataSerializer, Error) << "Failed to serialize ControlList";
		return { {}, {} };
	}

	std::vector<uint8_t> dataVec;
	dataVec.reserve(8 + infoData.size() + listData.size());
	appendPOD<uint32_t>(dataVec, infoData.size());
	appendPOD<uint32_t>(dataVec, listData.size());
	dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());
	dataVec.insert(dataVec.end(), listData.begin(), listData.end());

	return { dataVec, {} };
}

template<>
ControlList
IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					    std::vector<uint8_t>::const_iterator dataEnd,
					    ControlSerializer *cs)
{
	if (!cs)
		LOG(IPADataSerializer, Fatal)
			<< "ControlSerializer not provided for deserialization of ControlList";

	if (std::distance(dataBegin, dataEnd) < 8)
		return {};

	uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);
	uint32_t listDataSize = readPOD<uint32_t>(dataBegin, 4, dataEnd);

	std::vector<uint8_t>::const_iterator it = dataBegin + 8;

	if (infoDataSize + listDataSize < infoDataSize ||
	    static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize + listDataSize)
		return {};

	if (infoDataSize > 0) {
		ByteStreamBuffer buffer(&*it, infoDataSize);
		ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);
		/* It's fine if map is empty. */
		if (buffer.overflow()) {
			LOG(IPADataSerializer, Error)
				<< "Failed to deserialize ControlList's ControlInfoMap: buffer overflow";
			return ControlList();
		}
	}

	it += infoDataSize;

	ByteStreamBuffer buffer(&*it, listDataSize);
	ControlList list = cs->deserialize<ControlList>(buffer);
	if (buffer.overflow())
		LOG(IPADataSerializer, Error) << "Failed to deserialize ControlList: buffer overflow";

	return list;
}

template<>
ControlList
IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
					    ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), cs);
}

template<>
ControlList
IPADataSerializer<ControlList>::deserialize(const std::vector<uint8_t> &data,
					    [[maybe_unused]] const std::vector<SharedFD> &fds,
					    ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), cs);
}

template<>
ControlList
IPADataSerializer<ControlList>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					    std::vector<uint8_t>::const_iterator dataEnd,
					    [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
					    [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
					    ControlSerializer *cs)
{
	return deserialize(dataBegin, dataEnd, cs);
}

/*
 * const ControlInfoMap is serialized as:
 *
 * 4 bytes - uint32_t Size of serialized ControlInfoMap, in bytes
 * X bytes - Serialized ControlInfoMap (using ControlSerializer)
 */
template<>
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
IPADataSerializer<ControlInfoMap>::serialize(const ControlInfoMap &map,
					     ControlSerializer *cs)
{
	if (!cs)
		LOG(IPADataSerializer, Fatal)
			<< "ControlSerializer not provided for serialization of ControlInfoMap";

	size_t size = cs->binarySize(map);
	std::vector<uint8_t> infoData(size);
	ByteStreamBuffer buffer(infoData.data(), infoData.size());
	int ret = cs->serialize(map, buffer);

	if (ret < 0 || buffer.overflow()) {
		LOG(IPADataSerializer, Error) << "Failed to serialize ControlInfoMap";
		return { {}, {} };
	}

	std::vector<uint8_t> dataVec;
	appendPOD<uint32_t>(dataVec, infoData.size());
	dataVec.insert(dataVec.end(), infoData.begin(), infoData.end());

	return { dataVec, {} };
}

template<>
ControlInfoMap
IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					       std::vector<uint8_t>::const_iterator dataEnd,
					       ControlSerializer *cs)
{
	if (!cs)
		LOG(IPADataSerializer, Fatal)
			<< "ControlSerializer not provided for deserialization of ControlInfoMap";

	if (std::distance(dataBegin, dataEnd) < 4)
		return {};

	uint32_t infoDataSize = readPOD<uint32_t>(dataBegin, 0, dataEnd);

	std::vector<uint8_t>::const_iterator it = dataBegin + 4;

	if (static_cast<uint32_t>(std::distance(it, dataEnd)) < infoDataSize)
		return {};

	ByteStreamBuffer buffer(&*it, infoDataSize);
	ControlInfoMap map = cs->deserialize<ControlInfoMap>(buffer);

	return map;
}

template<>
ControlInfoMap
IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
					       ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), cs);
}

template<>
ControlInfoMap
IPADataSerializer<ControlInfoMap>::deserialize(const std::vector<uint8_t> &data,
					       [[maybe_unused]] const std::vector<SharedFD> &fds,
					       ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), cs);
}

template<>
ControlInfoMap
IPADataSerializer<ControlInfoMap>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
					       std::vector<uint8_t>::const_iterator dataEnd,
					       [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
					       [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
					       ControlSerializer *cs)
{
	return deserialize(dataBegin, dataEnd, cs);
}
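
/*
 * Example usage (a minimal sketch): given a ControlSerializer *cs that is
 * valid on this side of the IPC boundary, a ControlList can be serialized
 * and deserialized as follows. The peer must use a matching serializer.
 *
 *     std::vector<uint8_t> data;
 *     std::vector<SharedFD> fds;
 *     std::tie(data, fds) = IPADataSerializer<ControlList>::serialize(list, cs);
 *     ControlList out = IPADataSerializer<ControlList>::deserialize(data, cs);
 */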

/*
 * SharedFD instances are serialized into four bytes that tell if the SharedFD
 * is valid or not. If it is valid, then for serialization the fd will be
 * written to the fd vector, or for deserialization the fd vector const_iterator
 * will be valid.
 *
 * This validity is necessary so that we don't send -1 fd over sendmsg(). It
 * also allows us to simply send the entire fd vector into the deserializer
 * and it will be recursively consumed as necessary.
 */
template<>
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
IPADataSerializer<SharedFD>::serialize(const SharedFD &data,
				       [[maybe_unused]] ControlSerializer *cs)
{
	std::vector<uint8_t> dataVec;
	std::vector<SharedFD> fdVec;

	/*
	 * Store as uint32_t to prepare for conversion from validity flag
	 * to index, and for alignment.
	 */
	appendPOD<uint32_t>(dataVec, data.isValid());

	if (data.isValid())
		fdVec.push_back(data);

	return { dataVec, fdVec };
}

template<>
SharedFD IPADataSerializer<SharedFD>::deserialize([[maybe_unused]] std::vector<uint8_t>::const_iterator dataBegin,
						  [[maybe_unused]] std::vector<uint8_t>::const_iterator dataEnd,
						  std::vector<SharedFD>::const_iterator fdsBegin,
						  std::vector<SharedFD>::const_iterator fdsEnd,
						  [[maybe_unused]] ControlSerializer *cs)
{
	ASSERT(std::distance(dataBegin, dataEnd) >= 4);

	uint32_t valid = readPOD<uint32_t>(dataBegin, 0, dataEnd);

	ASSERT(!(valid && std::distance(fdsBegin, fdsEnd) < 1));

	return valid ? *fdsBegin : SharedFD();
}

template<>
SharedFD IPADataSerializer<SharedFD>::deserialize(const std::vector<uint8_t> &data,
						  const std::vector<SharedFD> &fds,
						  [[maybe_unused]] ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end());
}

/*
 * FrameBuffer::Plane is serialized as:
 *
 * 4 bytes - SharedFD
 * 4 bytes - uint32_t Offset
 * 4 bytes - uint32_t Length
 */
template<>
std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
IPADataSerializer<FrameBuffer::Plane>::serialize(const FrameBuffer::Plane &data,
						 [[maybe_unused]] ControlSerializer *cs)
{
	std::vector<uint8_t> dataVec;
	std::vector<SharedFD> fdsVec;

	std::vector<uint8_t> fdBuf;
	std::vector<SharedFD> fdFds;
	std::tie(fdBuf, fdFds) = IPADataSerializer<SharedFD>::serialize(data.fd);
	dataVec.insert(dataVec.end(), fdBuf.begin(), fdBuf.end());
	fdsVec.insert(fdsVec.end(), fdFds.begin(), fdFds.end());

	appendPOD<uint32_t>(dataVec, data.offset);
	appendPOD<uint32_t>(dataVec, data.length);

	return { dataVec, fdsVec };
}

template<>
FrameBuffer::Plane
IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_iterator dataBegin,
						   std::vector<uint8_t>::const_iterator dataEnd,
						   std::vector<SharedFD>::const_iterator fdsBegin,
						   [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
						   [[maybe_unused]] ControlSerializer *cs)
{
	FrameBuffer::Plane ret;

	ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4,
							  fdsBegin, fdsBegin + 1);
	ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd);
	ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd);

	return ret;
}

template<>
FrameBuffer::Plane
IPADataSerializer<FrameBuffer::Plane>::deserialize(const std::vector<uint8_t> &data,
						   const std::vector<SharedFD> &fds,
						   ControlSerializer *cs)
{
	return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end(), cs);
}

#endif /* __DOXYGEN__ */

} /* namespace libcamera */
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (C) 2019, Raspberry Pi (Trading) Limited
 *
 * agc.cpp - AGC/AEC control algorithm
 */

#include <map>

#include "linux/bcm2835-isp.h"

#include "libcamera/internal/log.h"

#include "../awb_status.h"
#include "../device_status.h"
#include "../histogram.hpp"
#include "../lux_status.h"
#include "../metadata.hpp"

#include "agc.hpp"

using namespace RPiController;
using namespace libcamera;

LOG_DEFINE_CATEGORY(RPiAgc)

#define NAME "rpi.agc"

#define PIPELINE_BITS 13 // seems to be a 13-bit pipeline

void AgcMeteringMode::Read(boost::property_tree::ptree const &params)
{
	int num = 0;
	for (auto &p : params.get_child("weights")) {
		if (num == AGC_STATS_SIZE)
			throw std::runtime_error("AgcConfig: too many weights");
		weights[num++] = p.second.get_value<double>();
	}
	if (num != AGC_STATS_SIZE)
		throw std::runtime_error("AgcConfig: insufficient weights");
}
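
// Illustrative tuning-file fragment for the metering modes (a sketch based
// on the shipped Raspberry Pi tuning files; names and values may differ):
//
//   "metering_modes": {
//       "centre-weighted": {
//           "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
//       }
//   }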

static std::string
read_metering_modes(std::map<std::string, AgcMeteringMode> &metering_modes,
		    boost::property_tree::ptree const &params)
{
	std::string first;
	for (auto &p : params) {
		AgcMeteringMode metering_mode;
		metering_mode.Read(p.second);
		metering_modes[p.first] = std::move(metering_mode);
		if (first.empty())
			first = p.first;
	}
	return first;
}

static int read_double_list(std::vector<double> &list,
			    boost::property_tree::ptree const &params)
{
	for (auto &p : params)
		list.push_back(p.second.get_value<double>());
	return list.size();
}

void AgcExposureMode::Read(boost::property_tree::ptree const &params)
{
	int num_shutters =
		read_double_list(shutter, params.get_child("shutter"));
	int num_ags = read_double_list(gain, params.get_child("gain"));
	if (num_shutters < 2 || num_ags < 2)
		throw std::runtime_error(
			"AgcConfig: must have at least two entries in exposure profile");
	if (num_shutters != num_ags)
		throw std::runtime_error(
			"AgcConfig: expect same number of exposure and gain entries in exposure profile");
}
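
// Illustrative "exposure_modes" entry (a sketch following the shipped
// tuning files): shutter times in microseconds paired with analogue gains,
// both lists the same length with at least two points each.
//
//   "normal": {
//       "shutter": [100, 10000, 30000, 60000, 120000],
//       "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
//   }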

static std::string
read_exposure_modes(std::map<std::string, AgcExposureMode> &exposure_modes,
		    boost::property_tree::ptree const &params)
{
	std::string first;
	for (auto &p : params) {
		AgcExposureMode exposure_mode;
		exposure_mode.Read(p.second);
		exposure_modes[p.first] = std::move(exposure_mode);
		if (first.empty())
			first = p.first;
	}
	return first;
}

void AgcConstraint::Read(boost::property_tree::ptree const &params)
{
	std::string bound_string = params.get<std::string>("bound", "");
	transform(bound_string.begin(), bound_string.end(),
		  bound_string.begin(), ::toupper);
	if (bound_string != "UPPER" && bound_string != "LOWER")
		throw std::runtime_error(
			"AGC constraint type should be UPPER or LOWER");
	bound = bound_string == "UPPER" ? Bound::UPPER : Bound::LOWER;
	q_lo = params.get<double>("q_lo");
	q_hi = params.get<double>("q_hi");
	Y_target.Read(params.get_child("y_target"));
}
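
// Illustrative constraint entry (a sketch): the y_target is a piecewise
// linear function of lux, given as alternating domain/value pairs.
//
//   { "bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0,
//     "y_target": [0, 0.5, 1000, 0.5] }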

static AgcConstraintMode
read_constraint_mode(boost::property_tree::ptree const &params)
{
	AgcConstraintMode mode;
	for (auto &p : params) {
		AgcConstraint constraint;
		constraint.Read(p.second);
		mode.push_back(std::move(constraint));
	}
	return mode;
}

static std::string read_constraint_modes(
	std::map<std::string, AgcConstraintMode> &constraint_modes,
	boost::property_tree::ptree const &params)
{
	std::string first;
	for (auto &p : params) {
		constraint_modes[p.first] = read_constraint_mode(p.second);
		if (first.empty())
			first = p.first;
	}
	return first;
}

void AgcConfig::Read(boost::property_tree::ptree const &params)
{
	LOG(RPiAgc, Debug) << "AgcConfig";
	default_metering_mode = read_metering_modes(
		metering_modes, params.get_child("metering_modes"));
	default_exposure_mode = read_exposure_modes(
		exposure_modes, params.get_child("exposure_modes"));
	default_constraint_mode = read_constraint_modes(
		constraint_modes, params.get_child("constraint_modes"));
	Y_target.Read(params.get_child("y_target"));
	speed = params.get<double>("speed", 0.2);
	startup_frames = params.get<uint16_t>("startup_frames", 10);
	convergence_frames = params.get<unsigned int>("convergence_frames", 6);
	fast_reduce_threshold =
		params.get<double>("fast_reduce_threshold", 0.4);
	base_ev = params.get<double>("base_ev", 1.0);
	// Start with quite a low value as ramping up is easier than ramping down.
	default_exposure_time = params.get<double>("default_exposure_time", 1000);
	default_analogue_gain = params.get<double>("default_analogue_gain", 1.0);
}

Agc::Agc(Controller *controller)
	: AgcAlgorithm(controller), metering_mode_(nullptr),
	  exposure_mode_(nullptr), constraint_mode_(nullptr),
	  frame_count_(0), lock_count_(0),
	  last_target_exposure_(0.0),
	  ev_(1.0), flicker_period_(0.0),
	  fixed_shutter_(0), fixed_analogue_gain_(0.0)
{
	memset(&awb_, 0, sizeof(awb_));
	// Setting status_.total_exposure_value to zero initially tells us
	// it's not been calculated yet (i.e. Process hasn't yet run).
	memset(&status_, 0, sizeof(status_));
	status_.ev = ev_;
	memset(&last_device_status_, 0, sizeof(last_device_status_));
}

char const *Agc::Name() const
{
	return NAME;
}

void Agc::Read(boost::property_tree::ptree const &params)
{
	LOG(RPiAgc, Debug) << "Agc";
	config_.Read(params);
	// Set the config's defaults (which are the first ones it read) as our
	// current modes, until someone changes them. (They're all known to
	// exist at this point.)
	metering_mode_name_ = config_.default_metering_mode;
	metering_mode_ = &config_.metering_modes[metering_mode_name_];
	exposure_mode_name_ = config_.default_exposure_mode;
	exposure_mode_ = &config_.exposure_modes[exposure_mode_name_];
	constraint_mode_name_ = config_.default_constraint_mode;
	constraint_mode_ = &config_.constraint_modes[constraint_mode_name_];
	// Set up the "last shutter/gain" values, in case AGC starts "disabled".
	status_.shutter_time = config_.default_exposure_time;
	status_.analogue_gain = config_.default_analogue_gain;
}

bool Agc::IsPaused() const
{
	return false;
}

void Agc::Pause()
{
	fixed_shutter_ = status_.shutter_time;
	fixed_analogue_gain_ = status_.analogue_gain;
}

void Agc::Resume()
{
	fixed_shutter_ = 0;
	fixed_analogue_gain_ = 0;
}

unsigned int Agc::GetConvergenceFrames() const
{
	// If shutter and gain have been explicitly set, there is no
	// convergence to happen, so no need to drop any frames - return zero.
	if (fixed_shutter_ && fixed_analogue_gain_)
		return 0;
	else
		return config_.convergence_frames;
}

void Agc::SetEv(double ev)
{
	ev_ = ev;
}

void Agc::SetFlickerPeriod(double flicker_period)
{
	flicker_period_ = flicker_period;
}

void Agc::SetFixedShutter(double fixed_shutter)
{
	fixed_shutter_ = fixed_shutter;
	// Set this in case someone calls Pause() straight after.
	status_.shutter_time = fixed_shutter;
}

void Agc::SetFixedAnalogueGain(double fixed_analogue_gain)
{
	fixed_analogue_gain_ = fixed_analogue_gain;
	// Set this in case someone calls Pause() straight after.
	status_.analogue_gain = fixed_analogue_gain;
}

void Agc::SetMeteringMode(std::string const &metering_mode_name)
{
	metering_mode_name_ = metering_mode_name;
}

void Agc::SetExposureMode(std::string const &exposure_mode_name)
{
	exposure_mode_name_ = exposure_mode_name;
}

void Agc::SetConstraintMode(std::string const &constraint_mode_name)
{
	constraint_mode_name_ = constraint_mode_name;
}

void Agc::SwitchMode([[maybe_unused]] CameraMode const &camera_mode,
		     Metadata *metadata)
{
	housekeepConfig();

	if (fixed_shutter_ != 0.0 && fixed_analogue_gain_ != 0.0) {
		// We're going to reset the algorithm here with these fixed values.

		fetchAwbStatus(metadata);
		double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
		ASSERT(min_colour_gain != 0.0);

		// This is the equivalent of computeTargetExposure and applyDigitalGain.
		target_.total_exposure_no_dg = fixed_shutter_ * fixed_analogue_gain_;
		target_.total_exposure = target_.total_exposure_no_dg / min_colour_gain;

		// Equivalent of filterExposure. This resets any "history".
		filtered_ = target_;

		// Equivalent of divideUpExposure.
		filtered_.shutter = fixed_shutter_;
		filtered_.analogue_gain = fixed_analogue_gain_;
	} else if (status_.total_exposure_value) {
		// On a mode switch, it's possible the exposure profile could change,
		// or a fixed exposure/gain might be set so we divide up the exposure/
		// gain again, but we don't change any target values.
		divideUpExposure();
	} else {
		// We come through here on startup, when at least one of the shutter
		// or gain has not been fixed. We must still write those values out so
		// that they will be applied immediately. We supply some arbitrary defaults
		// for any that weren't set.

		// Equivalent of divideUpExposure.
		filtered_.shutter = fixed_shutter_ ? fixed_shutter_ : config_.default_exposure_time;
		filtered_.analogue_gain = fixed_analogue_gain_ ? fixed_analogue_gain_ : config_.default_analogue_gain;
	}

	writeAndFinish(metadata, false);
}

void Agc::Prepare(Metadata *image_metadata)
{
	status_.digital_gain = 1.0;
	fetchAwbStatus(image_metadata); // always fetch it so that Process knows it's been done

	if (status_.total_exposure_value) {
		// Process has run, so we have meaningful values.
		DeviceStatus device_status;
		if (image_metadata->Get("device.status", device_status) == 0) {
			double actual_exposure = device_status.shutter_speed *
						 device_status.analogue_gain;
			if (actual_exposure) {
				status_.digital_gain =
					status_.total_exposure_value /
					actual_exposure;
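				// E.g. wanting a total exposure of 20000us
				// when shutter * gain only achieved 16000us
				// means asking for a digital gain of 1.25.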
				LOG(RPiAgc, Debug) << "Want total exposure " << status_.total_exposure_value;
				// Never ask for a gain < 1.0, and also impose
				// some upper limit. Make it customisable?
				status_.digital_gain = std::max(
					1.0,
					std::min(status_.digital_gain, 4.0));
				LOG(RPiAgc, Debug) << "Actual exposure " << actual_exposure;
				LOG(RPiAgc, Debug) << "Use digital_gain " << status_.digital_gain;
				LOG(RPiAgc, Debug) << "Effective exposure " << actual_exposure * status_.digital_gain;
				// Decide whether AEC/AGC has converged.
				updateLockStatus(device_status);
			}
		} else
			LOG(RPiAgc, Warning) << Name() << ": no device metadata";
		image_metadata->Set("agc.status", status_);
	}
}

void Agc::Process(StatisticsPtr &stats, Metadata *image_metadata)
{
	frame_count_++;
	// First a little bit of housekeeping, fetching up-to-date settings and
	// configuration, that kind of thing.
	housekeepConfig();
	// Get the current exposure values for the frame that's just arrived.
	fetchCurrentExposure(image_metadata);
	// Compute the total gain we require relative to the current exposure.
	double gain, target_Y;
	computeGain(stats.get(), image_metadata, gain, target_Y);
	// Now compute the target (final) exposure which we think we want.
	computeTargetExposure(gain);
	// Some of the exposure has to be applied as digital gain, so work out
	// what that is. This function also tells us whether it's decided to
	// "desaturate" the image more quickly.
	bool desaturate = applyDigitalGain(gain, target_Y);
	// The results have to be filtered so as not to change too rapidly.
	filterExposure(desaturate);
	// The last thing is to divide up the exposure value into a shutter time
	// and analogue_gain, according to the current exposure mode.
	divideUpExposure();
	// Finally advertise what we've done.
	writeAndFinish(image_metadata, desaturate);
}

void Agc::updateLockStatus(DeviceStatus const &device_status)
{
	const double ERROR_FACTOR = 0.10; // make these customisable?
	const int MAX_LOCK_COUNT = 5;
	// Reset "lock count" when we exceed this multiple of ERROR_FACTOR
	const double RESET_MARGIN = 1.5;

	// Add 200us to the exposure time error to allow for line quantisation.
	double exposure_error = last_device_status_.shutter_speed * ERROR_FACTOR + 200;
	double gain_error = last_device_status_.analogue_gain * ERROR_FACTOR;
	double target_error = last_target_exposure_ * ERROR_FACTOR;

	// Note that we don't know the exposure/gain limits of the sensor, so
	// the values we keep requesting may be unachievable. For this reason
	// we only insist that we're close to values in the past few frames.
	if (device_status.shutter_speed > last_device_status_.shutter_speed - exposure_error &&
	    device_status.shutter_speed < last_device_status_.shutter_speed + exposure_error &&
	    device_status.analogue_gain > last_device_status_.analogue_gain - gain_error &&
	    device_status.analogue_gain < last_device_status_.analogue_gain + gain_error &&
	    status_.target_exposure_value > last_target_exposure_ - target_error &&
	    status_.target_exposure_value < last_target_exposure_ + target_error)
		lock_count_ = std::min(lock_count_ + 1, MAX_LOCK_COUNT);
	else if (device_status.shutter_speed < last_device_status_.shutter_speed - RESET_MARGIN * exposure_error ||
		 device_status.shutter_speed > last_device_status_.shutter_speed + RESET_MARGIN * exposure_error ||
		 device_status.analogue_gain < last_device_status_.analogue_gain - RESET_MARGIN * gain_error ||
		 device_status.analogue_gain > last_device_status_.analogue_gain + RESET_MARGIN * gain_error ||
		 status_.target_exposure_value < last_target_exposure_ - RESET_MARGIN * target_error ||
		 status_.target_exposure_value > last_target_exposure_ + RESET_MARGIN * target_error)
		lock_count_ = 0;

	last_device_status_ = device_status;
	last_target_exposure_ = status_.target_exposure_value;

	LOG(RPiAgc, Debug) << "Lock count updated to " << lock_count_;
	status_.locked = lock_count_ == MAX_LOCK_COUNT;
}

static void copy_string(std::string const &s, char *d, size_t size)
{
	size_t length = s.copy(d, size - 1);
	d[length] = '\0';
}

void Agc::housekeepConfig()
{
	// First fetch all the up-to-date settings, so no one else has to do it.
	status_.ev = ev_;
	status_.fixed_shutter = fixed_shutter_;
	status_.fixed_analogue_gain = fixed_analogue_gain_;
	status_.flicker_period = flicker_period_;
	LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixed_shutter "
			   << status_.fixed_shutter << " fixed_analogue_gain "
			   << status_.fixed_analogue_gain;
	// Make sure the "mode" pointers point to the up-to-date things, if
	// they've changed.
	if (strcmp(metering_mode_name_.c_str(), status_.metering_mode)) {
		auto it = config_.metering_modes.find(metering_mode_name_);
		if (it == config_.metering_modes.end())
			throw std::runtime_error("Agc: no metering mode " +
						 metering_mode_name_);
		metering_mode_ = &it->second;
		copy_string(metering_mode_name_, status_.metering_mode,
			    sizeof(status_.metering_mode));
	}
	if (strcmp(exposure_mode_name_.c_str(), status_.exposure_mode)) {
		auto it = config_.exposure_modes.find(exposure_mode_name_);
		if (it == config_.exposure_modes.end())
			throw std::runtime_error("Agc: no exposure profile " +
						 exposure_mode_name_);
		exposure_mode_ = &it->second;
		copy_string(exposure_mode_name_, status_.exposure_mode,
			    sizeof(status_.exposure_mode));
	}
	if (strcmp(constraint_mode_name_.c_str(), status_.constraint_mode)) {
		auto it =
			config_.constraint_modes.find(constraint_mode_name_);
		if (it == config_.constraint_modes.end())
			throw std::runtime_error("Agc: no constraint list " +
						 constraint_mode_name_);
		constraint_mode_ = &it->second;
		copy_string(constraint_mode_name_, status_.constraint_mode,
			    sizeof(status_.constraint_mode));
	}
	LOG(RPiAgc, Debug) << "exposure_mode "
			   << exposure_mode_name_ << " constraint_mode "
			   << constraint_mode_name_ << " metering_mode "
			   << metering_mode_name_;
}

void Agc::fetchCurrentExposure(Metadata *image_metadata)
{
	std::unique_lock<Metadata> lock(*image_metadata);
	DeviceStatus *device_status =
		image_metadata->GetLocked<DeviceStatus>("device.status");
	if (!device_status)
		throw std::runtime_error("Agc: no device metadata");
	current_.shutter = device_status->shutter_speed;
	current_.analogue_gain = device_status->analogue_gain;
	AgcStatus *agc_status =
		image_metadata->GetLocked<AgcStatus>("agc.status");
	current_.total_exposure = agc_status ? agc_status->total_exposure_value : 0;
	current_.total_exposure_no_dg = current_.shutter * current_.analogue_gain;
}

void Agc::fetchAwbStatus(Metadata *image_metadata)
{
	awb_.gain_r = 1.0; // in case not found in metadata
	awb_.gain_g = 1.0;
	awb_.gain_b = 1.0;
	if (image_metadata->Get("awb.status", awb_) != 0)
		LOG(RPiAgc, Warning) << "Agc: no AWB status found";
}

static double compute_initial_Y(bcm2835_isp_stats *stats, AwbStatus const &awb,
				double weights[], double gain)
{
	bcm2835_isp_stats_region *regions = stats->agc_stats;
	// Note how the calculation below means that equal weights give you
	// "average" metering (i.e. all pixels equally important).
	double R_sum = 0, G_sum = 0, B_sum = 0, pixel_sum = 0;
	for (int i = 0; i < AGC_STATS_SIZE; i++) {
		double counted = regions[i].counted;
		double r_sum = std::min(regions[i].r_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
		double g_sum = std::min(regions[i].g_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
		double b_sum = std::min(regions[i].b_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted);
		R_sum += r_sum * weights[i];
		G_sum += g_sum * weights[i];
		B_sum += b_sum * weights[i];
		pixel_sum += counted * weights[i];
	}
	if (pixel_sum == 0.0) {
		LOG(RPiAgc, Warning) << "compute_initial_Y: pixel_sum is zero";
		return 0;
	}
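	// Rec. 601 luma weights, applied after restoring the AWB gains to
	// each channel.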
	double Y_sum = R_sum * awb.gain_r * .299 +
		       G_sum * awb.gain_g * .587 +
		       B_sum * awb.gain_b * .114;
	return Y_sum / pixel_sum / (1 << PIPELINE_BITS);
}

// We handle extra gain through EV by adjusting our Y targets. However, you
// simply can't monitor histograms once they get very close to (or beyond!)
// saturation, so we clamp the Y targets to this value. It does mean that EV
// increases don't necessarily do quite what you might expect in certain
// (contrived) cases.

#define EV_GAIN_Y_TARGET_LIMIT 0.9

static double constraint_compute_gain(AgcConstraint &c, Histogram &h,
				      double lux, double ev_gain,
				      double &target_Y)
{
	target_Y = c.Y_target.Eval(c.Y_target.Domain().Clip(lux));
	target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);
	double iqm = h.InterQuantileMean(c.q_lo, c.q_hi);
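	// The inter-quantile mean is in histogram-bin units, so scale the
	// (0 to 1) target up by the bin count to get the gain that would
	// move the measured IQM onto the target.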
	return (target_Y * NUM_HISTOGRAM_BINS) / iqm;
}

void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata,
		      double &gain, double &target_Y)
{
	struct LuxStatus lux = {};
	lux.lux = 400; // default lux level to 400 in case no metadata found
	if (image_metadata->Get("lux.status", lux) != 0)
		LOG(RPiAgc, Warning) << "Agc: no lux level found";
	Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS);
	double ev_gain = status_.ev * config_.base_ev;
	// The initial gain and target_Y come from some of the regions. After
	// that we consider the histogram constraints.
	target_Y =
		config_.Y_target.Eval(config_.Y_target.Domain().Clip(lux.lux));
	target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain);

	// Do this calculation a few times as brightness increase can be
	// non-linear when there are saturated regions.
	gain = 1.0;
	for (int i = 0; i < 8; i++) {
		double initial_Y = compute_initial_Y(statistics, awb_,
						     metering_mode_->weights, gain);
		double extra_gain = std::min(10.0, target_Y / (initial_Y + .001));
		gain *= extra_gain;
		LOG(RPiAgc, Debug) << "Initial Y " << initial_Y << " target " << target_Y
				   << " gives gain " << gain;
		if (extra_gain < 1.01) // close enough
			break;
	}

	for (auto &c : *constraint_mode_) {
		double new_target_Y;
		double new_gain =
			constraint_compute_gain(c, h, lux.lux, ev_gain,
						new_target_Y);
		LOG(RPiAgc, Debug) << "Constraint has target_Y "
				   << new_target_Y << " giving gain " << new_gain;
		if (c.bound == AgcConstraint::Bound::LOWER &&
		    new_gain > gain) {
			LOG(RPiAgc, Debug) << "Lower bound constraint adopted";
			gain = new_gain, target_Y = new_target_Y;
		} else if (c.bound == AgcConstraint::Bound::UPPER &&
			   new_gain < gain) {
			LOG(RPiAgc, Debug) << "Upper bound constraint adopted";
			gain = new_gain, target_Y = new_target_Y;
		}
	}
	LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << target_Y << " ev "
			   << status_.ev << " base_ev " << config_.base_ev
			   << ")";
}

void Agc::computeTargetExposure(double gain)
{
	if (status_.fixed_shutter != 0.0 && status_.fixed_analogue_gain != 0.0) {
		// When ag and shutter are both fixed, we need to drive the
		// total exposure so that we end up with a digital gain of at least
		// 1/min_colour_gain. Otherwise we'd desaturate channels causing
		// white to go cyan or magenta.
		double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
		ASSERT(min_colour_gain != 0.0);
		target_.total_exposure =
			status_.fixed_shutter * status_.fixed_analogue_gain / min_colour_gain;
	} else {
		// The statistics reflect the image without digital gain, so the final
		// total exposure we're aiming for is:
		target_.total_exposure = current_.total_exposure_no_dg * gain;
		// The final target exposure is also limited to what the exposure
		// mode allows.
		double max_total_exposure =
			(status_.fixed_shutter != 0.0
			 ? status_.fixed_shutter
			 : exposure_mode_->shutter.back()) *
			(status_.fixed_analogue_gain != 0.0
			 ? status_.fixed_analogue_gain
			 : exposure_mode_->gain.back());
		target_.total_exposure = std::min(target_.total_exposure,
						  max_total_exposure);
	}
	LOG(RPiAgc, Debug) << "Target total_exposure " << target_.total_exposure;
}

bool Agc::applyDigitalGain(double gain, double target_Y)
{
	double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 });
	ASSERT(min_colour_gain != 0.0);
	double dg = 1.0 / min_colour_gain;
	// I think this pipeline subtracts black level and rescales before we
	// get the stats, so no need to worry about it.
	LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain
			   << " target_Y " << target_Y;
	// Finally, if we're trying to reduce exposure but the target_Y is
	// "close" to 1.0, then the gain computed for that constraint will be
	// only slightly less than one, because the measured Y can never be
	// larger than 1.0. When this happens, demand a large digital gain so
	// that the exposure can be reduced, de-saturating the image much more
	// quickly (and we then approach the correct value more quickly from
	// below).
	bool desaturate = target_Y > config_.fast_reduce_threshold &&
			  gain < sqrt(target_Y);
	if (desaturate)
		dg /= config_.fast_reduce_threshold;
	LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate;
	target_.total_exposure_no_dg = target_.total_exposure / dg;
	LOG(RPiAgc, Debug) << "Target total_exposure_no_dg " << target_.total_exposure_no_dg;
	return desaturate;
}

void Agc::filterExposure(bool desaturate)
{
	double speed = config_.speed;
	// AGC adapts instantly if both shutter and gain are directly specified
	// or we're in the startup phase.
	if ((status_.fixed_shutter && status_.fixed_analogue_gain) ||
	    frame_count_ <= config_.startup_frames)
		speed = 1.0;
	if (filtered_.total_exposure == 0.0) {
		filtered_.total_exposure = target_.total_exposure;
		filtered_.total_exposure_no_dg = target_.total_exposure_no_dg;
	} else {
		// If close to the result go faster, to save making so many
		// micro-adjustments on the way. (Make this customisable?)
		if (filtered_.total_exposure < 1.2 * target_.total_exposure &&
		    filtered_.total_exposure > 0.8 * target_.total_exposure)
			speed = sqrt(speed);
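		// First-order IIR filter: each frame we move a fraction
		// "speed" of the remaining distance towards the target, so
		// e.g. speed 0.2 covers about 90% of a step change in ten
		// frames.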
		filtered_.total_exposure = speed * target_.total_exposure +
					   filtered_.total_exposure * (1.0 - speed);
		// When desaturating, take a big jump down in exposure_no_dg,
		// which we'll hide with digital gain.
		if (desaturate)
			filtered_.total_exposure_no_dg =
				target_.total_exposure_no_dg;
		else
			filtered_.total_exposure_no_dg =
				speed * target_.total_exposure_no_dg +
				filtered_.total_exposure_no_dg * (1.0 - speed);
	}
	// We can't let the no_dg exposure deviate too far below the
	// total exposure, as there might not be enough digital gain available
	// in the ISP to hide it (which will cause nasty oscillation).
	if (filtered_.total_exposure_no_dg <
	    filtered_.total_exposure * config_.fast_reduce_threshold)
		filtered_.total_exposure_no_dg = filtered_.total_exposure *
						 config_.fast_reduce_threshold;
	LOG(RPiAgc, Debug) << "After filtering, total_exposure " << filtered_.total_exposure
			   << " no dg " << filtered_.total_exposure_no_dg;
}

void Agc::divideUpExposure()
{
	// Sending the fixed shutter/gain cases through the same code may seem
	// unnecessary, but it will make more sense when we extend this to
	// cover variable aperture.
	double exposure_value = filtered_.total_exposure_no_dg;
	double shutter_time, analogue_gain;
	shutter_time = status_.fixed_shutter != 0.0
			       ? status_.fixed_shutter
			       : exposure_mode_->shutter[0];
	analogue_gain = status_.fixed_analogue_gain != 0.0
				? status_.fixed_analogue_gain
				: exposure_mode_->gain[0];
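	// Raise first the shutter time, then the analogue gain, stage by
	// stage through the exposure mode's profile, until their product
	// reaches the exposure value we want.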
	if (shutter_time * analogue_gain < exposure_value) {
		for (unsigned int stage = 1;
		     stage < exposure_mode_->gain.size(); stage++) {
			if (status_.fixed_shutter == 0.0) {
				if (exposure_mode_->shutter[stage] *
					    analogue_gain >=
				    exposure_value) {
					shutter_time =
						exposure_value / analogue_gain;
					break;
				}
				shutter_time = exposure_mode_->shutter[stage];
			}
			if (status_.fixed_analogue_gain == 0.0) {
				if (exposure_mode_->gain[stage] *
					    shutter_time >=
				    exposure_value) {
					analogue_gain =
						exposure_value / shutter_time;
					break;
				}
				analogue_gain = exposure_mode_->gain[stage];
			}
		}
	}
	LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutter_time << " and "
			   << analogue_gain;
	// Finally adjust shutter time for flicker avoidance (require both
	// shutter and gain not to be fixed).
	if (status_.fixed_shutter == 0.0 &&
	    status_.fixed_analogue_gain == 0.0 &&
	    status_.flicker_period != 0.0) {
		int flicker_periods = shutter_time / status_.flicker_period;
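		// E.g. with a 10ms flicker period (50Hz mains) a shutter of
		// 16.6ms is cut to one whole period, 10ms, and the analogue
		// gain is scaled up by 1.66 to compensate.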
		if (flicker_periods > 0) {
			double new_shutter_time = flicker_periods * status_.flicker_period;
			analogue_gain *= shutter_time / new_shutter_time;
			// We should still not allow the ag to go over the
			// largest value in the exposure mode. Note that this
			// may force more of the total exposure into the digital
			// gain as a side-effect.
			analogue_gain = std::min(analogue_gain,
						 exposure_mode_->gain.back());
			shutter_time = new_shutter_time;
		}
		LOG(RPiAgc, Debug) << "After flicker avoidance, shutter "
				   << shutter_time << " gain " << analogue_gain;
	}
	filtered_.shutter = shutter_time;
	filtered_.analogue_gain = analogue_gain;
}

void Agc::writeAndFinish(Metadata *image_metadata, bool desaturate)
{
	status_.total_exposure_value = filtered_.total_exposure;
	status_.target_exposure_value = desaturate ? 0 : target_.total_exposure_no_dg;
	status_.shutter_time = filtered_.shutter;
	status_.analogue_gain = filtered_.analogue_gain;
	// Write to metadata as well, in case anyone wants to update the camera
	// immediately.
	image_metadata->Set("agc.status", status_);
	LOG(RPiAgc, Debug) << "Output written, total exposure requested is "
			   << filtered_.total_exposure;
	LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter
			   << " analogue gain " << filtered_.analogue_gain;
}

// Register algorithm with the system.
static Algorithm *Create(Controller *controller)
{
	return (Algorithm *)new Agc(controller);
}
static RegisterAlgorithm reg(NAME, &Create);