Diffstat (limited to 'src')
323 files changed, 18537 insertions, 5568 deletions
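The first hunk below (src/android/camera_capabilities.cpp) only advertises face detection to Android when the camera exposes the draft FaceDetectMode control, translating each libcamera mode into its Android counterpart. A minimal standalone sketch of that translation, using only the identifiers visible in the hunk (the helper name, the std::optional return and the header set are illustrative, not part of the patch):

	#include <optional>
	#include <stdint.h>

	/* Illustrative helper: map a libcamera draft::FaceDetectMode value to
	 * the Android face-detect mode, or std::nullopt for unknown values. */
	static std::optional<uint8_t> androidFaceDetectMode(int32_t mode)
	{
		switch (mode) {
		case controls::draft::FaceDetectModeOff:
			return ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
		case controls::draft::FaceDetectModeSimple:
			return ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
		default:
			return std::nullopt;
		}
	}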
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp index 71043e12..b161bc6b 100644 --- a/src/android/camera_capabilities.cpp +++ b/src/android/camera_capabilities.cpp @@ -11,6 +11,7 @@ #include <array> #include <cmath> #include <map> +#include <stdint.h> #include <type_traits> #include <hardware/camera3.h> @@ -1176,11 +1177,46 @@ int CameraCapabilities::initializeStaticMetadata() maxFrameDuration_); /* Statistics static metadata. */ - uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; - staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, - faceDetectMode); - int32_t maxFaceCount = 0; + auto iter = camera_->controls().find(controls::draft::FaceDetectMode.id()); + if (iter != camera_->controls().end()) { + const ControlInfo &faceDetectCtrlInfo = iter->second; + std::vector<uint8_t> faceDetectModes; + bool hasFaceDetection = false; + + for (const auto &value : faceDetectCtrlInfo.values()) { + int32_t mode = value.get<int32_t>(); + uint8_t androidMode = 0; + + switch (mode) { + case controls::draft::FaceDetectModeOff: + androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + break; + case controls::draft::FaceDetectModeSimple: + androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE; + hasFaceDetection = true; + break; + default: + LOG(HAL, Fatal) << "Received invalid face detect mode: " << mode; + } + faceDetectModes.push_back(androidMode); + } + if (hasFaceDetection) { + /* + * \todo Create new libcamera controls to query max + * possible faces detected. + */ + maxFaceCount = 10; + staticMetadata_->addEntry( + ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + faceDetectModes.data(), faceDetectModes.size()); + } + } else { + uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + faceDetectMode); + } + staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, maxFaceCount); diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp index 493f66e7..a038131a 100644 --- a/src/android/camera_device.cpp +++ b/src/android/camera_device.cpp @@ -22,6 +22,7 @@ #include <libcamera/controls.h> #include <libcamera/fence.h> #include <libcamera/formats.h> +#include <libcamera/geometry.h> #include <libcamera/property_ids.h> #include "system/graphics.h" @@ -813,6 +814,11 @@ int CameraDevice::processControls(Camera3RequestDescriptor *descriptor) controls.set(controls::ScalerCrop, cropRegion); } + if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) { + const uint8_t *data = entry.data.u8; + controls.set(controls::draft::FaceDetectMode, data[0]); + } + if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) { const int32_t data = *entry.data.i32; int32_t testPatternMode = controls::draft::TestPatternModeOff; @@ -1540,8 +1546,9 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) cons value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF; resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32); - value = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; - resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, value); + if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) + resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, + entry.data.u8, 1); value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF; resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, @@ -1580,6 +1587,61 @@ CameraDevice::getResultMetadata(const 
Camera3RequestDescriptor &descriptor) cons resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION, *frameDuration * 1000); + const auto &faceDetectRectangles = + metadata.get(controls::draft::FaceDetectFaceRectangles); + if (faceDetectRectangles) { + std::vector<int32_t> flatRectangles; + for (const Rectangle &rect : *faceDetectRectangles) { + flatRectangles.push_back(rect.x); + flatRectangles.push_back(rect.y); + flatRectangles.push_back(rect.x + rect.width); + flatRectangles.push_back(rect.y + rect.height); + } + resultMetadata->addEntry( + ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles); + } + + const auto &faceDetectFaceScores = + metadata.get(controls::draft::FaceDetectFaceScores); + if (faceDetectRectangles && faceDetectFaceScores) { + if (faceDetectFaceScores->size() != faceDetectRectangles->size()) { + LOG(HAL, Error) << "Pipeline returned wrong number of face scores; " + << "Expected: " << faceDetectRectangles->size() + << ", got: " << faceDetectFaceScores->size(); + } + resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES, + *faceDetectFaceScores); + } + + const auto &faceDetectFaceLandmarks = + metadata.get(controls::draft::FaceDetectFaceLandmarks); + if (faceDetectRectangles && faceDetectFaceLandmarks) { + size_t expectedLandmarks = faceDetectRectangles->size() * 3; + if (faceDetectFaceLandmarks->size() != expectedLandmarks) { + LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; " + << "Expected: " << expectedLandmarks + << ", got: " << faceDetectFaceLandmarks->size(); + } + + std::vector<int32_t> androidLandmarks; + for (const Point &landmark : *faceDetectFaceLandmarks) { + androidLandmarks.push_back(landmark.x); + androidLandmarks.push_back(landmark.y); + } + resultMetadata->addEntry( + ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks); + } + + const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds); + if (faceDetectRectangles && faceDetectFaceIds) { + if (faceDetectFaceIds->size() != faceDetectRectangles->size()) { + LOG(HAL, Error) << "Pipeline returned wrong number of face ids; " + << "Expected: " << faceDetectRectangles->size() + << ", got: " << faceDetectFaceIds->size(); + } + resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds); + } + const auto &scalerCrop = metadata.get(controls::ScalerCrop); if (scalerCrop) { const Rectangle &crop = *scalerCrop; diff --git a/src/android/jpeg/encoder_libjpeg.cpp b/src/android/jpeg/encoder_libjpeg.cpp index 7fc6287e..cb242b5e 100644 --- a/src/android/jpeg/encoder_libjpeg.cpp +++ b/src/android/jpeg/encoder_libjpeg.cpp @@ -125,7 +125,7 @@ void EncoderLibJpeg::compressRGB(const std::vector<Span<uint8_t>> &planes) */ void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes) { - uint8_t tmprowbuf[compress_.image_width * 3]; + std::vector<uint8_t> tmprowbuf(compress_.image_width * 3); /* * \todo Use the raw api, and only unpack the cb/cr samples to new line @@ -149,10 +149,10 @@ void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes) const unsigned char *src_c = planes[1].data(); JSAMPROW row_pointer[1]; - row_pointer[0] = &tmprowbuf[0]; + row_pointer[0] = tmprowbuf.data(); for (unsigned int y = 0; y < compress_.image_height; y++) { - unsigned char *dst = &tmprowbuf[0]; + unsigned char *dst = tmprowbuf.data(); const unsigned char *src_y = src + y * y_stride; const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos; diff --git a/src/android/meson.build b/src/android/meson.build index 68646120..7b226a4b 100644 --- 
a/src/android/meson.build +++ b/src/android/meson.build @@ -4,6 +4,7 @@ android_deps = [ dependency('libexif', required : get_option('android')), dependency('libjpeg', required : get_option('android')), libcamera_private, + libyuv_dep, ] android_enabled = true @@ -15,27 +16,6 @@ foreach dep : android_deps endif endforeach -libyuv_dep = dependency('libyuv', required : false) - -# Fallback to a subproject if libyuv isn't found, as it's typically not -# provided by distributions. -if not libyuv_dep.found() - cmake = import('cmake') - - libyuv_vars = cmake.subproject_options() - libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'}) - libyuv_vars.set_override_option('cpp_std', 'c++17') - libyuv_vars.append_compile_args('cpp', - '-Wno-sign-compare', - '-Wno-unused-variable', - '-Wno-unused-parameter') - libyuv_vars.append_link_args('-ljpeg') - libyuv = cmake.subproject('libyuv', options : libyuv_vars) - libyuv_dep = libyuv.dependency('yuv') -endif - -android_deps += [libyuv_dep] - android_hal_sources = files([ 'camera3_hal.cpp', 'camera_capabilities.cpp', diff --git a/src/apps/cam/camera_session.cpp b/src/apps/cam/camera_session.cpp index 097dc479..9e934827 100644 --- a/src/apps/cam/camera_session.cpp +++ b/src/apps/cam/camera_session.cpp @@ -159,8 +159,43 @@ CameraSession::~CameraSession() void CameraSession::listControls() const { for (const auto &[id, info] : camera_->controls()) { - std::cout << "Control: " << id->name() << ": " - << info.toString() << std::endl; + std::stringstream io; + io << "[" + << (id->isInput() ? "in" : " ") + << (id->isOutput() ? "out" : " ") + << "] "; + + if (info.values().empty()) { + std::cout << "Control: " << io.str() + << id->vendor() << "::" << id->name() << ": " + << info.toString() << std::endl; + } else { + std::cout << "Control: " << io.str() + << id->vendor() << "::" << id->name() << ":" + << std::endl; + for (const auto &value : info.values()) { + int32_t val = value.get<int32_t>(); + const auto &it = id->enumerators().find(val); + + std::cout << " - "; + if (it == id->enumerators().end()) + std::cout << "UNKNOWN"; + else + std::cout << it->second; + std::cout << " (" << val << ")" << std::endl; + } + } + + if (id->isArray()) { + std::size_t size = id->size(); + + std::cout << " Size: "; + if (size == std::numeric_limits<std::size_t>::max()) + std::cout << "n"; + else + std::cout << std::to_string(size); + std::cout << std::endl; + } } } @@ -230,11 +265,16 @@ int CameraSession::start() #endif if (options_.isSet(OptFile)) { - if (!options_[OptFile].toString().empty()) - sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_, - options_[OptFile]); - else - sink_ = std::make_unique<FileSink>(camera_.get(), streamNames_); + std::unique_ptr<FileSink> sink = + std::make_unique<FileSink>(camera_.get(), streamNames_); + + if (!options_[OptFile].toString().empty()) { + ret = sink->setFilePattern(options_[OptFile]); + if (ret) + return ret; + } + + sink_ = std::move(sink); } if (sink_) { diff --git a/src/apps/cam/file_sink.cpp b/src/apps/cam/file_sink.cpp index 3e000d2f..76e21db9 100644 --- a/src/apps/cam/file_sink.cpp +++ b/src/apps/cam/file_sink.cpp @@ -5,6 +5,7 @@ * File Sink */ +#include <array> #include <assert.h> #include <fcntl.h> #include <iomanip> @@ -12,6 +13,7 @@ #include <sstream> #include <string.h> #include <unistd.h> +#include <utility> #include <libcamera/camera.h> @@ -24,13 +26,13 @@ using namespace libcamera; FileSink::FileSink([[maybe_unused]] const libcamera::Camera *camera, - const std::map<const libcamera::Stream 
*, std::string> &streamNames, - const std::string &pattern) + const std::map<const libcamera::Stream *, std::string> &streamNames) : #ifdef HAVE_TIFF camera_(camera), #endif - streamNames_(streamNames), pattern_(pattern) + pattern_(kDefaultFilePattern), fileType_(FileType::Binary), + streamNames_(streamNames) { } @@ -38,6 +40,41 @@ FileSink::~FileSink() { } +int FileSink::setFilePattern(const std::string &pattern) +{ + static const std::array<std::pair<std::string, FileType>, 2> types{{ + { ".dng", FileType::Dng }, + { ".ppm", FileType::Ppm }, + }}; + + pattern_ = pattern; + + if (pattern_.empty() || pattern_.back() == '/') + pattern_ += kDefaultFilePattern; + + fileType_ = FileType::Binary; + + for (const auto &type : types) { + if (pattern_.size() < type.first.size()) + continue; + + if (pattern_.find(type.first, pattern_.size() - type.first.size()) != + std::string::npos) { + fileType_ = type.second; + break; + } + } + +#ifndef HAVE_TIFF + if (fileType_ == FileType::Dng) { + std::cerr << "DNG support not available" << std::endl; + return -EINVAL; + } +#endif /* HAVE_TIFF */ + + return 0; +} + int FileSink::configure(const libcamera::CameraConfiguration &config) { int ret = FrameSink::configure(config); @@ -67,21 +104,10 @@ bool FileSink::processRequest(Request *request) void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer, [[maybe_unused]] const ControlList &metadata) { - std::string filename; + std::string filename = pattern_; size_t pos; int fd, ret = 0; - if (!pattern_.empty()) - filename = pattern_; - -#ifdef HAVE_TIFF - bool dng = filename.find(".dng", filename.size() - 4) != std::string::npos; -#endif /* HAVE_TIFF */ - bool ppm = filename.find(".ppm", filename.size() - 4) != std::string::npos; - - if (filename.empty() || filename.back() == '/') - filename += "frame-#.bin"; - pos = filename.find_first_of('#'); if (pos != std::string::npos) { std::stringstream ss; @@ -93,7 +119,7 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer, Image *image = mappedBuffers_[buffer].get(); #ifdef HAVE_TIFF - if (dng) { + if (fileType_ == FileType::Dng) { ret = DNGWriter::write(filename.c_str(), camera_, stream->configuration(), metadata, buffer, image->data(0).data()); @@ -104,7 +130,7 @@ void FileSink::writeBuffer(const Stream *stream, FrameBuffer *buffer, return; } #endif /* HAVE_TIFF */ - if (ppm) { + if (fileType_ == FileType::Ppm) { ret = PPMWriter::write(filename.c_str(), stream->configuration(), image->data(0)); if (ret < 0) diff --git a/src/apps/cam/file_sink.h b/src/apps/cam/file_sink.h index 9d560783..71b7fe0f 100644 --- a/src/apps/cam/file_sink.h +++ b/src/apps/cam/file_sink.h @@ -21,10 +21,11 @@ class FileSink : public FrameSink { public: FileSink(const libcamera::Camera *camera, - const std::map<const libcamera::Stream *, std::string> &streamNames, - const std::string &pattern = ""); + const std::map<const libcamera::Stream *, std::string> &streamNames); ~FileSink(); + int setFilePattern(const std::string &pattern); + int configure(const libcamera::CameraConfiguration &config) override; void mapBuffer(libcamera::FrameBuffer *buffer) override; @@ -32,6 +33,14 @@ public: bool processRequest(libcamera::Request *request) override; private: + static constexpr const char *kDefaultFilePattern = "frame-#.bin"; + + enum class FileType { + Binary, + Dng, + Ppm, + }; + void writeBuffer(const libcamera::Stream *stream, libcamera::FrameBuffer *buffer, const libcamera::ControlList &metadata); @@ -39,7 +48,10 @@ private: #ifdef HAVE_TIFF const libcamera::Camera 
*camera_; #endif - std::map<const libcamera::Stream *, std::string> streamNames_; + std::string pattern_; + FileType fileType_; + + std::map<const libcamera::Stream *, std::string> streamNames_; std::map<libcamera::FrameBuffer *, std::unique_ptr<Image>> mappedBuffers_; }; diff --git a/src/apps/cam/main.cpp b/src/apps/cam/main.cpp index 4f87f200..460dbc81 100644 --- a/src/apps/cam/main.cpp +++ b/src/apps/cam/main.cpp @@ -344,12 +344,16 @@ std::string CamApp::cameraName(const Camera *camera) return name; } +namespace { + void signalHandler([[maybe_unused]] int signal) { std::cout << "Exiting" << std::endl; CamApp::instance()->quit(); } +} /* namespace */ + int main(int argc, char **argv) { CamApp app; diff --git a/src/apps/common/dng_writer.cpp b/src/apps/common/dng_writer.cpp index 59f1fa23..ac461951 100644 --- a/src/apps/common/dng_writer.cpp +++ b/src/apps/common/dng_writer.cpp @@ -8,8 +8,10 @@ #include "dng_writer.h" #include <algorithm> +#include <endian.h> #include <iostream> #include <map> +#include <vector> #include <tiffio.h> @@ -126,7 +128,9 @@ struct Matrix3d { float m[9]; }; -void packScanlineSBGGR8(void *output, const void *input, unsigned int width) +namespace { + +void packScanlineRaw8(void *output, const void *input, unsigned int width) { const uint8_t *in = static_cast<const uint8_t *>(input); uint8_t *out = static_cast<uint8_t *>(output); @@ -134,7 +138,67 @@ void packScanlineSBGGR8(void *output, const void *input, unsigned int width) std::copy(in, in + width, out); } -void packScanlineSBGGR10P(void *output, const void *input, unsigned int width) +void packScanlineRaw10(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast<const uint8_t *>(input); + uint8_t *out = static_cast<uint8_t *>(output); + + for (unsigned int i = 0; i < width; i += 4) { + *out++ = in[1] << 6 | in[0] >> 2; + *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4; + *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6; + *out++ = in[4] << 2 | (in[7] & 0x03) << 0; + *out++ = in[6]; + in += 8; + } +} + +void packScanlineRaw12(void *output, const void *input, unsigned int width) +{ + const uint8_t *in = static_cast<const uint8_t *>(input); + uint8_t *out = static_cast<uint8_t *>(output); + + for (unsigned int i = 0; i < width; i += 2) { + *out++ = in[1] << 4 | in[0] >> 4; + *out++ = in[0] << 4 | (in[3] & 0x0f); + *out++ = in[2]; + in += 4; + } +} + +void packScanlineRaw16(void *output, const void *input, unsigned int width) +{ + const uint16_t *in = static_cast<const uint16_t *>(input); + uint16_t *out = static_cast<uint16_t *>(output); + + std::copy(in, in + width, out); +} + +/* Thumbnail function for raw data with each pixel aligned to 16bit. */ +void thumbScanlineRaw(const FormatInfo &info, void *output, const void *input, + unsigned int width, unsigned int stride) +{ + const uint16_t *in = static_cast<const uint16_t *>(input); + const uint16_t *in2 = static_cast<const uint16_t *>(input) + stride / 2; + uint8_t *out = static_cast<uint8_t *>(output); + + /* Shift down to 8. */ + unsigned int shift = info.bitsPerSample - 8; + + /* Simple averaging that produces greyscale RGB values. 
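+	 * Worked example: with 10-bit data the shift is 10 - 8 = 2, so four
+	 * neighbouring samples of 512, 516, 520 and 524 average to 518, which
+	 * yields the 8-bit thumbnail value 518 >> 2 = 129 for R, G and B.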
*/ + for (unsigned int x = 0; x < width; x++) { + uint16_t value = (le16toh(in[0]) + le16toh(in[1]) + + le16toh(in2[0]) + le16toh(in2[1])) >> 2; + value = value >> shift; + *out++ = value; + *out++ = value; + *out++ = value; + in += 16; + in2 += 16; + } +} + +void packScanlineRaw10_CSI2P(void *output, const void *input, unsigned int width) { const uint8_t *in = static_cast<const uint8_t *>(input); uint8_t *out = static_cast<uint8_t *>(output); @@ -150,7 +214,7 @@ void packScanlineSBGGR10P(void *output, const void *input, unsigned int width) } } -void packScanlineSBGGR12P(void *output, const void *input, unsigned int width) +void packScanlineRaw12_CSI2P(void *output, const void *input, unsigned int width) { const uint8_t *in = static_cast<const uint8_t *>(input); uint8_t *out = static_cast<uint8_t *>(output); @@ -164,7 +228,7 @@ void packScanlineSBGGR12P(void *output, const void *input, unsigned int width) } } -void thumbScanlineSBGGRxxP(const FormatInfo &info, void *output, +void thumbScanlineRaw_CSI2P(const FormatInfo &info, void *output, const void *input, unsigned int width, unsigned int stride) { @@ -282,78 +346,150 @@ void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output, } } -static const std::map<PixelFormat, FormatInfo> formatInfo = { +const std::map<PixelFormat, FormatInfo> formatInfo = { { formats::SBGGR8, { .bitsPerSample = 8, .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, - .packScanline = packScanlineSBGGR8, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGBRG8, { .bitsPerSample = 8, .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, - .packScanline = packScanlineSBGGR8, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGRBG8, { .bitsPerSample = 8, .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, - .packScanline = packScanlineSBGGR8, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SRGGB8, { .bitsPerSample = 8, .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, - .packScanline = packScanlineSBGGR8, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw8, + .thumbScanline = thumbScanlineRaw_CSI2P, + } }, + { formats::SBGGR10, { + .bitsPerSample = 10, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGBRG10, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGRBG10, { + .bitsPerSample = 10, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB10, { + .bitsPerSample = 10, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw10, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SBGGR12, { + .bitsPerSample = 12, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { 
formats::SGBRG12, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGRBG12, { + .bitsPerSample = 12, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB12, { + .bitsPerSample = 12, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw12, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SBGGR16, { + .bitsPerSample = 16, + .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGBRG16, { + .bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SGRBG16, { + .bitsPerSample = 16, + .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, + } }, + { formats::SRGGB16, { + .bitsPerSample = 16, + .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, + .packScanline = packScanlineRaw16, + .thumbScanline = thumbScanlineRaw, } }, { formats::SBGGR10_CSI2P, { .bitsPerSample = 10, .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, - .packScanline = packScanlineSBGGR10P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGBRG10_CSI2P, { .bitsPerSample = 10, .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, - .packScanline = packScanlineSBGGR10P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGRBG10_CSI2P, { .bitsPerSample = 10, .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, - .packScanline = packScanlineSBGGR10P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SRGGB10_CSI2P, { .bitsPerSample = 10, .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, - .packScanline = packScanlineSBGGR10P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw10_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SBGGR12_CSI2P, { .bitsPerSample = 12, .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed }, - .packScanline = packScanlineSBGGR12P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGBRG12_CSI2P, { .bitsPerSample = 12, .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen }, - .packScanline = packScanlineSBGGR12P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SGRBG12_CSI2P, { .bitsPerSample = 12, .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen }, - .packScanline = packScanlineSBGGR12P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = 
thumbScanlineRaw_CSI2P, } }, { formats::SRGGB12_CSI2P, { .bitsPerSample = 12, .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue }, - .packScanline = packScanlineSBGGR12P, - .thumbScanline = thumbScanlineSBGGRxxP, + .packScanline = packScanlineRaw12_CSI2P, + .thumbScanline = thumbScanlineRaw_CSI2P, } }, { formats::SBGGR10_IPU3, { .bitsPerSample = 16, @@ -381,6 +517,8 @@ static const std::map<PixelFormat, FormatInfo> formatInfo = { } }, }; +} /* namespace */ + int DNGWriter::write(const char *filename, const Camera *camera, const StreamConfiguration &config, const ControlList &metadata, @@ -407,7 +545,7 @@ int DNGWriter::write(const char *filename, const Camera *camera, * or a thumbnail scanline. The latter will always be much smaller than * the former as we downscale by 16 in both directions. */ - uint8_t scanline[(config.size.width * info->bitsPerSample + 7) / 8]; + std::vector<uint8_t> scanline((config.size.width * info->bitsPerSample + 7) / 8); toff_t rawIFDOffset = 0; toff_t exifIFDOffset = 0; @@ -507,10 +645,10 @@ int DNGWriter::write(const char *filename, const Camera *camera, /* Write the thumbnail. */ const uint8_t *row = static_cast<const uint8_t *>(data); for (unsigned int y = 0; y < config.size.height / 16; y++) { - info->thumbScanline(*info, &scanline, row, + info->thumbScanline(*info, scanline.data(), row, config.size.width / 16, config.stride); - if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) { + if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) { std::cerr << "Failed to write thumbnail scanline" << std::endl; TIFFClose(tif); @@ -522,6 +660,23 @@ int DNGWriter::write(const char *filename, const Camera *camera, TIFFWriteDirectory(tif); + /* + * Workaround for a bug introduced in libtiff version 4.5.1 and no fix + * released. In these versions the CFA* tags were missing in the field + * info. + * Introduced by: https://gitlab.com/libtiff/libtiff/-/commit/738e04099b13192bb1f654e74e9b5829313f3161 + * Fixed by: https://gitlab.com/libtiff/libtiff/-/commit/49856998c3d82e65444b47bb4fb11b7830a0c2be + */ + if (!TIFFFindField(tif, TIFFTAG_CFAREPEATPATTERNDIM, TIFF_ANY)) { + static const TIFFFieldInfo infos[] = { + { TIFFTAG_CFAREPEATPATTERNDIM, 2, 2, TIFF_SHORT, FIELD_CUSTOM, + 1, 0, const_cast<char *>("CFARepeatPatternDim") }, + { TIFFTAG_CFAPATTERN, -1, -1, TIFF_BYTE, FIELD_CUSTOM, + 1, 1, const_cast<char *>("CFAPattern") }, + }; + TIFFMergeFieldInfo(tif, infos, 2); + } + /* Create a new IFD for the RAW image. */ const uint16_t cfaRepeatPatternDim[] = { 2, 2 }; const uint8_t cfaPlaneColor[] = { @@ -593,9 +748,9 @@ int DNGWriter::write(const char *filename, const Camera *camera, /* Write RAW content. 
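	 * Each scanline is repacked from the buffer's stride-aligned layout
	 * into the tightly packed bitsPerSample format that libtiff expects.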
*/ row = static_cast<const uint8_t *>(data); for (unsigned int y = 0; y < config.size.height; y++) { - info->packScanline(&scanline, row, config.size.width); + info->packScanline(scanline.data(), row, config.size.width); - if (TIFFWriteScanline(tif, &scanline, y, 0) != 1) { + if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) { std::cerr << "Failed to write RAW scanline" << std::endl; TIFFClose(tif); diff --git a/src/apps/common/dng_writer.h b/src/apps/common/dng_writer.h index 917713e6..aaa8a852 100644 --- a/src/apps/common/dng_writer.h +++ b/src/apps/common/dng_writer.h @@ -8,7 +8,6 @@ #pragma once #ifdef HAVE_TIFF -#define HAVE_DNG #include <libcamera/camera.h> #include <libcamera/controls.h> diff --git a/src/apps/common/options.cpp b/src/apps/common/options.cpp index ab19aa3d..ece268d0 100644 --- a/src/apps/common/options.cpp +++ b/src/apps/common/options.cpp @@ -10,6 +10,7 @@ #include <iomanip> #include <iostream> #include <string.h> +#include <vector> #include "options.h" @@ -879,8 +880,8 @@ OptionsParser::Options OptionsParser::parse(int argc, char **argv) * Allocate short and long options arrays large enough to contain all * options. */ - char shortOptions[optionsMap_.size() * 3 + 2]; - struct option longOptions[optionsMap_.size() + 1]; + std::vector<char> shortOptions(optionsMap_.size() * 3 + 2); + std::vector<struct option> longOptions(optionsMap_.size() + 1); unsigned int ids = 0; unsigned int idl = 0; @@ -922,7 +923,8 @@ OptionsParser::Options OptionsParser::parse(int argc, char **argv) opterr = 0; while (true) { - int c = getopt_long(argc, argv, shortOptions, longOptions, nullptr); + int c = getopt_long(argc, argv, shortOptions.data(), + longOptions.data(), nullptr); if (c == -1) break; diff --git a/src/apps/common/ppm_writer.cpp b/src/apps/common/ppm_writer.cpp index d6c8641d..368de8bf 100644 --- a/src/apps/common/ppm_writer.cpp +++ b/src/apps/common/ppm_writer.cpp @@ -7,6 +7,7 @@ #include "ppm_writer.h" +#include <errno.h> #include <fstream> #include <iostream> @@ -28,7 +29,7 @@ int PPMWriter::write(const char *filename, std::ofstream output(filename, std::ios::binary); if (!output) { std::cerr << "Failed to open ppm file: " << filename << std::endl; - return -EINVAL; + return -EIO; } output << "P6" << std::endl @@ -36,7 +37,7 @@ int PPMWriter::write(const char *filename, << "255" << std::endl; if (!output) { std::cerr << "Failed to write the file header" << std::endl; - return -EINVAL; + return -EIO; } const unsigned int rowLength = config.size.width * 3; @@ -45,7 +46,7 @@ int PPMWriter::write(const char *filename, output.write(row, rowLength); if (!output) { std::cerr << "Failed to write image data at row " << y << std::endl; - return -EINVAL; + return -EIO; } } diff --git a/src/apps/qcam/assets/shader/bayer_8.vert b/src/apps/qcam/assets/shader/bayer_8.vert index 3695a5e9..fb5109ee 100644 --- a/src/apps/qcam/assets/shader/bayer_8.vert +++ b/src/apps/qcam/assets/shader/bayer_8.vert @@ -19,6 +19,8 @@ Copyright (C) 2021, Linaro attribute vec4 vertexIn; attribute vec2 textureIn; +uniform mat4 proj_matrix; + uniform vec2 tex_size; /* The texture size in pixels */ uniform vec2 tex_step; @@ -47,5 +49,5 @@ void main(void) { yCoord = center.y + vec4(-2.0 * tex_step.y, -tex_step.y, tex_step.y, 2.0 * tex_step.y); - gl_Position = vertexIn; + gl_Position = proj_matrix * vertexIn; } diff --git a/src/apps/qcam/assets/shader/identity.vert b/src/apps/qcam/assets/shader/identity.vert index 12c41377..907e8741 100644 --- a/src/apps/qcam/assets/shader/identity.vert +++ 
b/src/apps/qcam/assets/shader/identity.vert @@ -9,10 +9,11 @@ attribute vec4 vertexIn; attribute vec2 textureIn; varying vec2 textureOut; +uniform mat4 proj_matrix; uniform float stride_factor; void main(void) { - gl_Position = vertexIn; + gl_Position = proj_matrix * vertexIn; textureOut = vec2(textureIn.x * stride_factor, textureIn.y); } diff --git a/src/apps/qcam/cam_select_dialog.cpp b/src/apps/qcam/cam_select_dialog.cpp index c51f5974..6b6d0713 100644 --- a/src/apps/qcam/cam_select_dialog.cpp +++ b/src/apps/qcam/cam_select_dialog.cpp @@ -15,7 +15,9 @@ #include <QComboBox> #include <QDialogButtonBox> #include <QFormLayout> +#include <QGuiApplication> #include <QLabel> +#include <QScreen> #include <QString> CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManager, @@ -53,6 +55,14 @@ CameraSelectorDialog::CameraSelectorDialog(libcamera::CameraManager *cameraManag layout->addRow("Location:", cameraLocation_); layout->addRow("Model:", cameraModel_); layout->addWidget(buttonBox); + + /* + * Decrease the minimum width of dialog to fit on narrow screens, with a + * 20 pixels margin. + */ + QRect screenGeometry = qGuiApp->primaryScreen()->availableGeometry(); + if (screenGeometry.width() < minimumWidth()) + setMinimumWidth(screenGeometry.width() - 20); } CameraSelectorDialog::~CameraSelectorDialog() = default; diff --git a/src/apps/qcam/main.cpp b/src/apps/qcam/main.cpp index 9846fba5..d0bde141 100644 --- a/src/apps/qcam/main.cpp +++ b/src/apps/qcam/main.cpp @@ -21,6 +21,8 @@ using namespace libcamera; +namespace { + void signalHandler([[maybe_unused]] int signal) { qInfo() << "Exiting"; @@ -52,6 +54,8 @@ OptionsParser::Options parseOptions(int argc, char *argv[]) return options; } +} /* namespace */ + int main(int argc, char **argv) { QApplication app(argc, argv); diff --git a/src/apps/qcam/main_window.cpp b/src/apps/qcam/main_window.cpp index d515beed..3880a846 100644 --- a/src/apps/qcam/main_window.cpp +++ b/src/apps/qcam/main_window.cpp @@ -37,16 +37,6 @@ using namespace libcamera; -#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0) -/* - * Qt::fixed was introduced in v5.14, and ::fixed deprecated in v5.15. Allow - * usage of Qt::fixed unconditionally. - */ -namespace Qt { -constexpr auto fixed = ::fixed; -} /* namespace Qt */ -#endif - /** * \brief Custom QEvent to signal capture completion */ @@ -221,7 +211,7 @@ int MainWindow::createToolbars() action->setShortcut(QKeySequence::SaveAs); connect(action, &QAction::triggered, this, &MainWindow::saveImageAs); -#ifdef HAVE_DNG +#ifdef HAVE_TIFF /* Save Raw action. */ action = toolbar_->addAction(QIcon::fromTheme("camera-photo", QIcon(":aperture.svg")), @@ -261,16 +251,14 @@ void MainWindow::updateTitle() void MainWindow::switchCamera() { /* Get and acquire the new camera. 
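	 * A null camera means the selection dialog was cancelled; selecting
	 * the camera that is already in use is treated as a no-op.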
*/ - std::string newCameraId = chooseCamera(); + std::shared_ptr<Camera> cam = chooseCamera(); - if (newCameraId.empty()) + if (!cam) return; - if (camera_ && newCameraId == camera_->id()) + if (camera_ && cam == camera_) return; - const std::shared_ptr<Camera> &cam = cm_->get(newCameraId); - if (cam->acquire()) { qInfo() << "Failed to acquire camera" << cam->id().c_str(); return; @@ -292,40 +280,41 @@ void MainWindow::switchCamera() startStopAction_->setChecked(true); /* Display the current cameraId in the toolbar .*/ - cameraSelectButton_->setText(QString::fromStdString(newCameraId)); + cameraSelectButton_->setText(QString::fromStdString(cam->id())); } -std::string MainWindow::chooseCamera() +std::shared_ptr<Camera> MainWindow::chooseCamera() { if (cameraSelectorDialog_->exec() != QDialog::Accepted) - return std::string(); + return {}; - return cameraSelectorDialog_->getCameraId(); + std::string id = cameraSelectorDialog_->getCameraId(); + return cm_->get(id); } int MainWindow::openCamera() { - std::string cameraName; - /* - * Use the camera specified on the command line, if any, or display the - * camera selection dialog box otherwise. + * If a camera is specified on the command line, get it. Otherwise, if + * only one camera is available, pick it automatically, else, display + * the selector dialog box. */ - if (options_.isSet(OptCamera)) - cameraName = static_cast<std::string>(options_[OptCamera]); - else - cameraName = chooseCamera(); - - if (cameraName == "") - return -EINVAL; + if (options_.isSet(OptCamera)) { + std::string cameraName = static_cast<std::string>(options_[OptCamera]); + camera_ = cm_->get(cameraName); + if (!camera_) + qInfo() << "Camera" << cameraName.c_str() << "not found"; + } else { + std::vector<std::shared_ptr<Camera>> cameras = cm_->cameras(); + camera_ = (cameras.size() == 1) ? cameras[0] : chooseCamera(); + if (!camera_) + qInfo() << "No camera detected"; + } - /* Get and acquire the camera. */ - camera_ = cm_->get(cameraName); - if (!camera_) { - qInfo() << "Camera" << cameraName.c_str() << "not found"; + if (!camera_) return -ENODEV; - } + /* Acquire the camera. */ if (camera_->acquire()) { qInfo() << "Failed to acquire camera"; camera_.reset(); @@ -333,7 +322,7 @@ int MainWindow::openCamera() } /* Set the camera switch button with the currently selected Camera id. 
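	 * This also covers the case where the camera was picked automatically
	 * because only one was available.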
*/ - cameraSelectButton_->setText(QString::fromStdString(cameraName)); + cameraSelectButton_->setText(QString::fromStdString(camera_->id())); return 0; } @@ -656,7 +645,7 @@ void MainWindow::captureRaw() void MainWindow::processRaw(FrameBuffer *buffer, [[maybe_unused]] const ControlList &metadata) { -#ifdef HAVE_DNG +#ifdef HAVE_TIFF QString defaultPath = QStandardPaths::writableLocation(QStandardPaths::PicturesLocation); QString filename = QFileDialog::getSaveFileName(this, "Save DNG", defaultPath, "DNG Files (*.dng)"); diff --git a/src/apps/qcam/main_window.h b/src/apps/qcam/main_window.h index 4cead734..81fcf915 100644 --- a/src/apps/qcam/main_window.h +++ b/src/apps/qcam/main_window.h @@ -73,7 +73,7 @@ private Q_SLOTS: private: int createToolbars(); - std::string chooseCamera(); + std::shared_ptr<libcamera::Camera> chooseCamera(); int openCamera(); int startCapture(); diff --git a/src/apps/qcam/meson.build b/src/apps/qcam/meson.build index 6cf4c171..f7c14064 100644 --- a/src/apps/qcam/meson.build +++ b/src/apps/qcam/meson.build @@ -1,13 +1,13 @@ # SPDX-License-Identifier: CC0-1.0 -qt5 = import('qt5') -qt5_dep = dependency('qt5', +qt6 = import('qt6') +qt6_dep = dependency('qt6', method : 'pkg-config', - modules : ['Core', 'Gui', 'Widgets'], + modules : ['Core', 'Gui', 'OpenGL', 'OpenGLWidgets', 'Widgets'], required : get_option('qcam'), - version : '>=5.4') + version : '>=6.2') -if not qt5_dep.found() +if not qt6_dep.found() qcam_enabled = false subdir_done() endif @@ -20,46 +20,31 @@ qcam_sources = files([ 'main.cpp', 'main_window.cpp', 'message_handler.cpp', + 'viewfinder_gl.cpp', 'viewfinder_qt.cpp', ]) qcam_moc_headers = files([ 'cam_select_dialog.h', 'main_window.h', + 'viewfinder_gl.h', 'viewfinder_qt.h', ]) qcam_resources = files([ 'assets/feathericons/feathericons.qrc', + 'assets/shader/shaders.qrc', ]) -qt5_cpp_args = [apps_cpp_args, '-DQT_NO_KEYWORDS'] +qt6_cpp_args = [ + apps_cpp_args, + '-DQT_NO_KEYWORDS', + '-Wno-extra-semi', +] -if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget', - dependencies : qt5_dep, args : '-fPIC') - qcam_sources += files([ - 'viewfinder_gl.cpp', - ]) - qcam_moc_headers += files([ - 'viewfinder_gl.h', - ]) - qcam_resources += files([ - 'assets/shader/shaders.qrc' - ]) -endif - -# gcc 9 introduced a deprecated-copy warning that is triggered by Qt until -# Qt 5.13. clang 10 introduced the same warning, but detects more issues -# that are not fixed in Qt yet. Disable the warning manually in both cases. 
-if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and - qt5_dep.version().version_compare('<5.13')) or - (cc.get_id() == 'clang' and cc.version().version_compare('>=10.0'))) - qt5_cpp_args += ['-Wno-deprecated-copy'] -endif - -resources = qt5.preprocess(moc_headers : qcam_moc_headers, +resources = qt6.preprocess(moc_headers : qcam_moc_headers, qresources : qcam_resources, - dependencies : qt5_dep) + dependencies : qt6_dep) qcam = executable('qcam', qcam_sources, resources, install : true, @@ -69,6 +54,6 @@ qcam = executable('qcam', qcam_sources, resources, libatomic, libcamera_public, libtiff, - qt5_dep, + qt6_dep, ], - cpp_args : qt5_cpp_args) + cpp_args : qt6_cpp_args) diff --git a/src/apps/qcam/viewfinder_gl.cpp b/src/apps/qcam/viewfinder_gl.cpp index 9d2a6960..f31956ff 100644 --- a/src/apps/qcam/viewfinder_gl.cpp +++ b/src/apps/qcam/viewfinder_gl.cpp @@ -12,6 +12,7 @@ #include <QByteArray> #include <QFile> #include <QImage> +#include <QMatrix4x4> #include <QStringList> #include <libcamera/formats.h> @@ -443,15 +444,11 @@ bool ViewFinderGL::createFragmentShader() close(); } - /* Bind shader pipeline for use */ - if (!shaderProgram_.bind()) { - qWarning() << "[ViewFinderGL]:" << shaderProgram_.log(); - close(); - } - attributeVertex = shaderProgram_.attributeLocation("vertexIn"); attributeTexture = shaderProgram_.attributeLocation("textureIn"); + vertexBuffer_.bind(); + shaderProgram_.enableAttributeArray(attributeVertex); shaderProgram_.setAttributeBuffer(attributeVertex, GL_FLOAT, @@ -466,6 +463,9 @@ bool ViewFinderGL::createFragmentShader() 2, 2 * sizeof(GLfloat)); + vertexBuffer_.release(); + + projMatrixUniform_ = shaderProgram_.uniformLocation("proj_matrix"); textureUniformY_ = shaderProgram_.uniformLocation("tex_y"); textureUniformU_ = shaderProgram_.uniformLocation("tex_u"); textureUniformV_ = shaderProgram_.uniformLocation("tex_v"); @@ -511,7 +511,7 @@ void ViewFinderGL::initializeGL() glEnable(GL_TEXTURE_2D); glDisable(GL_DEPTH_TEST); - static const GLfloat coordinates[2][4][2]{ + const GLfloat coordinates[2][4][2]{ { /* Vertex coordinates */ { -1.0f, -1.0f }, @@ -535,8 +535,6 @@ void ViewFinderGL::initializeGL() /* Create Vertex Shader */ if (!createVertexShader()) qWarning() << "[ViewFinderGL]: create vertex shader failed."; - - glClearColor(1.0f, 1.0f, 1.0f, 0.0f); } void ViewFinderGL::doRender() @@ -805,28 +803,42 @@ void ViewFinderGL::doRender() shaderProgram_.setUniformValue(textureUniformStrideFactor_, static_cast<float>(size_.width() - 1) / (stridePixels - 1)); + + /* + * Place the viewfinder in the centre of the widget, preserving the + * aspect ratio of the image. + */ + QMatrix4x4 projMatrix; + QSizeF scaledSize = size_.scaled(size(), Qt::KeepAspectRatio); + projMatrix.scale(scaledSize.width() / size().width(), + scaledSize.height() / size().height()); + + shaderProgram_.setUniformValue(projMatrixUniform_, projMatrix); } void ViewFinderGL::paintGL() { - if (!fragmentShader_) + if (!fragmentShader_) { if (!createFragmentShader()) { qWarning() << "[ViewFinderGL]:" << "create fragment shader failed."; } + } - if (image_) { - glClearColor(0.0, 0.0, 0.0, 1.0); - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); - - doRender(); - glDrawArrays(GL_TRIANGLE_FAN, 0, 4); + /* Bind shader pipeline for use. 
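+	 * The program is (re)bound on every paint; if binding fails there is
+	 * no usable shader pipeline, so the widget is closed.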
*/ + if (!shaderProgram_.bind()) { + qWarning() << "[ViewFinderGL]:" << shaderProgram_.log(); + close(); } -} -void ViewFinderGL::resizeGL(int w, int h) -{ - glViewport(0, 0, w, h); + if (!image_) + return; + + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + doRender(); + glDrawArrays(GL_TRIANGLE_FAN, 0, 4); } QSize ViewFinderGL::sizeHint() const diff --git a/src/apps/qcam/viewfinder_gl.h b/src/apps/qcam/viewfinder_gl.h index 23744b41..23c657bc 100644 --- a/src/apps/qcam/viewfinder_gl.h +++ b/src/apps/qcam/viewfinder_gl.h @@ -51,7 +51,6 @@ Q_SIGNALS: protected: void initializeGL() override; void paintGL() override; - void resizeGL(int w, int h) override; QSize sizeHint() const override; private: @@ -88,6 +87,7 @@ private: /* Common texture parameters */ GLuint textureMinMagFilters_; + GLuint projMatrixUniform_; /* YUV texture parameters */ GLuint textureUniformU_; diff --git a/src/apps/qcam/viewfinder_qt.cpp b/src/apps/qcam/viewfinder_qt.cpp index 4821c27d..1a238922 100644 --- a/src/apps/qcam/viewfinder_qt.cpp +++ b/src/apps/qcam/viewfinder_qt.cpp @@ -18,6 +18,7 @@ #include <QMap> #include <QMutexLocker> #include <QPainter> +#include <QResizeEvent> #include <QtDebug> #include "../common/image.h" @@ -26,23 +27,23 @@ static const QMap<libcamera::PixelFormat, QImage::Format> nativeFormats { -#if QT_VERSION >= QT_VERSION_CHECK(5, 2, 0) { libcamera::formats::ABGR8888, QImage::Format_RGBX8888 }, { libcamera::formats::XBGR8888, QImage::Format_RGBX8888 }, -#endif { libcamera::formats::ARGB8888, QImage::Format_RGB32 }, { libcamera::formats::XRGB8888, QImage::Format_RGB32 }, -#if QT_VERSION >= QT_VERSION_CHECK(5, 14, 0) { libcamera::formats::RGB888, QImage::Format_BGR888 }, -#endif { libcamera::formats::BGR888, QImage::Format_RGB888 }, { libcamera::formats::RGB565, QImage::Format_RGB16 }, }; ViewFinderQt::ViewFinderQt(QWidget *parent) - : QWidget(parent), buffer_(nullptr) + : QWidget(parent), place_(rect()), buffer_(nullptr) { icon_ = QIcon(":camera-off.svg"); + + QPalette pal = palette(); + pal.setColor(QPalette::Window, Qt::black); + setPalette(pal); } ViewFinderQt::~ViewFinderQt() @@ -117,6 +118,11 @@ void ViewFinderQt::render(libcamera::FrameBuffer *buffer, Image *image) } } + /* + * Indicate the widget paints all its pixels, to optimize rendering by + * skipping erasing the widget before painting. + */ + setAttribute(Qt::WA_OpaquePaintEvent, true); update(); if (buffer) @@ -132,6 +138,11 @@ void ViewFinderQt::stop() buffer_ = nullptr; } + /* + * The logo has a transparent background, reenable erasing the widget + * before painting. + */ + setAttribute(Qt::WA_OpaquePaintEvent, false); update(); } @@ -146,9 +157,23 @@ void ViewFinderQt::paintEvent(QPaintEvent *) { QPainter painter(this); - /* If we have an image, draw it. */ + painter.setBrush(palette().window()); + + /* If we have an image, draw it, with black letterbox rectangles. 
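+	 * The image is drawn centred in place_, and the spare space on the
+	 * sides or the top and bottom is filled with the window brush.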
*/ if (!image_.isNull()) { - painter.drawImage(rect(), image_, image_.rect()); + if (place_.width() < width()) { + QRect rect{ 0, 0, (width() - place_.width()) / 2, height() }; + painter.drawRect(rect); + rect.moveLeft(place_.right()); + painter.drawRect(rect); + } else { + QRect rect{ 0, 0, width(), (height() - place_.height()) / 2 }; + painter.drawRect(rect); + rect.moveTop(place_.bottom()); + painter.drawRect(rect); + } + + painter.drawImage(place_, image_, image_.rect()); return; } @@ -173,7 +198,6 @@ void ViewFinderQt::paintEvent(QPaintEvent *) else point.setY((height() - pixmap_.height()) / 2); - painter.setBackgroundMode(Qt::OpaqueMode); painter.drawPixmap(point, pixmap_); } @@ -181,3 +205,14 @@ QSize ViewFinderQt::sizeHint() const { return size_.isValid() ? size_ : QSize(640, 480); } + +void ViewFinderQt::resizeEvent(QResizeEvent *event) +{ + if (!size_.isValid()) + return; + + place_.setSize(size_.scaled(event->size(), Qt::KeepAspectRatio)); + place_.moveCenter(rect().center()); + + QWidget::resizeEvent(event); +} diff --git a/src/apps/qcam/viewfinder_qt.h b/src/apps/qcam/viewfinder_qt.h index 4f4b9f11..50fde88e 100644 --- a/src/apps/qcam/viewfinder_qt.h +++ b/src/apps/qcam/viewfinder_qt.h @@ -44,6 +44,7 @@ Q_SIGNALS: protected: void paintEvent(QPaintEvent *) override; + void resizeEvent(QResizeEvent *) override; QSize sizeHint() const override; private: @@ -51,6 +52,7 @@ private: libcamera::PixelFormat format_; QSize size_; + QRect place_; /* Camera stopped icon */ QSize vfSize_; diff --git a/src/gstreamer/gstlibcamera-controls.cpp.in b/src/gstreamer/gstlibcamera-controls.cpp.in new file mode 100644 index 00000000..d937b19e --- /dev/null +++ b/src/gstreamer/gstlibcamera-controls.cpp.in @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Jaslo Ziska + * + * GStreamer Camera Controls + * + * This file is auto-generated. Do not edit. 
+ */ + +#include <vector> + +#include <libcamera/control_ids.h> +#include <libcamera/controls.h> +#include <libcamera/geometry.h> + +#include "gstlibcamera-controls.h" + +using namespace libcamera; + +static void value_set_rectangle(GValue *value, const Rectangle &rect) +{ + Point top_left = rect.topLeft(); + Size size = rect.size(); + + GValue x = G_VALUE_INIT; + g_value_init(&x, G_TYPE_INT); + g_value_set_int(&x, top_left.x); + gst_value_array_append_and_take_value(value, &x); + + GValue y = G_VALUE_INIT; + g_value_init(&y, G_TYPE_INT); + g_value_set_int(&y, top_left.y); + gst_value_array_append_and_take_value(value, &y); + + GValue width = G_VALUE_INIT; + g_value_init(&width, G_TYPE_INT); + g_value_set_int(&width, size.width); + gst_value_array_append_and_take_value(value, &width); + + GValue height = G_VALUE_INIT; + g_value_init(&height, G_TYPE_INT); + g_value_set_int(&height, size.height); + gst_value_array_append_and_take_value(value, &height); +} + +static Rectangle value_get_rectangle(const GValue *value) +{ + const GValue *r; + r = gst_value_array_get_value(value, 0); + int x = g_value_get_int(r); + r = gst_value_array_get_value(value, 1); + int y = g_value_get_int(r); + r = gst_value_array_get_value(value, 2); + int w = g_value_get_int(r); + r = gst_value_array_get_value(value, 3); + int h = g_value_get_int(r); + + return Rectangle(x, y, w, h); +} + +{% for vendor, ctrls in controls %} +{%- for ctrl in ctrls if ctrl.is_enum %} +static const GEnumValue {{ ctrl.name|snake_case }}_types[] = { +{%- for enum in ctrl.enum_values %} + { + controls::{{ ctrl.namespace }}{{ enum.name }}, + {{ enum.description|format_description|indent_str('\t\t') }}, + "{{ enum.gst_name }}" + }, +{%- endfor %} + {0, NULL, NULL} +}; + +#define TYPE_{{ ctrl.name|snake_case|upper }} \ + ({{ ctrl.name|snake_case }}_get_type()) +static GType {{ ctrl.name|snake_case }}_get_type() +{ + static GType {{ ctrl.name|snake_case }}_type = 0; + + if (!{{ ctrl.name|snake_case }}_type) + {{ ctrl.name|snake_case }}_type = + g_enum_register_static("{{ ctrl.name }}", + {{ ctrl.name|snake_case }}_types); + + return {{ ctrl.name|snake_case }}_type; +} +{% endfor %} +{%- endfor %} + +void GstCameraControls::installProperties(GObjectClass *klass, int lastPropId) +{ +{%- for vendor, ctrls in controls %} +{%- for ctrl in ctrls %} + +{%- set spec %} +{%- if ctrl.is_rectangle -%} +gst_param_spec_array( +{%- else -%} +g_param_spec_{{ ctrl.gtype }}( +{%- endif -%} +{%- if ctrl.is_array %} + "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}-value", + "{{ ctrl.name }} Value", + "One {{ ctrl.name }} element value", +{%- else %} + "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}", + "{{ ctrl.name }}", + {{ ctrl.description|format_description|indent_str('\t') }}, +{%- endif %} +{%- if ctrl.is_enum %} + TYPE_{{ ctrl.name|snake_case|upper }}, + {{ ctrl.default }}, +{%- elif ctrl.is_rectangle %} + g_param_spec_int( + "rectangle-value", + "Rectangle Value", + "One rectangle value, either x, y, width or height.", + {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }}, + (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS) + ), +{%- elif ctrl.gtype == 'boolean' %} + {{ ctrl.default }}, +{%- elif ctrl.gtype in ['float', 'int', 'int64', 'uchar'] %} + {{ ctrl.min }}, {{ ctrl.max }}, {{ ctrl.default }}, +{%- endif %} + (GParamFlags) (GST_PARAM_CONTROLLABLE | G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS) +) +{%- endset %} + + g_object_class_install_property( + klass, + lastPropId + controls::{{ ctrl.namespace }}{{ 
ctrl.name|snake_case|upper }}, +{%- if ctrl.is_array %} + gst_param_spec_array( + "{{ ctrl.vendor_prefix }}{{ ctrl.name|kebab_case }}", + "{{ ctrl.name }}", + {{ ctrl.description|format_description|indent_str('\t\t\t') }}, + {{ spec|indent_str('\t\t\t') }}, + (GParamFlags) (GST_PARAM_CONTROLLABLE | + G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS) + ) +{%- else %} + {{ spec|indent_str('\t\t') }} +{%- endif %} + ); +{%- endfor %} +{%- endfor %} +} + +bool GstCameraControls::getProperty(guint propId, GValue *value, + [[maybe_unused]] GParamSpec *pspec) +{ + if (!controls_acc_.contains(propId)) { + GST_WARNING("Control '%s' is not available, default value will " + "be returned", + controls::controls.at(propId)->name().c_str()); + return true; + } + const ControlValue &cv = controls_acc_.get(propId); + + switch (propId) { +{%- for vendor, ctrls in controls %} +{%- for ctrl in ctrls %} + + case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: { + auto control = cv.get<{{ ctrl.type }}>(); + +{%- if ctrl.is_array %} + for (size_t i = 0; i < control.size(); ++i) { + GValue element = G_VALUE_INIT; +{%- if ctrl.is_rectangle %} + g_value_init(&element, GST_TYPE_PARAM_ARRAY_LIST); + value_set_rectangle(&element, control[i]); +{%- else %} + g_value_init(&element, G_TYPE_{{ ctrl.gtype|upper }}); + g_value_set_{{ ctrl.gtype }}(&element, control[i]); +{%- endif %} + gst_value_array_append_and_take_value(value, &element); + } +{%- else %} +{%- if ctrl.is_rectangle %} + value_set_rectangle(value, control); +{%- else %} + g_value_set_{{ ctrl.gtype }}(value, control); +{%- endif %} +{%- endif %} + + return true; + } +{%- endfor %} +{%- endfor %} + + default: + return false; + } +} + +bool GstCameraControls::setProperty(guint propId, const GValue *value, + [[maybe_unused]] GParamSpec *pspec) +{ + /* + * Check whether the camera capabilities are already available. + * They might not be available if the pipeline has not started yet. + */ + if (!capabilities_.empty()) { + /* If so, check that the control is supported by the camera. 
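+	 * Unsupported controls are ignored with a warning rather than
+	 * failing the property write.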
*/ + const ControlId *cid = capabilities_.idmap().at(propId); + auto info = capabilities_.find(cid); + + if (info == capabilities_.end()) { + GST_WARNING("Control '%s' is not supported by the " + "camera and will be ignored", + cid->name().c_str()); + return true; + } + } + + switch (propId) { +{%- for vendor, ctrls in controls %} +{%- for ctrl in ctrls %} + + case controls::{{ ctrl.namespace }}{{ ctrl.name|snake_case|upper }}: { + ControlValue control; +{%- if ctrl.is_array %} + size_t size = gst_value_array_get_size(value); +{%- if ctrl.size != 0 %} + if (size != {{ ctrl.size }}) { + GST_ERROR("Incorrect array size for control " + "'{{ ctrl.name|kebab_case }}', must be of " + "size {{ ctrl.size }}"); + return true; + } +{%- endif %} + + std::vector<{{ ctrl.element_type }}> values(size); + for (size_t i = 0; i < size; ++i) { + const GValue *element = + gst_value_array_get_value(value, i); +{%- if ctrl.is_rectangle %} + if (gst_value_array_get_size(element) != 4) { + GST_ERROR("Rectangle in control " + "'{{ ctrl.name|kebab_case }}' at" + "index %zu must be an array of size 4", + i); + return true; + } + values[i] = value_get_rectangle(element); +{%- else %} + values[i] = g_value_get_{{ ctrl.gtype }}(element); +{%- endif %} + } + +{%- if ctrl.size == 0 %} + control.set(Span<const {{ ctrl.element_type }}>(values.data(), + size)); +{%- else %} + control.set(Span<const {{ ctrl.element_type }}, + {{ ctrl.size }}>(values.data(), + {{ ctrl.size }})); +{%- endif %} +{%- else %} +{%- if ctrl.is_rectangle %} + if (gst_value_array_get_size(value) != 4) { + GST_ERROR("Rectangle in control " + "'{{ ctrl.name|kebab_case }}' must be an " + "array of size 4"); + return true; + } + Rectangle val = value_get_rectangle(value); +{%- else %} + auto val = g_value_get_{{ ctrl.gtype }}(value); +{%- endif %} + control.set(val); +{%- endif %} + controls_.set(propId, control); + controls_acc_.set(propId, control); + return true; + } +{%- endfor %} +{%- endfor %} + + default: + return false; + } +} + +void GstCameraControls::setCamera(const std::shared_ptr<libcamera::Camera> &cam) +{ + capabilities_ = cam->controls(); + + /* + * Check the controls which were set before the camera capabilities were + * known. This is required because GStreamer may set properties before + * the pipeline has started and thus before the camera was known. + */ + ControlList new_controls; + for (auto control = controls_acc_.begin(); + control != controls_acc_.end(); + ++control) { + unsigned int id = control->first; + ControlValue value = control->second; + + const ControlId *cid = capabilities_.idmap().at(id); + auto info = capabilities_.find(cid); + + /* Only add controls which are supported. 
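+	 * Controls set before the camera was known are replayed here, and
+	 * any the camera cannot handle are dropped with a warning.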
*/ + if (info != capabilities_.end()) + new_controls.set(id, value); + else + GST_WARNING("Control '%s' is not supported by the " + "camera and will be ignored", + cid->name().c_str()); + } + + controls_acc_ = new_controls; + controls_ = new_controls; +} + +void GstCameraControls::applyControls(std::unique_ptr<libcamera::Request> &request) +{ + request->controls().merge(controls_); + controls_.clear(); +} + +void GstCameraControls::readMetadata(libcamera::Request *request) +{ + controls_acc_.merge(request->metadata(), + ControlList::MergePolicy::OverwriteExisting); +} diff --git a/src/gstreamer/gstlibcamera-controls.h b/src/gstreamer/gstlibcamera-controls.h new file mode 100644 index 00000000..749220b5 --- /dev/null +++ b/src/gstreamer/gstlibcamera-controls.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Collabora Ltd. + * Author: Nicolas Dufresne <nicolas.dufresne@collabora.com> + * + * GStreamer Camera Controls + */ + +#pragma once + +#include <memory> + +#include <libcamera/camera.h> +#include <libcamera/controls.h> +#include <libcamera/request.h> + +#include "gstlibcamerasrc.h" + +namespace libcamera { + +class GstCameraControls +{ +public: + static void installProperties(GObjectClass *klass, int lastProp); + + bool getProperty(guint propId, GValue *value, GParamSpec *pspec); + bool setProperty(guint propId, const GValue *value, GParamSpec *pspec); + + void setCamera(const std::shared_ptr<libcamera::Camera> &cam); + + void applyControls(std::unique_ptr<libcamera::Request> &request); + void readMetadata(libcamera::Request *request); + +private: + /* Supported controls and limits of camera. */ + ControlInfoMap capabilities_; + /* Set of user modified controls. */ + ControlList controls_; + /* Accumulator of all controls ever set and metadata returned by camera */ + ControlList controls_acc_; +}; + +} /* namespace libcamera */ diff --git a/src/gstreamer/gstlibcamera-utils.cpp b/src/gstreamer/gstlibcamera-utils.cpp index ec4da435..a466b305 100644 --- a/src/gstreamer/gstlibcamera-utils.cpp +++ b/src/gstreamer/gstlibcamera-utils.cpp @@ -85,7 +85,7 @@ static struct { }; static GstVideoColorimetry -colorimetry_from_colorspace(const ColorSpace &colorSpace) +colorimetry_from_colorspace(const ColorSpace &colorSpace, GstVideoTransferFunction transfer) { GstVideoColorimetry colorimetry; @@ -113,6 +113,8 @@ colorimetry_from_colorspace(const ColorSpace &colorSpace) break; case ColorSpace::TransferFunction::Rec709: colorimetry.transfer = GST_VIDEO_TRANSFER_BT709; + if (transfer != GST_VIDEO_TRANSFER_UNKNOWN) + colorimetry.transfer = transfer; break; } @@ -144,7 +146,8 @@ colorimetry_from_colorspace(const ColorSpace &colorSpace) } static std::optional<ColorSpace> -colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry) +colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry, + GstVideoTransferFunction *transfer) { std::optional<ColorSpace> colorspace = ColorSpace::Raw; @@ -188,6 +191,7 @@ colorspace_from_colorimetry(const GstVideoColorimetry &colorimetry) case GST_VIDEO_TRANSFER_BT2020_12: case GST_VIDEO_TRANSFER_BT709: colorspace->transferFunction = ColorSpace::TransferFunction::Rec709; + *transfer = colorimetry.transfer; break; default: GST_WARNING("Colorimetry transfer function %d not mapped in gstlibcamera", @@ -254,52 +258,50 @@ gst_format_to_pixel_format(GstVideoFormat gst_format) return PixelFormat{}; } +static const struct { + PixelFormat format; + const gchar *name; +} bayer_map[]{ + { formats::SBGGR8, "bggr" }, + { 
formats::SGBRG8, "gbrg" }, + { formats::SGRBG8, "grbg" }, + { formats::SRGGB8, "rggb" }, + { formats::SBGGR10, "bggr10le" }, + { formats::SGBRG10, "gbrg10le" }, + { formats::SGRBG10, "grbg10le" }, + { formats::SRGGB10, "rggb10le" }, + { formats::SBGGR12, "bggr12le" }, + { formats::SGBRG12, "gbrg12le" }, + { formats::SGRBG12, "grbg12le" }, + { formats::SRGGB12, "rggb12le" }, + { formats::SBGGR14, "bggr14le" }, + { formats::SGBRG14, "gbrg14le" }, + { formats::SGRBG14, "grbg14le" }, + { formats::SRGGB14, "rggb14le" }, + { formats::SBGGR16, "bggr16le" }, + { formats::SGBRG16, "gbrg16le" }, + { formats::SGRBG16, "grbg16le" }, + { formats::SRGGB16, "rggb16le" }, +}; + static const gchar * -bayer_format_to_string(int format) +bayer_format_to_string(PixelFormat format) { - switch (format) { - case formats::SBGGR8: - return "bggr"; - case formats::SGBRG8: - return "gbrg"; - case formats::SGRBG8: - return "grbg"; - case formats::SRGGB8: - return "rggb"; - case formats::SBGGR10: - return "bggr10le"; - case formats::SGBRG10: - return "gbrg10le"; - case formats::SGRBG10: - return "grbg10le"; - case formats::SRGGB10: - return "rggb10le"; - case formats::SBGGR12: - return "bggr12le"; - case formats::SGBRG12: - return "gbrg12le"; - case formats::SGRBG12: - return "grbg12le"; - case formats::SRGGB12: - return "rggb12le"; - case formats::SBGGR14: - return "bggr14le"; - case formats::SGBRG14: - return "gbrg14le"; - case formats::SGRBG14: - return "grbg14le"; - case formats::SRGGB14: - return "rggb14le"; - case formats::SBGGR16: - return "bggr16le"; - case formats::SGBRG16: - return "gbrg16le"; - case formats::SGRBG16: - return "grbg16le"; - case formats::SRGGB16: - return "rggb16le"; + for (auto &b : bayer_map) { + if (b.format == format) + return b.name; + } + return nullptr; +} + +static PixelFormat +bayer_format_from_string(const gchar *name) +{ + for (auto &b : bayer_map) { + if (strcmp(b.name, name) == 0) + return b.format; } - return NULL; + return PixelFormat{}; } static GstStructure * @@ -359,13 +361,21 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats) GValue val = G_VALUE_INIT; g_value_init(&val, GST_TYPE_INT_RANGE); - gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep); - gst_structure_set_value(s, "width", &val); - gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep); - gst_structure_set_value(s, "height", &val); + if (range.min.width == range.max.width) { + gst_structure_set(s, "width", G_TYPE_INT, range.min.width, nullptr); + } else { + gst_value_set_int_range_step(&val, range.min.width, range.max.width, range.hStep); + gst_structure_set_value(s, "width", &val); + } + if (range.min.height == range.max.height) { + gst_structure_set(s, "height", G_TYPE_INT, range.min.height, nullptr); + } else { + gst_value_set_int_range_step(&val, range.min.height, range.max.height, range.vStep); + gst_structure_set_value(s, "height", &val); + } g_value_unset(&val); - gst_caps_append_structure(caps, s); + caps = gst_caps_merge_structure(caps, s); } } @@ -373,7 +383,8 @@ gst_libcamera_stream_formats_to_caps(const StreamFormats &formats) } GstCaps * -gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg) +gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg, + GstVideoTransferFunction transfer) { GstCaps *caps = gst_caps_new_empty(); GstStructure *s = bare_structure_from_format(stream_cfg.pixelFormat); @@ -384,7 +395,7 @@ gst_libcamera_stream_configuration_to_caps(const 
StreamConfiguration &stream_cfg nullptr); if (stream_cfg.colorSpace) { - GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value()); + GstVideoColorimetry colorimetry = colorimetry_from_colorspace(stream_cfg.colorSpace.value(), transfer); g_autofree gchar *colorimetry_str = gst_video_colorimetry_to_string(&colorimetry); if (colorimetry_str) @@ -399,9 +410,8 @@ gst_libcamera_stream_configuration_to_caps(const StreamConfiguration &stream_cfg return caps; } -void -gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg, - GstCaps *caps) +void gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg, + GstCaps *caps, GstVideoTransferFunction *transfer) { GstVideoFormat gst_format = pixel_format_to_gst_format(stream_cfg.pixelFormat); guint i; @@ -466,6 +476,9 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg, const gchar *format = gst_structure_get_string(s, "format"); gst_format = gst_video_format_from_string(format); stream_cfg.pixelFormat = gst_format_to_pixel_format(gst_format); + } else if (gst_structure_has_name(s, "video/x-bayer")) { + const gchar *format = gst_structure_get_string(s, "format"); + stream_cfg.pixelFormat = bayer_format_from_string(format); } else if (gst_structure_has_name(s, "image/jpeg")) { stream_cfg.pixelFormat = formats::MJPEG; } else { @@ -486,7 +499,7 @@ gst_libcamera_configure_stream_from_caps(StreamConfiguration &stream_cfg, if (!gst_video_colorimetry_from_string(&colorimetry, colorimetry_str)) g_critical("Invalid colorimetry %s", colorimetry_str); - stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry); + stream_cfg.colorSpace = colorspace_from_colorimetry(colorimetry, transfer); } } diff --git a/src/gstreamer/gstlibcamera-utils.h b/src/gstreamer/gstlibcamera-utils.h index cab1c814..4978987c 100644 --- a/src/gstreamer/gstlibcamera-utils.h +++ b/src/gstreamer/gstlibcamera-utils.h @@ -16,9 +16,10 @@ #include <gst/video/video.h> GstCaps *gst_libcamera_stream_formats_to_caps(const libcamera::StreamFormats &formats); -GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg); +GstCaps *gst_libcamera_stream_configuration_to_caps(const libcamera::StreamConfiguration &stream_cfg, + GstVideoTransferFunction transfer); void gst_libcamera_configure_stream_from_caps(libcamera::StreamConfiguration &stream_cfg, - GstCaps *caps); + GstCaps *caps, GstVideoTransferFunction *transfer); void gst_libcamera_get_framerate_from_caps(GstCaps *caps, GstStructure *element_caps); void gst_libcamera_clamp_and_set_frameduration(libcamera::ControlList &controls, const libcamera::ControlInfoMap &camera_controls, diff --git a/src/gstreamer/gstlibcameraallocator.cpp b/src/gstreamer/gstlibcameraallocator.cpp index 741ed592..d4492d99 100644 --- a/src/gstreamer/gstlibcameraallocator.cpp +++ b/src/gstreamer/gstlibcameraallocator.cpp @@ -8,6 +8,8 @@ #include "gstlibcameraallocator.h" +#include <utility> + #include <libcamera/camera.h> #include <libcamera/framebuffer_allocator.h> #include <libcamera/stream.h> @@ -100,6 +102,11 @@ struct _GstLibcameraAllocator { * FrameWrap. */ GHashTable *pools; + /* + * The camera manager represents the library, which needs to be kept + * alive until all the memory has been released. + */ + std::shared_ptr<CameraManager> *cm_ptr; }; G_DEFINE_TYPE(GstLibcameraAllocator, gst_libcamera_allocator, @@ -173,6 +180,9 @@ gst_libcamera_allocator_finalize(GObject *object) delete self->fb_allocator; + /* Keep last. 
*/ + delete self->cm_ptr; + G_OBJECT_CLASS(gst_libcamera_allocator_parent_class)->finalize(object); } @@ -191,16 +201,20 @@ GstLibcameraAllocator * gst_libcamera_allocator_new(std::shared_ptr<Camera> camera, CameraConfiguration *config_) { - auto *self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR, - nullptr)); + g_autoptr(GstLibcameraAllocator) self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR, + nullptr)); + gint ret; + + self->cm_ptr = new std::shared_ptr<CameraManager>(gst_libcamera_get_camera_manager(ret)); + if (ret) + return nullptr; self->fb_allocator = new FrameBufferAllocator(camera); for (StreamConfiguration &streamCfg : *config_) { Stream *stream = streamCfg.stream(); - gint ret; ret = self->fb_allocator->allocate(stream); - if (ret == 0) + if (ret <= 0) return nullptr; GQueue *pool = g_queue_new(); @@ -214,7 +228,7 @@ gst_libcamera_allocator_new(std::shared_ptr<Camera> camera, g_hash_table_insert(self->pools, stream, pool); } - return self; + return std::exchange(self, nullptr); } bool diff --git a/src/gstreamer/gstlibcamerapool.cpp b/src/gstreamer/gstlibcamerapool.cpp index 9661c67a..9cd7eccb 100644 --- a/src/gstreamer/gstlibcamerapool.cpp +++ b/src/gstreamer/gstlibcamerapool.cpp @@ -8,6 +8,8 @@ #include "gstlibcamerapool.h" +#include <deque> + #include <libcamera/stream.h> #include "gstlibcamera-utils.h" @@ -24,24 +26,41 @@ static guint signals[N_SIGNALS]; struct _GstLibcameraPool { GstBufferPool parent; - GstAtomicQueue *queue; + std::deque<GstBuffer *> *queue; GstLibcameraAllocator *allocator; Stream *stream; }; G_DEFINE_TYPE(GstLibcameraPool, gst_libcamera_pool, GST_TYPE_BUFFER_POOL) +static GstBuffer * +gst_libcamera_pool_pop_buffer(GstLibcameraPool *self) +{ + GLibLocker lock(GST_OBJECT(self)); + GstBuffer *buf; + + if (self->queue->empty()) + return nullptr; + + buf = self->queue->front(); + self->queue->pop_front(); + + return buf; +} + static GstFlowReturn gst_libcamera_pool_acquire_buffer(GstBufferPool *pool, GstBuffer **buffer, [[maybe_unused]] GstBufferPoolAcquireParams *params) { GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool); - GstBuffer *buf = GST_BUFFER(gst_atomic_queue_pop(self->queue)); + GstBuffer *buf = gst_libcamera_pool_pop_buffer(self); + if (!buf) return GST_FLOW_ERROR; if (!gst_libcamera_allocator_prepare_buffer(self->allocator, self->stream, buf)) { - gst_atomic_queue_push(self->queue, buf); + GLibLocker lock(GST_OBJECT(self)); + self->queue->push_back(buf); return GST_FLOW_ERROR; } @@ -64,9 +83,13 @@ static void gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer) { GstLibcameraPool *self = GST_LIBCAMERA_POOL(pool); - bool do_notify = gst_atomic_queue_length(self->queue) == 0; + bool do_notify; - gst_atomic_queue_push(self->queue, buffer); + { + GLibLocker lock(GST_OBJECT(self)); + do_notify = self->queue->empty(); + self->queue->push_back(buffer); + } if (do_notify) g_signal_emit(self, signals[SIGNAL_BUFFER_NOTIFY], 0); @@ -75,7 +98,7 @@ gst_libcamera_pool_release_buffer(GstBufferPool *pool, GstBuffer *buffer) static void gst_libcamera_pool_init(GstLibcameraPool *self) { - self->queue = gst_atomic_queue_new(4); + self->queue = new std::deque<GstBuffer *>(); } static void @@ -84,10 +107,10 @@ gst_libcamera_pool_finalize(GObject *object) GstLibcameraPool *self = GST_LIBCAMERA_POOL(object); GstBuffer *buf; - while ((buf = GST_BUFFER(gst_atomic_queue_pop(self->queue)))) + while ((buf = gst_libcamera_pool_pop_buffer(self))) gst_buffer_unref(buf); - gst_atomic_queue_unref(self->queue); 
+ delete self->queue; g_object_unref(self->allocator); G_OBJECT_CLASS(gst_libcamera_pool_parent_class)->finalize(object); @@ -122,7 +145,7 @@ gst_libcamera_pool_new(GstLibcameraAllocator *allocator, Stream *stream) gsize pool_size = gst_libcamera_allocator_get_pool_size(allocator, stream); for (gsize i = 0; i < pool_size; i++) { GstBuffer *buffer = gst_buffer_new(); - gst_atomic_queue_push(pool->queue, buffer); + pool->queue->push_back(buffer); } return pool; diff --git a/src/gstreamer/gstlibcameraprovider.cpp b/src/gstreamer/gstlibcameraprovider.cpp index 4fb1b007..5da96ea3 100644 --- a/src/gstreamer/gstlibcameraprovider.cpp +++ b/src/gstreamer/gstlibcameraprovider.cpp @@ -33,7 +33,6 @@ GST_DEBUG_CATEGORY_STATIC(provider_debug); enum { PROP_DEVICE_NAME = 1, - PROP_AUTO_FOCUS_MODE = 2, }; #define GST_TYPE_LIBCAMERA_DEVICE gst_libcamera_device_get_type() @@ -43,7 +42,6 @@ G_DECLARE_FINAL_TYPE(GstLibcameraDevice, gst_libcamera_device, struct _GstLibcameraDevice { GstDevice parent; gchar *name; - controls::AfModeEnum auto_focus_mode = controls::AfModeManual; }; G_DEFINE_TYPE(GstLibcameraDevice, gst_libcamera_device, GST_TYPE_DEVICE) @@ -60,7 +58,6 @@ gst_libcamera_device_create_element(GstDevice *device, const gchar *name) g_assert(source); g_object_set(source, "camera-name", GST_LIBCAMERA_DEVICE(device)->name, nullptr); - g_object_set(source, "auto-focus-mode", GST_LIBCAMERA_DEVICE(device)->auto_focus_mode, nullptr); return source; } @@ -87,9 +84,6 @@ gst_libcamera_device_set_property(GObject *object, guint prop_id, case PROP_DEVICE_NAME: device->name = g_value_dup_string(value); break; - case PROP_AUTO_FOCUS_MODE: - device->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value)); - break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; @@ -129,15 +123,6 @@ gst_libcamera_device_class_init(GstLibcameraDeviceClass *klass) (GParamFlags)(G_PARAM_STATIC_STRINGS | G_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY)); g_object_class_install_property(object_class, PROP_DEVICE_NAME, pspec); - - pspec = g_param_spec_enum("auto-focus-mode", - "Set auto-focus mode", - "Available options: AfModeManual, " - "AfModeAuto or AfModeContinuous.", - gst_libcamera_auto_focus_get_type(), - static_cast<gint>(controls::AfModeManual), - G_PARAM_WRITABLE); - g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, pspec); } static GstDevice * diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp index 9680d809..5e9e843d 100644 --- a/src/gstreamer/gstlibcamerasrc.cpp +++ b/src/gstreamer/gstlibcamerasrc.cpp @@ -37,10 +37,11 @@ #include <gst/base/base.h> +#include "gstlibcamera-controls.h" +#include "gstlibcamera-utils.h" #include "gstlibcameraallocator.h" #include "gstlibcamerapad.h" #include "gstlibcamerapool.h" -#include "gstlibcamera-utils.h" using namespace libcamera; @@ -128,6 +129,7 @@ struct GstLibcameraSrcState { ControlList initControls_; guint group_id_; + GstCameraControls controls_; int queueRequest(); void requestCompleted(Request *request); @@ -142,7 +144,6 @@ struct _GstLibcameraSrc { GstTask *task; gchar *camera_name; - controls::AfModeEnum auto_focus_mode = controls::AfModeManual; std::atomic<GstEvent *> pending_eos; @@ -154,10 +155,15 @@ struct _GstLibcameraSrc { enum { PROP_0, PROP_CAMERA_NAME, - PROP_AUTO_FOCUS_MODE, + PROP_LAST }; +static void gst_libcamera_src_child_proxy_init(gpointer g_iface, + gpointer iface_data); + G_DEFINE_TYPE_WITH_CODE(GstLibcameraSrc, gst_libcamera_src, GST_TYPE_ELEMENT, + 
G_IMPLEMENT_INTERFACE(GST_TYPE_CHILD_PROXY, + gst_libcamera_src_child_proxy_init) GST_DEBUG_CATEGORY_INIT(source_debug, "libcamerasrc", 0, "libcamera Source")) @@ -180,6 +186,9 @@ int GstLibcameraSrcState::queueRequest() if (!request) return -ENOMEM; + /* Apply controls */ + controls_.applyControls(request); + std::unique_ptr<RequestWrap> wrap = std::make_unique<RequestWrap>(std::move(request)); @@ -223,6 +232,9 @@ GstLibcameraSrcState::requestCompleted(Request *request) { GLibLocker locker(&lock_); + + controls_.readMetadata(request); + wrap = std::move(queuedRequests_.front()); queuedRequests_.pop(); } @@ -405,6 +417,8 @@ gst_libcamera_src_open(GstLibcameraSrc *self) return false; } + self->state->controls_.setCamera(cam); + cam->requestCompleted.connect(self->state, &GstLibcameraSrcState::requestCompleted); /* No need to lock here, we didn't start our threads yet. */ @@ -419,6 +433,8 @@ static bool gst_libcamera_src_negotiate(GstLibcameraSrc *self) { GstLibcameraSrcState *state = self->state; + std::vector<GstVideoTransferFunction> transfer(state->srcpads_.size(), + GST_VIDEO_TRANSFER_UNKNOWN); g_autoptr(GstStructure) element_caps = gst_structure_new_empty("caps"); @@ -434,7 +450,7 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self) /* Fixate caps and configure the stream. */ caps = gst_caps_make_writable(caps); - gst_libcamera_configure_stream_from_caps(stream_cfg, caps); + gst_libcamera_configure_stream_from_caps(stream_cfg, caps, &transfer[i]); gst_libcamera_get_framerate_from_caps(caps, element_caps); } @@ -462,7 +478,7 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self) GstPad *srcpad = state->srcpads_[i]; const StreamConfiguration &stream_cfg = state->config_->at(i); - g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg); + g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg, transfer[i]); gst_libcamera_framerate_to_caps(caps, element_caps); if (!gst_pad_push_event(srcpad, gst_event_new_caps(caps))) @@ -658,18 +674,6 @@ gst_libcamera_src_task_enter(GstTask *task, [[maybe_unused]] GThread *thread, gst_pad_push_event(srcpad, gst_event_new_segment(&segment)); } - if (self->auto_focus_mode != controls::AfModeManual) { - const ControlInfoMap &infoMap = state->cam_->controls(); - if (infoMap.find(&controls::AfMode) != infoMap.end()) { - state->initControls_.set(controls::AfMode, self->auto_focus_mode); - } else { - GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS, - ("Failed to enable auto focus"), - ("AfMode not supported by this camera, " - "please retry with 'auto-focus-mode=AfModeManual'")); - } - } - ret = state->cam_->start(&state->initControls_); if (ret) { GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS, @@ -731,17 +735,16 @@ gst_libcamera_src_set_property(GObject *object, guint prop_id, { GLibLocker lock(GST_OBJECT(object)); GstLibcameraSrc *self = GST_LIBCAMERA_SRC(object); + GstLibcameraSrcState *state = self->state; switch (prop_id) { case PROP_CAMERA_NAME: g_free(self->camera_name); self->camera_name = g_value_dup_string(value); break; - case PROP_AUTO_FOCUS_MODE: - self->auto_focus_mode = static_cast<controls::AfModeEnum>(g_value_get_enum(value)); - break; default: - G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); + if (!state->controls_.setProperty(prop_id - PROP_LAST, value, pspec)) + G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; } } @@ -752,16 +755,15 @@ gst_libcamera_src_get_property(GObject *object, guint prop_id, GValue *value, { GLibLocker lock(GST_OBJECT(object)); GstLibcameraSrc *self 
= GST_LIBCAMERA_SRC(object); + GstLibcameraSrcState *state = self->state; switch (prop_id) { case PROP_CAMERA_NAME: g_value_set_string(value, self->camera_name); break; - case PROP_AUTO_FOCUS_MODE: - g_value_set_enum(value, static_cast<gint>(self->auto_focus_mode)); - break; default: - G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); + if (!state->controls_.getProperty(prop_id - PROP_LAST, value, pspec)) + G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; } } @@ -865,8 +867,10 @@ gst_libcamera_src_init(GstLibcameraSrc *self) g_mutex_init(&state->lock_); - state->srcpads_.push_back(gst_pad_new_from_template(templ, "src")); - gst_element_add_pad(GST_ELEMENT(self), state->srcpads_.back()); + GstPad *pad = gst_pad_new_from_template(templ, "src"); + state->srcpads_.push_back(pad); + gst_element_add_pad(GST_ELEMENT(self), pad); + gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad)); GST_OBJECT_FLAG_SET(self, GST_ELEMENT_FLAG_SOURCE); @@ -897,6 +901,8 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ, return NULL; } + gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad)); + return reinterpret_cast<GstPad *>(g_steal_pointer(&pad)); } @@ -905,6 +911,8 @@ gst_libcamera_src_release_pad(GstElement *element, GstPad *pad) { GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element); + gst_child_proxy_child_removed(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad)); + GST_DEBUG_OBJECT(self, "Pad %" GST_PTR_FORMAT " being released", pad); { @@ -940,7 +948,7 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass) gst_element_class_set_metadata(element_class, "libcamera Source", "Source/Video", "Linux Camera source using libcamera", - "Nicolas Dufresne <nicolas.dufresne@collabora.com"); + "Nicolas Dufresne <nicolas.dufresne@collabora.com>"); gst_element_class_add_static_pad_template_with_gtype(element_class, &src_template, GST_TYPE_LIBCAMERA_PAD); @@ -956,12 +964,35 @@ gst_libcamera_src_class_init(GstLibcameraSrcClass *klass) | G_PARAM_STATIC_STRINGS)); g_object_class_install_property(object_class, PROP_CAMERA_NAME, spec); - spec = g_param_spec_enum("auto-focus-mode", - "Set auto-focus mode", - "Available options: AfModeManual, " - "AfModeAuto or AfModeContinuous.", - gst_libcamera_auto_focus_get_type(), - static_cast<gint>(controls::AfModeManual), - G_PARAM_WRITABLE); - g_object_class_install_property(object_class, PROP_AUTO_FOCUS_MODE, spec); + GstCameraControls::installProperties(object_class, PROP_LAST); +} + +/* GstChildProxy implementation */ +static GObject * +gst_libcamera_src_child_proxy_get_child_by_index(GstChildProxy *child_proxy, + guint index) +{ + GLibLocker lock(GST_OBJECT(child_proxy)); + GObject *obj = nullptr; + + obj = reinterpret_cast<GObject *>(g_list_nth_data(GST_ELEMENT(child_proxy)->srcpads, index)); + if (obj) + gst_object_ref(obj); + + return obj; +} + +static guint +gst_libcamera_src_child_proxy_get_children_count(GstChildProxy *child_proxy) +{ + GLibLocker lock(GST_OBJECT(child_proxy)); + return GST_ELEMENT_CAST(child_proxy)->numsrcpads; +} + +static void +gst_libcamera_src_child_proxy_init(gpointer g_iface, [[maybe_unused]] gpointer iface_data) +{ + GstChildProxyInterface *iface = reinterpret_cast<GstChildProxyInterface *>(g_iface); + iface->get_child_by_index = gst_libcamera_src_child_proxy_get_child_by_index; + iface->get_children_count = gst_libcamera_src_child_proxy_get_children_count; } diff --git a/src/gstreamer/gstlibcamerasrc.h 
b/src/gstreamer/gstlibcamerasrc.h index fd1f8193..a27db9ca 100644 --- a/src/gstreamer/gstlibcamerasrc.h +++ b/src/gstreamer/gstlibcamerasrc.h @@ -8,8 +8,6 @@ #pragma once -#include <libcamera/control_ids.h> - #include <gst/gst.h> G_BEGIN_DECLS @@ -19,32 +17,3 @@ G_DECLARE_FINAL_TYPE(GstLibcameraSrc, gst_libcamera_src, GST_LIBCAMERA, SRC, GstElement) G_END_DECLS - -inline GType -gst_libcamera_auto_focus_get_type() -{ - static GType type = 0; - static const GEnumValue values[] = { - { - static_cast<gint>(libcamera::controls::AfModeManual), - "AfModeManual", - "manual-focus", - }, - { - static_cast<gint>(libcamera::controls::AfModeAuto), - "AfModeAuto", - "automatic-auto-focus", - }, - { - static_cast<gint>(libcamera::controls::AfModeContinuous), - "AfModeContinuous", - "continuous-auto-focus", - }, - { 0, NULL, NULL } - }; - - if (!type) - type = g_enum_register_static("GstLibcameraAutoFocus", values); - - return type; -} diff --git a/src/gstreamer/meson.build b/src/gstreamer/meson.build index c2a01e7b..6b7e53b5 100644 --- a/src/gstreamer/meson.build +++ b/src/gstreamer/meson.build @@ -25,6 +25,16 @@ libcamera_gst_sources = [ 'gstlibcamerasrc.cpp', ] +# Generate gstreamer control properties + +gen_gst_controls_template = files('gstlibcamera-controls.cpp.in') +libcamera_gst_sources += custom_target('gstlibcamera-controls.cpp', + input : controls_files, + output : 'gstlibcamera-controls.cpp', + command : [gen_gst_controls, '-o', '@OUTPUT@', + '-t', gen_gst_controls_template, '@INPUT@'], + env : py_build_env) + libcamera_gst_cpp_args = [ '-DVERSION="@0@"'.format(libcamera_git_version), '-DPACKAGE="@0@"'.format(meson.project_name()), diff --git a/src/ipa/ipu3/algorithms/af.cpp b/src/ipa/ipu3/algorithms/af.cpp index 29eb7355..cf68fb59 100644 --- a/src/ipa/ipu3/algorithms/af.cpp +++ b/src/ipa/ipu3/algorithms/af.cpp @@ -11,7 +11,6 @@ #include <chrono> #include <cmath> #include <fcntl.h> -#include <numeric> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> @@ -23,8 +22,6 @@ #include <libcamera/ipa/core_ipa_interface.h> -#include "libipa/histogram.h" - /** * \file af.h */ diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp index 0e0114f6..39d0aebb 100644 --- a/src/ipa/ipu3/algorithms/agc.cpp +++ b/src/ipa/ipu3/algorithms/agc.cpp @@ -9,14 +9,15 @@ #include <algorithm> #include <chrono> -#include <cmath> #include <libcamera/base/log.h> #include <libcamera/base/utils.h> #include <libcamera/control_ids.h> + #include <libcamera/ipa/core_ipa_interface.h> +#include "libipa/colours.h" #include "libipa/histogram.h" /** @@ -33,7 +34,7 @@ namespace ipa::ipu3::algorithms { * \class Agc * \brief A mean-based auto-exposure algorithm * - * This algorithm calculates a shutter time and an analogue gain so that the + * This algorithm calculates an exposure time and an analogue gain so that the * average value of the green channel of the brightest 2% of pixels approaches * 0.5. The AWB gains are not used here, and all cells in the grid have the same * weight, like an average-metering case. 
In this metering mode, the camera uses @@ -51,13 +52,13 @@ LOG_DEFINE_CATEGORY(IPU3Agc) static constexpr double kMinAnalogueGain = 1.0; /* \todo Honour the FrameDurationLimits control instead of hardcoding a limit */ -static constexpr utils::Duration kMaxShutterSpeed = 60ms; +static constexpr utils::Duration kMaxExposureTime = 60ms; /* Histogram constants */ static constexpr uint32_t knumHistogramBins = 256; Agc::Agc() - : minShutterSpeed_(0s), maxShutterSpeed_(0s) + : minExposureTime_(0s), maxExposureTime_(0s) { } @@ -100,9 +101,9 @@ int Agc::configure(IPAContext &context, stride_ = configuration.grid.stride; bdsGrid_ = configuration.grid.bdsGrid; - minShutterSpeed_ = configuration.agc.minShutterSpeed; - maxShutterSpeed_ = std::min(configuration.agc.maxShutterSpeed, - kMaxShutterSpeed); + minExposureTime_ = configuration.agc.minExposureTime; + maxExposureTime_ = std::min(configuration.agc.maxExposureTime, + kMaxExposureTime); minAnalogueGain_ = std::max(configuration.agc.minAnalogueGain, kMinAnalogueGain); maxAnalogueGain_ = configuration.agc.maxAnalogueGain; @@ -115,7 +116,7 @@ int Agc::configure(IPAContext &context, context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first; /* \todo Run this again when FrameDurationLimits is passed in */ - setLimits(minShutterSpeed_, maxShutterSpeed_, minAnalogueGain_, + setLimits(minExposureTime_, maxExposureTime_, minAnalogueGain_, maxAnalogueGain_); resetFrameCount(); @@ -177,18 +178,16 @@ Histogram Agc::parseStatistics(const ipu3_uapi_stats_3a *stats, */ double Agc::estimateLuminance(double gain) const { - double redSum = 0, greenSum = 0, blueSum = 0; + RGB<double> sum{ 0.0 }; for (unsigned int i = 0; i < rgbTriples_.size(); i++) { - redSum += std::min(std::get<0>(rgbTriples_[i]) * gain, 255.0); - greenSum += std::min(std::get<1>(rgbTriples_[i]) * gain, 255.0); - blueSum += std::min(std::get<2>(rgbTriples_[i]) * gain, 255.0); + sum.r() += std::min(std::get<0>(rgbTriples_[i]) * gain, 255.0); + sum.g() += std::min(std::get<1>(rgbTriples_[i]) * gain, 255.0); + sum.b() += std::min(std::get<2>(rgbTriples_[i]) * gain, 255.0); } - double ySum = redSum * rGain_ * 0.299 - + greenSum * gGain_ * 0.587 - + blueSum * bGain_ * 0.114; - + RGB<double> gains{{ rGain_, gGain_, bGain_ }}; + double ySum = rec601LuminanceFromRGB(sum * gains); return ySum / (bdsGrid_.height * bdsGrid_.width) / 255; } @@ -222,20 +221,20 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame, double analogueGain = frameContext.sensor.gain; utils::Duration effectiveExposureValue = exposureTime * analogueGain; - utils::Duration shutterTime; + utils::Duration newExposureTime; double aGain, dGain; - std::tie(shutterTime, aGain, dGain) = + std::tie(newExposureTime, aGain, dGain) = calculateNewEv(context.activeState.agc.constraintMode, context.activeState.agc.exposureMode, hist, effectiveExposureValue); LOG(IPU3Agc, Debug) - << "Divided up shutter, analogue gain and digital gain are " - << shutterTime << ", " << aGain << " and " << dGain; + << "Divided up exposure time, analogue gain and digital gain are " + << newExposureTime << ", " << aGain << " and " << dGain; IPAActiveState &activeState = context.activeState; - /* Update the estimated exposure and gain. */ - activeState.agc.exposure = shutterTime / context.configuration.sensor.lineDuration; + /* Update the estimated exposure time and gain. 
*/ + activeState.agc.exposure = newExposureTime / context.configuration.sensor.lineDuration; activeState.agc.gain = aGain; metadata.set(controls::AnalogueGain, frameContext.sensor.gain); @@ -247,7 +246,6 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame, utils::Duration frameDuration = context.configuration.sensor.lineDuration * vTotal; metadata.set(controls::FrameDuration, frameDuration.get<std::micro>()); - } REGISTER_IPA_ALGORITHM(Agc, "Agc") diff --git a/src/ipa/ipu3/algorithms/agc.h b/src/ipa/ipu3/algorithms/agc.h index 411f4da0..890c271b 100644 --- a/src/ipa/ipu3/algorithms/agc.h +++ b/src/ipa/ipu3/algorithms/agc.h @@ -42,8 +42,8 @@ private: Histogram parseStatistics(const ipu3_uapi_stats_3a *stats, const ipu3_uapi_grid_config &grid); - utils::Duration minShutterSpeed_; - utils::Duration maxShutterSpeed_; + utils::Duration minExposureTime_; + utils::Duration maxExposureTime_; double minAnalogueGain_; double maxAnalogueGain_; diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp index 4d6e3994..55de05d9 100644 --- a/src/ipa/ipu3/algorithms/awb.cpp +++ b/src/ipa/ipu3/algorithms/awb.cpp @@ -13,6 +13,8 @@ #include <libcamera/control_ids.h> +#include "libipa/colours.h" + /** * \file awb.h */ @@ -301,51 +303,24 @@ void Awb::prepare(IPAContext &context, params->use.acc_ccm = 1; } -/** - * The function estimates the correlated color temperature using - * from RGB color space input. - * In physics and color science, the Planckian locus or black body locus is - * the path or locus that the color of an incandescent black body would take - * in a particular chromaticity space as the blackbody temperature changes. - * - * If a narrow range of color temperatures is considered (those encapsulating - * daylight being the most practical case) one can approximate the Planckian - * locus in order to calculate the CCT in terms of chromaticity coordinates. - * - * More detailed information can be found in: - * https://en.wikipedia.org/wiki/Color_temperature#Approximation - */ -uint32_t Awb::estimateCCT(double red, double green, double blue) -{ - /* Convert the RGB values to CIE tristimulus values (XYZ) */ - double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue); - double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue); - double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue); - - /* Calculate the normalized chromaticity values */ - double x = X / (X + Y + Z); - double y = Y / (X + Y + Z); - - /* Calculate CCT */ - double n = (x - 0.3320) / (0.1858 - y); - return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33; -} - /* Generate an RGB vector with the average values for each zone */ void Awb::generateZones() { zones_.clear(); for (unsigned int i = 0; i < kAwbStatsSizeX * kAwbStatsSizeY; i++) { - RGB zone; double counted = awbStats_[i].counted; if (counted >= cellsPerZoneThreshold_) { - zone.G = awbStats_[i].sum.green / counted; - if (zone.G >= kMinGreenLevelInZone) { - zone.R = awbStats_[i].sum.red / counted; - zone.B = awbStats_[i].sum.blue / counted; + RGB<double> zone{{ + static_cast<double>(awbStats_[i].sum.red), + static_cast<double>(awbStats_[i].sum.green), + static_cast<double>(awbStats_[i].sum.blue) + }}; + + zone /= counted; + + if (zone.g() >= kMinGreenLevelInZone) zones_.push_back(zone); - } } } } @@ -412,32 +387,32 @@ void Awb::awbGreyWorld() * consider some variations, such as normalising all the zones first, or * doing an L2 average etc. 
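 * * A note on the comparators used just below (derived from the code, not upstream text): a.g() * b.r() < b.g() * a.r() is the cross-multiplied form of a.r() / a.g() > b.r() / b.g(), so the red sort orders zones by decreasing red-to-green ratio without dividing by potentially small green averages; the blue sort applies the same trick to the blue-to-green ratio.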
*/ - std::vector<RGB> &redDerivative(zones_); - std::vector<RGB> blueDerivative(redDerivative); + std::vector<RGB<double>> &redDerivative(zones_); + std::vector<RGB<double>> blueDerivative(redDerivative); std::sort(redDerivative.begin(), redDerivative.end(), - [](RGB const &a, RGB const &b) { - return a.G * b.R < b.G * a.R; + [](RGB<double> const &a, RGB<double> const &b) { + return a.g() * b.r() < b.g() * a.r(); }); std::sort(blueDerivative.begin(), blueDerivative.end(), - [](RGB const &a, RGB const &b) { - return a.G * b.B < b.G * a.B; + [](RGB<double> const &a, RGB<double> const &b) { + return a.g() * b.b() < b.g() * a.b(); }); /* Average the middle half of the values. */ int discard = redDerivative.size() / 4; - RGB sumRed(0, 0, 0); - RGB sumBlue(0, 0, 0); + RGB<double> sumRed{ 0.0 }; + RGB<double> sumBlue{ 0.0 }; for (auto ri = redDerivative.begin() + discard, bi = blueDerivative.begin() + discard; ri != redDerivative.end() - discard; ri++, bi++) sumRed += *ri, sumBlue += *bi; - double redGain = sumRed.G / (sumRed.R + 1), - blueGain = sumBlue.G / (sumBlue.B + 1); + double redGain = sumRed.g() / (sumRed.r() + 1), + blueGain = sumBlue.g() / (sumBlue.b() + 1); /* Color temperature is not relevant in Grey world but still useful to estimate it :-) */ - asyncResults_.temperatureK = estimateCCT(sumRed.R, sumRed.G, sumBlue.B); + asyncResults_.temperatureK = estimateCCT({{ sumRed.r(), sumRed.g(), sumBlue.b() }}); /* * Gain values are unsigned integer value ranging [0, 8) with 13 bit diff --git a/src/ipa/ipu3/algorithms/awb.h b/src/ipa/ipu3/algorithms/awb.h index c0202823..dbf69c90 100644 --- a/src/ipa/ipu3/algorithms/awb.h +++ b/src/ipa/ipu3/algorithms/awb.h @@ -13,6 +13,8 @@ #include <libcamera/geometry.h> +#include "libcamera/internal/vector.h" + #include "algorithm.h" namespace libcamera { @@ -48,20 +50,6 @@ public: ControlList &metadata) override; private: - /* \todo Make these structs available to all the ISPs ? */ - struct RGB { - RGB(double _R = 0, double _G = 0, double _B = 0) - : R(_R), G(_G), B(_B) - { - } - double R, G, B; - RGB &operator+=(RGB const &other) - { - R += other.R, G += other.G, B += other.B; - return *this; - } - }; - struct AwbStatus { double temperatureK; double redGain; @@ -75,11 +63,10 @@ private: void generateAwbStats(const ipu3_uapi_stats_3a *stats); void clearAwbStats(); void awbGreyWorld(); - uint32_t estimateCCT(double red, double green, double blue); static constexpr uint16_t threshold(float value); static constexpr uint16_t gainValue(double gain); - std::vector<RGB> zones_; + std::vector<RGB<double>> zones_; Accumulator awbStats_[kAwbStatsSizeX * kAwbStatsSizeY]; AwbStatus asyncResults_; diff --git a/src/ipa/ipu3/algorithms/blc.cpp b/src/ipa/ipu3/algorithms/blc.cpp index 257f40e2..35748fb2 100644 --- a/src/ipa/ipu3/algorithms/blc.cpp +++ b/src/ipa/ipu3/algorithms/blc.cpp @@ -7,8 +7,6 @@ #include "blc.h" -#include <string.h> - /** * \file blc.h * \brief IPU3 Black Level Correction control @@ -57,8 +55,8 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context, * tuning processes. This is a first rough approximation. 
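 * The same fixed value of 64 is applied to all four Bayer channels (Gr, R, B and Gb) below.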
*/ params->obgrid_param.gr = 64; - params->obgrid_param.r = 64; - params->obgrid_param.b = 64; + params->obgrid_param.r = 64; + params->obgrid_param.b = 64; params->obgrid_param.gb = 64; /* Enable the custom black level correction processing */ diff --git a/src/ipa/ipu3/ipa_context.cpp b/src/ipa/ipu3/ipa_context.cpp index 917d0654..3b22f791 100644 --- a/src/ipa/ipu3/ipa_context.cpp +++ b/src/ipa/ipu3/ipa_context.cpp @@ -39,6 +39,10 @@ namespace libcamera::ipa::ipu3 { * \struct IPAContext * \brief Global IPA context data shared between all algorithms * + * \fn IPAContext::IPAContext + * \brief Initialize the instance with the given number of frame contexts + * \param[in] frameContextSize Size of the frame context ring buffer + * * \var IPAContext::configuration * \brief The IPA session configuration, immutable during the session * @@ -92,11 +96,11 @@ namespace libcamera::ipa::ipu3 { * \var IPASessionConfiguration::agc * \brief AGC parameters configuration of the IPA * - * \var IPASessionConfiguration::agc.minShutterSpeed - * \brief Minimum shutter speed supported with the configured sensor + * \var IPASessionConfiguration::agc.minExposureTime + * \brief Minimum exposure time supported with the configured sensor * - * \var IPASessionConfiguration::agc.maxShutterSpeed - * \brief Maximum shutter speed supported with the configured sensor + * \var IPASessionConfiguration::agc.maxExposureTime + * \brief Maximum exposure time supported with the configured sensor * * \var IPASessionConfiguration::agc.minAnalogueGain * \brief Minimum analogue gain supported with the configured sensor diff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h index c85d1e34..97fcf06c 100644 --- a/src/ipa/ipu3/ipa_context.h +++ b/src/ipa/ipu3/ipa_context.h @@ -33,8 +33,8 @@ struct IPASessionConfiguration { } af; struct { - utils::Duration minShutterSpeed; - utils::Duration maxShutterSpeed; + utils::Duration minExposureTime; + utils::Duration maxExposureTime; double minAnalogueGain; double maxAnalogueGain; } agc; @@ -84,6 +84,11 @@ struct IPAFrameContext : public FrameContext { }; struct IPAContext { + IPAContext(unsigned int frameContextSize) + : frameContexts(frameContextSize) + { + } + IPASessionConfiguration configuration; IPAActiveState activeState; diff --git a/src/ipa/ipu3/ipu3-ipa-design-guide.rst b/src/ipa/ipu3/ipu3-ipa-design-guide.rst index 72506397..85d735c6 100644 --- a/src/ipa/ipu3/ipu3-ipa-design-guide.rst +++ b/src/ipa/ipu3/ipu3-ipa-design-guide.rst @@ -27,8 +27,8 @@ from applications, and managing events from the pipeline handler. 
└─┬───┬───┬──────┬────┬────┬────┬─┴────▼─┬──┘ 1: init() │ │ │ │ ▲ │ ▲ │ ▲ │ ▲ │ 2: configure() │1 │2 │3 │4│ │4│ │4│ │4│ │5 3: mapBuffers(), start() - │ │ │ │ │ │ │ │ │ │ │ │ 4: (▼) queueRequest(), fillParamsBuffer(), processStatsBuffer() - ▼ ▼ ▼ ▼ │ ▼ │ ▼ │ ▼ │ ▼ (▲) setSensorControls, paramsBufferReady, metadataReady Signals + │ │ │ │ │ │ │ │ │ │ │ │ 4: (▼) queueRequest(), computeParams(), processStats() + ▼ ▼ ▼ ▼ │ ▼ │ ▼ │ ▼ │ ▼ (▲) setSensorControls, paramsComputed, metadataReady Signals ┌──────────────────┴────┴────┴────┴─────────┐ 5: stop(), unmapBuffers() │ IPU3 IPA │ │ ┌───────────────────────┐ │ @@ -104,8 +104,8 @@ to operate when running: - configure() - queueRequest() -- fillParamsBuffer() -- processStatsBuffer() +- computeParams() +- processStats() The configuration phase allows the pipeline-handler to inform the IPA of the current stream configurations, which is then passed into each @@ -119,7 +119,7 @@ When configured, the IPA is notified by the pipeline handler of the Camera ``start()`` event, after which incoming requests will be queued for processing, requiring a parameter buffer (``ipu3_uapi_params``) to be populated for the ImgU. This is given to the IPA through -``fillParamsBuffer()``, and then passed directly to each algorithm +``computeParams()``, and then passed directly to each algorithm through the ``prepare()`` call allowing the ISP configuration to be updated for the needs of each component that the algorithm is responsible for. @@ -129,7 +129,7 @@ structure that it modifies, and it should take care to ensure that any structure set by a use flag is fully initialised to suitable values. The parameter buffer is returned to the pipeline handler through the -``paramsBufferReady`` signal, and from there queued to the ImgU along +``paramsComputed`` signal, and from there queued to the ImgU along with a raw frame captured with the CIO2. Post-frame completion @@ -138,7 +138,7 @@ Post-frame completion When the capture of an image is completed, and successfully processed through the ImgU, the generated statistics buffer (``ipu3_uapi_stats_3a``) is given to the IPA through -``processStatsBuffer()``. This provides the IPA with an opportunity to +``processStats()``. This provides the IPA with an opportunity to examine the results of the ISP and run the calculations required by each algorithm on the new data. 
The algorithms may require context from the operations of other algorithms, for example, the AWB might choose to use diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp index cdcdf1fb..1cae08bf 100644 --- a/src/ipa/ipu3/ipu3.cpp +++ b/src/ipa/ipu3/ipu3.cpp @@ -23,24 +23,22 @@ #include <libcamera/base/utils.h> #include <libcamera/control_ids.h> +#include <libcamera/controls.h> #include <libcamera/framebuffer.h> +#include <libcamera/geometry.h> +#include <libcamera/request.h> + #include <libcamera/ipa/ipa_interface.h> #include <libcamera/ipa/ipa_module_info.h> #include <libcamera/ipa/ipu3_ipa_interface.h> -#include <libcamera/request.h> #include "libcamera/internal/mapped_framebuffer.h" #include "libcamera/internal/yaml_parser.h" -#include "algorithms/af.h" -#include "algorithms/agc.h" -#include "algorithms/algorithm.h" -#include "algorithms/awb.h" -#include "algorithms/blc.h" -#include "algorithms/tone_mapping.h" #include "libipa/camera_sensor_helper.h" #include "ipa_context.h" +#include "module.h" /* Minimum grid width, expressed as a number of cells */ static constexpr uint32_t kMinGridWidth = 16; @@ -89,14 +87,14 @@ namespace ipa::ipu3 { * parameter buffer, and adapting the settings of the sensor attached to the * IPU3 CIO2 through sensor-specific V4L2 controls. * - * In fillParamsBuffer(), we populate the ImgU parameter buffer with + * In computeParams(), we populate the ImgU parameter buffer with * settings to configure the device in preparation for handling the frame * queued in the Request. * * When the frame has completed processing, the ImgU will generate a statistics - * buffer which is given to the IPA with processStatsBuffer(). In this we run the + * buffer which is given to the IPA with processStats(). In this we run the * algorithms to parse the statistics and cache any results for the next - * fillParamsBuffer() call. + * computeParams() call. * * The individual algorithms are split into modular components that are called * iteratively to allow them to process statistics from the ImgU in the order @@ -114,7 +112,7 @@ namespace ipa::ipu3 { * blue gains to apply to generate a neutral grey frame overall. * * AGC is handled by calculating a histogram of the green channel to estimate an - * analogue gain and shutter time which will provide a well exposed frame. A + * analogue gain and exposure time which will provide a well exposed frame. A * low-pass IIR filter is used to smooth the changes to the sensor to reduce * perceivable steps. * @@ -157,10 +155,10 @@ public: void unmapBuffers(const std::vector<unsigned int> &ids) override; void queueRequest(const uint32_t frame, const ControlList &controls) override; - void fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) override; - void processStatsBuffer(const uint32_t frame, const int64_t frameTimestamp, - const uint32_t bufferId, - const ControlList &sensorControls) override; + void computeParams(const uint32_t frame, const uint32_t bufferId) override; + void processStats(const uint32_t frame, const int64_t frameTimestamp, + const uint32_t bufferId, + const ControlList &sensorControls) override; protected: std::string logPrefix() const override; @@ -189,7 +187,7 @@ private: }; IPAIPU3::IPAIPU3() - : context_({ {}, {}, { kMaxFrameContexts }, {} }) + : context_(kMaxFrameContexts) { } @@ -217,13 +215,13 @@ void IPAIPU3::updateSessionConfiguration(const ControlInfoMap &sensorControls) /* * When the AGC computes the new exposure values for a frame, it needs - * to know the limits for shutter speed and analogue gain. 
+ * to know the limits for exposure time and analogue gain. * As it depends on the sensor, update it with the controls. * - * \todo take VBLANK into account for maximum shutter speed + * \todo take VBLANK into account for maximum exposure time */ - context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration; - context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration; + context_.configuration.agc.minExposureTime = minExposure * context_.configuration.sensor.lineDuration; + context_.configuration.agc.maxExposureTime = maxExposure * context_.configuration.sensor.lineDuration; context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain); context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain); } @@ -313,8 +311,8 @@ int IPAIPU3::init(const IPASettings &settings, /* Clean context */ context_.configuration = {}; - context_.configuration.sensor.lineDuration = sensorInfo.minLineLength - * 1.0s / sensorInfo.pixelRate; + context_.configuration.sensor.lineDuration = + sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate; /* Load the tuning data file. */ File file(settings.configurationFile); @@ -477,8 +475,8 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo, context_.frameContexts.clear(); /* Initialise the sensor configuration. */ - context_.configuration.sensor.lineDuration = sensorInfo_.minLineLength - * 1.0s / sensorInfo_.pixelRate; + context_.configuration.sensor.lineDuration = + sensorInfo_.minLineLength * 1.0s / sensorInfo_.pixelRate; context_.configuration.sensor.size = sensorInfo_.outputSize; /* @@ -540,7 +538,7 @@ void IPAIPU3::unmapBuffers(const std::vector<unsigned int> &ids) * Algorithms are expected to fill the IPU3 parameter buffer for the next * frame given their most recent processing of the ImgU statistics. */ -void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) +void IPAIPU3::computeParams(const uint32_t frame, const uint32_t bufferId) { auto it = buffers_.find(bufferId); if (it == buffers_.end()) { @@ -568,7 +566,7 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) for (auto const &algo : algorithms()) algo->prepare(context_, frame, frameContext, params); - paramsBufferReady.emit(frame); + paramsComputed.emit(frame); } /** @@ -582,9 +580,9 @@ void IPAIPU3::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) * statistics are passed to each algorithm module to run their calculations and * update their state accordingly. 
*/ -void IPAIPU3::processStatsBuffer(const uint32_t frame, - [[maybe_unused]] const int64_t frameTimestamp, - const uint32_t bufferId, const ControlList &sensorControls) +void IPAIPU3::processStats(const uint32_t frame, + [[maybe_unused]] const int64_t frameTimestamp, + const uint32_t bufferId, const ControlList &sensorControls) { auto it = buffers_.find(bufferId); if (it == buffers_.end()) { diff --git a/src/ipa/ipu3/meson.build b/src/ipa/ipu3/meson.build index e76f97c0..34de6213 100644 --- a/src/ipa/ipu3/meson.build +++ b/src/ipa/ipu3/meson.build @@ -12,8 +12,7 @@ ipu3_ipa_sources = files([ ipu3_ipa_sources += ipu3_ipa_algorithms -mod = shared_module(ipa_name, - [ipu3_ipa_sources, libcamera_generated_ipa_headers], +mod = shared_module(ipa_name, ipu3_ipa_sources, name_prefix : '', include_directories : [ipa_includes], dependencies : [libcamera_private, libipa_dep], diff --git a/src/ipa/libipa/agc_mean_luminance.cpp b/src/ipa/libipa/agc_mean_luminance.cpp index f97ef117..02555a44 100644 --- a/src/ipa/libipa/agc_mean_luminance.cpp +++ b/src/ipa/libipa/agc_mean_luminance.cpp @@ -89,10 +89,10 @@ static constexpr double kDefaultRelativeLuminanceTarget = 0.16; * \class AgcMeanLuminance * \brief A mean-based auto-exposure algorithm * - * This algorithm calculates a shutter time, analogue and digital gain such that - * the normalised mean luminance value of an image is driven towards a target, - * which itself is discovered from tuning data. The algorithm is a two-stage - * process. + * This algorithm calculates an exposure time, analogue and digital gain such + * that the normalised mean luminance value of an image is driven towards a + * target, which itself is discovered from tuning data. The algorithm is a + * two-stage process. * * In the first stage, an initial gain value is derived by iteratively comparing * the gain-adjusted mean luminance across the entire image against a target, @@ -109,7 +109,7 @@ static constexpr double kDefaultRelativeLuminanceTarget = 0.16; * stage is then clamped to the gain from this stage. * * The final gain is used to adjust the effective exposure value of the image, - * and that new exposure value is divided into shutter time, analogue gain and + * and that new exposure value is divided into exposure time, analogue gain and * digital gain according to the selected AeExposureMode. 
This class uses the * \ref ExposureModeHelper class to assist in that division, and expects the * data needed to initialise that class to be present in tuning data in a @@ -247,27 +247,27 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData) return -EINVAL; } - std::vector<uint32_t> shutters = - modeValues["shutter"].getList<uint32_t>().value_or(std::vector<uint32_t>{}); + std::vector<uint32_t> exposureTimes = + modeValues["exposureTime"].getList<uint32_t>().value_or(std::vector<uint32_t>{}); std::vector<double> gains = modeValues["gain"].getList<double>().value_or(std::vector<double>{}); - if (shutters.size() != gains.size()) { + if (exposureTimes.size() != gains.size()) { LOG(AgcMeanLuminance, Error) - << "Shutter and gain array sizes unequal"; + << "Exposure time and gain array sizes unequal"; return -EINVAL; } - if (shutters.empty()) { + if (exposureTimes.empty()) { LOG(AgcMeanLuminance, Error) - << "Shutter and gain arrays are empty"; + << "Exposure time and gain arrays are empty"; return -EINVAL; } std::vector<std::pair<utils::Duration, double>> stages; - for (unsigned int i = 0; i < shutters.size(); i++) { + for (unsigned int i = 0; i < exposureTimes.size(); i++) { stages.push_back({ - std::chrono::microseconds(shutters[i]), + std::chrono::microseconds(exposureTimes[i]), gains[i] }); } @@ -283,7 +283,7 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData) /* * If we don't have any exposure modes in the tuning data we create an * ExposureModeHelper using an empty vector of stages. This will result - * in the ExposureModeHelper simply driving the shutter as high as + * in the ExposureModeHelper simply driving the exposure time as high as * possible before touching gain. */ if (availableExposureModes.empty()) { @@ -338,18 +338,18 @@ int AgcMeanLuminance::parseExposureModes(const YamlObject &tuningData) * For the AeExposureMode control the data should contain a dictionary called * AeExposureMode containing per-mode setting dictionaries with the key being a * value from \ref controls::AeExposureModeNameValueMap. Each mode dict should - * contain an array of shutter times with the key "shutter" and an array of gain - * values with the key "gain", in this format: + * contain an array of exposure times with the key "exposureTime" and an array + * of gain values with the key "gain", in this format: * * \code{.unparsed} * algorithms: * - Agc: * AeExposureMode: * ExposureNormal: - * shutter: [ 100, 10000, 30000, 60000, 120000 ] + * exposureTime: [ 100, 10000, 30000, 60000, 120000 ] * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ] * ExposureShort: - * shutter: [ 100, 10000, 30000, 60000, 120000 ] + * exposureTime: [ 100, 10000, 30000, 60000, 120000 ] * gain: [ 2.0, 4.0, 6.0, 8.0, 10.0 ] * * \endcode @@ -371,20 +371,20 @@ int AgcMeanLuminance::parseTuningData(const YamlObject &tuningData) /** * \brief Set the ExposureModeHelper limits for this class - * \param[in] minShutter Minimum shutter time to allow - * \param[in] maxShutter Maximum shutter time to allow + * \param[in] minExposureTime Minimum exposure time to allow + * \param[in] maxExposureTime Maximum ewposure time to allow * \param[in] minGain Minimum gain to allow * \param[in] maxGain Maximum gain to allow * * This function calls \ref ExposureModeHelper::setLimits() for each * ExposureModeHelper that has been created for this class. 
*/ -void AgcMeanLuminance::setLimits(utils::Duration minShutter, - utils::Duration maxShutter, +void AgcMeanLuminance::setLimits(utils::Duration minExposureTime, + utils::Duration maxExposureTime, double minGain, double maxGain) { for (auto &[id, helper] : exposureModeHelpers_) - helper->setLimits(minShutter, maxShutter, minGain, maxGain); + helper->setLimits(minExposureTime, maxExposureTime, minGain, maxGain); } /** @@ -513,7 +513,8 @@ utils::Duration AgcMeanLuminance::filterExposure(utils::Duration exposureValue) } /** - * \brief Calculate the new exposure value and splut it between shutter time and gain + * \brief Calculate the new exposure value and split it between exposure time + * and gain * \param[in] constraintModeIndex The index of the current constraint mode * \param[in] exposureModeIndex The index of the current exposure mode * \param[in] yHist A Histogram from the ISP statistics to use in constraining * * Calculate a new exposure value to try to obtain the target. The calculated * exposure value is filtered to prevent rapid changes from frame to frame, and - * divided into shutter time, analogue and digital gain. + * divided into exposure time, analogue and digital gain. * - * \return Tuple of shutter time, analogue gain, and digital gain + * \return Tuple of exposure time, analogue gain, and digital gain */ std::tuple<utils::Duration, double, double> AgcMeanLuminance::calculateNewEv(uint32_t constraintModeIndex, diff --git a/src/ipa/libipa/agc_mean_luminance.h b/src/ipa/libipa/agc_mean_luminance.h index 576d28be..c41391cb 100644 --- a/src/ipa/libipa/agc_mean_luminance.h +++ b/src/ipa/libipa/agc_mean_luminance.h @@ -44,7 +44,7 @@ public: int parseTuningData(const YamlObject &tuningData); - void setLimits(utils::Duration minShutter, utils::Duration maxShutter, + void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime, double minGain, double maxGain); std::map<int32_t, std::vector<AgcConstraint>> constraintModes() diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp index 782ff990..7c66cd57 100644 --- a/src/ipa/libipa/camera_sensor_helper.cpp +++ b/src/ipa/libipa/camera_sensor_helper.cpp @@ -8,6 +8,7 @@ #include "camera_sensor_helper.h" #include <cmath> +#include <limits> #include <libcamera/base/log.h> @@ -40,6 +41,7 @@ namespace ipa { */ /** + * \fn CameraSensorHelper::CameraSensorHelper() * \brief Construct a CameraSensorHelper instance * * CameraSensorHelper derived class instances shall never be constructed * directly. They shall be instantiated with the CameraSensorHelperFactoryBase * class. */ /** + * \fn CameraSensorHelper::blackLevel() + * \brief Fetch the black level of the sensor + * + * This function returns the black level of the sensor scaled to a 16-bit pixel + * width. If it is unknown, an empty optional is returned. + * + * \todo Fill the blanks and add pedestal values for all supported sensors. Once + * done, drop the std::optional<>. + * + * Black levels are typically the result of the following phenomena: + * - Pedestal added by the sensor to pixel values. They are typically fixed, + * sometimes programmable and should be reported in datasheets (but + * documentation is not always available). + * - Dark currents and other physical effects that add charge to pixels in the + * absence of light. Those can depend on the integration time and the sensor + * die temperature, and their contribution to pixel values depends on the + * sensor gains.
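+ * + * For example, a sensor with a fixed pedestal of 64 in 10-bit samples reports it here as 64 * 2^(16 - 10) = 4096, the value used by several of the helpers below.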
+ * + * The pedestal is usually the value with the biggest contribution to the + * overall black level. In most cases it is either known before or in rare cases + * (there is not a single driver with such a control in the linux kernel) can be + * queried from the sensor. This function provides that fixed, known value. + * + * \return The black level of the sensor, or std::nullopt if not known + */ + +/** * \brief Compute gain code from the analogue gain absolute value * \param[in] gain The real gain to pass * @@ -58,21 +87,16 @@ namespace ipa { */ uint32_t CameraSensorHelper::gainCode(double gain) const { - const AnalogueGainConstants &k = gainConstants_; - - switch (gainType_) { - case AnalogueGainLinear: - ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0); + if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) { + ASSERT(l->m0 == 0 || l->m1 == 0); - return (k.linear.c0 - k.linear.c1 * gain) / - (k.linear.m1 * gain - k.linear.m0); + return (l->c0 - l->c1 * gain) / + (l->m1 * gain - l->m0); + } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) { + ASSERT(e->a != 0 && e->m != 0); - case AnalogueGainExponential: - ASSERT(k.exp.a != 0 && k.exp.m != 0); - - return std::log2(gain / k.exp.a) / k.exp.m; - - default: + return std::log2(gain / e->a) / e->m; + } else { ASSERT(false); return 0; } @@ -90,38 +114,26 @@ uint32_t CameraSensorHelper::gainCode(double gain) const */ double CameraSensorHelper::gain(uint32_t gainCode) const { - const AnalogueGainConstants &k = gainConstants_; double gain = static_cast<double>(gainCode); - switch (gainType_) { - case AnalogueGainLinear: - ASSERT(k.linear.m0 == 0 || k.linear.m1 == 0); - - return (k.linear.m0 * gain + k.linear.c0) / - (k.linear.m1 * gain + k.linear.c1); + if (auto *l = std::get_if<AnalogueGainLinear>(&gain_)) { + ASSERT(l->m0 == 0 || l->m1 == 0); - case AnalogueGainExponential: - ASSERT(k.exp.a != 0 && k.exp.m != 0); + return (l->m0 * gain + l->c0) / + (l->m1 * gain + l->c1); + } else if (auto *e = std::get_if<AnalogueGainExp>(&gain_)) { + ASSERT(e->a != 0 && e->m != 0); - return k.exp.a * std::exp2(k.exp.m * gain); - - default: + return e->a * std::exp2(e->m * gain); + } else { ASSERT(false); return 0.0; } } /** - * \enum CameraSensorHelper::AnalogueGainType - * \brief The gain calculation modes as defined by the MIPI CCS - * - * Describes the image sensor analogue gain capabilities. - * Two modes are possible, depending on the sensor: Linear and Exponential. - */ - -/** - * \var CameraSensorHelper::AnalogueGainLinear - * \brief Gain is computed using linear gain estimation + * \struct CameraSensorHelper::AnalogueGainLinear + * \brief Analogue gain constants for the linear gain model * * The relationship between the integer gain parameter and the resulting gain * multiplier is given by the following equation: @@ -136,11 +148,27 @@ double CameraSensorHelper::gain(uint32_t gainCode) const * The full Gain equation therefore reduces to either: * * \f$gain=\frac{c0}{m1x+c1}\f$ or \f$\frac{m0x+c0}{c1}\f$ + * + * \var CameraSensorHelper::AnalogueGainLinear::m0 + * \brief Constant used in the linear gain coding/decoding + * + * \note Either m0 or m1 shall be zero. + * + * \var CameraSensorHelper::AnalogueGainLinear::c0 + * \brief Constant used in the linear gain coding/decoding + * + * \var CameraSensorHelper::AnalogueGainLinear::m1 + * \brief Constant used in the linear gain coding/decoding + * + * \note Either m0 or m1 shall be zero. 
+ * + * \var CameraSensorHelper::AnalogueGainLinear::c1 + * \brief Constant used in the linear gain coding/decoding */ /** - * \var CameraSensorHelper::AnalogueGainExponential - * \brief Gain is expressed using an exponential model + * \struct CameraSensorHelper::AnalogueGainExp + * \brief Analogue gain constants for the exponential gain model * * The relationship between the integer gain parameter and the resulting gain * multiplier is given by the following equation: @@ -156,61 +184,22 @@ double CameraSensorHelper::gain(uint32_t gainCode) const * * When the gain is expressed in dB, 'a' is equal to 1 and 'm' to * \f$log_{2}{10^{\frac{1}{20}}}\f$. - */ - -/** - * \struct CameraSensorHelper::AnalogueGainLinearConstants - * \brief Analogue gain constants for the linear gain model - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::m0 - * \brief Constant used in the linear gain coding/decoding - * - * \note Either m0 or m1 shall be zero. - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::c0 - * \brief Constant used in the linear gain coding/decoding - * - * \var CameraSensorHelper::AnalogueGainLinearConstants::m1 - * \brief Constant used in the linear gain coding/decoding - * - * \note Either m0 or m1 shall be zero. * - * \var CameraSensorHelper::AnalogueGainLinearConstants::c1 - * \brief Constant used in the linear gain coding/decoding - */ - -/** - * \struct CameraSensorHelper::AnalogueGainExpConstants - * \brief Analogue gain constants for the exponential gain model - * - * \var CameraSensorHelper::AnalogueGainExpConstants::a + * \var CameraSensorHelper::AnalogueGainExp::a * \brief Constant used in the exponential gain coding/decoding * - * \var CameraSensorHelper::AnalogueGainExpConstants::m + * \var CameraSensorHelper::AnalogueGainExp::m * \brief Constant used in the exponential gain coding/decoding */ /** - * \struct CameraSensorHelper::AnalogueGainConstants - * \brief Analogue gain model constants - * - * This union stores the constants used to calculate the analogue gain. The - * CameraSensorHelper::gainType_ variable selects which union member is valid. - * - * \var CameraSensorHelper::AnalogueGainConstants::linear - * \brief Constants for the linear gain model - * - * \var CameraSensorHelper::AnalogueGainConstants::exp - * \brief Constants for the exponential gain model - */ - -/** - * \var CameraSensorHelper::gainType_ - * \brief The analogue gain model type + * \var CameraSensorHelper::blackLevel_ + * \brief The black level of the sensor + * \sa CameraSensorHelper::blackLevel() */ /** - * \var CameraSensorHelper::gainConstants_ + * \var CameraSensorHelper::gain_ * \brief The analogue gain parameters used for calculation * * The analogue gain is calculated through a formula, and its parameters are @@ -366,6 +355,99 @@ static constexpr double expGainDb(double step) return log2_10 * step / 20; } +class CameraSensorHelperAr0144 : public CameraSensorHelper +{ +public: + CameraSensorHelperAr0144() + { + /* Power-on default value: 168 at 12bits. */ + blackLevel_ = 2688; + } + + uint32_t gainCode(double gain) const override + { + /* The recommended minimum gain is 1.6842 to avoid artifacts. */ + gain = std::clamp(gain, 1.0 / (1.0 - 13.0 / 32.0), 18.45); + + /* + * The analogue gain is made of a coarse exponential gain in + * the range [2^0, 2^4] and a fine inversely linear gain in the + * range [1.0, 2.0[. There is an additional fixed 1.153125 + * multiplier when the coarse gain reaches 2^2. 
+ */ + + if (gain > 4.0) + gain /= 1.153125; + + unsigned int coarse = std::log2(gain); + unsigned int fine = (1 - (1 << coarse) / gain) * 32; + + /* The fine gain rounding depends on the coarse gain. */ + if (coarse == 1 || coarse == 3) + fine &= ~1; + else if (coarse == 4) + fine &= ~3; + + return (coarse << 4) | (fine & 0xf); + } + + double gain(uint32_t gainCode) const override + { + unsigned int coarse = gainCode >> 4; + unsigned int fine = gainCode & 0xf; + unsigned int d1; + double d2, m; + + switch (coarse) { + default: + case 0: + d1 = 1; + d2 = 32.0; + m = 1.0; + break; + case 1: + d1 = 2; + d2 = 16.0; + m = 1.0; + break; + case 2: + d1 = 1; + d2 = 32.0; + m = 1.153125; + break; + case 3: + d1 = 2; + d2 = 16.0; + m = 1.153125; + break; + case 4: + d1 = 4; + d2 = 8.0; + m = 1.153125; + break; + } + + /* + * With infinite precision, the calculated gain would be exact, + * and the reverse conversion with gainCode() would produce the + * same gain code. In the real world, rounding errors may cause + * the calculated gain to be lower by an amount negligible for + * all purposes, except for the reverse conversion. Converting + * the gain to a gain code could then return the quantized value + * just lower than the original gain code. To avoid this, tests + * showed that adding the machine epsilon to the multiplier m is + * sufficient. + */ + m += std::numeric_limits<decltype(m)>::epsilon(); + + return m * (1 << coarse) / (1.0 - (fine / d1) / d2); + } + +private: + static constexpr double kStep_ = 16; +}; +REGISTER_CAMERA_SENSOR_HELPER("ar0144", CameraSensorHelperAr0144) + class CameraSensorHelperAr0521 : public CameraSensorHelper { public: @@ -391,13 +473,50 @@ private: }; REGISTER_CAMERA_SENSOR_HELPER("ar0521", CameraSensorHelperAr0521) +class CameraSensorHelperGc05a2 : public CameraSensorHelper +{ +public: + CameraSensorHelperGc05a2() + { + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("gc05a2", CameraSensorHelperGc05a2) + +class CameraSensorHelperGc08a3 : public CameraSensorHelper +{ +public: + CameraSensorHelperGc08a3() + { + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 100, 0, 0, 1024 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("gc08a3", CameraSensorHelperGc08a3) + +class CameraSensorHelperImx214 : public CameraSensorHelper +{ +public: + CameraSensorHelperImx214() + { + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 512, -1, 512 }; + } +}; +REGISTER_CAMERA_SENSOR_HELPER("imx214", CameraSensorHelperImx214) + class CameraSensorHelperImx219 : public CameraSensorHelper { public: CameraSensorHelperImx219() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 256, -1, 256 }; + /* From datasheet: 64 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 256, -1, 256 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx219", CameraSensorHelperImx219) @@ -407,8 +526,9 @@ class CameraSensorHelperImx258 : public CameraSensorHelper public: CameraSensorHelperImx258() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 512, -1, 512 }; + /* From datasheet: 0x40 at 10bits. 
*/ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 0, 512, -1, 512 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx258", CameraSensorHelperImx258) @@ -418,8 +538,9 @@ class CameraSensorHelperImx283 : public CameraSensorHelper public: CameraSensorHelperImx283() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 2048, -1, 2048 }; + /* From datasheet: 0x32 at 10bits. */ + blackLevel_ = 3200; + gain_ = AnalogueGainLinear{ 0, 2048, -1, 2048 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx283", CameraSensorHelperImx283) @@ -429,8 +550,9 @@ class CameraSensorHelperImx290 : public CameraSensorHelper public: CameraSensorHelperImx290() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.3) }; + /* From datasheet: 0xf0 at 12bits. */ + blackLevel_ = 3840; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx290", CameraSensorHelperImx290) @@ -440,8 +562,7 @@ class CameraSensorHelperImx296 : public CameraSensorHelper public: CameraSensorHelperImx296() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.1) }; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.1) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx296", CameraSensorHelperImx296) @@ -456,8 +577,9 @@ class CameraSensorHelperImx335 : public CameraSensorHelper public: CameraSensorHelperImx335() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.3) }; + /* From datasheet: 0x32 at 10bits. */ + blackLevel_ = 3200; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx335", CameraSensorHelperImx335) @@ -467,19 +589,22 @@ class CameraSensorHelperImx415 : public CameraSensorHelper public: CameraSensorHelperImx415() { - gainType_ = AnalogueGainExponential; - gainConstants_.exp = { 1.0, expGainDb(0.3) }; + gain_ = AnalogueGainExp{ 1.0, expGainDb(0.3) }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx415", CameraSensorHelperImx415) +class CameraSensorHelperImx462 : public CameraSensorHelperImx290 +{ +}; +REGISTER_CAMERA_SENSOR_HELPER("imx462", CameraSensorHelperImx462) + class CameraSensorHelperImx477 : public CameraSensorHelper { public: CameraSensorHelperImx477() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 0, 1024, -1, 1024 }; + gain_ = AnalogueGainLinear{ 0, 1024, -1, 1024 }; } }; REGISTER_CAMERA_SENSOR_HELPER("imx477", CameraSensorHelperImx477) @@ -493,8 +618,7 @@ public: * The Sensor Manual doesn't appear to document the gain model. * This has been validated with some empirical testing only. */ - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov2685", CameraSensorHelperOv2685) @@ -504,8 +628,7 @@ class CameraSensorHelperOv2740 : public CameraSensorHelper public: CameraSensorHelperOv2740() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov2740", CameraSensorHelperOv2740) @@ -515,8 +638,9 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper public: CameraSensorHelperOv4689() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + /* From datasheet: 0x40 at 12bits. 
*/ + blackLevel_ = 1024; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov4689", CameraSensorHelperOv4689) @@ -526,8 +650,9 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper public: CameraSensorHelperOv5640() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + /* From datasheet: 0x10 at 10bits. */ + blackLevel_ = 1024; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5640", CameraSensorHelperOv5640) @@ -537,8 +662,7 @@ class CameraSensorHelperOv5647 : public CameraSensorHelper public: CameraSensorHelperOv5647() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5647", CameraSensorHelperOv5647) @@ -548,8 +672,7 @@ class CameraSensorHelperOv5670 : public CameraSensorHelper public: CameraSensorHelperOv5670() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5670", CameraSensorHelperOv5670) @@ -559,8 +682,9 @@ class CameraSensorHelperOv5675 : public CameraSensorHelper public: CameraSensorHelperOv5675() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + /* From Linux kernel driver: 0x40 at 10bits. */ + blackLevel_ = 4096; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5675", CameraSensorHelperOv5675) @@ -570,8 +694,7 @@ class CameraSensorHelperOv5693 : public CameraSensorHelper public: CameraSensorHelperOv5693() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 16 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 16 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov5693", CameraSensorHelperOv5693) @@ -581,8 +704,7 @@ class CameraSensorHelperOv64a40 : public CameraSensorHelper public: CameraSensorHelperOv64a40() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov64a40", CameraSensorHelperOv64a40) @@ -592,15 +714,13 @@ class CameraSensorHelperOv8858 : public CameraSensorHelper public: CameraSensorHelperOv8858() { - gainType_ = AnalogueGainLinear; - /* * \todo Validate the selected 1/128 step value as it differs * from what the sensor manual describes. 
* * See: https://patchwork.linuxtv.org/project/linux-media/patch/20221106171129.166892-2-nicholas@rothemail.net/#142267 */ - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov8858", CameraSensorHelperOv8858) @@ -610,8 +730,7 @@ class CameraSensorHelperOv8865 : public CameraSensorHelper { public: CameraSensorHelperOv8865() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov8865", CameraSensorHelperOv8865) @@ -621,8 +740,7 @@ class CameraSensorHelperOv13858 : public CameraSensorHelper { public: CameraSensorHelperOv13858() { - gainType_ = AnalogueGainLinear; - gainConstants_.linear = { 1, 0, 0, 128 }; + gain_ = AnalogueGainLinear{ 1, 0, 0, 128 }; } }; REGISTER_CAMERA_SENSOR_HELPER("ov13858", CameraSensorHelperOv13858) diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h index 0d99073b..a9300a64 100644 --- a/src/ipa/libipa/camera_sensor_helper.h +++ b/src/ipa/libipa/camera_sensor_helper.h @@ -7,10 +7,11 @@ #pragma once -#include <stdint.h> - #include <memory> +#include <optional> +#include <stdint.h> #include <string> +#include <variant> #include <vector> #include <libcamera/base/class.h> @@ -25,34 +26,25 @@ public: CameraSensorHelper() = default; virtual ~CameraSensorHelper() = default; + std::optional<int16_t> blackLevel() const { return blackLevel_; } virtual uint32_t gainCode(double gain) const; virtual double gain(uint32_t gainCode) const; protected: - enum AnalogueGainType { - AnalogueGainLinear, - AnalogueGainExponential, - }; - - struct AnalogueGainLinearConstants { + struct AnalogueGainLinear { int16_t m0; int16_t c0; int16_t m1; int16_t c1; }; - struct AnalogueGainExpConstants { + struct AnalogueGainExp { double a; double m; }; - union AnalogueGainConstants { - AnalogueGainLinearConstants linear; - AnalogueGainExpConstants exp; - }; - - AnalogueGainType gainType_; - AnalogueGainConstants gainConstants_; + std::optional<int16_t> blackLevel_; + std::variant<std::monostate, AnalogueGainLinear, AnalogueGainExp> gain_; private: LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorHelper) diff --git a/src/ipa/libipa/colours.cpp b/src/ipa/libipa/colours.cpp new file mode 100644 index 00000000..97124cf4 --- /dev/null +++ b/src/ipa/libipa/colours.cpp @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * libipa miscellaneous colour helpers + */ + +#include "colours.h" + +#include <algorithm> +#include <cmath> + +namespace libcamera { + +namespace ipa { + +/** + * \file colours.h + * \brief Functions to reduce code duplication between IPA modules + */ + +/** + * \brief Estimate luminance from RGB values following ITU-R BT.601 + * \param[in] rgb The RGB value + * + * This function estimates a luminance value from a triplet of Red, Green and + * Blue values, following the formula defined by ITU-R Recommendation BT.601-7 + * which can be found at https://www.itu.int/rec/R-REC-BT.601 + * + * \return The estimated luminance value + */ +double rec601LuminanceFromRGB(const RGB<double> &rgb) +{ + static const Vector<double, 3> rgb2y{{ + 0.299, 0.587, 0.114 + }}; + + return rgb.dot(rgb2y); +} + +/** + * \brief Estimate correlated colour temperature from RGB color space input + * \param[in] rgb The RGB value + * + * This function estimates the correlated color temperature from the RGB color + * space input. In physics and color science, the Planckian locus or black body locus + * is the path or locus that the color of an incandescent black body would take + * in a particular chromaticity space as the black body temperature changes. + * + * If a narrow range of color temperatures is considered (those encapsulating + * daylight being the most practical case) one can approximate the Planckian + * locus in order to calculate the CCT in terms of chromaticity coordinates. + * + * More detailed information can be found in: + * https://en.wikipedia.org/wiki/Color_temperature#Approximation + * + * \return The estimated color temperature + */ +uint32_t estimateCCT(const RGB<double> &rgb) +{ + /* + * Convert the RGB values to CIE tristimulus values (XYZ) and divide by + * the sum of X, Y and Z to calculate the CIE xy chromaticity. + */ + static const Matrix<double, 3, 3> rgb2xyz({ + -0.14282, 1.54924, -0.95641, + -0.32466, 1.57837, -0.73191, + -0.68202, 0.77073, 0.56332 + }); + + Vector<double, 3> xyz = rgb2xyz * rgb; + xyz /= xyz.sum(); + + /* Calculate CCT */ + double n = (xyz.x() - 0.3320) / (0.1858 - xyz.y()); + return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33; +} + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/colours.h b/src/ipa/libipa/colours.h new file mode 100644 index 00000000..d39b2ca8 --- /dev/null +++ b/src/ipa/libipa/colours.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * libipa miscellaneous colour helpers + */ + +#pragma once + +#include <stdint.h> + +#include "libcamera/internal/vector.h" + +namespace libcamera { + +namespace ipa { + +double rec601LuminanceFromRGB(const RGB<double> &rgb); +uint32_t estimateCCT(const RGB<double> &rgb); + +} /* namespace ipa */ + +} /* namespace libcamera */
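The McCamy approximation used by estimateCCT() can be sanity-checked against a known white point. A minimal standalone sketch, with mccamy() as a local stand-in for the last two lines of estimateCCT():

    #include <cassert>
    #include <cmath>

    /* McCamy's cubic approximation of CCT from CIE xy chromaticity. */
    static double mccamy(double x, double y)
    {
            double n = (x - 0.3320) / (0.1858 - y);
            return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33;
    }

    int main()
    {
            /* The D65 white point (x, y) = (0.3127, 0.3290) maps to ~6504 K. */
            assert(std::abs(mccamy(0.3127, 0.3290) - 6504.0) < 10.0);
            return 0;
    }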
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp index 683a564a..f235316d 100644 --- a/src/ipa/libipa/exposure_mode_helper.cpp +++ b/src/ipa/libipa/exposure_mode_helper.cpp @@ -14,9 +14,9 @@ * \file exposure_mode_helper.h * \brief Helper class that performs computations relating to exposure * - * AEGC algorithms have a need to split exposure between shutter time, analogue + * AEGC algorithms have a need to split exposure between exposure time, analogue * and digital gain. Multiple implementations do so based on paired stages of - * shutter time and gain limits; provide a helper to avoid duplicating the code. + * exposure time and gain limits; provide a helper to avoid duplicating the code. */ namespace libcamera { @@ -29,24 +29,24 @@ namespace ipa { /** * \class ExposureModeHelper - * \brief Class for splitting exposure into shutter time and total gain + * \brief Class for splitting exposure into exposure time and total gain * * The ExposureModeHelper class provides a standard interface through which an - * AEGC algorithm can divide exposure between shutter time and gain. It is - * configured with a set of shutter time and gain pairs and works by initially - * fixing gain at 1.0 and increasing shutter time up to the shutter time value + * AEGC algorithm can divide exposure between exposure time and gain. It is + * configured with a set of exposure time and gain pairs and works by initially + * fixing gain at 1.0 and increasing exposure time up to the exposure time value * from the first pair in the set in an attempt to meet the required exposure * value.
* - * If the required exposure is not achievable by the first shutter time value + * If the required exposure is not achievable by the first exposure time value * alone it ramps gain up to the value from the first pair in the set. If the - * required exposure is still not met it then allows shutter time to ramp up to - * the shutter time value from the second pair in the set, and continues in this + * required exposure is still not met it then allows exposure time to ramp up to + * the exposure time value from the second pair in the set, and continues in this * vein until either the required exposure time is met, or else the hardware's - * shutter time or gain limits are reached. + * exposure time or gain limits are reached. * * This method allows users to strike a balance between a well-exposed image and - * an acceptable frame-rate, as opposed to simply maximising shutter time + * an acceptable frame-rate, as opposed to simply maximising exposure time * followed by gain. The same helpers can be used to perform the latter * operation if needed by passing an empty set of pairs to the initialisation * function. @@ -61,56 +61,56 @@ namespace ipa { /** * \brief Construct an ExposureModeHelper instance - * \param[in] stages The vector of paired shutter time and gain limits + * \param[in] stages The vector of paired exposure time and gain limits * - * The input stages are shutter time and _total_ gain pairs; the gain + * The input stages are exposure time and _total_ gain pairs; the gain * encompasses both analogue and digital gain. * * The vector of stages may be empty. In that case, the helper will simply use - * the runtime limits set through setShutterGainLimits() instead. + * the runtime limits set through setLimits() instead. */ ExposureModeHelper::ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages) { - minShutter_ = 0us; - maxShutter_ = 0us; + minExposureTime_ = 0us; + maxExposureTime_ = 0us; minGain_ = 0; maxGain_ = 0; for (const auto &[s, g] : stages) { - shutters_.push_back(s); + exposureTimes_.push_back(s); gains_.push_back(g); } } /** - * \brief Set the shutter time and gain limits - * \param[in] minShutter The minimum shutter time supported - * \param[in] maxShutter The maximum shutter time supported + * \brief Set the exposure time and gain limits + * \param[in] minExposureTime The minimum exposure time supported + * \param[in] maxExposureTime The maximum exposure time supported * \param[in] minGain The minimum analogue gain supported * \param[in] maxGain The maximum analogue gain supported * - * This function configures the shutter time and analogue gain limits that need + * This function configures the exposure time and analogue gain limits that need * to be adhered to as the helper divides up exposure. Note that this function * *must* be called whenever those limits change and before splitExposure() is * used. * - * If the algorithm using the helpers needs to indicate that either shutter time + * If the algorithm using the helpers needs to indicate that either exposure time * or analogue gain or both should be fixed it can do so by setting both the * minima and maxima to the same value. 
*/ -void ExposureModeHelper::setLimits(utils::Duration minShutter, - utils::Duration maxShutter, +void ExposureModeHelper::setLimits(utils::Duration minExposureTime, + utils::Duration maxExposureTime, double minGain, double maxGain) { - minShutter_ = minShutter; - maxShutter_ = maxShutter; + minExposureTime_ = minExposureTime; + maxExposureTime_ = maxExposureTime; minGain_ = minGain; maxGain_ = maxGain; } -utils::Duration ExposureModeHelper::clampShutter(utils::Duration shutter) const +utils::Duration ExposureModeHelper::clampExposureTime(utils::Duration exposureTime) const { - return std::clamp(shutter, minShutter_, maxShutter_); + return std::clamp(exposureTime, minExposureTime_, maxExposureTime_); } double ExposureModeHelper::clampGain(double gain) const @@ -119,125 +119,119 @@ double ExposureModeHelper::clampGain(double gain) const } /** - * \brief Split exposure time into shutter time and gain - * \param[in] exposure Exposure time + * \brief Split exposure into exposure time and gain + * \param[in] exposure Exposure value * - * This function divides a given exposure time into shutter time, analogue and - * digital gain by iterating through stages of shutter time and gain limits. At - * each stage the current stage's shutter time limit is multiplied by the + * This function divides a given exposure into exposure time, analogue and + * digital gain by iterating through stages of exposure time and gain limits. + * At each stage the current stage's exposure time limit is multiplied by the * previous stage's gain limit (or 1.0 initially) to see if the combination of - * the two can meet the required exposure time. If they cannot then the current - * stage's shutter time limit is multiplied by the same stage's gain limit to + * the two can meet the required exposure. If they cannot then the current + * stage's exposure time limit is multiplied by the same stage's gain limit to * see if that combination can meet the required exposure time. If they cannot * then the function moves to consider the next stage. * - * When a combination of shutter time and gain _stage_ limits are found that are - * sufficient to meet the required exposure time, the function attempts to - * reduce shutter time as much as possible whilst fixing gain and still meeting - * the exposure time. If a _runtime_ limit prevents shutter time from being - * lowered enough to meet the exposure time with gain fixed at the stage limit, - * gain is also lowered to compensate. + * When a combination of exposure time and gain _stage_ limits are found that + * are sufficient to meet the required exposure, the function attempts to reduce + * exposure time as much as possible whilst fixing gain and still meeting the + * exposure. If a _runtime_ limit prevents exposure time from being lowered + * enough to meet the exposure with gain fixed at the stage limit, gain is also + * lowered to compensate. * - * Once the shutter time and gain values are ascertained, gain is assigned as + * Once the exposure time and gain values are ascertained, gain is assigned as * analogue gain as much as possible, with digital gain only in use if the * maximum analogue gain runtime limit is unable to accommodate the exposure * value. * - * If no combination of shutter time and gain limits is found that meets the - * required exposure time, the helper falls-back to simply maximising the - * shutter time first, followed by analogue gain, followed by digital gain. 
+ * If no combination of exposure time and gain limits is found that meets the + * required exposure, the helper falls back to simply maximising the exposure + * time first, followed by analogue gain, followed by digital gain. * - * \return Tuple of shutter time, analogue gain, and digital gain + * \return Tuple of exposure time, analogue gain, and digital gain */ std::tuple<utils::Duration, double, double> ExposureModeHelper::splitExposure(utils::Duration exposure) const { - ASSERT(maxShutter_); + ASSERT(maxExposureTime_); ASSERT(maxGain_); bool gainFixed = minGain_ == maxGain_; - bool shutterFixed = minShutter_ == maxShutter_; + bool exposureTimeFixed = minExposureTime_ == maxExposureTime_; /* * There's no point entering the loop if we cannot change either gain - * nor shutter anyway. + * or exposure time anyway. */ - if (shutterFixed && gainFixed) - return { minShutter_, minGain_, exposure / (minShutter_ * minGain_) }; + if (exposureTimeFixed && gainFixed) + return { minExposureTime_, minGain_, exposure / (minExposureTime_ * minGain_) }; - utils::Duration shutter; - double stageGain; + utils::Duration exposureTime; + double stageGain = 1.0; double gain; for (unsigned int stage = 0; stage < gains_.size(); stage++) { double lastStageGain = stage == 0 ? 1.0 : clampGain(gains_[stage - 1]); - utils::Duration stageShutter = clampShutter(shutters_[stage]); + utils::Duration stageExposureTime = clampExposureTime(exposureTimes_[stage]); stageGain = clampGain(gains_[stage]); /* - * We perform the clamping on both shutter and gain in case the - * helper has had limits set that prevent those values being - * lowered beyond a certain minimum...this can happen at runtime - * for various reasons and so would not be known when the stage - * limits are initialised. + * We perform the clamping on both exposure time and gain in + * case the helper has had limits set that prevent those values + * being lowered beyond a certain minimum...this can happen at + * runtime for various reasons and so would not be known when + * the stage limits are initialised. */ - if (stageShutter * lastStageGain >= exposure) { - shutter = clampShutter(exposure / clampGain(lastStageGain)); - gain = clampGain(exposure / shutter); + if (stageExposureTime * lastStageGain >= exposure) { + exposureTime = clampExposureTime(exposure / clampGain(lastStageGain)); + gain = clampGain(exposure / exposureTime); - return { shutter, gain, exposure / (shutter * gain) }; + return { exposureTime, gain, exposure / (exposureTime * gain) }; } - if (stageShutter * stageGain >= exposure) { - shutter = clampShutter(exposure / clampGain(stageGain)); - gain = clampGain(exposure / shutter); + if (stageExposureTime * stageGain >= exposure) { + exposureTime = clampExposureTime(exposure / clampGain(stageGain)); + gain = clampGain(exposure / exposureTime); - return { shutter, gain, exposure / (shutter * gain) }; + return { exposureTime, gain, exposure / (exposureTime * gain) }; } } /* - * From here on all we can do is max out the shutter time, followed by + * From here on all we can do is max out the exposure time, followed by * the analogue gain. If we still haven't achieved the target we send * the rest of the exposure time to digital gain. If we were given no - * stages to use then set stageGain to 1.0 so that shutter time is maxed - * before gain touched at all. + * stages to use then the default stageGain of 1.0 is used so that + * exposure time is maxed before gain is touched at all.
*/ - if (gains_.empty()) - stageGain = 1.0; + exposureTime = clampExposureTime(exposure / clampGain(stageGain)); + gain = clampGain(exposure / exposureTime); - shutter = clampShutter(exposure / clampGain(stageGain)); - gain = clampGain(exposure / shutter); - - return { shutter, gain, exposure / (shutter * gain) }; + return { exposureTime, gain, exposure / (exposureTime * gain) }; } /** - * \fn ExposureModeHelper::minShutter() - * \brief Retrieve the configured minimum shutter time limit set through - * setShutterGainLimits() - * \return The minShutter_ value + * \fn ExposureModeHelper::minExposureTime() + * \brief Retrieve the configured minimum exposure time limit set through + * setLimits() + * \return The minExposureTime_ value */ /** - * \fn ExposureModeHelper::maxShutter() - * \brief Retrieve the configured maximum shutter time set through - * setShutterGainLimits() - * \return The maxShutter_ value + * \fn ExposureModeHelper::maxExposureTime() + * \brief Retrieve the configured maximum exposure time set through setLimits() + * \return The maxExposureTime_ value */ /** * \fn ExposureModeHelper::minGain() - * \brief Retrieve the configured minimum gain set through - * setShutterGainLimits() + * \brief Retrieve the configured minimum gain set through setLimits() * \return The minGain_ value */ /** * \fn ExposureModeHelper::maxGain() - * \brief Retrieve the configured maximum gain set through - * setShutterGainLimits() + * \brief Retrieve the configured maximum gain set through setLimits() * \return The maxGain_ value */ diff --git a/src/ipa/libipa/exposure_mode_helper.h b/src/ipa/libipa/exposure_mode_helper.h index 85c665d7..c5be1b67 100644 --- a/src/ipa/libipa/exposure_mode_helper.h +++ b/src/ipa/libipa/exposure_mode_helper.h @@ -24,26 +24,26 @@ public: ExposureModeHelper(const Span<std::pair<utils::Duration, double>> stages); ~ExposureModeHelper() = default; - void setLimits(utils::Duration minShutter, utils::Duration maxShutter, + void setLimits(utils::Duration minExposureTime, utils::Duration maxExposureTime, double minGain, double maxGain); std::tuple<utils::Duration, double, double> splitExposure(utils::Duration exposure) const; - utils::Duration minShutter() const { return minShutter_; } - utils::Duration maxShutter() const { return maxShutter_; } + utils::Duration minExposureTime() const { return minExposureTime_; } + utils::Duration maxExposureTime() const { return maxExposureTime_; } double minGain() const { return minGain_; } double maxGain() const { return maxGain_; } private: - utils::Duration clampShutter(utils::Duration shutter) const; + utils::Duration clampExposureTime(utils::Duration exposureTime) const; double clampGain(double gain) const; - std::vector<utils::Duration> shutters_; + std::vector<utils::Duration> exposureTimes_; std::vector<double> gains_; - utils::Duration minShutter_; - utils::Duration maxShutter_; + utils::Duration minExposureTime_; + utils::Duration maxExposureTime_; double minGain_; double maxGain_; };
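A usage sketch for the renamed helper, with a single hypothetical stage of (10 ms, x4.0); the limits and exposure values are invented for illustration:

    #include <utility>
    #include <vector>

    #include "exposure_mode_helper.h"

    using namespace libcamera;
    using namespace std::literals::chrono_literals;

    int main()
    {
            std::vector<std::pair<utils::Duration, double>> stages{ { 10ms, 4.0 } };
            ipa::ExposureModeHelper helper(stages);

            /* Runtime hardware limits: 100 us to 33 ms, gain x1.0 to x16.0. */
            helper.setLimits(100us, 33ms, 1.0, 16.0);

            /*
             * An exposure of 20 ms (time x total gain) fits the first stage:
             * 10 ms x 4.0 >= 20 ms, so gain is fixed at the stage limit of 4.0
             * and the exposure time drops to 20 ms / 4.0 = 5 ms, with no
             * digital gain required.
             */
            const auto [time, aGain, dGain] = helper.splitExposure(20ms);
            /* time == 5ms, aGain == 4.0, dGain == 1.0 */
            return 0;
    }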
diff --git a/src/ipa/libipa/fc_queue.h b/src/ipa/libipa/fc_queue.h index 24d9e82b..a1d13652 100644 --- a/src/ipa/libipa/fc_queue.h +++ b/src/ipa/libipa/fc_queue.h @@ -25,6 +25,7 @@ struct FrameContext { private: template<typename T> friend class FCQueue; uint32_t frame; + bool initialised = false; }; template<typename FrameContext> @@ -38,8 +39,10 @@ public: void clear() { - for (FrameContext &ctx : contexts_) + for (FrameContext &ctx : contexts_) { + ctx.initialised = false; ctx.frame = 0; + } } FrameContext &alloc(const uint32_t frame) @@ -83,6 +86,21 @@ public: << " has been overwritten by " << frameContext.frame; + if (frame == 0 && !frameContext.initialised) { + /* + * If the IPA calls get() at start() time it will get an + * uninitialised FrameContext, as the "frame == + * frameContext.frame" check below will succeed because + * FrameContexts are zeroed at creation time. + * + * Make sure the FrameContext gets initialised if get() + * is called before alloc() by the IPA for frame #0. + */ + init(frameContext, frame); + + return frameContext; + } + if (frame == frameContext.frame) return frameContext; @@ -108,6 +126,7 @@ private: { frameContext = {}; frameContext.frame = frame; + frameContext.initialised = true; } std::vector<FrameContext> contexts_; diff --git a/src/ipa/rkisp1/utils.cpp b/src/ipa/libipa/fixedpoint.cpp index 960ec64e..6b698fc5 100644 --- a/src/ipa/rkisp1/utils.cpp +++ b/src/ipa/libipa/fixedpoint.cpp @@ -2,18 +2,18 @@ /* * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> * - * Miscellaneous utility functions specific to rkisp1 + * Fixed / floating point conversions */ -#include "utils.h" +#include "fixedpoint.h" /** - * \file utils.h + * \file fixedpoint.h */ namespace libcamera { -namespace ipa::rkisp1::utils { +namespace ipa { /** * \fn R floatingToFixedPoint(T number) @@ -37,6 +37,6 @@ namespace ipa::rkisp1::utils { * \return The converted value */ -} /* namespace ipa::rkisp1::utils */ +} /* namespace ipa */ } /* namespace libcamera */ diff --git a/src/ipa/rkisp1/utils.h b/src/ipa/libipa/fixedpoint.h index 450f2244..709cf50f 100644 --- a/src/ipa/rkisp1/utils.h +++ b/src/ipa/libipa/fixedpoint.h @@ -2,18 +2,17 @@ /* * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> * - * Miscellaneous utility functions specific to rkisp1 + * Fixed / floating point conversions */ #pragma once #include <cmath> -#include <limits> #include <type_traits> namespace libcamera { -namespace ipa::rkisp1::utils { +namespace ipa { #ifndef __DOXYGEN__ template<unsigned int I, unsigned int F, typename R, typename T, @@ -61,6 +60,6 @@ constexpr R fixedToFloatingPoint(T number) return static_cast<R>(t) / static_cast<R>(1 << F); } -} /* namespace ipa::rkisp1::utils */ +} /* namespace ipa */ } /* namespace libcamera */ diff --git a/src/ipa/libipa/histogram.cpp b/src/ipa/libipa/histogram.cpp index 5fbfadf5..10e44b54 100644 --- a/src/ipa/libipa/histogram.cpp +++ b/src/ipa/libipa/histogram.cpp @@ -63,6 +63,12 @@ Histogram::Histogram(Span<const uint32_t> data) */ /** + * \fn Histogram::data() + * \brief Retrieve the internal data + * \return The data + */ + +/** * \fn Histogram::total() * \brief Retrieve the total number of values in the data set * \return Number of values diff --git a/src/ipa/libipa/histogram.h b/src/ipa/libipa/histogram.h index 032adca0..a926002c 100644 --- a/src/ipa/libipa/histogram.h +++ b/src/ipa/libipa/histogram.h @@ -7,7 +7,6 @@ #pragma once -#include <assert.h> #include <limits.h> #include <stdint.h> #include <type_traits> @@ -37,6 +36,7 @@ public: } size_t bins() const { return cumulative_.size() - 1; } + const Span<const uint64_t> data() const { return cumulative_; } uint64_t total() const { return cumulative_[cumulative_.size() - 1]; } uint64_t cumulativeFrequency(double bin) const; double quantile(double q, uint32_t first = 0, uint32_t last = UINT_MAX) const;
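The relocated fixed-point helpers above can be exercised with a hypothetical round trip, assuming a Q4.7 layout and the <I, F, R, T> template parameter order visible in the header:

    #include <cstdint>

    #include "fixedpoint.h"

    using namespace libcamera;

    int main()
    {
            /* 2.5 in Q4.7: 2.5 * 2^7 == 320 == 0x140. */
            uint16_t fixed = ipa::floatingToFixedPoint<4, 7, uint16_t>(2.5);

            /* And back: 320 / 2^7 == 2.5. */
            double value = ipa::fixedToFloatingPoint<4, 7, double>(fixed);
            /* value == 2.5 */
            return 0;
    }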
diff --git a/src/ipa/libipa/interpolator.cpp b/src/ipa/libipa/interpolator.cpp new file mode 100644 index 00000000..73e8d3b7 --- /dev/null +++ b/src/ipa/libipa/interpolator.cpp @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class for interpolating objects + */ +#include "interpolator.h" + +#include <algorithm> +#include <string> + +#include <libcamera/base/log.h> + +#include "libcamera/internal/yaml_parser.h" + +/** + * \file interpolator.h + * \brief Helper class for linearly interpolating a set of objects + */ + +namespace libcamera { + +LOG_DEFINE_CATEGORY(Interpolator) + +namespace ipa { + +/** + * \class Interpolator + * \brief Class for storing, retrieving, and interpolating objects + * \tparam T Type of objects stored in the interpolator + * + * The main use case is to pass a map from color temperatures to corresponding + * objects (e.g. matrices for color correction), and then requesting an + * interpolated object for a specific color temperature. This class will + * abstract away the interpolation portion. + */ + +/** + * \fn Interpolator::Interpolator() + * \brief Construct an empty interpolator + */ + +/** + * \fn Interpolator::Interpolator(const std::map<unsigned int, T> &data) + * \brief Construct an interpolator from a map of objects + * \param data Map from which to construct the interpolator + */ + +/** + * \fn Interpolator::Interpolator(std::map<unsigned int, T> &&data) + * \brief Construct an interpolator from a map of objects + * \param data Map from which to construct the interpolator + */ + +/** + * \fn int Interpolator<T>::readYaml(const libcamera::YamlObject &yaml, const std::string &key_name, const std::string &value_name) + * \brief Initialize an Interpolator instance from yaml + * \tparam T Type of data stored in the interpolator + * \param[in] yaml The yaml object that contains the map of unsigned integers to + * objects + * \param[in] key_name The name of the key in the yaml object + * \param[in] value_name The name of the value in the yaml object + * + * The yaml object is expected to be a list of maps. Each map has two or more + * pairs: one of \a key_name to the key value (usually color temperature), and + * one or more of \a value_name to the object. This is a bit difficult to + * explain, so here is an example (in python, as it is easier to parse than + * yaml): + * [ + * { + * 'ct': 2860, + * 'ccm': [ 2.12089, -0.52461, -0.59629, + * -0.85342, 2.80445, -0.95103, + * -0.26897, -1.14788, 2.41685 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * + * { + * 'ct': 2960, + * 'ccm': [ 2.26962, -0.54174, -0.72789, + * -0.77008, 2.60271, -0.83262, + * -0.26036, -1.51254, 2.77289 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * + * { + * 'ct': 3603, + * 'ccm': [ 2.18644, -0.66148, -0.52496, + * -0.77828, 2.69474, -0.91645, + * -0.25239, -0.83059, 2.08298 ], + * 'offsets': [ 0, 0, 0 ] + * }, + * ] + * + * In this case, \a key_name would be 'ct', and \a value_name can be either + * 'ccm' or 'offsets'. This way multiple interpolators can be defined in + * one set of color temperature ranges in the tuning file, and they can be + * retrieved separately with the \a value_name parameter. + * + * \return Zero on success, negative error code otherwise + */ + +/** + * \fn void Interpolator<T>::setQuantization(const unsigned int q) + * \brief Set the quantization value + * \param[in] q The quantization value + * + * Sets the quantization value. When this is set, 'key' gets quantized to this + * size, before doing the interpolation. This can help in reducing the number of + * updates pushed to the hardware.
+ * + * Note that normally a threshold needs to be combined with quantization. + * Otherwise a value that swings around the edge of the quantization step will + * lead to constant updates. + */ + +/** + * \fn void Interpolator<T>::setData(std::map<unsigned int, T> &&data) + * \brief Set the internal map + * + * Overwrites the internal map using move semantics. + */ + +/** + * \fn const T& Interpolator<T>::getInterpolated() + * \brief Retrieve an interpolated value for the given key + * \param[in] key The unsigned integer key of the object to retrieve + * \param[out] quantizedKey If provided, the key value after quantization + * \return The object corresponding to the key. The object is cached internally, + * so on successive calls with the same key (after quantization) interpolation + * is not recalculated. + */ + +/** + * \fn void Interpolator<T>::interpolate(const T &a, const T &b, T &dest, double + * lambda) + * \brief Interpolate between two instances of T + * \param a The first value to interpolate + * \param b The second value to interpolate + * \param dest The destination for the interpolated value + * \param lambda The interpolation factor (0..1) + * + * Interpolates between \a a and \a b according to \a lambda. It calculates + * dest = a * (1.0 - lambda) + b * lambda; + * + * If T supports multiplication with double and addition, this function can be + * used as is. For other types this function can be overwritten using partial + * template specialization. + */ + +} /* namespace ipa */ + +} /* namespace libcamera */ diff --git a/src/ipa/libipa/interpolator.h b/src/ipa/libipa/interpolator.h new file mode 100644 index 00000000..fffce214 --- /dev/null +++ b/src/ipa/libipa/interpolator.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Helper class for interpolating maps of objects + */ + +#pragma once + +#include <algorithm> +#include <cmath> +#include <map> +#include <string> +#include <tuple> + +#include <libcamera/base/log.h> + +#include "libcamera/internal/yaml_parser.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(Interpolator) + +namespace ipa { + +template<typename T> +class Interpolator +{ +public: + Interpolator() = default; + Interpolator(const std::map<unsigned int, T> &data) + : data_(data) + { + } + Interpolator(std::map<unsigned int, T> &&data) + : data_(std::move(data)) + { + } + + ~Interpolator() = default; + + int readYaml(const libcamera::YamlObject &yaml, + const std::string &key_name, + const std::string &value_name) + { + data_.clear(); + lastInterpolatedKey_.reset(); + + if (!yaml.isList()) { + LOG(Interpolator, Error) << "yaml object must be a list"; + return -EINVAL; + } + + for (const auto &value : yaml.asList()) { + unsigned int ct = std::stoul(value[key_name].get<std::string>("")); + std::optional<T> data = + value[value_name].get<T>(); + if (!data) { + return -EINVAL; + } + + data_[ct] = *data; + } + + if (data_.size() < 1) { + LOG(Interpolator, Error) << "Need at least one element"; + return -EINVAL; + } + + return 0; + } + + void setQuantization(const unsigned int q) + { + quantization_ = q; + } + + void setData(std::map<unsigned int, T> &&data) + { + data_ = std::move(data); + lastInterpolatedKey_.reset(); + } + + const T &getInterpolated(unsigned int key, unsigned int *quantizedKey = nullptr) + { + ASSERT(data_.size() > 0); + + if (quantization_ > 0) + key = std::lround(key / static_cast<double>(quantization_)) * quantization_; + + if (quantizedKey) + 
*quantizedKey = key; + + if (lastInterpolatedKey_.has_value() && + *lastInterpolatedKey_ == key) + return lastInterpolatedValue_; + + auto it = data_.lower_bound(key); + + if (it == data_.begin()) + return it->second; + + if (it == data_.end()) + return std::prev(it)->second; + + if (it->first == key) + return it->second; + + auto it2 = std::prev(it); + double lambda = (key - it2->first) / static_cast<double>(it->first - it2->first); + interpolate(it2->second, it->second, lastInterpolatedValue_, lambda); + lastInterpolatedKey_ = key; + + return lastInterpolatedValue_; + } + + void interpolate(const T &a, const T &b, T &dest, double lambda) + { + dest = a * (1.0 - lambda) + b * lambda; + } + +private: + std::map<unsigned int, T> data_; + T lastInterpolatedValue_; + std::optional<unsigned int> lastInterpolatedKey_; + unsigned int quantization_ = 0; +}; + +} /* namespace ipa */ + +} /* namespace libcamera */
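A usage sketch for the class above, storing plain doubles against invented colour temperature keys to show quantization and interpolation:

    #include <map>

    #include "interpolator.h"

    using namespace libcamera;

    int main()
    {
            ipa::Interpolator<double> gains(std::map<unsigned int, double>{
                    { 4000, 1.0 },
                    { 5000, 2.0 },
            });
            gains.setQuantization(100);

            unsigned int quantized;
            /* 4523 quantizes to 4500, then interpolates halfway between the
             * 4000 and 5000 entries. */
            double g = gains.getInterpolated(4523, &quantized);
            /* quantized == 4500, g == 1.5; the result is cached until the
             * quantized key changes. */
            return 0;
    }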
diff --git a/src/ipa/libipa/lsc_polynomial.cpp b/src/ipa/libipa/lsc_polynomial.cpp new file mode 100644 index 00000000..f607d86c --- /dev/null +++ b/src/ipa/libipa/lsc_polynomial.cpp @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * Polynomial class to represent lens shading correction + */ + +#include "lsc_polynomial.h" + +#include <libcamera/base/log.h> + +/** + * \file lsc_polynomial.h + * \brief LscPolynomial class + */ + +namespace libcamera { + +LOG_DEFINE_CATEGORY(LscPolynomial) + +namespace ipa { + +/** + * \class LscPolynomial + * \brief Class for handling even polynomials used in lens shading correction + * + * Shading artifacts of camera lenses can be modeled using even radial + * polynomials. This class implements a polynomial with 5 coefficients which + * follows the definition of the FixVignetteRadial opcode in the Adobe DNG + * specification. + */ + +/** + * \fn LscPolynomial::LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0, double k1 = 0.0, double k2 = 0.0, double k3 = 0.0, double k4 = 0.0) + * \brief Construct a polynomial using the given coefficients + * \param cx Center-x relative to the image in normalized coordinates (0..1) + * \param cy Center-y relative to the image in normalized coordinates (0..1) + * \param k0 Coefficient of the polynomial + * \param k1 Coefficient of the polynomial + * \param k2 Coefficient of the polynomial + * \param k3 Coefficient of the polynomial + * \param k4 Coefficient of the polynomial + */ + +/** + * \fn LscPolynomial::sampleAtNormalizedPixelPos(double x, double y) + * \brief Sample the polynomial at the given normalized pixel position + * + * This function samples the polynomial at the given pixel position divided by + * the value returned by getM(). + * + * \param x x position in normalized coordinates + * \param y y position in normalized coordinates + * \return The sampled value + */ + +/** + * \fn LscPolynomial::getM() + * \brief Get the value m as described in the DNG specification + * + * Returns m according to the DNG spec. m represents the Euclidean distance + * (in pixels) from the optical center to the farthest pixel in the + * image. + * + * \return The distance m, in pixels + */ + +/** + * \fn LscPolynomial::setReferenceImageSize(const Size &size) + * \brief Set the reference image size + * + * Set the reference image size that is used for subsequent calls to getM() and + * sampleAtNormalizedPixelPos() + * + * \param size The size of the reference image + */ + +} // namespace ipa +} // namespace libcamera diff --git a/src/ipa/libipa/lsc_polynomial.h b/src/ipa/libipa/lsc_polynomial.h new file mode 100644 index 00000000..c898faeb --- /dev/null +++ b/src/ipa/libipa/lsc_polynomial.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * Helper for radial polynomial used in lens shading correction. + */ +#pragma once + +#include <algorithm> +#include <array> +#include <assert.h> +#include <cmath> + +#include <libcamera/base/log.h> +#include <libcamera/base/span.h> + +#include "libcamera/internal/yaml_parser.h" + +namespace libcamera { + +LOG_DECLARE_CATEGORY(LscPolynomial) + +namespace ipa { + +class LscPolynomial +{
public: + LscPolynomial(double cx = 0.0, double cy = 0.0, double k0 = 0.0, + double k1 = 0.0, double k2 = 0.0, double k3 = 0.0, + double k4 = 0.0) + : cx_(cx), cy_(cy), cnx_(0), cny_(0), + coefficients_({ k0, k1, k2, k3, k4 }) + { + } + + double sampleAtNormalizedPixelPos(double x, double y) const + { + double dx = x - cnx_; + double dy = y - cny_; + double r = sqrt(dx * dx + dy * dy); + double res = 1.0; + for (unsigned int i = 0; i < coefficients_.size(); i++) { + res += coefficients_[i] * std::pow(r, (i + 1) * 2); + } + return res; + } + + double getM() const + { + double cpx = imageSize_.width * cx_; + double cpy = imageSize_.height * cy_; + double mx = std::max(cpx, std::fabs(imageSize_.width - cpx)); + double my = std::max(cpy, std::fabs(imageSize_.height - cpy)); + + return sqrt(mx * mx + my * my); + } + + void setReferenceImageSize(const Size &size) + { + assert(!size.isNull()); + imageSize_ = size; + + /* Calculate normalized centers */ + double m = getM(); + cnx_ = (size.width * cx_) / m; + cny_ = (size.height * cy_) / m; + } + +private: + double cx_; + double cy_; + double cnx_; + double cny_; + std::array<double, 5> coefficients_; + + Size imageSize_; +}; + +} /* namespace ipa */ + +#ifndef __DOXYGEN__ + +template<> +struct YamlObject::Getter<ipa::LscPolynomial> { + std::optional<ipa::LscPolynomial> get(const YamlObject &obj) const + { + std::optional<double> cx = obj["cx"].get<double>(); + std::optional<double> cy = obj["cy"].get<double>(); + std::optional<double> k0 = obj["k0"].get<double>(); + std::optional<double> k1 = obj["k1"].get<double>(); + std::optional<double> k2 = obj["k2"].get<double>(); + std::optional<double> k3 = obj["k3"].get<double>(); + std::optional<double> k4 = obj["k4"].get<double>(); + + if (!(cx && cy && k0 && k1 && k2 && k3 && k4)) { + LOG(LscPolynomial, Error) + << "Polynomial is missing a parameter"; + return std::nullopt; + } + + return ipa::LscPolynomial(*cx, *cy, *k0, *k1, *k2, *k3, *k4); + } +}; + +#endif + +} /* namespace libcamera */
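A usage sketch for LscPolynomial with invented coefficients, assuming Size is available via libcamera/geometry.h:

    #include <libcamera/geometry.h>

    #include "lsc_polynomial.h"

    using namespace libcamera;

    int main()
    {
            /* Centered polynomial with a mild r^4 (k1) falloff term. */
            ipa::LscPolynomial poly(0.5, 0.5, 0.0, 0.1, 0.0, 0.0, 0.0);
            poly.setReferenceImageSize(Size(4096, 3072));

            /*
             * Positions are normalized by m, the centre-to-corner distance
             * in pixels. Sampling away from the optical center yields a
             * correction factor above 1.0 that grows with r^4 here.
             */
            double corner = poly.sampleAtNormalizedPixelPos(0.6, 0.45);
            /* corner > 1.0 */
            return 0;
    }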
"libcamera/internal/yaml_parser.h" + +#include "histogram.h" + +/** + * \file lux.h + * \brief Helper class that implements lux estimation + * + * Estimating the lux level of an image is a common operation that can for + * instance be used to adjust the target Y value in AGC or for Bayesian AWB + * estimation. + */ + +namespace libcamera { + +using namespace std::literals::chrono_literals; + +LOG_DEFINE_CATEGORY(Lux) + +namespace ipa { + +/** + * \class Lux + * \brief Class that implements lux estimation + * + * IPAs that wish to use lux estimation should create a Lux algorithm module + * that lightly wraps this module by providing the platform-specific luminance + * histogram. The Lux entry in the tuning file must then precede the algorithms + * that depend on the estimated lux value. + */ + +/** + * \var Lux::binSize_ + * \brief The maximum count of each bin + */ + +/** + * \var Lux::referenceExposureTime_ + * \brief The exposure time of the reference image, in microseconds + */ + +/** + * \var Lux::referenceAnalogueGain_ + * \brief The analogue gain of the reference image + */ + +/** + * \var Lux::referenceDigitalGain_ + * \brief The analogue gain of the reference image + */ + +/** + * \var Lux::referenceY_ + * \brief The measured luminance of the reference image, out of the bin size + * + * \sa binSize_ + */ + +/** + * \var Lux::referenceLux_ + * \brief The estimated lux level of the reference image + */ + +/** + * \brief Construct the Lux helper module + * \param[in] binSize The maximum count of each bin + */ +Lux::Lux(unsigned int binSize) + : binSize_(binSize) +{ +} + +/** + * \brief Parse tuning data + * \param[in] tuningData The YamlObject representing the tuning data + * + * This function parses yaml tuning data for the common Lux module. It requires + * reference exposure time, analogue gain, digital gain, and lux values. 
diff --git a/src/ipa/libipa/matrix.h b/src/ipa/libipa/matrix.h deleted file mode 100644 index 8aa8f343..00000000 --- a/src/ipa/libipa/matrix.h +++ 
/dev/null @@ -1,204 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> - * - * Matrix and related operations - */ -#pragma once - -#include <algorithm> -#include <cmath> -#include <sstream> -#include <vector> - -#include <libcamera/base/log.h> -#include <libcamera/base/span.h> - -#include "libcamera/internal/yaml_parser.h" - -namespace libcamera { - -LOG_DECLARE_CATEGORY(Matrix) - -namespace ipa { - -#ifndef __DOXYGEN__ -template<typename T, unsigned int Rows, unsigned int Cols, - std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr> -#else -template<typename T, unsigned int Rows, unsigned int Cols> -#endif /* __DOXYGEN__ */ -class Matrix -{ -public: - Matrix() - { - data_.fill(static_cast<T>(0)); - } - - Matrix(const std::vector<T> &data) - { - std::copy(data.begin(), data.end(), data_.begin()); - } - - static Matrix identity() - { - Matrix ret; - for (size_t i = 0; i < std::min(Rows, Cols); i++) - ret[i][i] = static_cast<T>(1); - return ret; - } - - ~Matrix() = default; - - const std::string toString() const - { - std::stringstream out; - - out << "Matrix { "; - for (unsigned int i = 0; i < Rows; i++) { - out << "[ "; - for (unsigned int j = 0; j < Cols; j++) { - out << (*this)[i][j]; - out << ((j + 1 < Cols) ? ", " : " "); - } - out << ((i + 1 < Rows) ? "], " : "]"); - } - out << " }"; - - return out.str(); - } - - Span<const T, Cols> operator[](size_t i) const - { - return Span<const T, Cols>{ &data_.data()[i * Cols], Cols }; - } - - Span<T, Cols> operator[](size_t i) - { - return Span<T, Cols>{ &data_.data()[i * Cols], Cols }; - } - -#ifndef __DOXYGEN__ - template<typename U, std::enable_if_t<std::is_arithmetic_v<U>>> -#else - template<typename U> -#endif /* __DOXYGEN__ */ - Matrix<T, Rows, Cols> &operator*=(U d) - { - for (unsigned int i = 0; i < Rows * Cols; i++) - data_[i] *= d; - return *this; - } - -private: - std::array<T, Rows * Cols> data_; -}; - -#ifndef __DOXYGEN__ -template<typename T, typename U, unsigned int Rows, unsigned int Cols, - std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr> -#else -template<typename T, typename U, unsigned int Rows, unsigned int Cols> -#endif /* __DOXYGEN__ */ -Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m) -{ - Matrix<U, Rows, Cols> result; - - for (unsigned int i = 0; i < Rows; i++) { - for (unsigned int j = 0; j < Cols; j++) - result[i][j] = d * m[i][j]; - } - - return result; -} - -#ifndef __DOXYGEN__ -template<typename T, typename U, unsigned int Rows, unsigned int Cols, - std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr> -#else -template<typename T, typename U, unsigned int Rows, unsigned int Cols> -#endif /* __DOXYGEN__ */ -Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d) -{ - return d * m; -} - -#ifndef __DOXYGEN__ -template<typename T, - unsigned int R1, unsigned int C1, - unsigned int R2, unsigned int C2, - std::enable_if_t<C1 == R2> * = nullptr> -#else -template<typename T, unsigned int R1, unsigned int C1, unsigned int R2, unsigned in C2> -#endif /* __DOXYGEN__ */ -Matrix<T, R1, C2> operator*(const Matrix<T, R1, C1> &m1, const Matrix<T, R2, C2> &m2) -{ - Matrix<T, R1, C2> result; - - for (unsigned int i = 0; i < R1; i++) { - for (unsigned int j = 0; j < C2; j++) { - T sum = 0; - - for (unsigned int k = 0; k < C1; k++) - sum += m1[i][k] * m2[k][j]; - - result[i][j] = sum; - } - } - - return result; -} - -template<typename T, unsigned int Rows, unsigned int Cols> -Matrix<T, Rows, Cols> operator+(const 
Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2) -{ - Matrix<T, Rows, Cols> result; - - for (unsigned int i = 0; i < Rows; i++) { - for (unsigned int j = 0; j < Cols; j++) - result[i][j] = m1[i][j] + m2[i][j]; - } - - return result; -} - -#ifndef __DOXYGEN__ -bool matrixValidateYaml(const YamlObject &obj, unsigned int size); -#endif /* __DOXYGEN__ */ - -} /* namespace ipa */ - -#ifndef __DOXYGEN__ -template<typename T, unsigned int Rows, unsigned int Cols> -std::ostream &operator<<(std::ostream &out, const ipa::Matrix<T, Rows, Cols> &m) -{ - out << m.toString(); - return out; -} - -template<typename T, unsigned int Rows, unsigned int Cols> -struct YamlObject::Getter<ipa::Matrix<T, Rows, Cols>> { - std::optional<ipa::Matrix<T, Rows, Cols>> get(const YamlObject &obj) const - { - if (!ipa::matrixValidateYaml(obj, Rows * Cols)) - return std::nullopt; - - ipa::Matrix<T, Rows, Cols> matrix; - T *data = &matrix[0][0]; - - unsigned int i = 0; - for (const YamlObject &entry : obj.asList()) { - const auto value = entry.get<T>(); - if (!value) - return std::nullopt; - - data[i++] = *value; - } - - return matrix; - } -}; -#endif /* __DOXYGEN__ */ - -} /* namespace libcamera */ diff --git a/src/ipa/libipa/matrix_interpolator.cpp b/src/ipa/libipa/matrix_interpolator.cpp deleted file mode 100644 index 04ca177f..00000000 --- a/src/ipa/libipa/matrix_interpolator.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> - * - * Helper class for interpolating maps of matrices - */ -#include "matrix_interpolator.h" - -#include <algorithm> -#include <string> - -#include <libcamera/base/log.h> - -#include "libcamera/internal/yaml_parser.h" - -#include "matrix.h" - -/** - * \file matrix_interpolator.h - * \brief Helper class for interpolating maps of matrices - */ - -namespace libcamera { - -LOG_DEFINE_CATEGORY(MatrixInterpolator) - -namespace ipa { - -/** - * \class MatrixInterpolator - * \brief Class for storing, retrieving, and interpolating matrices - * \tparam T Type of numerical values to be stored in the matrices - * \tparam R Number of rows in the matrices - * \tparam C Number of columns in the matrices - * - * The main use case is to pass a map from color temperatures to corresponding - * matrices (eg. color correction), and then requesting a matrix for a specific - * color temperature. This class will abstract away the interpolation portion. - */ - -/** - * \fn MatrixInterpolator::MatrixInterpolator(const std::map<unsigned int, Matrix<T, R, C>> &matrices) - * \brief Construct a matrix interpolator from a map of matrices - * \param matrices Map from which to construct the matrix interpolator - */ - -/** - * \fn MatrixInterpolator::reset() - * \brief Reset the matrix interpolator content to a single identity matrix - */ - -/** - * \fn int MatrixInterpolator<T, R, C>::readYaml() - * \brief Initialize an MatrixInterpolator instance from yaml - * \tparam T Type of data stored in the matrices - * \tparam R Number of rows of the matrices - * \tparam C Number of columns of the matrices - * \param[in] yaml The yaml object that contains the map of unsigned integers to matrices - * \param[in] key_name The name of the key in the yaml object - * \param[in] matrix_name The name of the matrix in the yaml object - * - * The yaml object is expected to be a list of maps. 
Each map has two or more - * pairs: one of \a key_name to the key value (usually color temperature), and - * one or more of \a matrix_name to the matrix. This is a bit difficult to - * explain, so here is an example (in python, as it is easier to parse than - * yaml): - * [ - * { - * 'ct': 2860, - * 'ccm': [ 2.12089, -0.52461, -0.59629, - * -0.85342, 2.80445, -0.95103, - * -0.26897, -1.14788, 2.41685 ], - * 'offsets': [ 0, 0, 0 ] - * }, - * - * { - * 'ct': 2960, - * 'ccm': [ 2.26962, -0.54174, -0.72789, - * -0.77008, 2.60271, -0.83262, - * -0.26036, -1.51254, 2.77289 ], - * 'offsets': [ 0, 0, 0 ] - * }, - * - * { - * 'ct': 3603, - * 'ccm': [ 2.18644, -0.66148, -0.52496, - * -0.77828, 2.69474, -0.91645, - * -0.25239, -0.83059, 2.08298 ], - * 'offsets': [ 0, 0, 0 ] - * }, - * ] - * - * In this case, \a key_name would be 'ct', and \a matrix_name can be either - * 'ccm' or 'offsets'. This way multiple matrix interpolators can be defined in - * one set of color temperature ranges in the tuning file, and they can be - * retrieved separately with the \a matrix_name parameter. - * - * \return Zero on success, negative error code otherwise - */ - -/** - * \fn Matrix<T, R, C> MatrixInterpolator<T, R, C>::get(unsigned int key) - * \brief Retrieve a matrix from the list of matrices, interpolating if necessary - * \param[in] key The unsigned integer key of the matrix to retrieve - * \return The matrix corresponding to the color temperature - */ - -} /* namespace ipa */ - -} /* namespace libcamera */ diff --git a/src/ipa/libipa/matrix_interpolator.h b/src/ipa/libipa/matrix_interpolator.h deleted file mode 100644 index 087c4fd1..00000000 --- a/src/ipa/libipa/matrix_interpolator.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> - * - * Helper class for interpolating maps of matrices - */ - -#pragma once - -#include <algorithm> -#include <map> -#include <string> -#include <tuple> - -#include <libcamera/base/log.h> - -#include "libcamera/internal/yaml_parser.h" - -#include "matrix.h" - -namespace libcamera { - -LOG_DECLARE_CATEGORY(MatrixInterpolator) - -namespace ipa { - -#ifndef __DOXYGEN__ -template<typename T, unsigned int R, unsigned int C, - std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr> -#else -template<typename T, unsigned int R, unsigned int C> -#endif /* __DOXYGEN__ */ -class MatrixInterpolator -{ -public: - MatrixInterpolator() - { - reset(); - } - - MatrixInterpolator(const std::map<unsigned int, Matrix<T, R, C>> &matrices) - { - for (const auto &pair : matrices) - matrices_[pair.first] = pair.second; - } - - ~MatrixInterpolator() {} - - void reset() - { - matrices_.clear(); - matrices_[0] = Matrix<T, R, C>::identity(); - } - - int readYaml(const libcamera::YamlObject &yaml, - const std::string &key_name, - const std::string &matrix_name) - { - matrices_.clear(); - - if (!yaml.isList()) { - LOG(MatrixInterpolator, Error) << "yaml object must be a list"; - return -EINVAL; - } - - for (const auto &value : yaml.asList()) { - unsigned int ct = std::stoul(value[key_name].get<std::string>("")); - std::optional<Matrix<T, R, C>> matrix = - value[matrix_name].get<Matrix<T, R, C>>(); - if (!matrix) { - LOG(MatrixInterpolator, Error) << "Failed to read matrix"; - return -EINVAL; - } - - matrices_[ct] = *matrix; - - LOG(MatrixInterpolator, Debug) - << "Read matrix '" << matrix_name << "' for key '" - << key_name << "' " << ct << ": " - << matrices_[ct].toString(); - } - - if (matrices_.size() < 1) 
{ - LOG(MatrixInterpolator, Error) << "Need at least one matrix"; - return -EINVAL; - } - - return 0; - } - - Matrix<T, R, C> get(unsigned int ct) - { - ASSERT(matrices_.size() > 0); - - if (matrices_.size() == 1 || - ct <= matrices_.begin()->first) - return matrices_.begin()->second; - - if (ct >= matrices_.rbegin()->first) - return matrices_.rbegin()->second; - - if (matrices_.find(ct) != matrices_.end()) - return matrices_[ct]; - - /* The above four guarantee that this will succeed */ - auto iter = matrices_.upper_bound(ct); - unsigned int ctUpper = iter->first; - unsigned int ctLower = (--iter)->first; - - double lambda = (ct - ctLower) / static_cast<double>(ctUpper - ctLower); - Matrix<T, R, C> ret = - lambda * matrices_[ctUpper] + (1.0 - lambda) * matrices_[ctLower]; - return ret; - } - -private: - std::map<unsigned int, Matrix<T, R, C>> matrices_; -}; - -} /* namespace ipa */ - -} /* namespace libcamera */ diff --git a/src/ipa/libipa/meson.build b/src/ipa/libipa/meson.build index eff8ce26..12d8d15b 100644 --- a/src/ipa/libipa/meson.build +++ b/src/ipa/libipa/meson.build @@ -4,28 +4,32 @@ libipa_headers = files([ 'agc_mean_luminance.h', 'algorithm.h', 'camera_sensor_helper.h', + 'colours.h', 'exposure_mode_helper.h', 'fc_queue.h', + 'fixedpoint.h', 'histogram.h', - 'matrix.h', - 'matrix_interpolator.h', + 'interpolator.h', + 'lsc_polynomial.h', + 'lux.h', 'module.h', 'pwl.h', - 'vector.h', ]) libipa_sources = files([ 'agc_mean_luminance.cpp', 'algorithm.cpp', 'camera_sensor_helper.cpp', + 'colours.cpp', 'exposure_mode_helper.cpp', 'fc_queue.cpp', + 'fixedpoint.cpp', 'histogram.cpp', - 'matrix.cpp', - 'matrix_interpolator.cpp', + 'interpolator.cpp', + 'lsc_polynomial.cpp', + 'lux.cpp', 'module.cpp', 'pwl.cpp', - 'vector.cpp', ]) libipa_includes = include_directories('..') diff --git a/src/ipa/libipa/pwl.cpp b/src/ipa/libipa/pwl.cpp index 9b213754..88fe2022 100644 --- a/src/ipa/libipa/pwl.cpp +++ b/src/ipa/libipa/pwl.cpp @@ -8,10 +8,8 @@ #include "pwl.h" -#include <assert.h> #include <cmath> #include <sstream> -#include <stdexcept> /** * \file pwl.h diff --git a/src/ipa/libipa/pwl.h b/src/ipa/libipa/pwl.h index b6f93494..8fdc7053 100644 --- a/src/ipa/libipa/pwl.h +++ b/src/ipa/libipa/pwl.h @@ -7,15 +7,12 @@ #pragma once #include <algorithm> -#include <cmath> #include <functional> #include <string> #include <utility> #include <vector> -#include "libcamera/internal/yaml_parser.h" - -#include "vector.h" +#include "libcamera/internal/vector.h" namespace libcamera { diff --git a/src/ipa/libipa/vector.cpp b/src/ipa/libipa/vector.cpp deleted file mode 100644 index bd00b019..00000000 --- a/src/ipa/libipa/vector.cpp +++ /dev/null @@ -1,168 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> - * - * Vector and related operations - */ - -#include "vector.h" - -#include <libcamera/base/log.h> - -/** - * \file vector.h - * \brief Vector class - */ - -namespace libcamera { - -LOG_DEFINE_CATEGORY(Vector) - -namespace ipa { - -/** - * \class Vector - * \brief Vector class - * \tparam T Type of numerical values to be stored in the vector - * \tparam Rows Number of dimension of the vector (= number of elements) - */ - -/** - * \fn Vector::Vector() - * \brief Construct a zero vector - */ - -/** - * \fn Vector::Vector(const std::array<T, Rows> &data) - * \brief Construct vector from supplied data - * \param data Data from which to construct a vector - * - * The size of \a data must be equal to the dimension size Rows of the vector. 
- */ - -/** - * \fn T Vector::operator[](size_t i) const - * \brief Index to an element in the vector - * \param i Index of element to retrieve - * \return Element at index \a i from the vector - */ - -/** - * \fn T &Vector::operator[](size_t i) - * \copydoc Vector::operator[](size_t i) const - */ - -/** - * \fn Vector::x() - * \brief Convenience function to access the first element of the vector - * \return The first element of the vector - */ - -/** - * \fn Vector::y() - * \brief Convenience function to access the second element of the vector - * \return The second element of the vector - */ - -/** - * \fn Vector::z() - * \brief Convenience function to access the third element of the vector - * \return The third element of the vector - */ - -/** - * \fn Vector::operator-() const - * \brief Negate a Vector by negating both all of its coordinates - * \return The negated vector - */ - -/** - * \fn Vector::operator-(Vector const &other) const - * \brief Subtract one vector from another - * \param[in] other The other vector - * \return The difference of \a other from this vector - */ - -/** - * \fn Vector::operator+() - * \brief Add two vectors together - * \param[in] other The other vector - * \return The sum of the two vectors - */ - -/** - * \fn Vector::operator*(const Vector<T, Rows> &other) const - * \brief Compute the dot product - * \param[in] other The other vector - * \return The dot product of the two vectors - */ - -/** - * \fn Vector::operator*(T factor) const - * \brief Multiply the vector by a scalar - * \param[in] factor The factor - * \return The vector multiplied by \a factor - */ - -/** - * \fn Vector::operator/() - * \brief Divide the vector by a scalar - * \param[in] factor The factor - * \return The vector divided by \a factor - */ - -/** - * \fn Vector::length2() - * \brief Get the squared length of the vector - * \return The squared length of the vector - */ - -/** - * \fn Vector::length() - * \brief Get the length of the vector - * \return The length of the vector - */ - -/** - * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v) - * \brief Multiply a matrix by a vector - * \tparam T Numerical type of the contents of the matrix and vector - * \tparam Rows The number of rows in the matrix - * \tparam Cols The number of columns in the matrix (= rows in the vector) - * \param m The matrix - * \param v The vector - * \return Product of matrix \a m and vector \a v - */ - -/** - * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) - * \brief Compare vectors for equality - * \return True if the two vectors are equal, false otherwise - */ - -/** - * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) - * \brief Compare vectors for inequality - * \return True if the two vectors are not equal, false otherwise - */ - -#ifndef __DOXYGEN__ -bool vectorValidateYaml(const YamlObject &obj, unsigned int size) -{ - if (!obj.isList()) - return false; - - if (obj.size() != size) { - LOG(Vector, Error) - << "Wrong number of values in YAML vector: expected " - << size << ", got " << obj.size(); - return false; - } - - return true; -} -#endif /* __DOXYGEN__ */ - -} /* namespace ipa */ - -} /* namespace libcamera */ diff --git a/src/ipa/libipa/vector.h b/src/ipa/libipa/vector.h deleted file mode 100644 index 556e0967..00000000 --- a/src/ipa/libipa/vector.h +++ /dev/null @@ -1,219 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> - * 
- * Vector and related operations - */ -#pragma once - -#include <algorithm> -#include <array> -#include <cmath> -#include <sstream> - -#include <libcamera/base/log.h> -#include <libcamera/base/span.h> - -#include "libcamera/internal/yaml_parser.h" - -#include "matrix.h" - -namespace libcamera { - -LOG_DECLARE_CATEGORY(Vector) - -namespace ipa { - -#ifndef __DOXYGEN__ -template<typename T, unsigned int Rows, - std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr> -#else -template<typename T, unsigned int Rows> -#endif /* __DOXYGEN__ */ -class Vector -{ -public: - constexpr Vector() = default; - - constexpr Vector(const std::array<T, Rows> &data) - { - for (unsigned int i = 0; i < Rows; i++) - data_[i] = data[i]; - } - - const T &operator[](size_t i) const - { - ASSERT(i < data_.size()); - return data_[i]; - } - - T &operator[](size_t i) - { - ASSERT(i < data_.size()); - return data_[i]; - } - -#ifndef __DOXYGEN__ - template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>> -#endif /* __DOXYGEN__ */ - constexpr T x() const - { - return data_[0]; - } - -#ifndef __DOXYGEN__ - template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>> -#endif /* __DOXYGEN__ */ - constexpr T y() const - { - return data_[1]; - } - -#ifndef __DOXYGEN__ - template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>> -#endif /* __DOXYGEN__ */ - constexpr T z() const - { - return data_[2]; - } - - constexpr Vector<T, Rows> operator-() const - { - Vector<T, Rows> ret; - for (unsigned int i = 0; i < Rows; i++) - ret[i] = -data_[i]; - return ret; - } - - constexpr Vector<T, Rows> operator-(const Vector<T, Rows> &other) const - { - Vector<T, Rows> ret; - for (unsigned int i = 0; i < Rows; i++) - ret[i] = data_[i] - other[i]; - return ret; - } - - constexpr Vector<T, Rows> operator+(const Vector<T, Rows> &other) const - { - Vector<T, Rows> ret; - for (unsigned int i = 0; i < Rows; i++) - ret[i] = data_[i] + other[i]; - return ret; - } - - constexpr T operator*(const Vector<T, Rows> &other) const - { - T ret = 0; - for (unsigned int i = 0; i < Rows; i++) - ret += data_[i] * other[i]; - return ret; - } - - constexpr Vector<T, Rows> operator*(T factor) const - { - Vector<T, Rows> ret; - for (unsigned int i = 0; i < Rows; i++) - ret[i] = data_[i] * factor; - return ret; - } - - constexpr Vector<T, Rows> operator/(T factor) const - { - Vector<T, Rows> ret; - for (unsigned int i = 0; i < Rows; i++) - ret[i] = data_[i] / factor; - return ret; - } - - constexpr double length2() const - { - double ret = 0; - for (unsigned int i = 0; i < Rows; i++) - ret += data_[i] * data_[i]; - return ret; - } - - constexpr double length() const - { - return std::sqrt(length2()); - } - -private: - std::array<T, Rows> data_; -}; - -template<typename T, unsigned int Rows, unsigned int Cols> -Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v) -{ - Vector<T, Rows> result; - - for (unsigned int i = 0; i < Rows; i++) { - T sum = 0; - for (unsigned int j = 0; j < Cols; j++) - sum += m[i][j] * v[j]; - result[i] = sum; - } - - return result; -} - -template<typename T, unsigned int Rows> -bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) -{ - for (unsigned int i = 0; i < Rows; i++) { - if (lhs[i] != rhs[i]) - return false; - } - - return true; -} - -template<typename T, unsigned int Rows> -bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) -{ - return !(lhs == rhs); -} - -#ifndef __DOXYGEN__ -bool 
vectorValidateYaml(const YamlObject &obj, unsigned int size); -#endif /* __DOXYGEN__ */ - -} /* namespace ipa */ - -#ifndef __DOXYGEN__ -template<typename T, unsigned int Rows> -std::ostream &operator<<(std::ostream &out, const ipa::Vector<T, Rows> &v) -{ - out << "Vector { "; - for (unsigned int i = 0; i < Rows; i++) { - out << v[i]; - out << ((i + 1 < Rows) ? ", " : " "); - } - out << " }"; - - return out; -} - -template<typename T, unsigned int Rows> -struct YamlObject::Getter<ipa::Vector<T, Rows>> { - std::optional<ipa::Vector<T, Rows>> get(const YamlObject &obj) const - { - if (!ipa::vectorValidateYaml(obj, Rows)) - return std::nullopt; - - ipa::Vector<T, Rows> vector; - - unsigned int i = 0; - for (const YamlObject &entry : obj.asList()) { - const auto value = entry.get<T>(); - if (!value) - return std::nullopt; - vector[i++] = *value; - } - - return vector; - } -}; -#endif /* __DOXYGEN__ */ - -} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/agc.cpp b/src/ipa/mali-c55/algorithms/agc.cpp new file mode 100644 index 00000000..70667db3 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/agc.cpp @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board Oy + * + * agc.cpp - AGC/AEC mean-based control algorithm + */ + +#include "agc.h" + +#include <cmath> + +#include <libcamera/base/log.h> +#include <libcamera/base/utils.h> + +#include <libcamera/control_ids.h> +#include <libcamera/property_ids.h> + +#include "libipa/colours.h" +#include "libipa/fixedpoint.h" + +namespace libcamera { + +using namespace std::literals::chrono_literals; + +namespace ipa::mali_c55::algorithms { + +LOG_DEFINE_CATEGORY(MaliC55Agc) + +/* + * Number of histogram bins. This is only true for the specific configuration we + * set to the ISP; 4 separate histograms of 256 bins each. If that configuration + * ever changes then this constant will need updating. + */ +static constexpr unsigned int kNumHistogramBins = 256; + +/* + * The Mali-C55 ISP has a digital gain block which allows setting gain in Q5.8 + * format, a range of 0.0 to (very nearly) 32.0. We clamp from 1.0 to the actual + * max value which is 8191 * 2^-8. + */ +static constexpr double kMinDigitalGain = 1.0; +static constexpr double kMaxDigitalGain = 31.99609375; + +uint32_t AgcStatistics::decodeBinValue(uint16_t binVal) +{ + int exponent = (binVal & 0xf000) >> 12; + int mantissa = binVal & 0xfff; + + if (!exponent) + return mantissa * 2; + else + return (mantissa + 4096) * std::pow(2, exponent); +} + +/* + * We configure the ISP to give us 4 histograms of 256 bins each, with + * a single histogram per colour channel (R/Gr/Gb/B). The memory space + * containing the data is a single block containing all 4 histograms + * with the position of each colour's histogram within it dependent on + * the bayer pattern of the data input to the ISP. + * + * NOTE: The validity of this function depends on the parameters we have + * configured. With different skip/offset x, y values not all of the + * colour channels would be populated, and they may not be in the same + * planes as calculated here. 
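+ * + * As an example of the layout this produces: with an RGGB sensor, + * setBayerOrderIndices() selects plane 0 for R, so bins 0-255 of the + * shared buffer hold the R histogram, bins 256-511 Gr, bins 512-767 + * Gb and bins 768-1023 B.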
+ */ +int AgcStatistics::setBayerOrderIndices(BayerFormat::Order bayerOrder) +{ + switch (bayerOrder) { + case BayerFormat::Order::RGGB: + rIndex_ = 0; + grIndex_ = 1; + gbIndex_ = 2; + bIndex_ = 3; + break; + case BayerFormat::Order::GRBG: + grIndex_ = 0; + rIndex_ = 1; + bIndex_ = 2; + gbIndex_ = 3; + break; + case BayerFormat::Order::GBRG: + gbIndex_ = 0; + bIndex_ = 1; + rIndex_ = 2; + grIndex_ = 3; + break; + case BayerFormat::Order::BGGR: + bIndex_ = 0; + gbIndex_ = 1; + grIndex_ = 2; + rIndex_ = 3; + break; + default: + LOG(MaliC55Agc, Error) + << "Invalid bayer format " << bayerOrder; + return -EINVAL; + } + + return 0; +} + +void AgcStatistics::parseStatistics(const mali_c55_stats_buffer *stats) +{ + uint32_t r[256], g[256], b[256], y[256]; + + /* + * We need to decode the bin values for each histogram from their 16-bit + * compressed values to a 32-bit value. We also take the average of the + * Gr/Gb values into a single green histogram. + */ + for (unsigned int i = 0; i < 256; i++) { + r[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * rIndex_)]); + g[i] = (decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * grIndex_)]) + + decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * gbIndex_)])) / 2; + b[i] = decodeBinValue(stats->ae_1024bin_hist.bins[i + (256 * bIndex_)]); + + y[i] = rec601LuminanceFromRGB({ { static_cast<double>(r[i]), + static_cast<double>(g[i]), + static_cast<double>(b[i]) } }); + } + + rHist = Histogram(Span<uint32_t>(r, kNumHistogramBins)); + gHist = Histogram(Span<uint32_t>(g, kNumHistogramBins)); + bHist = Histogram(Span<uint32_t>(b, kNumHistogramBins)); + yHist = Histogram(Span<uint32_t>(y, kNumHistogramBins)); +} + +Agc::Agc() + : AgcMeanLuminance() +{ +} + +int Agc::init(IPAContext &context, const YamlObject &tuningData) +{ + int ret = parseTuningData(tuningData); + if (ret) + return ret; + + context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true); + context.ctrlMap[&controls::DigitalGain] = ControlInfo( + static_cast<float>(kMinDigitalGain), + static_cast<float>(kMaxDigitalGain), + static_cast<float>(kMinDigitalGain) + ); + context.ctrlMap.merge(controls()); + + return 0; +} + +int Agc::configure(IPAContext &context, + [[maybe_unused]] const IPACameraSensorInfo &configInfo) +{ + int ret = statistics_.setBayerOrderIndices(context.configuration.sensor.bayerOrder); + if (ret) + return ret; + + /* + * Defaults; we use whatever the sensor's default exposure is and the + * minimum analogue gain. AEGC is _active_ by default. 
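+ * + * Both the automatic and the manual halves of the active state are + * primed with the same defaults here, so disabling AeEnable before any + * manual controls have been queued keeps these values in effect rather + * than leaving the manual state uninitialised.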
+ */ + context.activeState.agc.autoEnabled = true; + context.activeState.agc.automatic.sensorGain = context.configuration.agc.minAnalogueGain; + context.activeState.agc.automatic.exposure = context.configuration.agc.defaultExposure; + context.activeState.agc.automatic.ispGain = kMinDigitalGain; + context.activeState.agc.manual.sensorGain = context.configuration.agc.minAnalogueGain; + context.activeState.agc.manual.exposure = context.configuration.agc.defaultExposure; + context.activeState.agc.manual.ispGain = kMinDigitalGain; + context.activeState.agc.constraintMode = constraintModes().begin()->first; + context.activeState.agc.exposureMode = exposureModeHelpers().begin()->first; + + /* \todo Run this again when FrameDurationLimits is passed in */ + setLimits(context.configuration.agc.minShutterSpeed, + context.configuration.agc.maxShutterSpeed, + context.configuration.agc.minAnalogueGain, + context.configuration.agc.maxAnalogueGain); + + resetFrameCount(); + + return 0; +} + +void Agc::queueRequest(IPAContext &context, const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + const ControlList &controls) +{ + auto &agc = context.activeState.agc; + + const auto &constraintMode = controls.get(controls::AeConstraintMode); + agc.constraintMode = constraintMode.value_or(agc.constraintMode); + + const auto &exposureMode = controls.get(controls::AeExposureMode); + agc.exposureMode = exposureMode.value_or(agc.exposureMode); + + const auto &agcEnable = controls.get(controls::AeEnable); + if (agcEnable && *agcEnable != agc.autoEnabled) { + agc.autoEnabled = *agcEnable; + + LOG(MaliC55Agc, Info) + << (agc.autoEnabled ? "Enabling" : "Disabling") + << " AGC"; + } + + /* + * If the automatic exposure and gain is enabled we have no further work + * to do here... + */ + if (agc.autoEnabled) + return; + + /* + * ...otherwise we need to look for exposure and gain controls and use + * those to set the activeState. 
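+ * + * ExposureTime arrives in microseconds and is stored as a line count, + * using the sensor's line duration for the conversion. The manual + * digital gain is picked up by fillGainParamBlock() on the next + * prepare() call, which uses manual.ispGain whenever autoEnabled is + * false.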
+ */ + const auto &exposure = controls.get(controls::ExposureTime); + if (exposure) { + agc.manual.exposure = *exposure * 1.0us / context.configuration.sensor.lineDuration; + + LOG(MaliC55Agc, Debug) + << "Exposure set to " << agc.manual.exposure + << " on request sequence " << frame; + } + + const auto &analogueGain = controls.get(controls::AnalogueGain); + if (analogueGain) { + agc.manual.sensorGain = *analogueGain; + + LOG(MaliC55Agc, Debug) + << "Analogue gain set to " << agc.manual.sensorGain + << " on request sequence " << frame; + } + + const auto &digitalGain = controls.get(controls::DigitalGain); + if (digitalGain) { + agc.manual.ispGain = *digitalGain; + + LOG(MaliC55Agc, Debug) + << "Digital gain set to " << agc.manual.ispGain + << " on request sequence " << frame; + } +} + +size_t Agc::fillGainParamBlock(IPAContext &context, IPAFrameContext &frameContext, + mali_c55_params_block block) +{ + IPAActiveState &activeState = context.activeState; + double gain; + + if (activeState.agc.autoEnabled) + gain = activeState.agc.automatic.ispGain; + else + gain = activeState.agc.manual.ispGain; + + block.header->type = MALI_C55_PARAM_BLOCK_DIGITAL_GAIN; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_digital_gain); + + block.digital_gain->gain = floatingToFixedPoint<5, 8, uint16_t, double>(gain); + frameContext.agc.ispGain = gain; + + return block.header->size; +} + +size_t Agc::fillParamsBuffer(mali_c55_params_block block, + enum mali_c55_param_block_type type) +{ + block.header->type = type; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_aexp_hist); + + /* Collect every 3rd pixel horizontally */ + block.aexp_hist->skip_x = 1; + /* Start from first column */ + block.aexp_hist->offset_x = 0; + /* Collect every pixel vertically */ + block.aexp_hist->skip_y = 0; + /* Start from the first row */ + block.aexp_hist->offset_y = 0; + /* 1x scaling (i.e. none) */ + block.aexp_hist->scale_bottom = 0; + block.aexp_hist->scale_top = 0; + /* Collect all Bayer planes into 4 separate histograms */ + block.aexp_hist->plane_mode = 1; + /* Tap the data immediately after the digital gain block */ + block.aexp_hist->tap_point = MALI_C55_AEXP_HIST_TAP_FS; + + return block.header->size; +} + +size_t Agc::fillWeightsArrayBuffer(mali_c55_params_block block, + enum mali_c55_param_block_type type) +{ + block.header->type = type; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_aexp_weights); + + /* We use every zone - a 15x15 grid */ + block.aexp_weights->nodes_used_horiz = 15; + block.aexp_weights->nodes_used_vert = 15; + + /* + * We uniformly weight the zones to 1 - this results in the collected + * histograms containing a true pixel count, which we can then use to + * approximate colour channel averages for the image. 
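+ * + * These averages are taken as the inter-quantile mean over the full + * range of each channel's histogram, both for the CCT estimation in + * process() and in estimateLuminance(), where the result is normalised + * against kNumHistogramBins.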
+ */ + Span<uint8_t> weights{ + block.aexp_weights->zone_weights, + MALI_C55_MAX_ZONES + }; + std::fill(weights.begin(), weights.end(), 1); + + return block.header->size; +} + +void Agc::prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, mali_c55_params_buffer *params) +{ + mali_c55_params_block block; + + block.data = &params->data[params->total_size]; + params->total_size += fillGainParamBlock(context, frameContext, block); + + if (frame > 0) + return; + + block.data = &params->data[params->total_size]; + params->total_size += fillParamsBuffer(block, + MALI_C55_PARAM_BLOCK_AEXP_HIST); + + block.data = &params->data[params->total_size]; + params->total_size += fillWeightsArrayBuffer(block, + MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS); + + block.data = &params->data[params->total_size]; + params->total_size += fillParamsBuffer(block, + MALI_C55_PARAM_BLOCK_AEXP_IHIST); + + block.data = &params->data[params->total_size]; + params->total_size += fillWeightsArrayBuffer(block, + MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS); +} + +double Agc::estimateLuminance(const double gain) const +{ + double rAvg = statistics_.rHist.interQuantileMean(0, 1) * gain; + double gAvg = statistics_.gHist.interQuantileMean(0, 1) * gain; + double bAvg = statistics_.bHist.interQuantileMean(0, 1) * gain; + double yAvg = rec601LuminanceFromRGB({ { rAvg, gAvg, bAvg } }); + + return yAvg / kNumHistogramBins; +} + +void Agc::process(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + IPAFrameContext &frameContext, + const mali_c55_stats_buffer *stats, + [[maybe_unused]] ControlList &metadata) +{ + IPASessionConfiguration &configuration = context.configuration; + IPAActiveState &activeState = context.activeState; + + if (!stats) { + LOG(MaliC55Agc, Error) << "No statistics buffer passed to Agc"; + return; + } + + statistics_.parseStatistics(stats); + context.activeState.agc.temperatureK = estimateCCT({ { statistics_.rHist.interQuantileMean(0, 1), + statistics_.gHist.interQuantileMean(0, 1), + statistics_.bHist.interQuantileMean(0, 1) } }); + + /* + * The Agc algorithm needs to know the effective exposure value that was + * applied to the sensor when the statistics were collected. 
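+ * + * The effective exposure value is simply the shutter time multiplied + * by the total gain: a 10ms shutter with, say, 2.0x analogue and 1.5x + * digital gain amounts to a 30ms effective exposure value, which + * calculateNewEv() then redistributes across the three quantities.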
+ */ + uint32_t exposure = frameContext.agc.exposure; + double analogueGain = frameContext.agc.sensorGain; + double digitalGain = frameContext.agc.ispGain; + double totalGain = analogueGain * digitalGain; + utils::Duration currentShutter = exposure * configuration.sensor.lineDuration; + utils::Duration effectiveExposureValue = currentShutter * totalGain; + + utils::Duration shutterTime; + double aGain, dGain; + std::tie(shutterTime, aGain, dGain) = + calculateNewEv(activeState.agc.constraintMode, + activeState.agc.exposureMode, statistics_.yHist, + effectiveExposureValue); + + dGain = std::clamp(dGain, kMinDigitalGain, kMaxDigitalGain); + + LOG(MaliC55Agc, Debug) + << "Divided up shutter, analogue gain and digital gain are " + << shutterTime << ", " << aGain << " and " << dGain; + + activeState.agc.automatic.exposure = shutterTime / configuration.sensor.lineDuration; + activeState.agc.automatic.sensorGain = aGain; + activeState.agc.automatic.ispGain = dGain; + + metadata.set(controls::ExposureTime, currentShutter.get<std::micro>()); + metadata.set(controls::AnalogueGain, frameContext.agc.sensorGain); + metadata.set(controls::DigitalGain, frameContext.agc.ispGain); + metadata.set(controls::ColourTemperature, context.activeState.agc.temperatureK); +} + +REGISTER_IPA_ALGORITHM(Agc, "Agc") + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/agc.h b/src/ipa/mali-c55/algorithms/agc.h new file mode 100644 index 00000000..c5c574e5 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/agc.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Ideas on Board Oy + * + * agc.h - Mali C55 AGC/AEC mean-based control algorithm + */ + +#pragma once + +#include <libcamera/base/utils.h> + +#include "libcamera/internal/bayer_format.h" + +#include "libipa/agc_mean_luminance.h" +#include "libipa/histogram.h" + +#include "algorithm.h" +#include "ipa_context.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +class AgcStatistics +{ +public: + AgcStatistics() + { + } + + int setBayerOrderIndices(BayerFormat::Order bayerOrder); + uint32_t decodeBinValue(uint16_t binVal); + void parseStatistics(const mali_c55_stats_buffer *stats); + + Histogram rHist; + Histogram gHist; + Histogram bHist; + Histogram yHist; +private: + unsigned int rIndex_; + unsigned int grIndex_; + unsigned int gbIndex_; + unsigned int bIndex_; +}; + +class Agc : public Algorithm, public AgcMeanLuminance +{ +public: + Agc(); + ~Agc() = default; + + int init(IPAContext &context, const YamlObject &tuningData) override; + int configure(IPAContext &context, + const IPACameraSensorInfo &configInfo) override; + void queueRequest(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const ControlList &controls) override; + void prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + mali_c55_params_buffer *params) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const mali_c55_stats_buffer *stats, + ControlList &metadata) override; + +private: + double estimateLuminance(const double gain) const override; + size_t fillGainParamBlock(IPAContext &context, + IPAFrameContext &frameContext, + mali_c55_params_block block); + size_t fillParamsBuffer(mali_c55_params_block block, + enum mali_c55_param_block_type type); + size_t fillWeightsArrayBuffer(mali_c55_params_block block, + enum mali_c55_param_block_type type); + + AgcStatistics 
statistics_; +}; + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/algorithm.h b/src/ipa/mali-c55/algorithms/algorithm.h new file mode 100644 index 00000000..36a3bff0 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/algorithm.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * algorithm.h - Mali-C55 control algorithm interface + */ + +#pragma once + +#include <linux/mali-c55-config.h> + +#include <libipa/algorithm.h> + +#include "module.h" + +namespace libcamera { + +namespace ipa::mali_c55 { + +class Algorithm : public libcamera::ipa::Algorithm<Module> +{ +}; + +union mali_c55_params_block { + struct mali_c55_params_block_header *header; + struct mali_c55_params_sensor_off_preshading *sensor_offs; + struct mali_c55_params_aexp_hist *aexp_hist; + struct mali_c55_params_aexp_weights *aexp_weights; + struct mali_c55_params_digital_gain *digital_gain; + struct mali_c55_params_awb_gains *awb_gains; + struct mali_c55_params_awb_config *awb_config; + struct mali_c55_params_mesh_shading_config *shading_config; + struct mali_c55_params_mesh_shading_selection *shading_selection; + __u8 *data; +}; + +} /* namespace ipa::mali_c55 */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/awb.cpp b/src/ipa/mali-c55/algorithms/awb.cpp new file mode 100644 index 00000000..050b191b --- /dev/null +++ b/src/ipa/mali-c55/algorithms/awb.cpp @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board Oy + * + * awb.cpp - Mali C55 grey world auto white balance algorithm + */ + +#include "awb.h" + +#include <cmath> + +#include <libcamera/base/log.h> +#include <libcamera/base/utils.h> + +#include <libcamera/control_ids.h> + +#include "libipa/fixedpoint.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +LOG_DEFINE_CATEGORY(MaliC55Awb) + +/* Number of frames at which we should run AWB at full speed */ +static constexpr uint32_t kNumStartupFrames = 4; + +Awb::Awb() +{ +} + +int Awb::configure([[maybe_unused]] IPAContext &context, + [[maybe_unused]] const IPACameraSensorInfo &configInfo) +{ + /* + * Initially we have no idea what the colour balance will be like, so + * for the first frame we will make no assumptions and leave the R/B + * channels unmodified. + */ + context.activeState.awb.rGain = 1.0; + context.activeState.awb.bGain = 1.0; + + return 0; +} + +size_t Awb::fillGainsParamBlock(mali_c55_params_block block, IPAContext &context, + IPAFrameContext &frameContext) +{ + block.header->type = MALI_C55_PARAM_BLOCK_AWB_GAINS; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_awb_gains); + + double rGain = context.activeState.awb.rGain; + double bGain = context.activeState.awb.bGain; + + /* + * The gains here map as follows: + * gain00 = R + * gain01 = Gr + * gain10 = Gb + * gain11 = B + * + * This holds true regardless of the bayer order of the input data, as + * the mapping is done internally in the ISP. 
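+ * + * The gains are written in Q4.8 fixed point, i.e. the floating point + * value scaled by 256: a red gain of 1.5, for example, is programmed + * as floatingToFixedPoint<4, 8, uint16_t, double>(1.5) == 384.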
+ */ + block.awb_gains->gain00 = floatingToFixedPoint<4, 8, uint16_t, double>(rGain); + block.awb_gains->gain01 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0); + block.awb_gains->gain10 = floatingToFixedPoint<4, 8, uint16_t, double>(1.0); + block.awb_gains->gain11 = floatingToFixedPoint<4, 8, uint16_t, double>(bGain); + + frameContext.awb.rGain = rGain; + frameContext.awb.bGain = bGain; + + return sizeof(struct mali_c55_params_awb_gains); +} + +size_t Awb::fillConfigParamBlock(mali_c55_params_block block) +{ + block.header->type = MALI_C55_PARAM_BLOCK_AWB_CONFIG; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_awb_config); + + /* Tap the stats after the purple fringe block */ + block.awb_config->tap_point = MALI_C55_AWB_STATS_TAP_PF; + + /* Get R/G and B/G ratios as statistics */ + block.awb_config->stats_mode = MALI_C55_AWB_MODE_RGBG; + + /* Default white level */ + block.awb_config->white_level = 1023; + + /* Default black level */ + block.awb_config->black_level = 0; + + /* + * By default pixels are included whose colour ratios are bounded in a + * region (on a cr ratio x cb ratio graph) defined by four points: + * (0.25, 0.25) + * (0.25, 1.99609375) + * (1.99609375, 1.99609375) + * (1.99609375, 0.25) + * + * The ratios themselves are stored in Q4.8 format. + * + * \todo should these perhaps be tunable? + */ + block.awb_config->cr_max = 511; + block.awb_config->cr_min = 64; + block.awb_config->cb_max = 511; + block.awb_config->cb_min = 64; + + /* We use the full 15x15 zoning scheme */ + block.awb_config->nodes_used_horiz = 15; + block.awb_config->nodes_used_vert = 15; + + /* + * We set the trimming boundaries equivalent to the main boundaries. In + * other words, no trimming. + */ + block.awb_config->cr_high = 511; + block.awb_config->cr_low = 64; + block.awb_config->cb_high = 511; + block.awb_config->cb_low = 64; + + return sizeof(struct mali_c55_params_awb_config); +} + +void Awb::prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, mali_c55_params_buffer *params) +{ + mali_c55_params_block block; + block.data = &params->data[params->total_size]; + + params->total_size += fillGainsParamBlock(block, context, frameContext); + + if (frame > 0) + return; + + block.data = &params->data[params->total_size]; + params->total_size += fillConfigParamBlock(block); +} + +void Awb::process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, const mali_c55_stats_buffer *stats, + [[maybe_unused]] ControlList &metadata) +{ + const struct mali_c55_awb_average_ratios *awb_ratios = stats->awb_ratios; + + /* + * The ISP produces average R:G and B:G ratios for zones. We take the + * average of all the zones with data and simply invert them to provide + * gain figures that we can apply to approximate a grey world. + */ + unsigned int counted_zones = 0; + double rgSum = 0, bgSum = 0; + + for (unsigned int i = 0; i < 225; i++) { + if (!awb_ratios[i].num_pixels) + continue; + + /* + * The statistics are in Q4.8 format, so we convert to double + * here. + */ + rgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_rg_gr); + bgSum += fixedToFloatingPoint<4, 8, double, uint16_t>(awb_ratios[i].avg_bg_br); + counted_zones++; + } + + /* + * Sometimes the first frame's statistics have no valid pixels, in which + * case we'll just assume a grey world until they say otherwise. 
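+ * + * As a worked example of the logic below: if rgAvg comes back as 0.5 + * while an rGain of 1.25 was applied to the frame, the true R/G ratio + * is 0.5 / 1.25 = 0.4 and the ideal gain its inverse, 2.5. Past the + * startup frames only 20% of that step is applied per frame.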
+ */ + double rgAvg, bgAvg; + if (!counted_zones) { + rgAvg = 1.0; + bgAvg = 1.0; + } else { + rgAvg = rgSum / counted_zones; + bgAvg = bgSum / counted_zones; + } + + /* + * The statistics are generated _after_ white balancing is performed in + * the ISP. To get the true ratio we therefore have to adjust the stats + * figure by the gains that were applied when the statistics for this + * frame were generated. + */ + double rRatio = rgAvg / frameContext.awb.rGain; + double bRatio = bgAvg / frameContext.awb.bGain; + + /* + * And then we can simply invert the ratio to find the gain we should + * apply. + */ + double rGain = 1 / rRatio; + double bGain = 1 / bRatio; + + /* + * Running at full speed, this algorithm results in oscillations in the + * colour balance. To remove those we dampen the speed at which it makes + * changes in gain, unless we're in the startup phase in which case we + * want to fix the miscolouring as quickly as possible. + */ + double speed = frame < kNumStartupFrames ? 1.0 : 0.2; + rGain = speed * rGain + context.activeState.awb.rGain * (1.0 - speed); + bGain = speed * bGain + context.activeState.awb.bGain * (1.0 - speed); + + context.activeState.awb.rGain = rGain; + context.activeState.awb.bGain = bGain; + + metadata.set(controls::ColourGains, { + static_cast<float>(frameContext.awb.rGain), + static_cast<float>(frameContext.awb.bGain), + }); + + LOG(MaliC55Awb, Debug) << "For frame number " << frame << ": " + << "Average R/G Ratio: " << rgAvg + << ", Average B/G Ratio: " << bgAvg + << "\nrGain applied to this frame: " << frameContext.awb.rGain + << ", bGain applied to this frame: " << frameContext.awb.bGain + << "\nrGain to apply: " << context.activeState.awb.rGain + << ", bGain to apply: " << context.activeState.awb.bGain; +} + +REGISTER_IPA_ALGORITHM(Awb, "Awb") + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/awb.h b/src/ipa/mali-c55/algorithms/awb.h new file mode 100644 index 00000000..800c2e83 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/awb.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * awb.h - Mali C55 grey world auto white balance algorithm + */ + +#include "algorithm.h" +#include "ipa_context.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +class Awb : public Algorithm +{ +public: + Awb(); + ~Awb() = default; + + int configure(IPAContext &context, + const IPACameraSensorInfo &configInfo) override; + void prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + mali_c55_params_buffer *params) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const mali_c55_stats_buffer *stats, + ControlList &metadata) override; + +private: + size_t fillGainsParamBlock(mali_c55_params_block block, + IPAContext &context, + IPAFrameContext &frameContext); + size_t fillConfigParamBlock(mali_c55_params_block block); +}; + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/blc.cpp b/src/ipa/mali-c55/algorithms/blc.cpp new file mode 100644 index 00000000..2a54c86a --- /dev/null +++ b/src/ipa/mali-c55/algorithms/blc.cpp @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * Mali-C55 sensor offset (black level) correction + */ + +#include "blc.h" + +#include <libcamera/base/log.h> +#include 
<libcamera/control_ids.h> + +#include "libcamera/internal/yaml_parser.h" + +/** + * \file blc.h + */ + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +/** + * \class BlackLevelCorrection + * \brief MaliC55 Black Level Correction control + */ + +LOG_DEFINE_CATEGORY(MaliC55Blc) + +BlackLevelCorrection::BlackLevelCorrection() + : tuningParameters_(false) +{ +} + +/** + * \copydoc libcamera::ipa::Algorithm::init + */ +int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context, + const YamlObject &tuningData) +{ + offset00 = tuningData["offset00"].get<uint32_t>(0); + offset01 = tuningData["offset01"].get<uint32_t>(0); + offset10 = tuningData["offset10"].get<uint32_t>(0); + offset11 = tuningData["offset11"].get<uint32_t>(0); + + if (offset00 > kMaxOffset || offset01 > kMaxOffset || + offset10 > kMaxOffset || offset11 > kMaxOffset) { + LOG(MaliC55Blc, Error) << "Invalid black level offsets"; + return -EINVAL; + } + + tuningParameters_ = true; + + LOG(MaliC55Blc, Debug) + << "Black levels: 00 " << offset00 << ", 01 " << offset01 + << ", 10 " << offset10 << ", 11 " << offset11; + + return 0; +} + +/** + * \copydoc libcamera::ipa::Algorithm::configure + */ +int BlackLevelCorrection::configure(IPAContext &context, + [[maybe_unused]] const IPACameraSensorInfo &configInfo) +{ + /* + * If no Black Levels were passed in through tuning data then we could + * use the value from the CameraSensorHelper if one is available. + */ + if (context.configuration.sensor.blackLevel && + !(offset00 + offset01 + offset10 + offset11)) { + offset00 = context.configuration.sensor.blackLevel; + offset01 = context.configuration.sensor.blackLevel; + offset10 = context.configuration.sensor.blackLevel; + offset11 = context.configuration.sensor.blackLevel; + } + + return 0; +} + +/** + * \copydoc libcamera::ipa::Algorithm::prepare + */ +void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context, + const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + mali_c55_params_buffer *params) +{ + mali_c55_params_block block; + block.data = &params->data[params->total_size]; + + if (frame > 0) + return; + + if (!tuningParameters_) + return; + + block.header->type = MALI_C55_PARAM_BLOCK_SENSOR_OFFS; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(mali_c55_params_sensor_off_preshading); + + block.sensor_offs->chan00 = offset00; + block.sensor_offs->chan01 = offset01; + block.sensor_offs->chan10 = offset10; + block.sensor_offs->chan11 = offset11; + + params->total_size += block.header->size; +} + +void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + [[maybe_unused]] const mali_c55_stats_buffer *stats, + ControlList &metadata) +{ + /* + * Black Level Offsets in tuning data need to be 20-bit, whereas the + * metadata expects values from a 16-bit range. Right-shift to remove + * the 4 least significant bits. + * + * The black levels should be reported in the order R, Gr, Gb, B. We + * ignore that here given we're using matching values so far, but it + * would be safer to check the sensor's bayer order. + * + * \todo Account for bayer order. 
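+ * + * With the imx415 tuning value of 51200, for example, the reported + * level is 51200 >> 4 = 3200, the same ~4.9% of full scale in the + * 16-bit range as in the 20-bit one.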
+ */ + metadata.set(controls::SensorBlackLevels, { + static_cast<int32_t>(offset00 >> 4), + static_cast<int32_t>(offset01 >> 4), + static_cast<int32_t>(offset10 >> 4), + static_cast<int32_t>(offset11 >> 4), + }); +} + +REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection") + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/blc.h b/src/ipa/mali-c55/algorithms/blc.h new file mode 100644 index 00000000..9696e8e9 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/blc.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * Mali-C55 sensor offset (black level) correction + */ + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +class BlackLevelCorrection : public Algorithm +{ +public: + BlackLevelCorrection(); + ~BlackLevelCorrection() = default; + + int init(IPAContext &context, const YamlObject &tuningData) override; + int configure(IPAContext &context, + const IPACameraSensorInfo &configInfo) override; + void prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + mali_c55_params_buffer *params) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const mali_c55_stats_buffer *stats, + ControlList &metadata) override; + +private: + static constexpr uint32_t kMaxOffset = 0xfffff; + + bool tuningParameters_; + uint32_t offset00; + uint32_t offset01; + uint32_t offset10; + uint32_t offset11; +}; + +} /* namespace ipa::mali_c55::algorithms */ +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/lsc.cpp b/src/ipa/mali-c55/algorithms/lsc.cpp new file mode 100644 index 00000000..c5afc04d --- /dev/null +++ b/src/ipa/mali-c55/algorithms/lsc.cpp @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board Oy + * + * lsc.cpp - Mali-C55 Lens shading correction algorithm + */ + +#include "lsc.h" + +#include "libcamera/internal/yaml_parser.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +LOG_DEFINE_CATEGORY(MaliC55Lsc) + +int Lsc::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData) +{ + if (!tuningData.contains("meshScale")) { + LOG(MaliC55Lsc, Error) << "meshScale missing from tuningData"; + return -EINVAL; + } + + meshScale_ = tuningData["meshScale"].get<uint32_t>(0); + + const YamlObject &yamlSets = tuningData["sets"]; + if (!yamlSets.isList()) { + LOG(MaliC55Lsc, Error) << "LSC tables missing or invalid"; + return -EINVAL; + } + + size_t tableSize = 0; + const auto &sets = yamlSets.asList(); + for (const auto &yamlSet : sets) { + uint32_t ct = yamlSet["ct"].get<uint32_t>(0); + + if (!ct) { + LOG(MaliC55Lsc, Error) << "Invalid colour temperature"; + return -EINVAL; + } + + if (std::count(colourTemperatures_.begin(), + colourTemperatures_.end(), ct)) { + LOG(MaliC55Lsc, Error) + << "Multiple sets found for colour temperature"; + return -EINVAL; + } + + std::vector<uint8_t> rTable = + yamlSet["r"].getList<uint8_t>().value_or(std::vector<uint8_t>{}); + std::vector<uint8_t> gTable = + yamlSet["g"].getList<uint8_t>().value_or(std::vector<uint8_t>{}); + std::vector<uint8_t> bTable = + yamlSet["b"].getList<uint8_t>().value_or(std::vector<uint8_t>{}); + + /* + * Some validation to do; only 16x16 and 32x32 tables of + * coefficients are acceptable, and all tables across all of the + * sets must be the same size. 
The first time we encounter a + * table we check that it is an acceptable size and if so make + * sure all other tables are of equal size. + */ + if (!tableSize) { + if (rTable.size() != 256 && rTable.size() != 1024) { + LOG(MaliC55Lsc, Error) + << "Invalid table size for colour temperature " << ct; + return -EINVAL; + } + tableSize = rTable.size(); + } + + if (rTable.size() != tableSize || + gTable.size() != tableSize || + bTable.size() != tableSize) { + LOG(MaliC55Lsc, Error) + << "Invalid or mismatched table size for colour temperature " << ct; + return -EINVAL; + } + + if (colourTemperatures_.size() >= 3) { + LOG(MaliC55Lsc, Error) + << "A maximum of 3 colour temperatures are supported"; + return -EINVAL; + } + + for (unsigned int i = 0; i < tableSize; i++) { + mesh_[kRedOffset + i] |= + (rTable[i] << (colourTemperatures_.size() * 8)); + mesh_[kGreenOffset + i] |= + (gTable[i] << (colourTemperatures_.size() * 8)); + mesh_[kBlueOffset + i] |= + (bTable[i] << (colourTemperatures_.size() * 8)); + } + + colourTemperatures_.push_back(ct); + } + + /* + * The mesh has either 16x16 or 32x32 nodes, we tell the driver which it + * is based on the number of values in the tuning data's table. + */ + if (tableSize == 256) + meshSize_ = 15; + else + meshSize_ = 31; + + return 0; +} + +size_t Lsc::fillConfigParamsBlock(mali_c55_params_block block) const +{ + block.header->type = MALI_C55_PARAM_MESH_SHADING_CONFIG; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_mesh_shading_config); + + block.shading_config->mesh_show = false; + block.shading_config->mesh_scale = meshScale_; + block.shading_config->mesh_page_r = 0; + block.shading_config->mesh_page_g = 1; + block.shading_config->mesh_page_b = 2; + block.shading_config->mesh_width = meshSize_; + block.shading_config->mesh_height = meshSize_; + + std::copy(mesh_.begin(), mesh_.end(), block.shading_config->mesh); + + return block.header->size; +} + +size_t Lsc::fillSelectionParamsBlock(mali_c55_params_block block, uint8_t bank, + uint8_t alpha) const +{ + block.header->type = MALI_C55_PARAM_MESH_SHADING_SELECTION; + block.header->flags = MALI_C55_PARAM_BLOCK_FL_NONE; + block.header->size = sizeof(struct mali_c55_params_mesh_shading_selection); + + block.shading_selection->mesh_alpha_bank_r = bank; + block.shading_selection->mesh_alpha_bank_g = bank; + block.shading_selection->mesh_alpha_bank_b = bank; + block.shading_selection->mesh_alpha_r = alpha; + block.shading_selection->mesh_alpha_g = alpha; + block.shading_selection->mesh_alpha_b = alpha; + block.shading_selection->mesh_strength = 0x1000; /* Otherwise known as 1.0 */ + + return block.header->size; +} + +std::tuple<uint8_t, uint8_t> Lsc::findBankAndAlpha(uint32_t ct) const +{ + unsigned int i; + + ct = std::clamp<uint32_t>(ct, colourTemperatures_.front(), + colourTemperatures_.back()); + + for (i = 0; i < colourTemperatures_.size() - 1; i++) { + if (ct >= colourTemperatures_[i] && + ct <= colourTemperatures_[i + 1]) + break; + } + + /* + * With the clamping, we're guaranteed an index into colourTemperatures_ + * that's <= colourTemperatures_.size() - 1. 
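+ * + * For example, with tables tuned at, say, 2500K and 5000K, a measured + * 3750K gives i = 0 and alpha = (255 * 1250) / 2500 = 127, blending + * the two banks almost equally.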
+ */ + uint8_t alpha = (255 * (ct - colourTemperatures_[i])) / + (colourTemperatures_[i + 1] - colourTemperatures_[i]); + + return { i, alpha }; +} + +void Lsc::prepare(IPAContext &context, [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + mali_c55_params_buffer *params) +{ + /* + * For each frame we assess the colour temperature of the **last** frame + * and then select an appropriately blended table of coefficients based + * on that ct. As a bit of a shortcut, if we've only a single table the + * handling is somewhat simpler; if it's the first frame we just select + * that table and if we're past the first frame then we can just do + * nothing - the config will never change. + */ + uint32_t temperatureK = context.activeState.agc.temperatureK; + uint8_t bank, alpha; + + if (colourTemperatures_.size() == 1) { + if (frame > 0) + return; + + bank = 0; + alpha = 0; + } else { + std::tie(bank, alpha) = findBankAndAlpha(temperatureK); + } + + mali_c55_params_block block; + block.data = &params->data[params->total_size]; + + params->total_size += fillSelectionParamsBlock(block, bank, alpha); + + if (frame > 0) + return; + + /* + * If this is the first frame, we need to load the parsed coefficient + * tables from tuning data to the ISP. + */ + block.data = &params->data[params->total_size]; + params->total_size += fillConfigParamsBlock(block); +} + +REGISTER_IPA_ALGORITHM(Lsc, "Lsc") + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/lsc.h b/src/ipa/mali-c55/algorithms/lsc.h new file mode 100644 index 00000000..e613277a --- /dev/null +++ b/src/ipa/mali-c55/algorithms/lsc.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board Oy + * + * lsc.h - Mali-C55 Lens shading correction algorithm + */ + +#include <map> +#include <tuple> + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::mali_c55::algorithms { + +class Lsc : public Algorithm +{ +public: + Lsc() = default; + ~Lsc() = default; + + int init(IPAContext &context, const YamlObject &tuningData) override; + void prepare(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + mali_c55_params_buffer *params) override; +private: + static constexpr unsigned int kRedOffset = 0; + static constexpr unsigned int kGreenOffset = 1024; + static constexpr unsigned int kBlueOffset = 2048; + + size_t fillConfigParamsBlock(mali_c55_params_block block) const; + size_t fillSelectionParamsBlock(mali_c55_params_block block, + uint8_t bank, uint8_t alpha) const; + std::tuple<uint8_t, uint8_t> findBankAndAlpha(uint32_t ct) const; + + std::vector<uint32_t> mesh_ = std::vector<uint32_t>(3072); + std::vector<uint32_t> colourTemperatures_; + uint32_t meshScale_; + uint32_t meshSize_; +}; + +} /* namespace ipa::mali_c55::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/algorithms/meson.build b/src/ipa/mali-c55/algorithms/meson.build new file mode 100644 index 00000000..1665da07 --- /dev/null +++ b/src/ipa/mali-c55/algorithms/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: CC0-1.0 + +mali_c55_ipa_algorithms = files([ + 'agc.cpp', + 'awb.cpp', + 'blc.cpp', + 'lsc.cpp', +]) diff --git a/src/ipa/mali-c55/data/imx415.yaml b/src/ipa/mali-c55/data/imx415.yaml new file mode 100644 index 00000000..126b427a --- /dev/null +++ b/src/ipa/mali-c55/data/imx415.yaml @@ -0,0 +1,325 @@ +# SPDX-License-Identifier: CC0-1.0 +%YAML 1.1 +--- +version: 1 +algorithms: + - 
Agc: + - Awb: + - BlackLevelCorrection: + offset00: 51200 + offset01: 51200 + offset10: 51200 + offset11: 51200 + - Lsc: + meshScale: 4 # 1.0 - 2.0 Gain + sets: + - ct: 2500 + r: [ + 21, 20, 19, 17, 15, 14, 12, 11, 9, 9, 9, 9, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 13, 13, 10, 10, 13, 16, 17, 18, 21, 22, + 21, 20, 18, 16, 14, 13, 12, 11, 10, 9, 9, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 12, 15, 17, 18, 21, 21, + 20, 19, 17, 16, 14, 13, 12, 11, 10, 9, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 8, 8, 8, 11, 15, 17, 18, 21, 21, + 19, 19, 17, 15, 14, 13, 12, 11, 10, 8, 8, 7, 7, 7, 6, 6, 7, 7, 8, 8, 8, 8, 8, 7, 7, 8, 10, 14, 17, 18, 20, 22, + 19, 18, 17, 15, 14, 13, 11, 11, 9, 8, 8, 7, 7, 6, 5, 5, 5, 5, 6, 7, 8, 7, 7, 6, 7, 7, 10, 12, 16, 18, 20, 22, + 18, 18, 16, 15, 14, 12, 11, 10, 9, 8, 6, 6, 5, 5, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 8, 12, 16, 18, 19, 20, + 18, 18, 16, 14, 13, 12, 11, 9, 9, 7, 6, 5, 5, 5, 4, 4, 4, 5, 5, 4, 4, 5, 5, 5, 5, 6, 8, 11, 15, 18, 18, 19, + 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 7, 9, 14, 17, 18, 18, + 18, 17, 15, 14, 13, 12, 11, 9, 8, 7, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 6, 8, 12, 17, 18, 18, + 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 3, 3, 4, 4, 4, 5, 6, 8, 12, 16, 19, 19, + 17, 16, 15, 13, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 3, 3, 3, 4, 4, 5, 6, 9, 12, 16, 19, 20, + 17, 15, 15, 13, 12, 11, 10, 8, 6, 6, 5, 4, 3, 3, 3, 2, 3, 3, 4, 4, 3, 3, 2, 3, 4, 5, 6, 9, 11, 16, 19, 20, + 17, 15, 15, 14, 11, 11, 10, 8, 6, 5, 5, 4, 3, 3, 2, 2, 2, 3, 3, 3, 3, 3, 2, 3, 4, 5, 6, 8, 11, 16, 18, 19, + 16, 16, 15, 13, 11, 11, 10, 7, 6, 5, 4, 4, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 11, 14, 17, 18, + 16, 16, 14, 13, 11, 10, 9, 7, 6, 5, 4, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 4, 6, 8, 10, 14, 17, 18, + 16, 15, 14, 13, 13, 10, 9, 7, 6, 4, 4, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 8, 11, 17, 18, 19, + 16, 15, 14, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 6, 8, 12, 17, 19, 20, + 17, 15, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 6, 8, 13, 16, 19, 21, + 17, 16, 15, 13, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 7, 8, 13, 16, 19, 20, + 17, 16, 15, 14, 13, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20, + 18, 16, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 8, 9, 13, 17, 20, 20, + 18, 16, 16, 15, 14, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20, + 18, 17, 16, 15, 14, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 9, 12, 15, 19, 20, 20, + 18, 18, 17, 16, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 7, 9, 12, 15, 17, 19, 20, 20, + 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 8, 7, 6, 5, 4, 2, 1, 2, 3, 4, 5, 5, 6, 6, 8, 10, 13, 16, 18, 20, 20, 21, + 19, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 6, 7, 10, 11, 14, 17, 19, 20, 21, 22, + 20, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 6, 6, 7, 10, 12, 14, 18, 20, 21, 22, 23, + 21, 20, 19, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 5, 4, 4, 5, 6, 6, 7, 8, 11, 13, 16, 19, 21, 22, 22, 22, + 22, 21, 20, 19, 18, 17, 16, 14, 13, 12, 12, 10, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 9, 12, 14, 18, 20, 21, 22, 22, 22, + 23, 22, 21, 20, 19, 17, 16, 15, 14, 14, 13, 12, 10, 9, 8, 7, 6, 5, 5, 6, 7, 8, 8, 10, 12, 15, 18, 20, 21, 22, 22, 22, + 24, 23, 22, 21, 20, 18, 17, 
16, 15, 15, 14, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 11, 14, 17, 19, 20, 21, 21, 21, 21, + 24, 24, 23, 21, 20, 19, 17, 16, 15, 15, 15, 14, 14, 14, 11, 9, 6, 5, 5, 6, 8, 8, 10, 12, 15, 17, 20, 20, 21, 21, 21, 21, + ] + g: [ + 19, 18, 17, 15, 13, 12, 10, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 9, 9, 11, 15, 15, 16, 19, 20, + 19, 18, 16, 15, 12, 12, 10, 10, 8, 7, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 7, 7, 11, 14, 15, 16, 19, 19, + 18, 17, 16, 14, 12, 12, 10, 10, 8, 7, 7, 6, 6, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 10, 14, 15, 17, 19, 19, + 17, 17, 16, 14, 12, 12, 10, 10, 8, 7, 6, 6, 6, 5, 5, 4, 5, 5, 6, 6, 6, 7, 7, 5, 6, 6, 9, 13, 15, 17, 18, 20, + 17, 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 5, 4, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 6, 8, 11, 15, 17, 18, 20, + 17, 17, 15, 13, 12, 11, 9, 9, 8, 7, 5, 5, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 7, 11, 15, 17, 18, 18, + 17, 16, 15, 13, 12, 11, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 9, 14, 17, 17, 18, + 17, 16, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 6, 8, 13, 16, 17, 17, + 17, 15, 14, 13, 12, 11, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 12, 16, 17, 17, + 17, 15, 14, 12, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 3, 3, 3, 4, 5, 7, 11, 15, 18, 18, + 16, 14, 13, 12, 11, 10, 9, 7, 7, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 4, 5, 8, 11, 15, 18, 19, + 16, 14, 13, 12, 11, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 4, 5, 8, 10, 15, 18, 19, + 16, 14, 14, 13, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 5, 7, 10, 15, 17, 18, + 16, 15, 14, 12, 11, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 6, 10, 14, 17, 17, + 15, 15, 13, 12, 11, 10, 9, 7, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, 2, 3, 5, 7, 10, 14, 17, 18, + 15, 14, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 10, 17, 18, 18, + 15, 14, 14, 13, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 17, 19, 19, + 16, 14, 14, 13, 12, 12, 9, 7, 6, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4, 4, 5, 8, 12, 17, 19, 20, + 16, 15, 14, 13, 12, 12, 9, 7, 7, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 8, 12, 17, 19, 20, + 17, 15, 14, 13, 12, 12, 9, 7, 7, 5, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 8, 12, 17, 19, 20, + 18, 15, 15, 14, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 6, 7, 9, 13, 17, 19, 20, + 18, 16, 15, 14, 13, 12, 9, 9, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 9, 10, 14, 18, 20, 20, + 18, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 9, 12, 15, 19, 20, 20, + 18, 18, 16, 16, 14, 13, 10, 10, 9, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 5, 5, 5, 6, 6, 8, 11, 15, 17, 19, 20, 20, + 18, 18, 16, 16, 14, 13, 12, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 12, 15, 18, 20, 20, 20, + 18, 18, 17, 16, 15, 14, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 5, 5, 5, 6, 9, 11, 14, 16, 19, 20, 20, 21, + 19, 19, 18, 17, 16, 15, 13, 12, 11, 10, 10, 9, 8, 7, 5, 4, 4, 3, 3, 3, 5, 5, 6, 7, 10, 12, 14, 18, 20, 20, 21, 22, + 21, 20, 18, 18, 17, 16, 14, 12, 11, 11, 10, 10, 8, 7, 7, 5, 4, 4, 4, 5, 6, 6, 6, 7, 11, 13, 16, 19, 20, 21, 21, 22, + 22, 21, 20, 19, 18, 16, 15, 14, 12, 12, 12, 10, 9, 8, 7, 6, 5, 4, 5, 6, 6, 7, 7, 8, 12, 13, 17, 19, 21, 21, 21, 21, + 23, 22, 21, 20, 18, 17, 16, 16, 14, 14, 13, 12, 10, 10, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 21, 21, 21, 20, + 23, 22, 
22, 21, 20, 18, 17, 16, 15, 15, 15, 14, 13, 11, 9, 8, 6, 6, 6, 6, 7, 8, 9, 10, 13, 16, 19, 20, 20, 21, 21, 20, + 24, 23, 22, 21, 20, 19, 17, 17, 16, 16, 15, 15, 14, 14, 11, 9, 6, 6, 6, 6, 8, 8, 10, 12, 15, 17, 19, 20, 20, 21, 21, 20, + ] + b: [ + 11, 9, 9, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 4, 5, 5, 5, 4, 5, 4, 4, 4, 7, 7, 3, 3, 5, 8, 8, 9, 11, 11, + 11, 10, 8, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 4, 5, 4, 4, 4, 4, 4, 2, 2, 5, 8, 8, 9, 11, 11, + 10, 10, 7, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 3, 1, 1, 5, 7, 9, 9, 11, 11, + 10, 9, 8, 7, 6, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 3, 2, 1, 1, 4, 7, 9, 10, 12, 13, + 9, 9, 8, 7, 6, 5, 5, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 4, 3, 2, 1, 1, 1, 4, 6, 9, 10, 12, 13, + 9, 9, 9, 7, 6, 6, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 3, 3, 2, 1, 2, 2, 2, 1, 1, 1, 2, 6, 9, 10, 11, 12, + 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 1, 2, 5, 9, 11, 11, 11, + 8, 9, 9, 7, 7, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 8, 11, 11, 11, + 9, 9, 8, 7, 7, 7, 6, 4, 4, 3, 3, 2, 2, 2, 2, 2, 3, 3, 2, 1, 1, 1, 1, 0, 0, 0, 1, 3, 7, 11, 11, 11, + 9, 9, 8, 7, 7, 7, 6, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 0, 0, 0, 0, 0, 1, 3, 7, 10, 12, 13, + 9, 9, 8, 8, 7, 7, 6, 5, 4, 3, 2, 1, 2, 1, 2, 2, 2, 3, 2, 1, 0, 0, 0, 0, 0, 0, 1, 4, 7, 10, 13, 13, + 9, 8, 8, 8, 7, 7, 6, 5, 3, 3, 2, 1, 1, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 7, 11, 13, 13, + 9, 8, 8, 7, 7, 6, 6, 5, 3, 3, 2, 2, 2, 1, 1, 1, 2, 3, 3, 2, 1, 0, 0, 0, 0, 1, 2, 4, 6, 11, 13, 13, + 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 2, 1, 1, 1, 1, 2, 3, 3, 3, 1, 1, 0, 0, 1, 1, 2, 3, 6, 10, 12, 13, + 9, 8, 8, 7, 7, 6, 6, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1, 1, 1, 1, 1, 2, 2, 4, 6, 10, 13, 13, + 9, 9, 8, 8, 8, 6, 6, 4, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 3, 3, 1, 1, 1, 1, 2, 2, 3, 4, 6, 13, 14, 14, + 9, 9, 8, 8, 8, 8, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 3, 5, 9, 13, 15, 15, + 10, 9, 9, 9, 10, 10, 6, 6, 5, 3, 2, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16, + 10, 10, 9, 9, 10, 10, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 9, 13, 15, 16, + 11, 10, 9, 9, 10, 10, 7, 6, 6, 3, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 6, 9, 13, 15, 16, + 12, 10, 10, 10, 10, 10, 7, 6, 6, 4, 3, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5, 5, 6, 7, 10, 14, 15, 16, + 12, 11, 10, 10, 10, 10, 8, 7, 6, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 8, 11, 15, 16, 16, + 12, 12, 11, 11, 10, 10, 9, 8, 7, 6, 6, 4, 4, 3, 2, 2, 2, 2, 3, 4, 4, 4, 5, 5, 5, 6, 7, 10, 13, 15, 16, 15, + 12, 12, 12, 12, 11, 10, 10, 8, 8, 7, 7, 6, 5, 5, 3, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 7, 10, 13, 14, 16, 16, 16, + 12, 12, 13, 12, 12, 11, 11, 9, 8, 8, 8, 7, 6, 6, 5, 4, 3, 3, 3, 5, 6, 6, 6, 6, 7, 9, 11, 14, 15, 17, 17, 16, + 13, 13, 13, 13, 12, 12, 11, 11, 10, 10, 9, 8, 7, 7, 6, 5, 5, 4, 4, 4, 5, 6, 6, 6, 9, 10, 12, 14, 16, 17, 17, 18, + 13, 13, 14, 13, 13, 13, 12, 12, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 6, 6, 6, 7, 9, 11, 12, 16, 17, 17, 17, 18, + 15, 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 5, 6, 7, 7, 8, 10, 12, 14, 17, 17, 18, 17, 17, + 16, 16, 16, 16, 15, 14, 14, 14, 13, 13, 12, 11, 11, 9, 8, 7, 7, 6, 6, 6, 7, 7, 8, 8, 10, 12, 16, 17, 18, 18, 17, 17, + 18, 17, 17, 16, 16, 15, 14, 14, 14, 14, 14, 13, 11, 11, 9, 8, 7, 7, 6, 6, 7, 7, 8, 9, 10, 13, 16, 17, 17, 18, 17, 16, + 18, 17, 17, 17, 16, 15, 15, 15, 15, 15, 15, 14, 14, 12, 10, 9, 7, 7, 6, 6, 7, 7, 8, 9, 11, 14, 16, 16, 17, 17, 16, 15, + 18, 18, 17, 17, 
17, 16, 15, 15, 15, 16, 15, 15, 14, 14, 12, 9, 7, 6, 6, 6, 7, 7, 9, 10, 12, 14, 15, 16, 16, 16, 15, 15, + ] + - ct: 5500 + r: [ + 19, 18, 17, 16, 15, 13, 11, 10, 9, 9, 9, 8, 8, 8, 8, 8, 8, 9, 10, 10, 8, 8, 9, 9, 9, 11, 14, 15, 16, 16, 18, 18, + 18, 18, 17, 15, 14, 13, 11, 11, 9, 9, 8, 8, 7, 7, 7, 7, 7, 8, 9, 8, 8, 8, 9, 9, 8, 8, 11, 14, 16, 17, 17, 18, + 18, 17, 17, 15, 14, 13, 12, 11, 9, 9, 8, 7, 7, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 16, 17, 18, 18, + 17, 17, 16, 15, 14, 12, 11, 11, 9, 8, 7, 7, 6, 5, 5, 5, 5, 6, 8, 7, 8, 8, 8, 6, 6, 6, 9, 12, 16, 16, 18, 19, + 17, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 7, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 6, 6, 8, 11, 15, 16, 17, 19, + 18, 17, 16, 14, 13, 12, 11, 10, 10, 8, 7, 6, 5, 5, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 10, 14, 16, 17, 18, + 18, 17, 16, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 4, 5, 5, 5, 5, 5, 6, 10, 13, 16, 16, 17, + 18, 16, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 16, 16, 16, + 17, 16, 15, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 5, 7, 11, 16, 16, 16, + 17, 16, 15, 12, 12, 12, 10, 8, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 5, 7, 10, 16, 16, 16, + 16, 16, 14, 12, 12, 11, 10, 8, 7, 6, 4, 4, 3, 3, 3, 3, 3, 4, 3, 3, 2, 2, 2, 3, 3, 4, 4, 7, 10, 14, 16, 16, + 16, 15, 14, 13, 12, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 4, 3, 3, 3, 2, 2, 3, 3, 3, 5, 7, 10, 14, 15, 16, + 16, 15, 15, 13, 11, 11, 11, 9, 7, 5, 5, 4, 3, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 9, 14, 15, 16, + 16, 15, 15, 13, 11, 11, 11, 10, 7, 5, 4, 4, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 3, 3, 5, 6, 10, 13, 15, 17, + 15, 15, 14, 12, 11, 11, 11, 10, 7, 4, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 3, 3, 3, 5, 6, 9, 13, 16, 17, + 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 5, 7, 10, 15, 17, 17, + 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 16, 17, 18, + 16, 15, 15, 12, 12, 11, 10, 9, 6, 4, 4, 3, 1, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 7, 12, 15, 17, 18, + 16, 16, 15, 12, 12, 11, 10, 8, 6, 5, 4, 3, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 8, 12, 15, 17, 18, + 15, 15, 15, 13, 12, 12, 10, 8, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 7, 9, 12, 16, 17, 18, + 15, 15, 15, 13, 13, 12, 10, 8, 7, 5, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 7, 9, 13, 16, 18, 18, + 15, 15, 15, 14, 13, 12, 10, 8, 7, 6, 5, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 3, 5, 5, 6, 8, 10, 14, 17, 18, 19, + 16, 16, 16, 15, 13, 12, 10, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 1, 2, 3, 3, 5, 5, 6, 7, 9, 12, 15, 18, 18, 19, + 17, 16, 16, 16, 13, 12, 11, 10, 9, 8, 7, 7, 6, 5, 4, 2, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 16, 18, 19, 19, + 18, 18, 17, 16, 14, 13, 12, 10, 9, 8, 8, 7, 7, 5, 5, 3, 2, 2, 4, 4, 5, 5, 5, 5, 7, 10, 12, 16, 17, 19, 19, 20, + 18, 18, 17, 16, 15, 13, 12, 10, 10, 9, 9, 9, 7, 6, 5, 4, 3, 3, 4, 4, 5, 5, 5, 6, 7, 11, 15, 17, 18, 19, 19, 20, + 19, 18, 18, 17, 16, 14, 13, 11, 10, 10, 9, 9, 8, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 7, 9, 11, 15, 17, 19, 19, 20, 21, + 20, 19, 19, 19, 17, 16, 13, 12, 11, 11, 10, 9, 9, 7, 6, 5, 5, 4, 4, 5, 6, 7, 7, 8, 9, 12, 15, 18, 19, 19, 20, 20, + 21, 20, 20, 19, 19, 16, 16, 13, 12, 12, 11, 11, 9, 8, 7, 6, 6, 5, 5, 6, 7, 7, 8, 8, 10, 13, 17, 19, 19, 20, 20, 20, + 22, 21, 20, 20, 19, 17, 16, 14, 13, 13, 14, 12, 11, 9, 8, 7, 6, 5, 5, 6, 7, 7, 8, 9, 11, 15, 17, 19, 19, 20, 20, 20, + 22, 22, 21, 20, 19, 18, 16, 15, 14, 14, 15, 15, 13, 11, 9, 8, 7, 5, 5, 6, 7, 8, 9, 10, 
13, 16, 18, 18, 19, 20, 19, 19, + 22, 22, 21, 20, 19, 19, 16, 16, 15, 15, 15, 15, 15, 13, 10, 9, 7, 5, 5, 6, 8, 8, 10, 11, 14, 16, 18, 18, 19, 19, 19, 19, + ] + g: [ + 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 7, 8, 8, 6, 6, 7, 7, 7, 9, 12, 13, 13, 14, 14, 14, + 16, 16, 15, 14, 13, 11, 10, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 7, 7, 6, 6, 7, 7, 6, 6, 10, 12, 13, 14, 14, 14, + 16, 15, 15, 13, 13, 11, 10, 9, 8, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 5, 8, 12, 14, 15, 15, 16, + 15, 15, 14, 13, 12, 11, 10, 10, 8, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 5, 5, 5, 7, 11, 14, 15, 15, 17, + 16, 15, 14, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 4, 3, 3, 4, 4, 6, 6, 5, 5, 4, 4, 5, 6, 10, 14, 14, 16, 16, + 16, 15, 15, 13, 12, 11, 10, 10, 9, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 9, 13, 14, 15, 16, + 16, 15, 15, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 15, 15, + 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 6, 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15, + 16, 15, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 10, 14, 15, 14, + 16, 15, 14, 12, 11, 11, 9, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 6, 10, 15, 15, 15, + 15, 15, 13, 12, 11, 11, 10, 8, 7, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 6, 9, 13, 15, 15, + 15, 14, 13, 12, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 4, 6, 9, 13, 14, 15, + 15, 14, 14, 13, 11, 11, 10, 8, 7, 5, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15, + 15, 14, 14, 13, 11, 11, 11, 10, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 1, 2, 2, 2, 4, 5, 8, 13, 14, 15, + 15, 14, 13, 12, 11, 11, 10, 9, 7, 4, 4, 3, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 5, 8, 13, 15, 16, + 15, 15, 13, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 6, 8, 15, 16, 16, + 15, 15, 14, 12, 11, 11, 10, 9, 7, 4, 4, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 6, 11, 15, 16, 17, + 15, 15, 15, 12, 12, 11, 10, 10, 7, 4, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 6, 11, 15, 17, 17, + 15, 15, 15, 12, 12, 12, 10, 9, 7, 5, 4, 2, 1, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5, 7, 11, 15, 17, 17, + 15, 15, 15, 13, 12, 12, 10, 9, 7, 5, 4, 3, 2, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 4, 6, 8, 12, 15, 17, 17, + 15, 15, 15, 13, 13, 12, 10, 9, 7, 6, 5, 3, 2, 1, 0, 0, 1, 1, 1, 2, 3, 3, 3, 3, 4, 5, 6, 9, 13, 16, 17, 18, + 15, 15, 15, 14, 13, 13, 10, 9, 8, 6, 6, 5, 3, 2, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 10, 14, 17, 18, 18, + 15, 16, 16, 15, 13, 13, 11, 9, 8, 7, 6, 6, 5, 4, 2, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 8, 12, 15, 17, 18, 18, + 16, 16, 16, 16, 13, 13, 11, 10, 9, 8, 7, 7, 6, 6, 4, 2, 2, 2, 2, 3, 5, 5, 5, 5, 6, 8, 11, 14, 16, 18, 18, 18, + 17, 17, 17, 16, 14, 13, 13, 11, 10, 9, 8, 8, 7, 6, 5, 4, 2, 2, 4, 5, 5, 5, 5, 5, 7, 9, 12, 15, 17, 18, 18, 18, + 18, 18, 17, 16, 15, 14, 13, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 3, 4, 4, 5, 5, 5, 6, 7, 10, 15, 16, 18, 18, 18, 19, + 19, 18, 18, 17, 16, 14, 13, 12, 11, 10, 10, 9, 9, 7, 6, 5, 5, 4, 4, 4, 6, 6, 6, 6, 8, 10, 15, 16, 18, 18, 18, 19, + 20, 19, 19, 19, 17, 16, 14, 13, 12, 11, 11, 10, 9, 7, 7, 6, 5, 4, 4, 5, 6, 6, 6, 7, 9, 11, 15, 17, 18, 18, 18, 18, + 22, 20, 20, 20, 19, 17, 16, 14, 13, 13, 12, 11, 10, 9, 7, 6, 6, 5, 5, 6, 7, 7, 7, 8, 9, 12, 16, 18, 18, 19, 18, 18, + 22, 22, 21, 20, 19, 18, 17, 16, 14, 14, 15, 13, 11, 10, 9, 8, 7, 6, 5, 6, 7, 8, 8, 9, 10, 14, 17, 18, 18, 19, 18, 17, + 22, 22, 22, 21, 20, 19, 17, 17, 16, 16, 16, 16, 14, 12, 10, 8, 
7, 6, 6, 7, 8, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 17, + 22, 22, 22, 21, 20, 20, 18, 17, 16, 16, 17, 17, 16, 14, 11, 10, 8, 6, 6, 7, 8, 8, 10, 11, 14, 17, 18, 18, 18, 18, 18, 17, + ] + b: [ + 13, 12, 12, 12, 11, 9, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 6, 7, 7, 6, 6, 5, 5, 5, 6, 9, 9, 9, 10, 9, 8, + 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 5, 5, 4, 4, 7, 9, 9, 10, 10, 9, + 13, 13, 12, 11, 11, 9, 9, 8, 7, 7, 6, 6, 5, 4, 4, 4, 4, 5, 6, 6, 6, 5, 5, 5, 3, 3, 6, 9, 10, 11, 10, 11, + 13, 13, 12, 11, 11, 10, 9, 9, 7, 7, 6, 5, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 4, 3, 3, 5, 8, 10, 11, 11, 12, + 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 4, 3, 3, 3, 4, 4, 5, 5, 5, 4, 3, 2, 3, 4, 8, 11, 11, 11, 12, + 13, 13, 13, 11, 11, 10, 9, 9, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 12, + 14, 14, 13, 11, 11, 10, 9, 8, 8, 7, 6, 6, 4, 3, 3, 3, 3, 3, 2, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11, + 14, 14, 13, 11, 11, 10, 9, 8, 7, 7, 6, 5, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 11, 11, 11, + 14, 14, 13, 12, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 11, + 14, 13, 13, 12, 12, 11, 10, 8, 7, 6, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13, + 13, 13, 13, 12, 12, 12, 11, 9, 7, 6, 4, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 12, 12, 13, + 14, 13, 13, 12, 12, 11, 11, 9, 7, 6, 5, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 13, 13, + 14, 13, 13, 13, 12, 11, 11, 9, 7, 6, 5, 4, 2, 1, 1, 1, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 4, 7, 12, 13, 13, + 14, 13, 13, 13, 12, 12, 12, 11, 7, 5, 5, 4, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 13, + 14, 14, 13, 13, 12, 12, 11, 11, 7, 5, 4, 3, 1, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 3, 3, 3, 4, 4, 7, 12, 13, 14, + 14, 14, 13, 13, 12, 12, 11, 11, 8, 5, 4, 3, 1, 1, 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 8, 13, 15, 15, + 14, 15, 14, 13, 13, 12, 11, 11, 8, 5, 4, 3, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 10, 15, 15, 16, + 15, 15, 15, 14, 13, 13, 12, 11, 8, 5, 4, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 6, 11, 15, 16, 16, + 15, 15, 15, 14, 14, 14, 12, 11, 8, 6, 5, 3, 1, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 7, 11, 15, 16, 16, + 15, 15, 15, 15, 14, 14, 12, 11, 9, 7, 5, 3, 2, 1, 0, 1, 1, 1, 2, 3, 3, 4, 4, 4, 4, 5, 6, 8, 12, 15, 16, 16, + 15, 16, 15, 15, 15, 14, 13, 11, 9, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 7, 9, 12, 16, 16, 16, + 15, 16, 16, 15, 15, 15, 13, 11, 10, 8, 7, 6, 4, 3, 2, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 8, 10, 14, 16, 16, 16, + 16, 16, 17, 16, 15, 15, 14, 12, 11, 9, 8, 8, 6, 5, 3, 2, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 9, 12, 15, 16, 16, 16, + 16, 17, 18, 17, 16, 15, 14, 13, 11, 10, 9, 9, 8, 6, 5, 3, 3, 3, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 16, 16, 16, + 17, 18, 18, 18, 17, 16, 16, 14, 12, 11, 10, 9, 8, 7, 6, 5, 3, 3, 5, 6, 6, 6, 6, 6, 8, 10, 12, 16, 17, 17, 17, 16, + 18, 18, 18, 18, 18, 17, 16, 14, 13, 12, 11, 11, 8, 8, 6, 6, 5, 4, 5, 5, 6, 6, 6, 7, 8, 11, 15, 16, 17, 17, 16, 16, + 18, 19, 19, 19, 19, 17, 17, 15, 14, 13, 12, 11, 11, 8, 7, 6, 6, 5, 5, 5, 7, 7, 7, 8, 9, 11, 15, 17, 17, 17, 16, 16, + 20, 20, 20, 20, 19, 19, 17, 17, 15, 14, 14, 12, 11, 9, 8, 7, 7, 6, 6, 6, 8, 8, 8, 8, 9, 12, 15, 18, 18, 16, 16, 16, + 22, 22, 22, 22, 21, 20, 19, 18, 17, 16, 15, 14, 12, 11, 10, 8, 8, 7, 7, 7, 8, 8, 8, 9, 10, 13, 17, 18, 18, 16, 16, 15, + 23, 22, 22, 22, 22, 21, 20, 20, 18, 18, 19, 16, 14, 13, 11, 10, 9, 8, 7, 7, 8, 9, 9, 10, 11, 15, 17, 18, 17, 17, 16, 14, + 23, 23, 23, 23, 23, 22, 21, 21, 20, 
20, 20, 19, 18, 15, 12, 11, 10, 8, 8, 8, 9, 9, 10, 11, 13, 17, 17, 17, 17, 16, 15, 13, + 23, 23, 24, 24, 23, 23, 22, 21, 21, 21, 20, 20, 19, 17, 14, 12, 11, 9, 8, 9, 9, 10, 10, 12, 15, 17, 17, 17, 16, 16, 15, 13, + ] + - ct: 8500 + r: [ + 18, 17, 16, 15, 13, 12, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 12, 12, 8, 8, 10, 13, 14, 15, 17, 18, + 17, 17, 16, 14, 12, 11, 10, 10, 8, 8, 8, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 10, 13, 14, 15, 17, 17, + 17, 16, 15, 13, 12, 11, 10, 10, 8, 8, 7, 7, 7, 6, 7, 7, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 9, 12, 14, 15, 17, 17, + 16, 16, 15, 13, 12, 11, 10, 10, 9, 7, 7, 7, 6, 6, 5, 5, 6, 6, 7, 7, 7, 7, 7, 5, 5, 5, 8, 11, 14, 15, 17, 19, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 5, 7, 10, 13, 15, 17, 19, + 15, 15, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 5, 5, 5, 4, 4, 4, 4, 4, 4, 5, 6, 9, 13, 15, 16, 17, + 15, 15, 13, 12, 11, 11, 9, 8, 8, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 5, 8, 13, 15, 15, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 5, 7, 11, 14, 15, 15, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 4, 3, 3, 3, 2, 3, 3, 3, 3, 4, 6, 10, 14, 15, 15, + 15, 13, 13, 11, 11, 10, 9, 8, 7, 6, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 10, 13, 16, 16, + 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 3, 4, 6, 9, 13, 16, 17, + 14, 13, 12, 11, 10, 10, 9, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 1, 2, 2, 3, 4, 6, 9, 13, 16, 17, + 14, 13, 12, 12, 10, 10, 9, 7, 5, 5, 4, 3, 3, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 13, 15, 17, + 14, 13, 12, 11, 10, 9, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 1, 1, 2, 3, 4, 6, 9, 12, 15, 16, + 13, 13, 12, 11, 10, 9, 8, 7, 5, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 3, 4, 6, 8, 12, 15, 16, + 13, 13, 12, 11, 11, 9, 8, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 3, 3, 4, 6, 9, 15, 16, 16, + 13, 13, 12, 12, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 6, 10, 15, 17, 18, + 14, 13, 13, 12, 12, 11, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 7, 11, 15, 17, 18, + 14, 13, 13, 12, 12, 11, 9, 7, 7, 4, 3, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 4, 4, 5, 7, 11, 15, 17, 18, + 15, 13, 13, 12, 12, 11, 8, 7, 6, 4, 3, 1, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 7, 11, 15, 17, 18, + 15, 14, 13, 13, 12, 11, 8, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 3, 5, 5, 7, 8, 11, 15, 17, 18, + 15, 14, 14, 13, 12, 11, 9, 8, 7, 6, 5, 3, 2, 1, 0, 0, 0, 1, 2, 2, 3, 3, 3, 5, 5, 6, 8, 9, 13, 16, 18, 18, + 15, 14, 14, 13, 12, 11, 9, 9, 8, 7, 6, 5, 3, 3, 1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 8, 11, 14, 17, 18, 18, + 16, 16, 15, 14, 13, 12, 10, 9, 8, 8, 7, 6, 5, 5, 3, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 8, 10, 13, 15, 17, 18, 18, + 16, 16, 15, 14, 14, 12, 11, 10, 9, 9, 8, 7, 6, 5, 4, 3, 1, 1, 2, 4, 4, 5, 5, 5, 7, 9, 11, 14, 16, 18, 18, 19, + 16, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 3, 3, 4, 5, 5, 6, 9, 10, 13, 15, 18, 18, 19, 19, + 17, 17, 16, 15, 15, 14, 12, 11, 11, 10, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 5, 5, 6, 6, 9, 11, 13, 17, 18, 19, 19, 20, + 19, 18, 17, 17, 15, 15, 13, 12, 11, 11, 10, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 12, 15, 18, 19, 19, 19, 20, + 19, 19, 18, 17, 17, 16, 14, 13, 12, 12, 11, 10, 9, 8, 7, 6, 6, 5, 5, 6, 6, 6, 7, 8, 11, 12, 16, 18, 19, 19, 19, 19, + 20, 19, 19, 18, 17, 16, 15, 14, 13, 13, 12, 12, 10, 9, 8, 7, 6, 5, 5, 6, 6, 7, 8, 9, 11, 14, 17, 18, 19, 19, 19, 19, + 20, 20, 19, 
18, 18, 17, 15, 15, 14, 14, 14, 13, 12, 11, 9, 7, 6, 5, 5, 6, 7, 7, 8, 9, 12, 15, 17, 18, 18, 19, 18, 18, + 21, 20, 20, 19, 18, 18, 15, 15, 14, 14, 14, 14, 13, 13, 11, 9, 6, 5, 5, 6, 7, 7, 9, 10, 13, 15, 18, 18, 18, 18, 18, 18, + ] + g: [ + 16, 16, 15, 13, 12, 10, 9, 8, 7, 7, 7, 6, 6, 6, 7, 7, 7, 6, 6, 6, 6, 6, 11, 11, 6, 6, 9, 12, 12, 13, 15, 15, + 16, 15, 14, 13, 11, 10, 9, 9, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 9, 12, 12, 13, 15, 15, + 15, 15, 14, 12, 11, 11, 9, 9, 8, 7, 6, 6, 6, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 5, 4, 5, 8, 11, 13, 14, 15, 15, + 15, 15, 14, 12, 11, 10, 9, 9, 8, 6, 6, 6, 5, 5, 4, 4, 4, 4, 6, 6, 6, 6, 6, 4, 4, 4, 7, 11, 13, 14, 15, 17, + 15, 15, 13, 12, 11, 10, 9, 9, 8, 6, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 4, 4, 3, 4, 6, 9, 13, 14, 15, 17, + 15, 14, 13, 12, 11, 10, 9, 8, 8, 6, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 4, 5, 8, 13, 14, 14, 15, + 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 4, 7, 12, 14, 14, 14, + 14, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 14, 14, 14, + 14, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 5, 10, 14, 14, 14, + 15, 13, 12, 11, 11, 10, 9, 8, 7, 6, 4, 4, 3, 3, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 5, 10, 13, 15, 16, + 14, 13, 12, 11, 11, 10, 9, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 2, 2, 2, 2, 3, 6, 9, 13, 16, 16, + 14, 13, 12, 11, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 4, 6, 8, 13, 16, 16, + 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 2, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 4, 5, 8, 13, 15, 16, + 14, 13, 13, 12, 10, 10, 9, 7, 5, 5, 4, 3, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 3, 5, 8, 12, 14, 15, + 14, 13, 12, 11, 10, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 3, 3, 5, 8, 12, 15, 15, + 14, 13, 12, 11, 11, 10, 9, 7, 6, 4, 3, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 3, 4, 5, 8, 14, 16, 16, + 14, 13, 13, 12, 12, 10, 9, 7, 6, 4, 3, 2, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 10, 15, 16, 17, + 14, 13, 13, 13, 12, 12, 9, 7, 7, 4, 3, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 6, 11, 15, 17, 17, + 14, 14, 13, 12, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5, 7, 11, 15, 17, 17, + 15, 14, 13, 13, 12, 12, 9, 8, 7, 5, 3, 2, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 7, 11, 15, 17, 17, + 16, 14, 14, 13, 13, 12, 9, 8, 7, 5, 4, 2, 1, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 6, 8, 12, 16, 17, 17, + 16, 15, 14, 14, 13, 12, 10, 9, 7, 6, 5, 4, 2, 1, 0, 0, 0, 1, 2, 3, 3, 3, 3, 5, 5, 6, 8, 9, 13, 17, 17, 17, + 16, 15, 15, 14, 13, 12, 10, 10, 8, 7, 6, 5, 4, 3, 2, 0, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 8, 11, 14, 17, 17, 17, + 16, 16, 15, 15, 14, 13, 11, 10, 9, 8, 7, 6, 5, 5, 3, 2, 1, 1, 2, 3, 5, 5, 5, 5, 6, 7, 10, 13, 16, 17, 17, 17, + 17, 17, 15, 15, 14, 13, 12, 11, 10, 9, 9, 7, 6, 6, 5, 3, 2, 2, 3, 4, 5, 5, 5, 6, 6, 9, 11, 14, 16, 18, 18, 18, + 17, 17, 16, 15, 15, 14, 13, 12, 11, 10, 10, 9, 7, 7, 5, 4, 3, 2, 3, 4, 5, 5, 5, 6, 9, 10, 13, 15, 18, 18, 18, 18, + 18, 17, 17, 16, 15, 15, 14, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 4, 5, 5, 6, 6, 9, 11, 13, 17, 18, 18, 18, 18, + 20, 19, 18, 18, 16, 16, 14, 13, 12, 11, 11, 10, 9, 8, 7, 5, 5, 4, 4, 5, 6, 6, 6, 7, 10, 11, 15, 18, 18, 18, 18, 18, + 20, 20, 19, 18, 18, 17, 15, 14, 13, 13, 12, 11, 10, 9, 8, 6, 6, 5, 5, 6, 6, 7, 7, 8, 11, 12, 16, 18, 18, 18, 18, 18, + 22, 21, 20, 19, 18, 17, 17, 17, 15, 15, 14, 13, 11, 10, 8, 7, 6, 6, 6, 6, 7, 7, 8, 8, 11, 14, 17, 18, 18, 18, 18, 17, + 22, 22, 21, 
20, 19, 18, 17, 17, 17, 16, 16, 15, 14, 12, 10, 8, 7, 6, 6, 7, 7, 8, 8, 10, 13, 16, 18, 18, 18, 18, 18, 16, + 22, 22, 22, 21, 20, 19, 18, 17, 17, 17, 16, 16, 15, 15, 12, 10, 7, 6, 6, 7, 8, 8, 9, 11, 14, 16, 18, 18, 18, 18, 17, 16, + ] + b: [ + 13, 13, 13, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 9, 9, 4, 4, 7, 9, 9, 9, 10, 10, + 13, 13, 12, 11, 10, 9, 9, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 7, 9, 9, 10, 10, 10, + 13, 13, 12, 11, 10, 10, 9, 9, 7, 7, 6, 6, 6, 5, 5, 5, 5, 6, 6, 6, 5, 5, 5, 4, 3, 3, 6, 9, 10, 11, 11, 11, + 13, 13, 12, 11, 10, 10, 9, 9, 8, 7, 6, 6, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5, 5, 3, 2, 3, 5, 9, 10, 11, 11, 13, + 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 6, 5, 5, 4, 4, 3, 3, 4, 4, 5, 5, 5, 3, 2, 2, 3, 5, 8, 11, 11, 12, 13, + 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 7, 11, 11, 11, 11, + 13, 13, 12, 11, 11, 10, 9, 8, 8, 7, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 2, 2, 2, 3, 6, 11, 11, 11, 11, + 13, 13, 12, 11, 11, 11, 9, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 5, 10, 12, 12, 11, + 13, 13, 13, 11, 11, 11, 10, 8, 7, 6, 5, 4, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 12, 12, + 14, 13, 13, 12, 11, 11, 10, 8, 7, 6, 4, 4, 3, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 9, 12, 13, 14, + 13, 13, 12, 12, 11, 11, 10, 8, 7, 6, 4, 3, 2, 2, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 2, 2, 2, 4, 8, 12, 14, 14, + 13, 13, 12, 12, 11, 11, 11, 8, 7, 6, 5, 3, 2, 2, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 3, 4, 8, 12, 14, 14, + 13, 13, 12, 12, 12, 11, 11, 8, 7, 6, 5, 3, 2, 1, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2, 2, 3, 3, 4, 8, 12, 14, 14, + 13, 13, 13, 12, 12, 11, 11, 8, 6, 6, 5, 3, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 4, 7, 12, 13, 14, + 13, 13, 13, 12, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 5, 7, 12, 14, 14, + 14, 14, 13, 13, 13, 11, 10, 8, 7, 5, 4, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 4, 4, 5, 8, 14, 16, 16, + 14, 14, 13, 13, 13, 12, 11, 9, 7, 5, 4, 2, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 6, 10, 15, 16, 16, + 15, 14, 14, 14, 15, 15, 11, 9, 8, 5, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7, 11, 15, 16, 17, + 15, 15, 14, 14, 15, 15, 12, 10, 9, 6, 4, 2, 1, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 5, 5, 6, 7, 11, 15, 16, 16, + 15, 15, 15, 15, 15, 15, 12, 10, 9, 6, 5, 2, 1, 0, 0, 0, 1, 1, 1, 3, 3, 3, 4, 4, 5, 6, 6, 7, 11, 15, 16, 17, + 16, 16, 15, 16, 15, 15, 12, 11, 10, 7, 5, 3, 2, 1, 0, 0, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 9, 12, 16, 16, 16, + 16, 16, 16, 16, 16, 15, 13, 12, 10, 9, 6, 5, 3, 2, 1, 1, 1, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 9, 14, 16, 17, 16, + 16, 16, 16, 16, 16, 15, 14, 12, 11, 10, 8, 6, 5, 4, 2, 1, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 9, 12, 15, 16, 16, 16, + 17, 17, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 6, 5, 4, 2, 2, 2, 3, 4, 6, 6, 6, 6, 7, 8, 11, 14, 16, 17, 16, 16, + 17, 17, 18, 18, 17, 16, 16, 14, 13, 12, 11, 9, 8, 7, 5, 4, 2, 3, 4, 6, 6, 6, 6, 7, 8, 10, 12, 15, 17, 17, 17, 16, + 18, 18, 18, 18, 18, 17, 16, 15, 14, 13, 12, 11, 9, 8, 6, 5, 4, 4, 4, 5, 6, 6, 7, 7, 10, 11, 14, 16, 17, 17, 17, 17, + 18, 18, 19, 19, 19, 18, 17, 16, 15, 14, 14, 11, 10, 9, 7, 6, 6, 5, 5, 5, 6, 7, 7, 7, 10, 12, 14, 17, 17, 17, 17, 17, + 20, 20, 20, 20, 20, 19, 18, 17, 16, 15, 14, 13, 11, 10, 9, 7, 6, 6, 6, 6, 7, 7, 7, 8, 11, 12, 15, 18, 18, 17, 17, 16, + 22, 21, 21, 21, 21, 21, 20, 19, 17, 16, 16, 14, 13, 11, 10, 8, 7, 7, 7, 7, 8, 8, 8, 9, 11, 13, 17, 18, 18, 17, 16, 15, + 23, 22, 22, 22, 22, 21, 21, 20, 19, 19, 18, 16, 14, 13, 11, 9, 8, 8, 8, 8, 8, 8, 9, 10, 12, 15, 18, 18, 
18, 17, 16, 14, + 23, 24, 24, 23, 23, 22, 22, 21, 21, 20, 20, 19, 18, 15, 13, 11, 9, 8, 8, 8, 9, 9, 10, 11, 13, 16, 18, 18, 17, 17, 15, 14, + 24, 24, 24, 24, 24, 23, 22, 22, 22, 22, 20, 20, 19, 18, 15, 13, 10, 9, 9, 9, 9, 10, 11, 12, 15, 17, 18, 18, 17, 16, 15, 13, + ] +... diff --git a/src/ipa/mali-c55/data/meson.build b/src/ipa/mali-c55/data/meson.build new file mode 100644 index 00000000..8a5fdd36 --- /dev/null +++ b/src/ipa/mali-c55/data/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: CC0-1.0 + +conf_files = files([ + 'imx415.yaml', + 'uncalibrated.yaml' +]) + +install_data(conf_files, + install_dir : ipa_data_dir / 'mali-c55') diff --git a/src/ipa/mali-c55/data/uncalibrated.yaml b/src/ipa/mali-c55/data/uncalibrated.yaml new file mode 100644 index 00000000..6dcc0295 --- /dev/null +++ b/src/ipa/mali-c55/data/uncalibrated.yaml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: CC0-1.0 +%YAML 1.1 +--- +version: 1 +algorithms: + - Agc: +... diff --git a/src/ipa/mali-c55/ipa_context.cpp b/src/ipa/mali-c55/ipa_context.cpp new file mode 100644 index 00000000..99f76ecd --- /dev/null +++ b/src/ipa/mali-c55/ipa_context.cpp @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * ipa_context.cpp - MaliC55 IPA Context + */ + +#include "ipa_context.h" + +/** + * \file ipa_context.h + * \brief Context and state information shared between the algorithms + */ + +namespace libcamera::ipa::mali_c55 { + +/** + * \struct IPASessionConfiguration + * \brief Session configuration for the IPA module + * + * The session configuration contains all IPA configuration parameters that + * remain constant during the capture session, from IPA module start to stop. + * It is typically set during the configure() operation of the IPA module, but + * may also be updated in the start() operation. + */ + +/** + * \struct IPAActiveState + * \brief Active state for algorithms + * + * The active state contains all algorithm-specific data that needs to be + * maintained by algorithms across frames. Unlike the session configuration, + * the active state is mutable and constantly updated by algorithms. The active + * state is accessible through the IPAContext structure. + * + * The active state stores two distinct categories of information: + * + * - The consolidated value of all algorithm controls. Requests passed to + * the queueRequest() function store values for controls that the + * application wants to modify for that particular frame, and the + * queueRequest() function updates the active state with those values. + * The active state thus contains a consolidated view of the value of all + * controls handled by the algorithm. + * + * - The value of parameters computed by the algorithm when running in auto + * mode. Algorithms running in auto mode compute new parameters every + * time statistics buffers are received (either synchronously, or + * possibly in a background thread). The latest computed value of those + * parameters is stored in the active state in the process() function. + * + * Each of the members in the active state belongs to a specific algorithm. A + * member may be read by any algorithm, but shall only be written by its owner. + */ + +/** + * \struct IPAFrameContext + * \brief Per-frame context for algorithms + * + * The frame context stores two distinct categories of information: + * + * - The value of the controls to be applied to the frame. 
These values are + * typically set in the queueRequest() function, from the consolidated + * control values stored in the active state. The frame context thus stores + * values for all controls related to the algorithm, not limited to the + * controls specified in the corresponding request, but consolidated from all + * requests that have been queued so far. + * + * For controls that can be set manually or computed by an algorithm + * (depending on the algorithm operation mode), such as for instance the + * colour gains for the AWB algorithm, the control value will be stored in + * the frame context in the queueRequest() function only when operating in + * manual mode. When operating in auto mode, the values are computed by the + * algorithm in process(), stored in the active state, and copied to the + * frame context in prepare(), just before being stored in the ISP parameters + * buffer. + * + * The queueRequest() function can also store ancillary data in the frame + * context, such as flags to indicate if (and what) control values have + * changed compared to the previous request. + * + * - Status information computed by the algorithm for a frame. For instance, + * the colour temperature estimated by the AWB algorithm from ISP statistics + * calculated on a frame is stored in the frame context for that frame in + * the process() function. + */ + +/** + * \struct IPAContext + * \brief Global IPA context data shared between all algorithms + * + * \var IPAContext::configuration + * \brief The IPA session configuration, immutable during the session + * + * \var IPAContext::activeState + * \brief The IPA active state, storing the latest state for all algorithms + * + * \var IPAContext::frameContexts + * \brief Ring buffer of per-frame contexts + */ + +} /* namespace libcamera::ipa::mali_c55 */ diff --git a/src/ipa/mali-c55/ipa_context.h b/src/ipa/mali-c55/ipa_context.h new file mode 100644 index 00000000..5e3e2fbd --- /dev/null +++ b/src/ipa/mali-c55/ipa_context.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * ipa_context.h - Mali-C55 IPA Context + */ + +#pragma once + +#include <libcamera/base/utils.h> +#include <libcamera/controls.h> + +#include "libcamera/internal/bayer_format.h" + +#include <libipa/fc_queue.h> + +namespace libcamera { + +namespace ipa::mali_c55 { + +struct IPASessionConfiguration { + struct { + utils::Duration minShutterSpeed; + utils::Duration maxShutterSpeed; + uint32_t defaultExposure; + double minAnalogueGain; + double maxAnalogueGain; + } agc; + + struct { + BayerFormat::Order bayerOrder; + utils::Duration lineDuration; + uint32_t blackLevel; + } sensor; +}; + +struct IPAActiveState { + struct { + struct { + uint32_t exposure; + double sensorGain; + double ispGain; + } automatic; + struct { + uint32_t exposure; + double sensorGain; + double ispGain; + } manual; + bool autoEnabled; + uint32_t constraintMode; + uint32_t exposureMode; + uint32_t temperatureK; + } agc; + + struct { + double rGain; + double bGain; + } awb; +}; + +struct IPAFrameContext : public FrameContext { + struct { + uint32_t exposure; + double sensorGain; + double ispGain; + } agc; + + struct { + double rGain; + double bGain; + } awb; +}; + +struct IPAContext { + IPAContext(unsigned int frameContextSize) + : frameContexts(frameContextSize) + { + } + + IPASessionConfiguration configuration; + IPAActiveState activeState; + + FCQueue<IPAFrameContext> frameContexts; + + ControlInfoMap::Map ctrlMap; +}; + +} /* namespace 
ipa::mali_c55 */ + +} /* namespace libcamera*/ diff --git a/src/ipa/mali-c55/mali-c55.cpp b/src/ipa/mali-c55/mali-c55.cpp new file mode 100644 index 00000000..c6941a95 --- /dev/null +++ b/src/ipa/mali-c55/mali-c55.cpp @@ -0,0 +1,399 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2023, Ideas on Board Oy + * + * mali-c55.cpp - Mali-C55 ISP image processing algorithms + */ + +#include <map> +#include <string.h> +#include <vector> + +#include <linux/mali-c55-config.h> +#include <linux/v4l2-controls.h> + +#include <libcamera/base/file.h> +#include <libcamera/base/log.h> + +#include <libcamera/control_ids.h> +#include <libcamera/ipa/ipa_interface.h> +#include <libcamera/ipa/ipa_module_info.h> +#include <libcamera/ipa/mali-c55_ipa_interface.h> + +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/mapped_framebuffer.h" +#include "libcamera/internal/yaml_parser.h" + +#include "algorithms/algorithm.h" +#include "libipa/camera_sensor_helper.h" + +#include "ipa_context.h" + +namespace libcamera { + +LOG_DEFINE_CATEGORY(IPAMaliC55) + +using namespace std::literals::chrono_literals; + +namespace ipa::mali_c55 { + +/* Maximum number of frame contexts to be held */ +static constexpr uint32_t kMaxFrameContexts = 16; + +class IPAMaliC55 : public IPAMaliC55Interface, public Module +{ +public: + IPAMaliC55(); + + int init(const IPASettings &settings, const IPAConfigInfo &ipaConfig, + ControlInfoMap *ipaControls) override; + int start() override; + void stop() override; + int configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder, + ControlInfoMap *ipaControls) override; + void mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly) override; + void unmapBuffers(const std::vector<IPABuffer> &buffers) override; + void queueRequest(const uint32_t request, const ControlList &controls) override; + void fillParams(unsigned int request, uint32_t bufferId) override; + void processStats(unsigned int request, unsigned int bufferId, + const ControlList &sensorControls) override; + +protected: + std::string logPrefix() const override; + +private: + void updateSessionConfiguration(const IPACameraSensorInfo &info, + const ControlInfoMap &sensorControls, + BayerFormat::Order bayerOrder); + void updateControls(const IPACameraSensorInfo &sensorInfo, + const ControlInfoMap &sensorControls, + ControlInfoMap *ipaControls); + void setControls(); + + std::map<unsigned int, MappedFrameBuffer> buffers_; + + ControlInfoMap sensorControls_; + + /* Interface to the Camera Helper */ + std::unique_ptr<CameraSensorHelper> camHelper_; + + /* Local parameter storage */ + struct IPAContext context_; +}; + +namespace { + +} /* namespace */ + +IPAMaliC55::IPAMaliC55() + : context_(kMaxFrameContexts) +{ +} + +std::string IPAMaliC55::logPrefix() const +{ + return "mali-c55"; +} + +int IPAMaliC55::init(const IPASettings &settings, const IPAConfigInfo &ipaConfig, + ControlInfoMap *ipaControls) +{ + camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel); + if (!camHelper_) { + LOG(IPAMaliC55, Error) + << "Failed to create camera sensor helper for " + << settings.sensorModel; + return -ENODEV; + } + + File file(settings.configurationFile); + if (!file.open(File::OpenModeFlag::ReadOnly)) { + int ret = file.error(); + LOG(IPAMaliC55, Error) + << "Failed to open configuration file " + << settings.configurationFile << ": " << strerror(-ret); + return ret; + } + + std::unique_ptr<libcamera::YamlObject> data = YamlParser::parse(file); + if (!data) + return -EINVAL; + + if 
(!data->contains("algorithms")) { + LOG(IPAMaliC55, Error) + << "Tuning file doesn't contain any algorithm"; + return -EINVAL; + } + + int ret = createAlgorithms(context_, (*data)["algorithms"]); + if (ret) + return ret; + + updateControls(ipaConfig.sensorInfo, ipaConfig.sensorControls, ipaControls); + + return 0; +} + +void IPAMaliC55::setControls() +{ + IPAActiveState &activeState = context_.activeState; + uint32_t exposure; + uint32_t gain; + + if (activeState.agc.autoEnabled) { + exposure = activeState.agc.automatic.exposure; + gain = camHelper_->gainCode(activeState.agc.automatic.sensorGain); + } else { + exposure = activeState.agc.manual.exposure; + gain = camHelper_->gainCode(activeState.agc.manual.sensorGain); + } + + ControlList ctrls(sensorControls_); + ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure)); + ctrls.set(V4L2_CID_ANALOGUE_GAIN, static_cast<int32_t>(gain)); + + setSensorControls.emit(ctrls); +} + +int IPAMaliC55::start() +{ + return 0; +} + +void IPAMaliC55::stop() +{ + context_.frameContexts.clear(); +} + +void IPAMaliC55::updateSessionConfiguration(const IPACameraSensorInfo &info, + const ControlInfoMap &sensorControls, + BayerFormat::Order bayerOrder) +{ + context_.configuration.sensor.bayerOrder = bayerOrder; + + const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second; + int32_t minExposure = v4l2Exposure.min().get<int32_t>(); + int32_t maxExposure = v4l2Exposure.max().get<int32_t>(); + int32_t defExposure = v4l2Exposure.def().get<int32_t>(); + + const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second; + int32_t minGain = v4l2Gain.min().get<int32_t>(); + int32_t maxGain = v4l2Gain.max().get<int32_t>(); + + /* + * When the AGC computes the new exposure values for a frame, it needs + * to know the limits for shutter speed and analogue gain. + * As it depends on the sensor, update it with the controls. + * + * \todo take VBLANK into account for maximum shutter speed + */ + context_.configuration.sensor.lineDuration = info.minLineLength * 1.0s / info.pixelRate; + context_.configuration.agc.minShutterSpeed = minExposure * context_.configuration.sensor.lineDuration; + context_.configuration.agc.maxShutterSpeed = maxExposure * context_.configuration.sensor.lineDuration; + context_.configuration.agc.defaultExposure = defExposure; + context_.configuration.agc.minAnalogueGain = camHelper_->gain(minGain); + context_.configuration.agc.maxAnalogueGain = camHelper_->gain(maxGain); + + if (camHelper_->blackLevel().has_value()) { + /* + * The black level from CameraSensorHelper is a 16-bit value. + * The Mali-C55 ISP expects 20-bit settings, so we shift it to + * the appropriate width + */ + context_.configuration.sensor.blackLevel = + camHelper_->blackLevel().value() << 4; + } +} + +void IPAMaliC55::updateControls(const IPACameraSensorInfo &sensorInfo, + const ControlInfoMap &sensorControls, + ControlInfoMap *ipaControls) +{ + ControlInfoMap::Map ctrlMap; + + /* + * Compute the frame duration limits. + * + * The frame length is computed assuming a fixed line length combined + * with the vertical frame sizes. 
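To make the frame-duration arithmetic described in the comment above concrete, here is a small self-contained sketch of the same computation. The sensor timings are hypothetical (loosely IMX415-like) and are not taken from this patch:

#include <cstdint>
#include <cstdio>

int main()
{
	/* Hypothetical sensor timings, not from any real driver. */
	uint32_t width = 3864, hblank = 500;	/* pixels */
	uint32_t height = 2192, vblankMin = 58;	/* lines */
	uint64_t pixelRate = 594000000;		/* pixels per second */

	uint32_t lineLength = width + hblank;
	uint32_t frameLines = height + vblankMin;

	/* Same formula as updateControls(): duration in microseconds. */
	uint64_t frameSize = static_cast<uint64_t>(lineLength) * frameLines;
	int64_t minFrameDuration = frameSize / (pixelRate / 1000000U);

	/* 4364 * 2250 / 594 = ~16530 us, i.e. roughly 60.5 fps. */
	printf("min frame duration: %lld us (~%.1f fps)\n",
	       static_cast<long long>(minFrameDuration),
	       1e6 / minFrameDuration);

	return 0;
}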
+	 */
+	const ControlInfo &v4l2HBlank = sensorControls.find(V4L2_CID_HBLANK)->second;
+	uint32_t hblank = v4l2HBlank.def().get<int32_t>();
+	uint32_t lineLength = sensorInfo.outputSize.width + hblank;
+
+	const ControlInfo &v4l2VBlank = sensorControls.find(V4L2_CID_VBLANK)->second;
+	std::array<uint32_t, 3> frameHeights{
+		v4l2VBlank.min().get<int32_t>() + sensorInfo.outputSize.height,
+		v4l2VBlank.max().get<int32_t>() + sensorInfo.outputSize.height,
+		v4l2VBlank.def().get<int32_t>() + sensorInfo.outputSize.height,
+	};
+
+	std::array<int64_t, 3> frameDurations;
+	for (unsigned int i = 0; i < frameHeights.size(); ++i) {
+		uint64_t frameSize = lineLength * frameHeights[i];
+		frameDurations[i] = frameSize / (sensorInfo.pixelRate / 1000000U);
+	}
+
+	ctrlMap[&controls::FrameDurationLimits] = ControlInfo(frameDurations[0],
+							      frameDurations[1],
+							      frameDurations[2]);
+
+	/*
+	 * Compute exposure time limits in microseconds from the
+	 * V4L2_CID_EXPOSURE control limits and the line duration. The
+	 * division must not be performed in plain integer arithmetic, as
+	 * the pixel rate is larger than the line length and the result
+	 * would truncate to zero.
+	 */
+	double lineDuration = sensorInfo.minLineLength / (sensorInfo.pixelRate / 1e6);
+
+	const ControlInfo &v4l2Exposure = sensorControls.find(V4L2_CID_EXPOSURE)->second;
+	int32_t minExposure = v4l2Exposure.min().get<int32_t>() * lineDuration;
+	int32_t maxExposure = v4l2Exposure.max().get<int32_t>() * lineDuration;
+	int32_t defExposure = v4l2Exposure.def().get<int32_t>() * lineDuration;
+	ctrlMap[&controls::ExposureTime] = ControlInfo(minExposure, maxExposure, defExposure);
+
+	/* Compute the analogue gain limits. */
+	const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
+	float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
+	float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
+	float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+	ctrlMap[&controls::AnalogueGain] = ControlInfo(minGain, maxGain, defGain);
+
+	/*
+	 * Merge in any controls that we support either statically or from the
+	 * algorithms.
+	 */
+	ctrlMap.merge(context_.ctrlMap);
+
+	*ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls);
+}
+
+int IPAMaliC55::configure(const IPAConfigInfo &ipaConfig, uint8_t bayerOrder,
+			  ControlInfoMap *ipaControls)
+{
+	sensorControls_ = ipaConfig.sensorControls;
+
+	/* Clear the IPA context before the streaming session. */
+	context_.configuration = {};
+	context_.activeState = {};
+	context_.frameContexts.clear();
+
+	const IPACameraSensorInfo &info = ipaConfig.sensorInfo;
+
+	updateSessionConfiguration(info, ipaConfig.sensorControls,
+				   static_cast<BayerFormat::Order>(bayerOrder));
+	updateControls(info, ipaConfig.sensorControls, ipaControls);
+
+	for (auto const &a : algorithms()) {
+		Algorithm *algo = static_cast<Algorithm *>(a.get());
+
+		int ret = algo->configure(context_, info);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void IPAMaliC55::mapBuffers(const std::vector<IPABuffer> &buffers, bool readOnly)
+{
+	for (const IPABuffer &buffer : buffers) {
+		const FrameBuffer fb(buffer.planes);
+		buffers_.emplace(
+			buffer.id,
+			MappedFrameBuffer(
+				&fb,
+				readOnly ? 
MappedFrameBuffer::MapFlag::Read + : MappedFrameBuffer::MapFlag::ReadWrite)); + } +} + +void IPAMaliC55::unmapBuffers(const std::vector<IPABuffer> &buffers) +{ + for (const IPABuffer &buffer : buffers) { + auto it = buffers_.find(buffer.id); + if (it == buffers_.end()) + continue; + + buffers_.erase(buffer.id); + } +} + +void IPAMaliC55::queueRequest(const uint32_t request, const ControlList &controls) +{ + IPAFrameContext &frameContext = context_.frameContexts.alloc(request); + + for (auto const &a : algorithms()) { + Algorithm *algo = static_cast<Algorithm *>(a.get()); + + algo->queueRequest(context_, request, frameContext, controls); + } +} + +void IPAMaliC55::fillParams(unsigned int request, + [[maybe_unused]] uint32_t bufferId) +{ + struct mali_c55_params_buffer *params; + IPAFrameContext &frameContext = context_.frameContexts.get(request); + + params = reinterpret_cast<mali_c55_params_buffer *>( + buffers_.at(bufferId).planes()[0].data()); + memset(params, 0, sizeof(mali_c55_params_buffer)); + + params->version = MALI_C55_PARAM_BUFFER_V1; + + for (auto const &algo : algorithms()) { + algo->prepare(context_, request, frameContext, params); + + ASSERT(params->total_size <= MALI_C55_PARAMS_MAX_SIZE); + } + + paramsComputed.emit(request); +} + +void IPAMaliC55::processStats(unsigned int request, unsigned int bufferId, + const ControlList &sensorControls) +{ + IPAFrameContext &frameContext = context_.frameContexts.get(request); + const mali_c55_stats_buffer *stats = nullptr; + + stats = reinterpret_cast<mali_c55_stats_buffer *>( + buffers_.at(bufferId).planes()[0].data()); + + frameContext.agc.exposure = + sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>(); + frameContext.agc.sensorGain = + camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>()); + + ControlList metadata(controls::controls); + + for (auto const &a : algorithms()) { + Algorithm *algo = static_cast<Algorithm *>(a.get()); + + algo->process(context_, request, frameContext, stats, metadata); + } + + setControls(); + + statsProcessed.emit(request, metadata); +} + +} /* namespace ipa::mali_c55 */ + +/* + * External IPA module interface + */ +extern "C" { +const struct IPAModuleInfo ipaModuleInfo = { + IPA_MODULE_API_VERSION, + 1, + "mali-c55", + "mali-c55", +}; + +IPAInterface *ipaCreate() +{ + return new ipa::mali_c55::IPAMaliC55(); +} + +} /* extern "C" */ + +} /* namespace libcamera */ diff --git a/src/ipa/mali-c55/meson.build b/src/ipa/mali-c55/meson.build new file mode 100644 index 00000000..864d90ec --- /dev/null +++ b/src/ipa/mali-c55/meson.build @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: CC0-1.0 + +subdir('algorithms') +subdir('data') + +ipa_name = 'ipa_mali_c55' + +mali_c55_ipa_sources = files([ + 'ipa_context.cpp', + 'mali-c55.cpp' +]) + +mali_c55_ipa_sources += mali_c55_ipa_algorithms + +mod = shared_module(ipa_name, + mali_c55_ipa_sources, + name_prefix : '', + include_directories : [ipa_includes, libipa_includes], + dependencies : libcamera_private, + link_with : libipa, + install : true, + install_dir : ipa_install_dir) + +if ipa_sign_module + custom_target(ipa_name + '.so.sign', + input : mod, + output : ipa_name + '.so.sign', + command : [ipa_sign, ipa_priv_key, '@INPUT@', '@OUTPUT@'], + install : false, + build_by_default : true) +endif + +ipa_names += ipa_name diff --git a/src/ipa/mali-c55/module.h b/src/ipa/mali-c55/module.h new file mode 100644 index 00000000..1d85ec1f --- /dev/null +++ b/src/ipa/mali-c55/module.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + 
* Copyright (C) 2024, Ideas On Board + * + * module.h - Mali-C55 IPA Module + */ + +#pragma once + +#include <linux/mali-c55-config.h> + +#include <libcamera/ipa/mali-c55_ipa_interface.h> + +#include <libipa/module.h> + +#include "ipa_context.h" + +namespace libcamera { + +namespace ipa::mali_c55 { + +using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo, + mali_c55_params_buffer, mali_c55_stats_buffer>; + +} /* namespace ipa::mali_c55 */ + +} /* namespace libcamera*/ diff --git a/src/ipa/rkisp1/algorithms/agc.cpp b/src/ipa/rkisp1/algorithms/agc.cpp index f12f8b60..9a558a1c 100644 --- a/src/ipa/rkisp1/algorithms/agc.cpp +++ b/src/ipa/rkisp1/algorithms/agc.cpp @@ -148,7 +148,16 @@ int Agc::init(IPAContext &context, const YamlObject &tuningData) if (ret) return ret; - context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true); + context.ctrlMap[&controls::ExposureTimeMode] = + ControlInfo({ { ControlValue(controls::ExposureTimeModeAuto), + ControlValue(controls::ExposureTimeModeManual) } }, + ControlValue(controls::ExposureTimeModeAuto)); + context.ctrlMap[&controls::AnalogueGainMode] = + ControlInfo({ { ControlValue(controls::AnalogueGainModeAuto), + ControlValue(controls::AnalogueGainModeManual) } }, + ControlValue(controls::AnalogueGainModeAuto)); + /* \todo Move this to the Camera class */ + context.ctrlMap[&controls::AeEnable] = ControlInfo(false, true, true); context.ctrlMap.merge(controls()); return 0; @@ -169,7 +178,8 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo) 10ms / context.configuration.sensor.lineDuration; context.activeState.agc.manual.gain = context.activeState.agc.automatic.gain; context.activeState.agc.manual.exposure = context.activeState.agc.automatic.exposure; - context.activeState.agc.autoEnabled = !context.configuration.raw; + context.activeState.agc.autoExposureEnabled = !context.configuration.raw; + context.activeState.agc.autoGainEnabled = !context.configuration.raw; context.activeState.agc.constraintMode = static_cast<controls::AeConstraintModeEnum>(constraintModes().begin()->first); @@ -183,7 +193,7 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo) * except it's computed in the IPA and not here so we'd have to * recompute it. 
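With AeEnable split into ExposureTimeMode and AnalogueGainMode as registered above, applications select auto or manual operation for exposure time and analogue gain independently. A minimal, hypothetical sketch of the application side (camera and request setup elided; only the control names come from this patch):

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/request.h>

/*
 * Illustrative helper only: fix the exposure time manually while
 * leaving the analogue gain under AGC control.
 */
void setManualExposure(libcamera::Request *request, int32_t exposureUs)
{
	using namespace libcamera;

	ControlList &ctrls = request->controls();

	ctrls.set(controls::ExposureTimeMode, controls::ExposureTimeModeManual);
	ctrls.set(controls::ExposureTime, exposureUs);

	/* The gain keeps being computed by the AGC every frame. */
	ctrls.set(controls::AnalogueGainMode, controls::AnalogueGainModeAuto);
}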
*/ - context.activeState.agc.maxFrameDuration = context.configuration.sensor.maxShutterSpeed; + context.activeState.agc.maxFrameDuration = context.configuration.sensor.maxExposureTime; /* * Define the measurement window for AGC as a centered rectangle @@ -194,8 +204,8 @@ int Agc::configure(IPAContext &context, const IPACameraSensorInfo &configInfo) context.configuration.agc.measureWindow.h_size = 3 * configInfo.outputSize.width / 4; context.configuration.agc.measureWindow.v_size = 3 * configInfo.outputSize.height / 4; - setLimits(context.configuration.sensor.minShutterSpeed, - context.configuration.sensor.maxShutterSpeed, + setLimits(context.configuration.sensor.minExposureTime, + context.configuration.sensor.maxExposureTime, context.configuration.sensor.minAnalogueGain, context.configuration.sensor.maxAnalogueGain); @@ -215,18 +225,47 @@ void Agc::queueRequest(IPAContext &context, auto &agc = context.activeState.agc; if (!context.configuration.raw) { - const auto &agcEnable = controls.get(controls::AeEnable); - if (agcEnable && *agcEnable != agc.autoEnabled) { - agc.autoEnabled = *agcEnable; + const auto &aeEnable = controls.get(controls::ExposureTimeMode); + if (aeEnable && + (*aeEnable == controls::ExposureTimeModeAuto) != agc.autoExposureEnabled) { + agc.autoExposureEnabled = (*aeEnable == controls::ExposureTimeModeAuto); LOG(RkISP1Agc, Debug) - << (agc.autoEnabled ? "Enabling" : "Disabling") - << " AGC"; + << (agc.autoExposureEnabled ? "Enabling" : "Disabling") + << " AGC (exposure)"; + + /* + * If we go from auto -> manual with no manual control + * set, use the last computed value, which we don't + * know until prepare() so save this information. + * + * \todo Check the previous frame at prepare() time + * instead of saving a flag here + */ + if (!agc.autoExposureEnabled && !controls.get(controls::ExposureTime)) + frameContext.agc.autoExposureModeChange = true; + } + + const auto &agEnable = controls.get(controls::AnalogueGainMode); + if (agEnable && + (*agEnable == controls::AnalogueGainModeAuto) != agc.autoGainEnabled) { + agc.autoGainEnabled = (*agEnable == controls::AnalogueGainModeAuto); + + LOG(RkISP1Agc, Debug) + << (agc.autoGainEnabled ? "Enabling" : "Disabling") + << " AGC (gain)"; + /* + * If we go from auto -> manual with no manual control + * set, use the last computed value, which we don't + * know until prepare() so save this information. 
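The auto-to-manual handoff that this comment describes can be modelled in isolation. The sketch below is illustrative only (simplified names, not the actual IPA types) and mirrors what prepare() does with the saved flag:

/*
 * Minimal model of the auto -> manual handoff. 'autoValue' stands in
 * for the value computed by the algorithm (activeState.*.automatic),
 * 'manual' for the per-frame manual value; all names are illustrative.
 */
struct ModeHandoff {
	bool autoEnabled = true;
	bool modeChange = false;	/* auto -> manual, no manual value given */
	double manual = 0.0;

	double valueFor(double autoValue)
	{
		if (autoEnabled)
			return autoValue;

		if (modeChange) {
			/* Latch the last auto-computed value once. */
			manual = autoValue;
			modeChange = false;
		}

		return manual;
	}
};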
+ */ + if (!agc.autoGainEnabled && !controls.get(controls::AnalogueGain)) + frameContext.agc.autoGainModeChange = true; } } const auto &exposure = controls.get(controls::ExposureTime); - if (exposure && !agc.autoEnabled) { + if (exposure && !agc.autoExposureEnabled) { agc.manual.exposure = *exposure * 1.0us / context.configuration.sensor.lineDuration; @@ -235,18 +274,19 @@ void Agc::queueRequest(IPAContext &context, } const auto &gain = controls.get(controls::AnalogueGain); - if (gain && !agc.autoEnabled) { + if (gain && !agc.autoGainEnabled) { agc.manual.gain = *gain; LOG(RkISP1Agc, Debug) << "Set gain to " << agc.manual.gain; } - frameContext.agc.autoEnabled = agc.autoEnabled; + frameContext.agc.autoExposureEnabled = agc.autoExposureEnabled; + frameContext.agc.autoGainEnabled = agc.autoGainEnabled; - if (!frameContext.agc.autoEnabled) { + if (!frameContext.agc.autoExposureEnabled) frameContext.agc.exposure = agc.manual.exposure; + if (!frameContext.agc.autoGainEnabled) frameContext.agc.gain = agc.manual.gain; - } const auto &meteringMode = controls.get(controls::AeMeteringMode); if (meteringMode) { @@ -281,51 +321,66 @@ void Agc::queueRequest(IPAContext &context, * \copydoc libcamera::ipa::Algorithm::prepare */ void Agc::prepare(IPAContext &context, const uint32_t frame, - IPAFrameContext &frameContext, rkisp1_params_cfg *params) + IPAFrameContext &frameContext, RkISP1Params *params) { - if (frameContext.agc.autoEnabled) { - frameContext.agc.exposure = context.activeState.agc.automatic.exposure; - frameContext.agc.gain = context.activeState.agc.automatic.gain; + uint32_t activeAutoExposure = context.activeState.agc.automatic.exposure; + double activeAutoGain = context.activeState.agc.automatic.gain; + + /* Populate exposure and gain in auto mode */ + if (frameContext.agc.autoExposureEnabled) + frameContext.agc.exposure = activeAutoExposure; + if (frameContext.agc.autoGainEnabled) + frameContext.agc.gain = activeAutoGain; + + /* + * Populate manual exposure and gain from the active auto values when + * transitioning from auto to manual + */ + if (!frameContext.agc.autoExposureEnabled && frameContext.agc.autoExposureModeChange) { + context.activeState.agc.manual.exposure = activeAutoExposure; + frameContext.agc.exposure = activeAutoExposure; + } + if (!frameContext.agc.autoGainEnabled && frameContext.agc.autoGainModeChange) { + context.activeState.agc.manual.gain = activeAutoGain; + frameContext.agc.gain = activeAutoGain; } if (frame > 0 && !frameContext.agc.updateMetering) return; - /* Configure the measurement window. */ - params->meas.aec_config.meas_window = context.configuration.agc.measureWindow; - /* Use a continuous method for measure. */ - params->meas.aec_config.autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0; - /* Estimate Y as (R + G + B) x (85/256). */ - params->meas.aec_config.mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1; + /* + * Configure the AEC measurements. Set the window, measure + * continuously, and estimate Y as (R + G + B) x (85/256). + */ + auto aecConfig = params->block<BlockType::Aec>(); + aecConfig.setEnabled(true); + + aecConfig->meas_window = context.configuration.agc.measureWindow; + aecConfig->autostop = RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0; + aecConfig->mode = RKISP1_CIF_ISP_EXP_MEASURING_MODE_1; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AEC; - params->module_ens |= RKISP1_CIF_ISP_MODULE_AEC; - params->module_en_update |= RKISP1_CIF_ISP_MODULE_AEC; + /* + * Configure the histogram measurement. 
Set the window, produce a + * luminance histogram, and set the weights and predivider. + */ + auto hstConfig = params->block<BlockType::Hst>(); + hstConfig.setEnabled(true); - /* Configure histogram. */ - params->meas.hst_config.meas_window = context.configuration.agc.measureWindow; - /* Produce the luminance histogram. */ - params->meas.hst_config.mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM; + hstConfig->meas_window = context.configuration.agc.measureWindow; + hstConfig->mode = RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM; - /* Set an average weighted histogram. */ Span<uint8_t> weights{ - params->meas.hst_config.hist_weight, + hstConfig->hist_weight, context.hw->numHistogramWeights }; std::vector<uint8_t> &modeWeights = meteringModes_.at(frameContext.agc.meteringMode); std::copy(modeWeights.begin(), modeWeights.end(), weights.begin()); - struct rkisp1_cif_isp_window window = params->meas.hst_config.meas_window; + struct rkisp1_cif_isp_window window = hstConfig->meas_window; Size windowSize = { window.h_size, window.v_size }; - params->meas.hst_config.histogram_predivider = + hstConfig->histogram_predivider = computeHistogramPredivider(windowSize, - static_cast<rkisp1_cif_isp_histogram_mode>(params->meas.hst_config.mode)); - - /* Update the configuration for histogram. */ - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_HST; - /* Enable the histogram measure unit. */ - params->module_ens |= RKISP1_CIF_ISP_MODULE_HST; - params->module_en_update |= RKISP1_CIF_ISP_MODULE_HST; + static_cast<rkisp1_cif_isp_histogram_mode>(hstConfig->mode)); } void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext, @@ -335,7 +390,14 @@ void Agc::fillMetadata(IPAContext &context, IPAFrameContext &frameContext, * frameContext.sensor.exposure; metadata.set(controls::AnalogueGain, frameContext.sensor.gain); metadata.set(controls::ExposureTime, exposureTime.get<std::micro>()); - metadata.set(controls::AeEnable, frameContext.agc.autoEnabled); + metadata.set(controls::ExposureTimeMode, + frameContext.agc.autoExposureEnabled + ? controls::ExposureTimeModeAuto + : controls::ExposureTimeModeManual); + metadata.set(controls::AnalogueGainMode, + frameContext.agc.autoGainEnabled + ? controls::AnalogueGainModeAuto + : controls::AnalogueGainModeManual); /* \todo Use VBlank value calculated from each frame exposure. */ uint32_t vTotal = context.configuration.sensor.size.height @@ -405,6 +467,12 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame, return; } + if (!(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP)) { + fillMetadata(context, frameContext, metadata); + LOG(RkISP1Agc, Error) << "AUTOEXP data is missing in statistics"; + return; + } + /* * \todo Verify that the exposure and gain applied by the sensor for * this frame match what has been requested. This isn't a hard @@ -414,21 +482,41 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame, */ const rkisp1_cif_isp_stat *params = &stats->params; - ASSERT(stats->meas_type & RKISP1_CIF_ISP_STAT_AUTOEXP); /* The lower 4 bits are fractional and meant to be discarded. 
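* * E.g. a raw bin value of 0x1234 contributes 0x123 to the histogram after
* the x >> 4 transform below (illustrative value).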
*/ Histogram hist({ params->hist.hist_bins, context.hw->numHistogramBins }, [](uint32_t x) { return x >> 4; }); expMeans_ = { params->ae.exp_mean, context.hw->numAeCells }; - utils::Duration maxShutterSpeed = - std::clamp(frameContext.agc.maxFrameDuration, - context.configuration.sensor.minShutterSpeed, - context.configuration.sensor.maxShutterSpeed); - setLimits(context.configuration.sensor.minShutterSpeed, - maxShutterSpeed, - context.configuration.sensor.minAnalogueGain, - context.configuration.sensor.maxAnalogueGain); + /* + * Set the AGC limits using the fixed exposure time and/or gain in + * manual mode, or the sensor limits in auto mode. + */ + utils::Duration minExposureTime; + utils::Duration maxExposureTime; + double minAnalogueGain; + double maxAnalogueGain; + + if (frameContext.agc.autoExposureEnabled) { + minExposureTime = context.configuration.sensor.minExposureTime; + maxExposureTime = std::clamp(frameContext.agc.maxFrameDuration, + context.configuration.sensor.minExposureTime, + context.configuration.sensor.maxExposureTime); + } else { + minExposureTime = context.configuration.sensor.lineDuration + * frameContext.agc.exposure; + maxExposureTime = minExposureTime; + } + + if (frameContext.agc.autoGainEnabled) { + minAnalogueGain = context.configuration.sensor.minAnalogueGain; + maxAnalogueGain = context.configuration.sensor.maxAnalogueGain; + } else { + minAnalogueGain = frameContext.agc.gain; + maxAnalogueGain = frameContext.agc.gain; + } + + setLimits(minExposureTime, maxExposureTime, minAnalogueGain, maxAnalogueGain); /* * The Agc algorithm needs to know the effective exposure value that was @@ -439,20 +527,21 @@ void Agc::process(IPAContext &context, [[maybe_unused]] const uint32_t frame, double analogueGain = frameContext.sensor.gain; utils::Duration effectiveExposureValue = exposureTime * analogueGain; - utils::Duration shutterTime; + utils::Duration newExposureTime; double aGain, dGain; - std::tie(shutterTime, aGain, dGain) = + std::tie(newExposureTime, aGain, dGain) = calculateNewEv(frameContext.agc.constraintMode, frameContext.agc.exposureMode, hist, effectiveExposureValue); LOG(RkISP1Agc, Debug) - << "Divided up shutter, analogue gain and digital gain are " - << shutterTime << ", " << aGain << " and " << dGain; + << "Divided up exposure time, analogue gain and digital gain are " + << newExposureTime << ", " << aGain << " and " << dGain; IPAActiveState &activeState = context.activeState; /* Update the estimated exposure and gain. 
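* * The exposure is stored in lines: e.g. a 6 ms exposure time with a 30 us
* line duration is recorded as 200 lines (figures for illustration only;
* actual values depend on the sensor configuration).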
*/ - activeState.agc.automatic.exposure = shutterTime / context.configuration.sensor.lineDuration; + activeState.agc.automatic.exposure = newExposureTime + / context.configuration.sensor.lineDuration; activeState.agc.automatic.gain = aGain; fillMetadata(context, frameContext, metadata); diff --git a/src/ipa/rkisp1/algorithms/agc.h b/src/ipa/rkisp1/algorithms/agc.h index 9ceaa82b..aa86f2c5 100644 --- a/src/ipa/rkisp1/algorithms/agc.h +++ b/src/ipa/rkisp1/algorithms/agc.h @@ -15,7 +15,6 @@ #include <libcamera/geometry.h> #include "libipa/agc_mean_luminance.h" -#include "libipa/histogram.h" #include "algorithm.h" @@ -37,7 +36,7 @@ public: const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; void process(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats, diff --git a/src/ipa/rkisp1/algorithms/awb.cpp b/src/ipa/rkisp1/algorithms/awb.cpp index a01fe5d9..cffaa06a 100644 --- a/src/ipa/rkisp1/algorithms/awb.cpp +++ b/src/ipa/rkisp1/algorithms/awb.cpp @@ -8,14 +8,16 @@ #include "awb.h" #include <algorithm> -#include <cmath> -#include <iomanip> +#include <ios> #include <libcamera/base/log.h> #include <libcamera/control_ids.h> + #include <libcamera/ipa/core_ipa_interface.h> +#include "libipa/colours.h" + /** * \file awb.h */ @@ -31,6 +33,10 @@ namespace ipa::rkisp1::algorithms { LOG_DEFINE_CATEGORY(RkISP1Awb) +constexpr int32_t kMinColourTemperature = 2500; +constexpr int32_t kMaxColourTemperature = 10000; +constexpr int32_t kDefaultColourTemperature = 5000; + /* Minimum mean value below which AWB can't operate. */ constexpr double kMeanMinThreshold = 2.0; @@ -40,18 +46,38 @@ Awb::Awb() } /** + * \copydoc libcamera::ipa::Algorithm::init + */ +int Awb::init(IPAContext &context, const YamlObject &tuningData) +{ + auto &cmap = context.ctrlMap; + cmap[&controls::ColourTemperature] = ControlInfo(kMinColourTemperature, + kMaxColourTemperature, + kDefaultColourTemperature); + + Interpolator<Vector<double, 2>> gainCurve; + int ret = gainCurve.readYaml(tuningData["colourGains"], "ct", "gains"); + if (ret < 0) + LOG(RkISP1Awb, Warning) + << "Failed to parse 'colourGains' " + << "parameter from tuning file; " + << "manual colour temperature will not work properly"; + else + colourGainCurve_ = gainCurve; + + return 0; +} + +/** * \copydoc libcamera::ipa::Algorithm::configure */ int Awb::configure(IPAContext &context, const IPACameraSensorInfo &configInfo) { - context.activeState.awb.gains.manual.red = 1.0; - context.activeState.awb.gains.manual.blue = 1.0; - context.activeState.awb.gains.manual.green = 1.0; - context.activeState.awb.gains.automatic.red = 1.0; - context.activeState.awb.gains.automatic.blue = 1.0; - context.activeState.awb.gains.automatic.green = 1.0; + context.activeState.awb.gains.manual = RGB<double>{ 1.0 }; + context.activeState.awb.gains.automatic = RGB<double>{ 1.0 }; context.activeState.awb.autoEnabled = true; + context.activeState.awb.temperatureK = kDefaultColourTemperature; /* * Define the measurement window for AWB as a centered rectangle @@ -85,64 +111,78 @@ void Awb::queueRequest(IPAContext &context, << (*awbEnable ? 
"Enabling" : "Disabling") << " AWB"; } - const auto &colourGains = controls.get(controls::ColourGains); - if (colourGains && !awb.autoEnabled) { - awb.gains.manual.red = (*colourGains)[0]; - awb.gains.manual.blue = (*colourGains)[1]; + frameContext.awb.autoEnabled = awb.autoEnabled; - LOG(RkISP1Awb, Debug) - << "Set colour gains to red: " << awb.gains.manual.red - << ", blue: " << awb.gains.manual.blue; + if (awb.autoEnabled) + return; + + const auto &colourGains = controls.get(controls::ColourGains); + const auto &colourTemperature = controls.get(controls::ColourTemperature); + bool update = false; + if (colourGains) { + awb.gains.manual.r() = (*colourGains)[0]; + awb.gains.manual.b() = (*colourGains)[1]; + /* + * \todo: Colour temperature reported in metadata is now + * incorrect, as we can't deduce the temperature from the gains. + * This will be fixed with the bayes AWB algorithm. + */ + update = true; + } else if (colourTemperature && colourGainCurve_) { + const auto &gains = colourGainCurve_->getInterpolated(*colourTemperature); + awb.gains.manual.r() = gains[0]; + awb.gains.manual.b() = gains[1]; + awb.temperatureK = *colourTemperature; + update = true; } - frameContext.awb.autoEnabled = awb.autoEnabled; + if (update) + LOG(RkISP1Awb, Debug) + << "Set colour gains to " << awb.gains.manual; - if (!awb.autoEnabled) { - frameContext.awb.gains.red = awb.gains.manual.red; - frameContext.awb.gains.green = 1.0; - frameContext.awb.gains.blue = awb.gains.manual.blue; - } + frameContext.awb.gains = awb.gains.manual; + frameContext.awb.temperatureK = awb.temperatureK; } /** * \copydoc libcamera::ipa::Algorithm::prepare */ void Awb::prepare(IPAContext &context, const uint32_t frame, - IPAFrameContext &frameContext, rkisp1_params_cfg *params) + IPAFrameContext &frameContext, RkISP1Params *params) { /* * This is the latest time we can read the active state. This is the * most up-to-date automatic values we can read. */ if (frameContext.awb.autoEnabled) { - frameContext.awb.gains.red = context.activeState.awb.gains.automatic.red; - frameContext.awb.gains.green = context.activeState.awb.gains.automatic.green; - frameContext.awb.gains.blue = context.activeState.awb.gains.automatic.blue; + frameContext.awb.gains = context.activeState.awb.gains.automatic; + frameContext.awb.temperatureK = context.activeState.awb.temperatureK; } - params->others.awb_gain_config.gain_green_b = 256 * frameContext.awb.gains.green; - params->others.awb_gain_config.gain_blue = 256 * frameContext.awb.gains.blue; - params->others.awb_gain_config.gain_red = 256 * frameContext.awb.gains.red; - params->others.awb_gain_config.gain_green_r = 256 * frameContext.awb.gains.green; + auto gainConfig = params->block<BlockType::AwbGain>(); + gainConfig.setEnabled(true); - /* Update the gains. */ - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN; + gainConfig->gain_green_b = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff); + gainConfig->gain_blue = std::clamp<int>(256 * frameContext.awb.gains.b(), 0, 0x3ff); + gainConfig->gain_red = std::clamp<int>(256 * frameContext.awb.gains.r(), 0, 0x3ff); + gainConfig->gain_green_r = std::clamp<int>(256 * frameContext.awb.gains.g(), 0, 0x3ff); /* If we have already set the AWB measurement parameters, return. */ if (frame > 0) return; - rkisp1_cif_isp_awb_meas_config &awb_config = params->meas.awb_meas_config; + auto awbConfig = params->block<BlockType::Awb>(); + awbConfig.setEnabled(true); /* Configure the measure window for AWB. 
*/ - awb_config.awb_wnd = context.configuration.awb.measureWindow; + awbConfig->awb_wnd = context.configuration.awb.measureWindow; /* Number of frames to use to estimate the means (0 means 1 frame). */ - awb_config.frames = 0; + awbConfig->frames = 0; /* Select RGB or YCbCr means measurement. */ if (rgbMode_) { - awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB; + awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_RGB; /* * For RGB-based measurements, pixels are selected with maximum @@ -150,19 +190,19 @@ void Awb::prepare(IPAContext &context, const uint32_t frame, * awb_ref_cr, awb_min_y and awb_ref_cb respectively. The other * values are not used, set them to 0. */ - awb_config.awb_ref_cr = 250; - awb_config.min_y = 250; - awb_config.awb_ref_cb = 250; + awbConfig->awb_ref_cr = 250; + awbConfig->min_y = 250; + awbConfig->awb_ref_cb = 250; - awb_config.max_y = 0; - awb_config.min_c = 0; - awb_config.max_csum = 0; + awbConfig->max_y = 0; + awbConfig->min_c = 0; + awbConfig->max_csum = 0; } else { - awb_config.awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR; + awbConfig->awb_mode = RKISP1_CIF_ISP_AWB_MODE_YCBCR; /* Set the reference Cr and Cb (AWB target) to white. */ - awb_config.awb_ref_cb = 128; - awb_config.awb_ref_cr = 128; + awbConfig->awb_ref_cb = 128; + awbConfig->awb_ref_cr = 128; /* * Filter out pixels based on luminance and chrominance values. @@ -170,36 +210,11 @@ void Awb::prepare(IPAContext &context, const uint32_t frame, * range, while the acceptable chroma values are specified with * a minimum of 16 and a maximum Cb+Cr sum of 250. */ - awb_config.min_y = 16; - awb_config.max_y = 250; - awb_config.min_c = 16; - awb_config.max_csum = 250; + awbConfig->min_y = 16; + awbConfig->max_y = 250; + awbConfig->min_c = 16; + awbConfig->max_csum = 250; } - - /* Enable the AWB gains. */ - params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB_GAIN; - params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB_GAIN; - - /* Update the AWB measurement parameters and enable the AWB module. 
*/ - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_AWB; - params->module_en_update |= RKISP1_CIF_ISP_MODULE_AWB; - params->module_ens |= RKISP1_CIF_ISP_MODULE_AWB; -} - -uint32_t Awb::estimateCCT(double red, double green, double blue) -{ - /* Convert the RGB values to CIE tristimulus values (XYZ) */ - double X = (-0.14282) * (red) + (1.54924) * (green) + (-0.95641) * (blue); - double Y = (-0.32466) * (red) + (1.57837) * (green) + (-0.73191) * (blue); - double Z = (-0.68202) * (red) + (0.77073) * (green) + (0.56332) * (blue); - - /* Calculate the normalized chromaticity values */ - double x = X / (X + Y + Z); - double y = Y / (X + Y + Z); - - /* Calculate CCT */ - double n = (x - 0.3320) / (0.1858 - y); - return 449 * n * n * n + 3525 * n * n + 6823.3 * n + 5520.33; } /** @@ -214,38 +229,61 @@ void Awb::process(IPAContext &context, const rkisp1_cif_isp_stat *params = &stats->params; const rkisp1_cif_isp_awb_stat *awb = &params->awb; IPAActiveState &activeState = context.activeState; - double greenMean; - double redMean; - double blueMean; + RGB<double> rgbMeans; + + metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled); + metadata.set(controls::ColourGains, { + static_cast<float>(frameContext.awb.gains.r()), + static_cast<float>(frameContext.awb.gains.b()) + }); + metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK); + + if (!stats || !(stats->meas_type & RKISP1_CIF_ISP_STAT_AWB)) { + LOG(RkISP1Awb, Error) << "AWB data is missing in statistics"; + return; + } if (rgbMode_) { - greenMean = awb->awb_mean[0].mean_y_or_g; - redMean = awb->awb_mean[0].mean_cr_or_r; - blueMean = awb->awb_mean[0].mean_cb_or_b; + rgbMeans = {{ + static_cast<double>(awb->awb_mean[0].mean_y_or_g), + static_cast<double>(awb->awb_mean[0].mean_cr_or_r), + static_cast<double>(awb->awb_mean[0].mean_cb_or_b) + }}; } else { /* Get the YCbCr mean values */ - double yMean = awb->awb_mean[0].mean_y_or_g; - double cbMean = awb->awb_mean[0].mean_cb_or_b; - double crMean = awb->awb_mean[0].mean_cr_or_r; + Vector<double, 3> yuvMeans({ + static_cast<double>(awb->awb_mean[0].mean_y_or_g), + static_cast<double>(awb->awb_mean[0].mean_cb_or_b), + static_cast<double>(awb->awb_mean[0].mean_cr_or_r) + }); /* - * Convert from YCbCr to RGB. - * The hardware uses the following formulas: - * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B + * Convert from YCbCr to RGB. The hardware uses the following + * formulas: + * + * Y = 16 + 0.2500 R + 0.5000 G + 0.1094 B * Cb = 128 - 0.1406 R - 0.2969 G + 0.4375 B * Cr = 128 + 0.4375 R - 0.3750 G - 0.0625 B * - * The inverse matrix is thus: + * This seems to be based on limited range BT.601 with Q1.6 + * precision. + * + * The inverse matrix is: + * + * [[1.1636, -0.0623, 1.6008] * [1.1636, -0.4045, -0.7949] * [1.1636, 1.9912, -0.0250]] */ - yMean -= 16; - cbMean -= 128; - crMean -= 128; - redMean = 1.1636 * yMean - 0.0623 * cbMean + 1.6008 * crMean; - greenMean = 1.1636 * yMean - 0.4045 * cbMean - 0.7949 * crMean; - blueMean = 1.1636 * yMean + 1.9912 * cbMean - 0.0250 * crMean; + static const Matrix<double, 3, 3> yuv2rgbMatrix({ + 1.1636, -0.0623, 1.6008, + 1.1636, -0.4045, -0.7949, + 1.1636, 1.9912, -0.0250 + }); + static const Vector<double, 3> yuv2rgbOffset({ + 16, 128, 128 + }); + + rgbMeans = yuv2rgbMatrix * (yuvMeans - yuv2rgbOffset); /* * Due to hardware rounding errors in the YCbCr means, the * calculated means may be negative. This would lead to * negative gains, messing up calculation. Prevent this by * clamping the means to positive values.
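* * E.g. a converted mean of (-0.8, 120.2, 64.5) becomes (0.0, 120.2, 64.5)
* after the clamp (illustrative numbers).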
*/ - redMean = std::max(redMean, 0.0); - greenMean = std::max(greenMean, 0.0); - blueMean = std::max(blueMean, 0.0); + rgbMeans = rgbMeans.max(0.0); } /* @@ -263,29 +299,28 @@ void Awb::process(IPAContext &context, * divide by the gains that were used to get the raw means from the * sensor. */ - redMean /= frameContext.awb.gains.red; - greenMean /= frameContext.awb.gains.green; - blueMean /= frameContext.awb.gains.blue; + rgbMeans /= frameContext.awb.gains; /* * If the means are too small we don't have enough information to * meaningfully calculate gains. Freeze the algorithm in that case. */ - if (redMean < kMeanMinThreshold && greenMean < kMeanMinThreshold && - blueMean < kMeanMinThreshold) { - frameContext.awb.temperatureK = activeState.awb.temperatureK; + if (rgbMeans.r() < kMeanMinThreshold && rgbMeans.g() < kMeanMinThreshold && + rgbMeans.b() < kMeanMinThreshold) return; - } - activeState.awb.temperatureK = estimateCCT(redMean, greenMean, blueMean); + activeState.awb.temperatureK = estimateCCT(rgbMeans); /* * Estimate the red and blue gains to apply in a grey world. The green * gain is hardcoded to 1.0. Avoid divisions by zero by clamping the * divisor to a minimum value of 1.0. */ - double redGain = greenMean / std::max(redMean, 1.0); - double blueGain = greenMean / std::max(blueMean, 1.0); + RGB<double> gains({ + rgbMeans.g() / std::max(rgbMeans.r(), 1.0), + 1.0, + rgbMeans.g() / std::max(rgbMeans.b(), 1.0) + }); /* * Clamp the gain values to the hardware, which expresses gains as Q2.8 @@ -293,33 +328,19 @@ void Awb::process(IPAContext &context, * divisions by zero when computing the raw means in subsequent * iterations. */ - redGain = std::clamp(redGain, 1.0 / 256, 1023.0 / 256); - blueGain = std::clamp(blueGain, 1.0 / 256, 1023.0 / 256); + gains = gains.max(1.0 / 256).min(1023.0 / 256); /* Filter the values to avoid oscillations. 
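* * E.g. with speed = 0.2, a previous automatic red gain of 2.0 and a new
* estimate of 3.0 blend to 0.2 * 3.0 + 0.8 * 2.0 = 2.2.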
*/ double speed = 0.2; - redGain = speed * redGain + (1 - speed) * activeState.awb.gains.automatic.red; - blueGain = speed * blueGain + (1 - speed) * activeState.awb.gains.automatic.blue; + gains = gains * speed + activeState.awb.gains.automatic * (1 - speed); - activeState.awb.gains.automatic.red = redGain; - activeState.awb.gains.automatic.blue = blueGain; - activeState.awb.gains.automatic.green = 1.0; - - frameContext.awb.temperatureK = activeState.awb.temperatureK; - - metadata.set(controls::AwbEnable, frameContext.awb.autoEnabled); - metadata.set(controls::ColourGains, { - static_cast<float>(frameContext.awb.gains.red), - static_cast<float>(frameContext.awb.gains.blue) - }); - metadata.set(controls::ColourTemperature, frameContext.awb.temperatureK); + activeState.awb.gains.automatic = gains; - LOG(RkISP1Awb, Debug) << std::showpoint - << "Means [" << redMean << ", " << greenMean << ", " << blueMean - << "], gains [" << activeState.awb.gains.automatic.red << ", " - << activeState.awb.gains.automatic.green << ", " - << activeState.awb.gains.automatic.blue << "], temp " - << frameContext.awb.temperatureK << "K"; + LOG(RkISP1Awb, Debug) + << std::showpoint + << "Means " << rgbMeans << ", gains " + << activeState.awb.gains.automatic << ", temp " + << activeState.awb.temperatureK << "K"; } REGISTER_IPA_ALGORITHM(Awb, "Awb") diff --git a/src/ipa/rkisp1/algorithms/awb.h b/src/ipa/rkisp1/algorithms/awb.h index 06c92896..34ec42cb 100644 --- a/src/ipa/rkisp1/algorithms/awb.h +++ b/src/ipa/rkisp1/algorithms/awb.h @@ -7,6 +7,12 @@ #pragma once +#include <optional> + +#include "libcamera/internal/vector.h" + +#include "libipa/interpolator.h" + #include "algorithm.h" namespace libcamera { @@ -19,21 +25,21 @@ public: Awb(); ~Awb() = default; + int init(IPAContext &context, const YamlObject &tuningData) override; int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override; void queueRequest(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; void process(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats, ControlList &metadata) override; private: - uint32_t estimateCCT(double red, double green, double blue); - + std::optional<Interpolator<Vector<double, 2>>> colourGainCurve_; bool rgbMode_; }; diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp index d2e74354..98cb7145 100644 --- a/src/ipa/rkisp1/algorithms/blc.cpp +++ b/src/ipa/rkisp1/algorithms/blc.cpp @@ -7,8 +7,12 @@ #include "blc.h" +#include <linux/videodev2.h> + #include <libcamera/base/log.h> +#include <libcamera/control_ids.h> + #include "libcamera/internal/yaml_parser.h" /** @@ -36,22 +40,62 @@ namespace ipa::rkisp1::algorithms { LOG_DEFINE_CATEGORY(RkISP1Blc) BlackLevelCorrection::BlackLevelCorrection() - : tuningParameters_(false) { + /* + * This is a bit of a hack. In raw mode no black level correction + * happens. This flag is used to ensure the metadata gets populated with + * the black level which is needed to capture proper raw images for + * tuning. 
+ */ + supportsRaw_ = true; } /** * \copydoc libcamera::ipa::Algorithm::init */ -int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context, - const YamlObject &tuningData) +int BlackLevelCorrection::init(IPAContext &context, const YamlObject &tuningData) { - blackLevelRed_ = tuningData["R"].get<int16_t>(256); - blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(256); - blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(256); - blackLevelBlue_ = tuningData["B"].get<int16_t>(256); - - tuningParameters_ = true; + std::optional<int16_t> levelRed = tuningData["R"].get<int16_t>(); + std::optional<int16_t> levelGreenR = tuningData["Gr"].get<int16_t>(); + std::optional<int16_t> levelGreenB = tuningData["Gb"].get<int16_t>(); + std::optional<int16_t> levelBlue = tuningData["B"].get<int16_t>(); + bool tuningHasLevels = levelRed && levelGreenR && levelGreenB && levelBlue; + + auto blackLevel = context.camHelper->blackLevel(); + if (!blackLevel) { + /* + * Not all camera sensor helpers have been updated with black + * levels. Print a warning and fall back to the levels from the + * tuning data to preserve backward compatibility. This should + * be removed once all helpers provide the data. + */ + LOG(RkISP1Blc, Warning) + << "No black levels provided by camera sensor helper" + << ", please fix"; + + blackLevelRed_ = levelRed.value_or(4096); + blackLevelGreenR_ = levelGreenR.value_or(4096); + blackLevelGreenB_ = levelGreenB.value_or(4096); + blackLevelBlue_ = levelBlue.value_or(4096); + } else if (tuningHasLevels) { + /* + * If black levels are provided in the tuning file, use them to + * avoid breaking existing camera tuning. This is deprecated and + * will be removed. + */ + LOG(RkISP1Blc, Warning) + << "Deprecated: black levels overwritten by tuning file"; + + blackLevelRed_ = *levelRed; + blackLevelGreenR_ = *levelGreenR; + blackLevelGreenB_ = *levelGreenB; + blackLevelBlue_ = *levelBlue; + } else { + blackLevelRed_ = *blackLevel; + blackLevelGreenR_ = *blackLevel; + blackLevelGreenB_ = *blackLevel; + blackLevelBlue_ = *blackLevel; + } LOG(RkISP1Blc, Debug) << "Black levels: red " << blackLevelRed_ @@ -62,29 +106,80 @@ int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context, return 0; } +int BlackLevelCorrection::configure(IPAContext &context, + [[maybe_unused]] const IPACameraSensorInfo &configInfo) +{ + /* + * BLC on ISP versions that include the companding block requires usage + * of the extensible parameters format. 
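+ * + * (A scaling note for prepare() below, inferred from the shifts there:
+ * black levels are kept in a 16-bit reference scale, so e.g. a level of
+ * 4096 is programmed as 4096 << 4 = 65536 in the 20-bit companding block
+ * and as 4096 >> 4 = 256 in the 12-bit BLS block.)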
+ */ + supported_ = context.configuration.paramFormat == V4L2_META_FMT_RK_ISP1_EXT_PARAMS || + !context.hw->compand; + + if (!supported_) + LOG(RkISP1Blc, Warning) + << "BLC in companding block requires extensible parameters"; + + return 0; +} + /** * \copydoc libcamera::ipa::Algorithm::prepare */ -void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context, +void BlackLevelCorrection::prepare(IPAContext &context, const uint32_t frame, [[maybe_unused]] IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + RkISP1Params *params) { + if (context.configuration.raw) + return; + if (frame > 0) return; - if (!tuningParameters_) + if (!supported_) return; - params->others.bls_config.enable_auto = 0; - params->others.bls_config.fixed_val.r = blackLevelRed_; - params->others.bls_config.fixed_val.gr = blackLevelGreenR_; - params->others.bls_config.fixed_val.gb = blackLevelGreenB_; - params->others.bls_config.fixed_val.b = blackLevelBlue_; + if (context.hw->compand) { + auto config = params->block<BlockType::CompandBls>(); + config.setEnabled(true); + + /* + * Scale up to the 20-bit black levels used by the companding + * block. + */ + config->r = blackLevelRed_ << 4; + config->gr = blackLevelGreenR_ << 4; + config->gb = blackLevelGreenB_ << 4; + config->b = blackLevelBlue_ << 4; + } else { + auto config = params->block<BlockType::Bls>(); + config.setEnabled(true); + + config->enable_auto = 0; + + /* Scale down to the 12-bit black levels used by the BLS block. */ + config->fixed_val.r = blackLevelRed_ >> 4; + config->fixed_val.gr = blackLevelGreenR_ >> 4; + config->fixed_val.gb = blackLevelGreenB_ >> 4; + config->fixed_val.b = blackLevelBlue_ >> 4; + } +} - params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS; - params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS; +/** + * \copydoc libcamera::ipa::Algorithm::process + */ +void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + [[maybe_unused]] const rkisp1_stat_buffer *stats, + ControlList &metadata) +{ + metadata.set(controls::SensorBlackLevels, + { static_cast<int32_t>(blackLevelRed_), + static_cast<int32_t>(blackLevelGreenR_), + static_cast<int32_t>(blackLevelGreenB_), + static_cast<int32_t>(blackLevelBlue_) }); } REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection") diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h index 460ebcc1..f797ae44 100644 --- a/src/ipa/rkisp1/algorithms/blc.h +++ b/src/ipa/rkisp1/algorithms/blc.h @@ -20,12 +20,19 @@ public: ~BlackLevelCorrection() = default; int init(IPAContext &context, const YamlObject &tuningData) override; + int configure(IPAContext &context, + const IPACameraSensorInfo &configInfo) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const rkisp1_stat_buffer *stats, + ControlList &metadata) override; private: - bool tuningParameters_; + bool supported_; + int16_t blackLevelRed_; int16_t blackLevelGreenR_; int16_t blackLevelGreenB_; diff --git a/src/ipa/rkisp1/algorithms/ccm.cpp b/src/ipa/rkisp1/algorithms/ccm.cpp index c1f5403a..eb8ca39e 100644 --- a/src/ipa/rkisp1/algorithms/ccm.cpp +++ b/src/ipa/rkisp1/algorithms/ccm.cpp @@ -7,11 +7,7 @@ #include "ccm.h" -#include 
<algorithm> -#include <chrono> -#include <cmath> -#include <tuple> -#include <vector> +#include <map> #include <libcamera/base/log.h> #include <libcamera/base/utils.h> @@ -22,8 +18,8 @@ #include "libcamera/internal/yaml_parser.h" -#include "../utils.h" -#include "libipa/matrix_interpolator.h" +#include "libipa/fixedpoint.h" +#include "libipa/interpolator.h" /** * \file ccm.h @@ -50,7 +46,7 @@ int Ccm::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData LOG(RkISP1Ccm, Warning) << "Failed to parse 'ccm' " << "parameter from tuning file; falling back to unit matrix"; - ccm_.reset(); + ccm_.setData({ { 0, Matrix<float, 3, 3>::identity() } }); } ret = offsets_.readYaml(tuningData["ccms"], "ct", "offsets"); @@ -58,25 +54,17 @@ int Ccm::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData LOG(RkISP1Ccm, Warning) << "Failed to parse 'offsets' " << "parameter from tuning file; falling back to zero offsets"; - /* - * MatrixInterpolator::reset() resets to identity matrices - * while here we need zero matrices so we need to construct it - * ourselves. - */ - Matrix<int16_t, 3, 1> m({ 0, 0, 0 }); - std::map<unsigned int, Matrix<int16_t, 3, 1>> matrices = { { 0, m } }; - offsets_ = MatrixInterpolator<int16_t, 3, 1>(matrices); + + offsets_.setData({ { 0, Matrix<int16_t, 3, 1>({ 0, 0, 0 }) } }); } return 0; } -void Ccm::setParameters(rkisp1_params_cfg *params, +void Ccm::setParameters(struct rkisp1_cif_isp_ctk_config &config, const Matrix<float, 3, 3> &matrix, const Matrix<int16_t, 3, 1> &offsets) { - struct rkisp1_cif_isp_ctk_config &config = params->others.ctk_config; - /* * 4 bit integer and 7 bit fractional, ranging from -8 (0x400) to * +7.992 (0x3ff) @@ -84,7 +72,7 @@ void Ccm::setParameters(rkisp1_params_cfg *params, for (unsigned int i = 0; i < 3; i++) { for (unsigned int j = 0; j < 3; j++) config.coeff[i][j] = - utils::floatingToFixedPoint<4, 7, uint16_t, double>(matrix[i][j]); + floatingToFixedPoint<4, 7, uint16_t, double>(matrix[i][j]); } for (unsigned int i = 0; i < 3; i++) @@ -92,18 +80,13 @@ void Ccm::setParameters(rkisp1_params_cfg *params, LOG(RkISP1Ccm, Debug) << "Setting matrix " << matrix; LOG(RkISP1Ccm, Debug) << "Setting offsets " << offsets; - - params->module_en_update |= RKISP1_CIF_ISP_MODULE_CTK; - params->module_ens |= RKISP1_CIF_ISP_MODULE_CTK; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CTK; } /** * \copydoc libcamera::ipa::Algorithm::prepare */ void Ccm::prepare(IPAContext &context, const uint32_t frame, - IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + IPAFrameContext &frameContext, RkISP1Params *params) { uint32_t ct = context.activeState.awb.temperatureK; @@ -111,16 +94,21 @@ void Ccm::prepare(IPAContext &context, const uint32_t frame, * \todo The colour temperature will likely be noisy, add filtering to * avoid updating the CCM matrix all the time. 
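* * The interpolation below is linear in colour temperature: e.g. with
* tuning sets at 4000 K and 6000 K, a measured 5000 K yields the
* element-wise midpoint of the two matrices and offset vectors
* (illustrative values).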
*/ - if (frame > 0 && ct == ct_) + if (frame > 0 && ct == ct_) { + frameContext.ccm.ccm = context.activeState.ccm.ccm; return; + } ct_ = ct; - Matrix<float, 3, 3> ccm = ccm_.get(ct); - Matrix<int16_t, 3, 1> offsets = offsets_.get(ct); + Matrix<float, 3, 3> ccm = ccm_.getInterpolated(ct); + Matrix<int16_t, 3, 1> offsets = offsets_.getInterpolated(ct); + context.activeState.ccm.ccm = ccm; frameContext.ccm.ccm = ccm; - setParameters(params, ccm, offsets); + auto config = params->block<BlockType::Ctk>(); + config.setEnabled(true); + setParameters(*config, ccm, offsets); } /** @@ -132,12 +120,7 @@ void Ccm::process([[maybe_unused]] IPAContext &context, [[maybe_unused]] const rkisp1_stat_buffer *stats, ControlList &metadata) { - float m[9]; - for (unsigned int i = 0; i < 3; i++) { - for (unsigned int j = 0; j < 3; j++) - m[i] = frameContext.ccm.ccm[i][j]; - } - metadata.set(controls::ColourCorrectionMatrix, m); + metadata.set(controls::ColourCorrectionMatrix, frameContext.ccm.ccm.data()); } REGISTER_IPA_ALGORITHM(Ccm, "Ccm") diff --git a/src/ipa/rkisp1/algorithms/ccm.h b/src/ipa/rkisp1/algorithms/ccm.h index 30cb8821..a5d9a9a4 100644 --- a/src/ipa/rkisp1/algorithms/ccm.h +++ b/src/ipa/rkisp1/algorithms/ccm.h @@ -9,8 +9,9 @@ #include <linux/rkisp1-config.h> -#include "libipa/matrix.h" -#include "libipa/matrix_interpolator.h" +#include "libcamera/internal/matrix.h" + +#include "libipa/interpolator.h" #include "algorithm.h" @@ -27,7 +28,7 @@ public: int init(IPAContext &context, const YamlObject &tuningData) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; void process(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats, @@ -35,13 +36,13 @@ public: private: void parseYaml(const YamlObject &tuningData); - void setParameters(rkisp1_params_cfg *params, + void setParameters(struct rkisp1_cif_isp_ctk_config &config, const Matrix<float, 3, 3> &matrix, const Matrix<int16_t, 3, 1> &offsets); unsigned int ct_; - MatrixInterpolator<float, 3, 3> ccm_; - MatrixInterpolator<int16_t, 3, 1> offsets_; + Interpolator<Matrix<float, 3, 3>> ccm_; + Interpolator<Matrix<int16_t, 3, 1>> offsets_; }; } /* namespace ipa::rkisp1::algorithms */ diff --git a/src/ipa/rkisp1/algorithms/cproc.cpp b/src/ipa/rkisp1/algorithms/cproc.cpp index ef0931b2..d1fff699 100644 --- a/src/ipa/rkisp1/algorithms/cproc.cpp +++ b/src/ipa/rkisp1/algorithms/cproc.cpp @@ -140,19 +140,17 @@ void ColorProcessing::queueRequest(IPAContext &context, void ColorProcessing::prepare([[maybe_unused]] IPAContext &context, [[maybe_unused]] const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + RkISP1Params *params) { /* Check if the algorithm configuration has been updated. 
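* * (On the block accessor used below, as throughout this series: fetching
* params->block<BlockType::Cproc>() marks the block for update in the
* extensible parameters buffer, and setEnabled(true) turns the hardware
* block on, replacing the former module_cfg_update / module_en_update /
* module_ens bitmask handling.)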
*/ if (!frameContext.cproc.update) return; - params->others.cproc_config.brightness = frameContext.cproc.brightness; - params->others.cproc_config.contrast = frameContext.cproc.contrast; - params->others.cproc_config.sat = frameContext.cproc.saturation; - - params->module_en_update |= RKISP1_CIF_ISP_MODULE_CPROC; - params->module_ens |= RKISP1_CIF_ISP_MODULE_CPROC; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_CPROC; + auto config = params->block<BlockType::Cproc>(); + config.setEnabled(true); + config->brightness = frameContext.cproc.brightness; + config->contrast = frameContext.cproc.contrast; + config->sat = frameContext.cproc.saturation; } REGISTER_IPA_ALGORITHM(ColorProcessing, "ColorProcessing") diff --git a/src/ipa/rkisp1/algorithms/cproc.h b/src/ipa/rkisp1/algorithms/cproc.h index e50e7200..fd38fd17 100644 --- a/src/ipa/rkisp1/algorithms/cproc.h +++ b/src/ipa/rkisp1/algorithms/cproc.h @@ -29,7 +29,7 @@ public: const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; }; } /* namespace ipa::rkisp1::algorithms */ diff --git a/src/ipa/rkisp1/algorithms/dpcc.cpp b/src/ipa/rkisp1/algorithms/dpcc.cpp index b5a339e9..78946281 100644 --- a/src/ipa/rkisp1/algorithms/dpcc.cpp +++ b/src/ipa/rkisp1/algorithms/dpcc.cpp @@ -232,16 +232,14 @@ int DefectPixelClusterCorrection::init([[maybe_unused]] IPAContext &context, void DefectPixelClusterCorrection::prepare([[maybe_unused]] IPAContext &context, const uint32_t frame, [[maybe_unused]] IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + RkISP1Params *params) { if (frame > 0) return; - params->others.dpcc_config = config_; - - params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPCC; - params->module_ens |= RKISP1_CIF_ISP_MODULE_DPCC; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPCC; + auto config = params->block<BlockType::Dpcc>(); + config.setEnabled(true); + *config = config_; } REGISTER_IPA_ALGORITHM(DefectPixelClusterCorrection, "DefectPixelClusterCorrection") diff --git a/src/ipa/rkisp1/algorithms/dpcc.h b/src/ipa/rkisp1/algorithms/dpcc.h index d39b7bed..b77766c3 100644 --- a/src/ipa/rkisp1/algorithms/dpcc.h +++ b/src/ipa/rkisp1/algorithms/dpcc.h @@ -22,7 +22,7 @@ public: int init(IPAContext &context, const YamlObject &tuningData) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; private: rkisp1_cif_isp_dpcc_config config_; diff --git a/src/ipa/rkisp1/algorithms/dpf.cpp b/src/ipa/rkisp1/algorithms/dpf.cpp index abf95728..cb6095da 100644 --- a/src/ipa/rkisp1/algorithms/dpf.cpp +++ b/src/ipa/rkisp1/algorithms/dpf.cpp @@ -7,7 +7,9 @@ #include "dpf.h" -#include <cmath> +#include <algorithm> +#include <string> +#include <vector> #include <libcamera/base/log.h> @@ -215,15 +217,21 @@ void Dpf::queueRequest(IPAContext &context, * \copydoc libcamera::ipa::Algorithm::prepare */ void Dpf::prepare(IPAContext &context, const uint32_t frame, - IPAFrameContext &frameContext, rkisp1_params_cfg *params) + IPAFrameContext &frameContext, RkISP1Params *params) { - if (frame == 0) { - params->others.dpf_config = config_; - params->others.dpf_strength_config = strengthConfig_; + if (!frameContext.dpf.update && frame > 0) + return; + + auto config = params->block<BlockType::Dpf>(); + config.setEnabled(frameContext.dpf.denoise); + + if (frameContext.dpf.denoise) { + *config = config_; 
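+ + /* + * (The gain usage mode selected just below depends on which of the
+ * AWB and LSC gains are applied upstream of the DPF; e.g. with only AWB
+ * gains enabled the mode would be RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS.
+ * A reading aid, not a behavioural change.) + */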
const auto &awb = context.configuration.awb; const auto &lsc = context.configuration.lsc; - auto &mode = params->others.dpf_config.gain.mode; + + auto &mode = config->gain.mode; /* * The DPF needs to take into account the total amount of @@ -241,15 +249,12 @@ void Dpf::prepare(IPAContext &context, const uint32_t frame, mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS; else mode = RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED; - - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_DPF | - RKISP1_CIF_ISP_MODULE_DPF_STRENGTH; } - if (frameContext.dpf.update) { - params->module_en_update |= RKISP1_CIF_ISP_MODULE_DPF; - if (frameContext.dpf.denoise) - params->module_ens |= RKISP1_CIF_ISP_MODULE_DPF; + if (frame == 0) { + auto strengthConfig = params->block<BlockType::DpfStrength>(); + strengthConfig.setEnabled(true); + *strengthConfig = strengthConfig_; } } diff --git a/src/ipa/rkisp1/algorithms/dpf.h b/src/ipa/rkisp1/algorithms/dpf.h index da0115ba..2dd8cd36 100644 --- a/src/ipa/rkisp1/algorithms/dpf.h +++ b/src/ipa/rkisp1/algorithms/dpf.h @@ -27,7 +27,7 @@ public: const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; private: struct rkisp1_cif_isp_dpf_config config_; diff --git a/src/ipa/rkisp1/algorithms/filter.cpp b/src/ipa/rkisp1/algorithms/filter.cpp index 9752248a..7598ef8a 100644 --- a/src/ipa/rkisp1/algorithms/filter.cpp +++ b/src/ipa/rkisp1/algorithms/filter.cpp @@ -104,7 +104,7 @@ void Filter::queueRequest(IPAContext &context, */ void Filter::prepare([[maybe_unused]] IPAContext &context, [[maybe_unused]] const uint32_t frame, - IPAFrameContext &frameContext, rkisp1_params_cfg *params) + IPAFrameContext &frameContext, RkISP1Params *params) { /* Check if the algorithm configuration has been updated. 
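* * (denoise and sharpness below index the filt_* lookup tables, e.g.
* sharpness = 5 selects filt_fac_sh0[5]; the special cases for
* denoise == 9 and 10 towards the end of the function then adjust those
* values.)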
*/ if (!frameContext.filter.update) @@ -160,23 +160,25 @@ void Filter::prepare([[maybe_unused]] IPAContext &context, uint8_t denoise = frameContext.filter.denoise; uint8_t sharpness = frameContext.filter.sharpness; - auto &flt_config = params->others.flt_config; - - flt_config.fac_sh0 = filt_fac_sh0[sharpness]; - flt_config.fac_sh1 = filt_fac_sh1[sharpness]; - flt_config.fac_mid = filt_fac_mid[sharpness]; - flt_config.fac_bl0 = filt_fac_bl0[sharpness]; - flt_config.fac_bl1 = filt_fac_bl1[sharpness]; - - flt_config.lum_weight = kFiltLumWeightDefault; - flt_config.mode = kFiltModeDefault; - flt_config.thresh_sh0 = filt_thresh_sh0[denoise]; - flt_config.thresh_sh1 = filt_thresh_sh1[denoise]; - flt_config.thresh_bl0 = filt_thresh_bl0[denoise]; - flt_config.thresh_bl1 = filt_thresh_bl1[denoise]; - flt_config.grn_stage1 = stage1_select[denoise]; - flt_config.chr_v_mode = filt_chr_v_mode[denoise]; - flt_config.chr_h_mode = filt_chr_h_mode[denoise]; + + auto config = params->block<BlockType::Flt>(); + config.setEnabled(true); + + config->fac_sh0 = filt_fac_sh0[sharpness]; + config->fac_sh1 = filt_fac_sh1[sharpness]; + config->fac_mid = filt_fac_mid[sharpness]; + config->fac_bl0 = filt_fac_bl0[sharpness]; + config->fac_bl1 = filt_fac_bl1[sharpness]; + + config->lum_weight = kFiltLumWeightDefault; + config->mode = kFiltModeDefault; + config->thresh_sh0 = filt_thresh_sh0[denoise]; + config->thresh_sh1 = filt_thresh_sh1[denoise]; + config->thresh_bl0 = filt_thresh_bl0[denoise]; + config->thresh_bl1 = filt_thresh_bl1[denoise]; + config->grn_stage1 = stage1_select[denoise]; + config->chr_v_mode = filt_chr_v_mode[denoise]; + config->chr_h_mode = filt_chr_h_mode[denoise]; /* * Combined high denoising and high sharpening requires some @@ -186,27 +188,23 @@ void Filter::prepare([[maybe_unused]] IPAContext &context, */ if (denoise == 9) { if (sharpness > 3) - flt_config.grn_stage1 = 2; + config->grn_stage1 = 2; } else if (denoise == 10) { if (sharpness > 5) - flt_config.grn_stage1 = 2; + config->grn_stage1 = 2; else if (sharpness > 3) - flt_config.grn_stage1 = 1; + config->grn_stage1 = 1; } if (denoise > 7) { if (sharpness > 7) { - flt_config.fac_bl0 /= 2; - flt_config.fac_bl1 /= 4; + config->fac_bl0 /= 2; + config->fac_bl1 /= 4; } else if (sharpness > 4) { - flt_config.fac_bl0 = flt_config.fac_bl0 * 3 / 4; - flt_config.fac_bl1 /= 2; + config->fac_bl0 = config->fac_bl0 * 3 / 4; + config->fac_bl1 /= 2; } } - - params->module_en_update |= RKISP1_CIF_ISP_MODULE_FLT; - params->module_ens |= RKISP1_CIF_ISP_MODULE_FLT; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_FLT; } REGISTER_IPA_ALGORITHM(Filter, "Filter") diff --git a/src/ipa/rkisp1/algorithms/filter.h b/src/ipa/rkisp1/algorithms/filter.h index d595811d..8f858e57 100644 --- a/src/ipa/rkisp1/algorithms/filter.h +++ b/src/ipa/rkisp1/algorithms/filter.h @@ -26,7 +26,7 @@ public: const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; }; } /* namespace ipa::rkisp1::algorithms */ diff --git a/src/ipa/rkisp1/algorithms/goc.cpp b/src/ipa/rkisp1/algorithms/goc.cpp index a82cee3b..a9493678 100644 --- a/src/ipa/rkisp1/algorithms/goc.cpp +++ b/src/ipa/rkisp1/algorithms/goc.cpp @@ -99,11 +99,14 @@ void GammaOutCorrection::queueRequest(IPAContext &context, const uint32_t frame, void GammaOutCorrection::prepare(IPAContext &context, [[maybe_unused]] const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg 
*params) + RkISP1Params *params) { ASSERT(context.hw->numGammaOutSamples == RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10); + if (!frameContext.goc.update) + return; + /* * The logarithmic segments as specified in the reference. * Plus an additional 0 to make the loop easier @@ -112,10 +115,11 @@ void GammaOutCorrection::prepare(IPAContext &context, 64, 64, 64, 64, 128, 128, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 0 }; - __u16 *gamma_y = params->others.goc_config.gamma_y; - if (!frameContext.goc.update) - return; + auto config = params->block<BlockType::Goc>(); + config.setEnabled(true); + + __u16 *gamma_y = config->gamma_y; unsigned x = 0; for (const auto [i, size] : utils::enumerate(segments)) { @@ -123,10 +127,7 @@ void GammaOutCorrection::prepare(IPAContext &context, x += size; } - params->others.goc_config.mode = RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_GOC; - params->module_en_update |= RKISP1_CIF_ISP_MODULE_GOC; - params->module_ens |= RKISP1_CIF_ISP_MODULE_GOC; + config->mode = RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC; } /** diff --git a/src/ipa/rkisp1/algorithms/goc.h b/src/ipa/rkisp1/algorithms/goc.h index 0e05d7ce..bb2ddfc9 100644 --- a/src/ipa/rkisp1/algorithms/goc.h +++ b/src/ipa/rkisp1/algorithms/goc.h @@ -28,7 +28,7 @@ public: const ControlList &controls) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; void process(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const rkisp1_stat_buffer *stats, diff --git a/src/ipa/rkisp1/algorithms/gsl.cpp b/src/ipa/rkisp1/algorithms/gsl.cpp index 9b056c6e..9604c0ac 100644 --- a/src/ipa/rkisp1/algorithms/gsl.cpp +++ b/src/ipa/rkisp1/algorithms/gsl.cpp @@ -119,24 +119,20 @@ int GammaSensorLinearization::init([[maybe_unused]] IPAContext &context, void GammaSensorLinearization::prepare([[maybe_unused]] IPAContext &context, const uint32_t frame, [[maybe_unused]] IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + RkISP1Params *params) { if (frame > 0) return; - params->others.sdg_config.xa_pnts.gamma_dx0 = gammaDx_[0]; - params->others.sdg_config.xa_pnts.gamma_dx1 = gammaDx_[1]; + auto config = params->block<BlockType::Sdg>(); + config.setEnabled(true); - std::copy(curveYr_.begin(), curveYr_.end(), - params->others.sdg_config.curve_r.gamma_y); - std::copy(curveYg_.begin(), curveYg_.end(), - params->others.sdg_config.curve_g.gamma_y); - std::copy(curveYb_.begin(), curveYb_.end(), - params->others.sdg_config.curve_b.gamma_y); + config->xa_pnts.gamma_dx0 = gammaDx_[0]; + config->xa_pnts.gamma_dx1 = gammaDx_[1]; - params->module_en_update |= RKISP1_CIF_ISP_MODULE_SDG; - params->module_ens |= RKISP1_CIF_ISP_MODULE_SDG; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_SDG; + std::copy(curveYr_.begin(), curveYr_.end(), config->curve_r.gamma_y); + std::copy(curveYg_.begin(), curveYg_.end(), config->curve_g.gamma_y); + std::copy(curveYb_.begin(), curveYb_.end(), config->curve_b.gamma_y); } REGISTER_IPA_ALGORITHM(GammaSensorLinearization, "GammaSensorLinearization") diff --git a/src/ipa/rkisp1/algorithms/gsl.h b/src/ipa/rkisp1/algorithms/gsl.h index c404105e..91cf6efa 100644 --- a/src/ipa/rkisp1/algorithms/gsl.h +++ b/src/ipa/rkisp1/algorithms/gsl.h @@ -22,7 +22,7 @@ public: int init(IPAContext &context, const YamlObject &tuningData) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - 
rkisp1_params_cfg *params) override; + RkISP1Params *params) override; private: uint32_t gammaDx_[2]; diff --git a/src/ipa/rkisp1/algorithms/lsc.cpp b/src/ipa/rkisp1/algorithms/lsc.cpp index 161183fc..e47aa2f0 100644 --- a/src/ipa/rkisp1/algorithms/lsc.cpp +++ b/src/ipa/rkisp1/algorithms/lsc.cpp @@ -16,6 +16,7 @@ #include "libcamera/internal/yaml_parser.h" +#include "libipa/lsc_polynomial.h" #include "linux/rkisp1-config.h" /** @@ -24,6 +25,36 @@ namespace libcamera { +namespace ipa { + +constexpr int kColourTemperatureChangeThreshhold = 10; + +template<typename T> +void interpolateVector(const std::vector<T> &a, const std::vector<T> &b, + std::vector<T> &dest, double lambda) +{ + assert(a.size() == b.size()); + dest.resize(a.size()); + for (size_t i = 0; i < a.size(); i++) { + dest[i] = a[i] * (1.0 - lambda) + b[i] * lambda; + } +} + +template<> +void Interpolator<rkisp1::algorithms::LensShadingCorrection::Components>:: + interpolate(const rkisp1::algorithms::LensShadingCorrection::Components &a, + const rkisp1::algorithms::LensShadingCorrection::Components &b, + rkisp1::algorithms::LensShadingCorrection::Components &dest, + double lambda) +{ + interpolateVector(a.r, b.r, dest.r, lambda); + interpolateVector(a.gr, b.gr, dest.gr, lambda); + interpolateVector(a.gb, b.gb, dest.gb, lambda); + interpolateVector(a.b, b.b, dest.b, lambda); +} + +} /* namespace ipa */ + namespace ipa::rkisp1::algorithms { /** @@ -40,6 +71,200 @@ namespace ipa::rkisp1::algorithms { LOG_DEFINE_CATEGORY(RkISP1Lsc) +class LscPolynomialLoader +{ +public: + LscPolynomialLoader(const Size &sensorSize, + const Rectangle &cropRectangle, + const std::vector<double> &xSizes, + const std::vector<double> &ySizes) + : sensorSize_(sensorSize), + cropRectangle_(cropRectangle), + xSizes_(xSizes), + ySizes_(ySizes) + { + } + + int parseLscData(const YamlObject &yamlSets, + std::map<unsigned int, LensShadingCorrection::Components> &lscData) + { + const auto &sets = yamlSets.asList(); + for (const auto &yamlSet : sets) { + std::optional<LscPolynomial> pr, pgr, pgb, pb; + uint32_t ct = yamlSet["ct"].get<uint32_t>(0); + + if (lscData.count(ct)) { + LOG(RkISP1Lsc, Error) + << "Multiple sets found for " + << "color temperature " << ct; + return -EINVAL; + } + + LensShadingCorrection::Components &set = lscData[ct]; + pr = yamlSet["r"].get<LscPolynomial>(); + pgr = yamlSet["gr"].get<LscPolynomial>(); + pgb = yamlSet["gb"].get<LscPolynomial>(); + pb = yamlSet["b"].get<LscPolynomial>(); + + if (!(pr || pgr || pgb || pb)) { + LOG(RkISP1Lsc, Error) + << "Failed to parse polynomial for " + << "colour temperature " << ct; + return -EINVAL; + } + + set.ct = ct; + pr->setReferenceImageSize(sensorSize_); + pgr->setReferenceImageSize(sensorSize_); + pgb->setReferenceImageSize(sensorSize_); + pb->setReferenceImageSize(sensorSize_); + set.r = samplePolynomial(*pr); + set.gr = samplePolynomial(*pgr); + set.gb = samplePolynomial(*pgb); + set.b = samplePolynomial(*pb); + } + + if (lscData.empty()) { + LOG(RkISP1Lsc, Error) << "Failed to load any sets"; + return -EINVAL; + } + + return 0; + } + +private: + /* + * The lsc grid has custom spacing defined on half the range (see + * parseSizes() for details). For easier handling this function converts + * the spaces vector to positions and mirrors them. 
E.g.: + * + * input: | 0.2 | 0.3 | + * output: 0.0 0.2 0.5 0.8 1.0 + */ + std::vector<double> sizesListToPositions(const std::vector<double> &sizes) + { + const int half = sizes.size(); + std::vector<double> res(half * 2 + 1); + double x = 0.0; + + res[half] = 0.5; + for (int i = 1; i <= half; i++) { + x += sizes[half - i]; + res[half - i] = 0.5 - x; + res[half + i] = 0.5 + x; + } + + return res; + } + + std::vector<uint16_t> samplePolynomial(const LscPolynomial &poly) + { + constexpr int k = RKISP1_CIF_ISP_LSC_SAMPLES_MAX; + + double m = poly.getM(); + double x0 = cropRectangle_.x / m; + double y0 = cropRectangle_.y / m; + double w = cropRectangle_.width / m; + double h = cropRectangle_.height / m; + std::vector<uint16_t> res; + + assert(xSizes_.size() * 2 + 1 == k); + assert(ySizes_.size() * 2 + 1 == k); + + res.reserve(k * k); + + std::vector<double> xPos(sizesListToPositions(xSizes_)); + std::vector<double> yPos(sizesListToPositions(ySizes_)); + + for (int y = 0; y < k; y++) { + for (int x = 0; x < k; x++) { + double xp = x0 + xPos[x] * w; + double yp = y0 + yPos[y] * h; + /* + * The hardware uses 2.10 fixed point format and + * limits the legal values to [1..3.999]. Scale + * and clamp the sampled value accordingly. + */ + int v = static_cast<int>( + poly.sampleAtNormalizedPixelPos(xp, yp) * + 1024); + v = std::min(std::max(v, 1024), 4095); + res.push_back(v); + } + } + return res; + } + + Size sensorSize_; + Rectangle cropRectangle_; + const std::vector<double> &xSizes_; + const std::vector<double> &ySizes_; +}; + +class LscTableLoader +{ +public: + int parseLscData(const YamlObject &yamlSets, + std::map<unsigned int, LensShadingCorrection::Components> &lscData) + { + const auto &sets = yamlSets.asList(); + + for (const auto &yamlSet : sets) { + uint32_t ct = yamlSet["ct"].get<uint32_t>(0); + + if (lscData.count(ct)) { + LOG(RkISP1Lsc, Error) + << "Multiple sets found for color temperature " + << ct; + return -EINVAL; + } + + LensShadingCorrection::Components &set = lscData[ct]; + + set.ct = ct; + set.r = parseTable(yamlSet, "r"); + set.gr = parseTable(yamlSet, "gr"); + set.gb = parseTable(yamlSet, "gb"); + set.b = parseTable(yamlSet, "b"); + + if (set.r.empty() || set.gr.empty() || + set.gb.empty() || set.b.empty()) { + LOG(RkISP1Lsc, Error) + << "Set for color temperature " << ct + << " is missing tables"; + return -EINVAL; + } + } + + if (lscData.empty()) { + LOG(RkISP1Lsc, Error) << "Failed to load any sets"; + return -EINVAL; + } + + return 0; + } + +private: + std::vector<uint16_t> parseTable(const YamlObject &tuningData, + const char *prop) + { + static constexpr unsigned int kLscNumSamples = + RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX; + + std::vector<uint16_t> table = + tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{}); + if (table.size() != kLscNumSamples) { + LOG(RkISP1Lsc, Error) + << "Invalid '" << prop << "' values: expected " + << kLscNumSamples + << " elements, got " << table.size(); + return {}; + } + + return table; + } +}; + static std::vector<double> parseSizes(const YamlObject &tuningData, const char *prop) { @@ -70,28 +295,10 @@ static std::vector<double> parseSizes(const YamlObject &tuningData, return sizes; } -static std::vector<uint16_t> parseTable(const YamlObject &tuningData, - const char *prop) -{ - static constexpr unsigned int kLscNumSamples = - RKISP1_CIF_ISP_LSC_SAMPLES_MAX * RKISP1_CIF_ISP_LSC_SAMPLES_MAX; - - std::vector<uint16_t> table = - 
tuningData[prop].getList<uint16_t>().value_or(std::vector<uint16_t>{}); - if (table.size() != kLscNumSamples) { - LOG(RkISP1Lsc, Error) - << "Invalid '" << prop << "' values: expected " - << kLscNumSamples - << " elements, got " << table.size(); - return {}; - } - - return table; -} - LensShadingCorrection::LensShadingCorrection() - : lastCt_({ 0, 0 }) + : lastAppliedCt_(0), lastAppliedQuantizedCt_(0) { + sets_.setQuantization(kColourTemperatureChangeThreshhold); } /** @@ -114,38 +321,30 @@ int LensShadingCorrection::init([[maybe_unused]] IPAContext &context, return -EINVAL; } - const auto &sets = yamlSets.asList(); - for (const auto &yamlSet : sets) { - uint32_t ct = yamlSet["ct"].get<uint32_t>(0); - - if (sets_.count(ct)) { - LOG(RkISP1Lsc, Error) - << "Multiple sets found for color temperature " - << ct; - return -EINVAL; - } - - Components &set = sets_[ct]; - - set.ct = ct; - set.r = parseTable(yamlSet, "r"); - set.gr = parseTable(yamlSet, "gr"); - set.gb = parseTable(yamlSet, "gb"); - set.b = parseTable(yamlSet, "b"); - - if (set.r.empty() || set.gr.empty() || - set.gb.empty() || set.b.empty()) { - LOG(RkISP1Lsc, Error) - << "Set for color temperature " << ct - << " is missing tables"; - return -EINVAL; - } + std::map<unsigned int, Components> lscData; + int res = 0; + std::string type = tuningData["type"].get<std::string>("table"); + if (type == "table") { + LOG(RkISP1Lsc, Debug) << "Loading tabular LSC data."; + auto loader = LscTableLoader(); + res = loader.parseLscData(yamlSets, lscData); + } else if (type == "polynomial") { + LOG(RkISP1Lsc, Debug) << "Loading polynomial LSC data."; + auto loader = LscPolynomialLoader(context.sensorInfo.activeAreaSize, + context.sensorInfo.analogCrop, + xSize_, + ySize_); + res = loader.parseLscData(yamlSets, lscData); + } else { + LOG(RkISP1Lsc, Error) << "Unsupported LSC data type '" + << type << "'"; + res = -EINVAL; } - if (sets_.empty()) { - LOG(RkISP1Lsc, Error) << "Failed to load any sets"; - return -EINVAL; - } + if (res) + return res; + + sets_.setData(std::move(lscData)); return 0; } @@ -185,18 +384,12 @@ int LensShadingCorrection::configure(IPAContext &context, return 0; } -void LensShadingCorrection::setParameters(rkisp1_params_cfg *params) +void LensShadingCorrection::setParameters(rkisp1_cif_isp_lsc_config &config) { - struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config; - memcpy(config.x_grad_tbl, xGrad_, sizeof(config.x_grad_tbl)); memcpy(config.y_grad_tbl, yGrad_, sizeof(config.y_grad_tbl)); memcpy(config.x_size_tbl, xSizes_, sizeof(config.x_size_tbl)); memcpy(config.y_size_tbl, ySizes_, sizeof(config.y_size_tbl)); - - params->module_en_update |= RKISP1_CIF_ISP_MODULE_LSC; - params->module_ens |= RKISP1_CIF_ISP_MODULE_LSC; - params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_LSC; } void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config, @@ -208,131 +401,34 @@ void LensShadingCorrection::copyTable(rkisp1_cif_isp_lsc_config &config, std::copy(set.b.begin(), set.b.end(), &config.b_data_tbl[0][0]); } -/* - * Interpolate LSC parameters based on color temperature value. 
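
For reference, the per-channel blend that the new ipa::Interpolator specialisation performs (and that the interpolateTable() removed below used to perform) is plain linear interpolation between the two calibrated sets bracketing the current colour temperature. A minimal standalone sketch with hypothetical names and sample data, not code from the patch:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    /*
     * Blend two LSC sample tables: dest = a * (1 - lambda) + b * lambda,
     * where lambda is the position of ct between the two calibrated colour
     * temperatures ct0 and ct1. The patch delegates this per-channel work
     * to interpolateVector() via the Interpolator specialisation.
     */
    std::vector<uint16_t> blendTables(const std::vector<uint16_t> &a,
                                      const std::vector<uint16_t> &b,
                                      uint32_t ct0, uint32_t ct1, uint32_t ct)
    {
        assert(a.size() == b.size() && ct0 < ct1);
        double lambda = static_cast<double>(ct - ct0) / (ct1 - ct0);
        std::vector<uint16_t> dest(a.size());
        for (size_t i = 0; i < a.size(); i++)
            dest[i] = static_cast<uint16_t>(a[i] * (1.0 - lambda) +
                                            b[i] * lambda);
        return dest;
    }
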
- */ -void LensShadingCorrection::interpolateTable(rkisp1_cif_isp_lsc_config &config, - const Components &set0, - const Components &set1, - const uint32_t ct) -{ - double coeff0 = (set1.ct - ct) / static_cast<double>(set1.ct - set0.ct); - double coeff1 = (ct - set0.ct) / static_cast<double>(set1.ct - set0.ct); - - for (unsigned int i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++i) { - for (unsigned int j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; ++j) { - unsigned int sample = i * RKISP1_CIF_ISP_LSC_SAMPLES_MAX + j; - - config.r_data_tbl[i][j] = - set0.r[sample] * coeff0 + - set1.r[sample] * coeff1; - - config.gr_data_tbl[i][j] = - set0.gr[sample] * coeff0 + - set1.gr[sample] * coeff1; - - config.gb_data_tbl[i][j] = - set0.gb[sample] * coeff0 + - set1.gb[sample] * coeff1; - - config.b_data_tbl[i][j] = - set0.b[sample] * coeff0 + - set1.b[sample] * coeff1; - } - } -} - /** * \copydoc libcamera::ipa::Algorithm::prepare */ void LensShadingCorrection::prepare(IPAContext &context, - const uint32_t frame, + [[maybe_unused]] const uint32_t frame, [[maybe_unused]] IPAFrameContext &frameContext, - rkisp1_params_cfg *params) + RkISP1Params *params) { - struct rkisp1_cif_isp_lsc_config &config = params->others.lsc_config; - - /* - * If there is only one set, the configuration has already been done - * for first frame. - */ - if (sets_.size() == 1 && frame > 0) - return; - - /* - * If there is only one set, pick it. We can ignore lastCt_, as it will - * never be relevant. - */ - if (sets_.size() == 1) { - setParameters(params); - copyTable(config, sets_.cbegin()->second); - return; - } - uint32_t ct = context.activeState.awb.temperatureK; - ct = std::clamp(ct, sets_.cbegin()->first, sets_.crbegin()->first); - - /* - * If the original is the same, then it means the same adjustment would - * be made. If the adjusted is the same, then it means that it's the - * same as what was actually applied. Thus in these cases we can skip - * reprogramming the LSC. - * - * original == adjusted can only happen if an interpolation - * happened, or if original has an exact entry in sets_. This means - * that if original != adjusted, then original was adjusted to - * the nearest available entry in sets_, resulting in adjusted. - * Clearly, any ct value that is in between original and adjusted - * will be adjusted to the same adjusted value, so we can skip - * reprogramming the LSC table. - * - * We also skip updating the original value, as the last one had a - * larger bound and thus a larger range of ct values that will be - * adjusted to the same adjusted. - */ - if ((lastCt_.original <= ct && ct <= lastCt_.adjusted) || - (lastCt_.adjusted <= ct && ct <= lastCt_.original)) + if (std::abs(static_cast<int>(ct) - static_cast<int>(lastAppliedCt_)) < + kColourTemperatureChangeThreshhold) return; - - setParameters(params); - - /* - * The color temperature matches exactly one of the available LSC tables. 
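
The rewritten prepare() above only reprograms the LSC tables when the colour temperature moves to a different quantization step. A sketch of the idea, assuming Interpolator::setQuantization() rounds to the nearest multiple of the threshold (the exact rounding is an implementation detail of libipa, not stated in this diff):

    #include <cstdint>

    /* Step as set via sets_.setQuantization() in this patch. */
    constexpr unsigned int kStep = 10;

    /* Round a colour temperature to the nearest quantization step. */
    uint32_t quantizeCt(uint32_t ct)
    {
        return (ct + kStep / 2) / kStep * kStep;
    }

    /* Reprogramming is skipped whenever quantizeCt(ct) is unchanged. */
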
- */ - if (sets_.count(ct)) { - copyTable(config, sets_[ct]); - lastCt_ = { ct, ct }; + unsigned int quantizedCt; + const Components &set = sets_.getInterpolated(ct, &quantizedCt); + if (lastAppliedQuantizedCt_ == quantizedCt) return; - } - /* No shortcuts left; we need to round or interpolate */ - auto iter = sets_.upper_bound(ct); - const Components &set1 = iter->second; - const Components &set0 = (--iter)->second; - uint32_t ct0 = set0.ct; - uint32_t ct1 = set1.ct; - uint32_t diff0 = ct - ct0; - uint32_t diff1 = ct1 - ct; - static constexpr double kThreshold = 0.1; - float threshold = kThreshold * (ct1 - ct0); - - if (diff0 < threshold || diff1 < threshold) { - const Components &set = diff0 < diff1 ? set0 : set1; - LOG(RkISP1Lsc, Debug) << "using LSC table for " << set.ct; - copyTable(config, set); - lastCt_ = { ct, set.ct }; - return; - } + auto config = params->block<BlockType::Lsc>(); + config.setEnabled(true); + setParameters(*config); + copyTable(*config, set); + + lastAppliedCt_ = ct; + lastAppliedQuantizedCt_ = quantizedCt; - /* - * ct is not within 10% of the difference between the neighbouring - * color temperatures, so we need to interpolate. - */ LOG(RkISP1Lsc, Debug) - << "ct is " << ct << ", interpolating between " - << ct0 << " and " << ct1; - interpolateTable(config, set0, set1, ct); - lastCt_ = { ct, ct }; + << "ct is " << ct << ", quantized to " + << quantizedCt; } REGISTER_IPA_ALGORITHM(LensShadingCorrection, "LensShadingCorrection") diff --git a/src/ipa/rkisp1/algorithms/lsc.h b/src/ipa/rkisp1/algorithms/lsc.h index 5baf5927..5a0824e3 100644 --- a/src/ipa/rkisp1/algorithms/lsc.h +++ b/src/ipa/rkisp1/algorithms/lsc.h @@ -9,6 +9,8 @@ #include <map> +#include "libipa/interpolator.h" + #include "algorithm.h" namespace libcamera { @@ -25,9 +27,8 @@ public: int configure(IPAContext &context, const IPACameraSensorInfo &configInfo) override; void prepare(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, - rkisp1_params_cfg *params) override; + RkISP1Params *params) override; -private: struct Components { uint32_t ct; std::vector<uint16_t> r; @@ -36,23 +37,23 @@ private: std::vector<uint16_t> b; }; - void setParameters(rkisp1_params_cfg *params); +private: + void setParameters(rkisp1_cif_isp_lsc_config &config); void copyTable(rkisp1_cif_isp_lsc_config &config, const Components &set0); void interpolateTable(rkisp1_cif_isp_lsc_config &config, const Components &set0, const Components &set1, const uint32_t ct); - std::map<uint32_t, Components> sets_; + ipa::Interpolator<Components> sets_; std::vector<double> xSize_; std::vector<double> ySize_; uint16_t xGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; uint16_t yGrad_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; uint16_t xSizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; uint16_t ySizes_[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE]; - struct { - uint32_t original; - uint32_t adjusted; - } lastCt_; + + unsigned int lastAppliedCt_; + unsigned int lastAppliedQuantizedCt_; }; } /* namespace ipa::rkisp1::algorithms */ diff --git a/src/ipa/rkisp1/algorithms/lux.cpp b/src/ipa/rkisp1/algorithms/lux.cpp new file mode 100644 index 00000000..b0f74963 --- /dev/null +++ b/src/ipa/rkisp1/algorithms/lux.cpp @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * lux.cpp - RkISP1 Lux control + */ + +#include "lux.h" + +#include <libcamera/base/log.h> + +#include <libcamera/control_ids.h> + +#include "libipa/histogram.h" +#include "libipa/lux.h" + +/** + * \file lux.h + */ + +namespace 
libcamera { + +namespace ipa::rkisp1::algorithms { + +/** + * \class Lux + * \brief RkISP1 Lux control + * + * The Lux algorithm is responsible for estimating the lux level of the image. + * It doesn't take or generate any controls, but it provides a lux level for + * other algorithms (such as AGC) to use. + */ + +/** + * \brief Construct an rkisp1 Lux algo module + * + * The Lux helper is initialized to 65535 as that is the max bin count on the + * rkisp1. + */ +Lux::Lux() + : lux_(65535) +{ +} + +/** + * \copydoc libcamera::ipa::Algorithm::init + */ +int Lux::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData) +{ + return lux_.parseTuningData(tuningData); +} + +/** + * \copydoc libcamera::ipa::Algorithm::process + */ +void Lux::process(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + IPAFrameContext &frameContext, + const rkisp1_stat_buffer *stats, + ControlList &metadata) +{ + utils::Duration exposureTime = context.configuration.sensor.lineDuration + * frameContext.sensor.exposure; + double gain = frameContext.sensor.gain; + + /* \todo Deduplicate the histogram calculation from AGC */ + const rkisp1_cif_isp_stat *params = &stats->params; + Histogram yHist({ params->hist.hist_bins, context.hw->numHistogramBins }, + [](uint32_t x) { return x >> 4; }); + + double lux = lux_.estimateLux(exposureTime, gain, 1.0, yHist); + frameContext.lux.lux = lux; + metadata.set(controls::Lux, lux); +} + +REGISTER_IPA_ALGORITHM(Lux, "Lux") + +} /* namespace ipa::rkisp1::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/rkisp1/algorithms/lux.h b/src/ipa/rkisp1/algorithms/lux.h new file mode 100644 index 00000000..8a90de55 --- /dev/null +++ b/src/ipa/rkisp1/algorithms/lux.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * lux.h - RkISP1 Lux control + */ + +#pragma once + +#include <sys/types.h> + +#include "libipa/lux.h" + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::rkisp1::algorithms { + +class Lux : public Algorithm +{ +public: + Lux(); + + int init(IPAContext &context, const YamlObject &tuningData) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const rkisp1_stat_buffer *stats, + ControlList &metadata) override; + +private: + ipa::Lux lux_; +}; + +} /* namespace ipa::rkisp1::algorithms */ +} /* namespace libcamera */ diff --git a/src/ipa/rkisp1/algorithms/meson.build b/src/ipa/rkisp1/algorithms/meson.build index 1734a667..c66b0b70 100644 --- a/src/ipa/rkisp1/algorithms/meson.build +++ b/src/ipa/rkisp1/algorithms/meson.build @@ -12,4 +12,5 @@ rkisp1_ipa_algorithms = files([ 'goc.cpp', 'gsl.cpp', 'lsc.cpp', + 'lux.cpp', ]) diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml index cbcc43b8..0d99cb52 100644 --- a/src/ipa/rkisp1/data/imx219.yaml +++ b/src/ipa/rkisp1/data/imx219.yaml @@ -6,10 +6,6 @@ algorithms: - Agc: - Awb: - BlackLevelCorrection: - R: 256 - Gr: 256 - Gb: 256 - B: 256 - LensShadingCorrection: x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ] y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ] diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml index 43dddf20..202af36a 100644 --- a/src/ipa/rkisp1/data/imx258.yaml +++ b/src/ipa/rkisp1/data/imx258.yaml @@ -5,6 +5,7 @@ version: 1 algorithms: - Agc: - Awb: + - BlackLevelCorrection: - LensShadingCorrection: x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 
0.0625, 0.0625, 0.0625, 0.0625 ] y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ] diff --git a/src/ipa/rkisp1/data/meson.build b/src/ipa/rkisp1/data/meson.build index 7150e155..1e3522b2 100644 --- a/src/ipa/rkisp1/data/meson.build +++ b/src/ipa/rkisp1/data/meson.build @@ -2,8 +2,12 @@ conf_files = files([ 'imx219.yaml', + 'imx258.yaml', + 'ov2685.yaml', 'ov4689.yaml', 'ov5640.yaml', + 'ov5695.yaml', + 'ov8858.yaml', 'uncalibrated.yaml', ]) diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml index 2068684c..60901296 100644 --- a/src/ipa/rkisp1/data/ov4689.yaml +++ b/src/ipa/rkisp1/data/ov4689.yaml @@ -6,8 +6,4 @@ algorithms: - Agc: - Awb: - BlackLevelCorrection: - R: 66 - Gr: 66 - Gb: 66 - B: 66 ... diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml index 897b83cb..4b21d412 100644 --- a/src/ipa/rkisp1/data/ov5640.yaml +++ b/src/ipa/rkisp1/data/ov5640.yaml @@ -6,10 +6,6 @@ algorithms: - Agc: - Awb: - BlackLevelCorrection: - R: 256 - Gr: 256 - Gb: 256 - B: 256 - ColorProcessing: - GammaSensorLinearization: x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ] diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml index a7bbd8d8..60901296 100644 --- a/src/ipa/rkisp1/data/uncalibrated.yaml +++ b/src/ipa/rkisp1/data/uncalibrated.yaml @@ -5,4 +5,5 @@ version: 1 algorithms: - Agc: - Awb: + - BlackLevelCorrection: ... diff --git a/src/ipa/rkisp1/ipa_context.cpp b/src/ipa/rkisp1/ipa_context.cpp index 9f3f576a..261c0472 100644 --- a/src/ipa/rkisp1/ipa_context.cpp +++ b/src/ipa/rkisp1/ipa_context.cpp @@ -78,11 +78,11 @@ namespace libcamera::ipa::rkisp1 { * \var IPASessionConfiguration::sensor * \brief Sensor-specific configuration of the IPA * - * \var IPASessionConfiguration::sensor.minShutterSpeed - * \brief Minimum shutter speed supported with the sensor + * \var IPASessionConfiguration::sensor.minExposureTime + * \brief Minimum exposure time supported with the sensor * - * \var IPASessionConfiguration::sensor.maxShutterSpeed - * \brief Maximum shutter speed supported with the sensor + * \var IPASessionConfiguration::sensor.maxExposureTime + * \brief Maximum exposure time supported with the sensor * * \var IPASessionConfiguration::sensor.minAnalogueGain * \brief Minimum analogue gain supported with the sensor @@ -106,6 +106,11 @@ namespace libcamera::ipa::rkisp1 { */ /** + * \var IPASessionConfiguration::paramFormat + * \brief The fourcc of the parameters buffers format + */ + +/** * \struct IPAActiveState * \brief Active state for algorithms * @@ -160,8 +165,11 @@ namespace libcamera::ipa::rkisp1 { * \var IPAActiveState::agc.automatic.gain * \brief Automatic analogue gain multiplier * - * \var IPAActiveState::agc.autoEnabled - * \brief Manual/automatic AGC state as set by the AeEnable control + * \var IPAActiveState::agc.autoExposureEnabled + * \brief Manual/automatic AGC state (exposure) as set by the ExposureTimeMode control + * + * \var IPAActiveState::agc.autoGainEnabled + * \brief Manual/automatic AGC state (gain) as set by the AnalogueGainMode control * * \var IPAActiveState::agc.constraintMode * \brief Constraint mode as set by the AeConstraintMode control @@ -183,30 +191,12 @@ namespace libcamera::ipa::rkisp1 { * \struct IPAActiveState::awb.gains * \brief White balance gains * - * \struct IPAActiveState::awb.gains.manual + * \var IPAActiveState::awb.gains.manual * \brief Manual white balance gains (set through requests) * - * \var 
IPAActiveState::awb.gains.manual.red - * \brief Manual white balance gain for R channel - * - * \var IPAActiveState::awb.gains.manual.green - * \brief Manual white balance gain for G channel - * - * \var IPAActiveState::awb.gains.manual.blue - * \brief Manual white balance gain for B channel - * - * \struct IPAActiveState::awb.gains.automatic + * \var IPAActiveState::awb.gains.automatic * \brief Automatic white balance gains (computed by the algorithm) * - * \var IPAActiveState::awb.gains.automatic.red - * \brief Automatic white balance gain for R channel - * - * \var IPAActiveState::awb.gains.automatic.green - * \brief Automatic white balance gain for G channel - * - * \var IPAActiveState::awb.gains.automatic.blue - * \brief Automatic white balance gain for B channel - * * \var IPAActiveState::awb.temperatureK * \brief Estimated color temperature * @@ -302,8 +292,11 @@ namespace libcamera::ipa::rkisp1 { * * The gain should be adapted to the sensor specific gain code before applying. * - * \var IPAFrameContext::agc.autoEnabled - * \brief Manual/automatic AGC state as set by the AeEnable control + * \var IPAFrameContext::agc.autoExposureEnabled + * \brief Manual/automatic AGC state (exposure) as set by the ExposureTimeMode control + * + * \var IPAFrameContext::agc.autoGainEnabled + * \brief Manual/automatic AGC state (gain) as set by the AnalogueGainMode control * * \var IPAFrameContext::agc.constraintMode * \brief Constraint mode as set by the AeConstraintMode control @@ -319,6 +312,16 @@ namespace libcamera::ipa::rkisp1 { * * \var IPAFrameContext::agc.updateMetering * \brief Indicate if new ISP AGC metering parameters need to be applied + * + * \var IPAFrameContext::agc.autoExposureModeChange + * \brief Indicate if autoExposureEnabled has changed from true in the previous + * frame to false in the current frame, and no manual exposure value has been + * supplied in the current frame. + * + * \var IPAFrameContext::agc.autoGainModeChange + * \brief Indicate if autoGainEnabled has changed from true in the previous + * frame to false in the current frame, and no manual gain value has been + * supplied in the current frame. 
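
The two mode-change flags documented above encode one specific transition; translated directly from that description into code (names illustrative, not from the patch):

    /*
     * True only when the control switched from automatic in the previous
     * frame to manual in the current frame, without a manual value being
     * supplied alongside the switch.
     */
    bool autoModeChange(bool prevAutoEnabled, bool curAutoEnabled,
                        bool manualValueSupplied)
    {
        return prevAutoEnabled && !curAutoEnabled && !manualValueSupplied;
    }
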
*/ /** @@ -328,15 +331,6 @@ namespace libcamera::ipa::rkisp1 { * \struct IPAFrameContext::awb.gains * \brief White balance gains * - * \var IPAFrameContext::awb.gains.red - * \brief White balance gain for R channel - * - * \var IPAFrameContext::awb.gains.green - * \brief White balance gain for G channel - * - * \var IPAFrameContext::awb.gains.blue - * \brief White balance gain for B channel - * * \var IPAFrameContext::awb.temperatureK * \brief Estimated color temperature * @@ -419,6 +413,9 @@ namespace libcamera::ipa::rkisp1 { * \var IPAContext::hw * \brief RkISP1 version-specific hardware parameters * + * \var IPAContext::sensorInfo + * \brief The IPA session sensorInfo, immutable during the session + * * \var IPAContext::configuration * \brief The IPA session configuration, immutable during the session * diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h index 8602b408..c765b928 100644 --- a/src/ipa/rkisp1/ipa_context.h +++ b/src/ipa/rkisp1/ipa_context.h @@ -8,6 +8,8 @@ #pragma once +#include <memory> + #include <linux/rkisp1-config.h> #include <libcamera/base/utils.h> @@ -16,8 +18,14 @@ #include <libcamera/controls.h> #include <libcamera/geometry.h> +#include <libcamera/ipa/core_ipa_interface.h> + +#include "libcamera/internal/debug_controls.h" +#include "libcamera/internal/matrix.h" +#include "libcamera/internal/vector.h" + +#include <libipa/camera_sensor_helper.h> #include <libipa/fc_queue.h> -#include <libipa/matrix.h> namespace libcamera { @@ -28,6 +36,7 @@ struct IPAHwSettings { unsigned int numHistogramBins; unsigned int numHistogramWeights; unsigned int numGammaOutSamples; + bool compand; }; struct IPASessionConfiguration { @@ -45,8 +54,8 @@ struct IPASessionConfiguration { } lsc; struct { - utils::Duration minShutterSpeed; - utils::Duration maxShutterSpeed; + utils::Duration minExposureTime; + utils::Duration maxExposureTime; double minAnalogueGain; double maxAnalogueGain; @@ -56,6 +65,7 @@ struct IPASessionConfiguration { } sensor; bool raw; + uint32_t paramFormat; }; struct IPAActiveState { @@ -69,7 +79,8 @@ struct IPAActiveState { double gain; } automatic; - bool autoEnabled; + bool autoExposureEnabled; + bool autoGainEnabled; controls::AeConstraintModeEnum constraintMode; controls::AeExposureModeEnum exposureMode; controls::AeMeteringModeEnum meteringMode; @@ -78,16 +89,8 @@ struct IPAActiveState { struct { struct { - struct { - double red; - double green; - double blue; - } manual; - struct { - double red; - double green; - double blue; - } automatic; + RGB<double> manual; + RGB<double> automatic; } gains; unsigned int temperatureK; @@ -95,6 +98,10 @@ struct IPAActiveState { } awb; struct { + Matrix<float, 3, 3> ccm; + } ccm; + + struct { int8_t brightness; uint8_t contrast; uint8_t saturation; @@ -118,23 +125,21 @@ struct IPAFrameContext : public FrameContext { struct { uint32_t exposure; double gain; - bool autoEnabled; + bool autoExposureEnabled; + bool autoGainEnabled; controls::AeConstraintModeEnum constraintMode; controls::AeExposureModeEnum exposureMode; controls::AeMeteringModeEnum meteringMode; utils::Duration maxFrameDuration; bool updateMetering; + bool autoExposureModeChange; + bool autoGainModeChange; } agc; struct { - struct { - double red; - double green; - double blue; - } gains; - - unsigned int temperatureK; + RGB<double> gains; bool autoEnabled; + unsigned int temperatureK; } awb; struct { @@ -168,16 +173,31 @@ struct IPAFrameContext : public FrameContext { struct { Matrix<float, 3, 3> ccm; } ccm; + + struct { + double lux; + } lux; 
}; struct IPAContext { + IPAContext(unsigned int frameContextSize) + : hw(nullptr), frameContexts(frameContextSize) + { + } + const IPAHwSettings *hw; + IPACameraSensorInfo sensorInfo; IPASessionConfiguration configuration; IPAActiveState activeState; FCQueue<IPAFrameContext> frameContexts; ControlInfoMap::Map ctrlMap; + + DebugMetadata debugMetadata; + + /* Interface to the Camera Helper */ + std::unique_ptr<CameraSensorHelper> camHelper; }; } /* namespace ipa::rkisp1 */ diff --git a/src/ipa/rkisp1/meson.build b/src/ipa/rkisp1/meson.build index e8b266f1..26a9fa40 100644 --- a/src/ipa/rkisp1/meson.build +++ b/src/ipa/rkisp1/meson.build @@ -7,14 +7,13 @@ ipa_name = 'ipa_rkisp1' rkisp1_ipa_sources = files([ 'ipa_context.cpp', + 'params.cpp', 'rkisp1.cpp', - 'utils.cpp', ]) rkisp1_ipa_sources += rkisp1_ipa_algorithms -mod = shared_module(ipa_name, - [rkisp1_ipa_sources, libcamera_generated_ipa_headers], +mod = shared_module(ipa_name, rkisp1_ipa_sources, name_prefix : '', include_directories : [ipa_includes], dependencies : [libcamera_private, libipa_dep], diff --git a/src/ipa/rkisp1/module.h b/src/ipa/rkisp1/module.h index 16c3e43e..69e9bc82 100644 --- a/src/ipa/rkisp1/module.h +++ b/src/ipa/rkisp1/module.h @@ -14,13 +14,14 @@ #include <libipa/module.h> #include "ipa_context.h" +#include "params.h" namespace libcamera { namespace ipa::rkisp1 { using Module = ipa::Module<IPAContext, IPAFrameContext, IPACameraSensorInfo, - rkisp1_params_cfg, rkisp1_stat_buffer>; + RkISP1Params, rkisp1_stat_buffer>; } /* namespace ipa::rkisp1 */ diff --git a/src/ipa/rkisp1/params.cpp b/src/ipa/rkisp1/params.cpp new file mode 100644 index 00000000..4c0b051c --- /dev/null +++ b/src/ipa/rkisp1/params.cpp @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * RkISP1 ISP Parameters + */ + +#include "params.h" + +#include <map> +#include <stddef.h> +#include <string.h> + +#include <linux/rkisp1-config.h> +#include <linux/videodev2.h> + +#include <libcamera/base/log.h> +#include <libcamera/base/utils.h> + +namespace libcamera { + +LOG_DEFINE_CATEGORY(RkISP1Params) + +namespace ipa::rkisp1 { + +namespace { + +struct BlockTypeInfo { + enum rkisp1_ext_params_block_type type; + size_t size; + size_t offset; + uint32_t enableBit; +}; + +#define RKISP1_BLOCK_TYPE_ENTRY(block, id, type, category, bit) \ + { BlockType::block, { \ + RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \ + sizeof(struct rkisp1_cif_isp_##type##_config), \ + offsetof(struct rkisp1_params_cfg, category.type##_config), \ + RKISP1_CIF_ISP_MODULE_##bit, \ + } } + +#define RKISP1_BLOCK_TYPE_ENTRY_MEAS(block, id, type) \ + RKISP1_BLOCK_TYPE_ENTRY(block, id##_MEAS, type, meas, id) + +#define RKISP1_BLOCK_TYPE_ENTRY_OTHERS(block, id, type) \ + RKISP1_BLOCK_TYPE_ENTRY(block, id, type, others, id) + +#define RKISP1_BLOCK_TYPE_ENTRY_EXT(block, id, type) \ + { BlockType::block, { \ + RKISP1_EXT_PARAMS_BLOCK_TYPE_##id, \ + sizeof(struct rkisp1_cif_isp_##type##_config), \ + 0, 0, \ + } } + +const std::map<BlockType, BlockTypeInfo> kBlockTypeInfo = { + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bls, BLS, bls), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpcc, DPCC, dpcc), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Sdg, SDG, sdg), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(AwbGain, AWB_GAIN, awb_gain), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Flt, FLT, flt), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Bdm, BDM, bdm), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ctk, CTK, ctk), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Goc, GOC, goc), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Dpf, DPF, dpf), + 
RKISP1_BLOCK_TYPE_ENTRY_OTHERS(DpfStrength, DPF_STRENGTH, dpf_strength), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Cproc, CPROC, cproc), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Ie, IE, ie), + RKISP1_BLOCK_TYPE_ENTRY_OTHERS(Lsc, LSC, lsc), + RKISP1_BLOCK_TYPE_ENTRY_MEAS(Awb, AWB, awb_meas), + RKISP1_BLOCK_TYPE_ENTRY_MEAS(Hst, HST, hst), + RKISP1_BLOCK_TYPE_ENTRY_MEAS(Aec, AEC, aec), + RKISP1_BLOCK_TYPE_ENTRY_MEAS(Afc, AFC, afc), + RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandBls, COMPAND_BLS, compand_bls), + RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandExpand, COMPAND_EXPAND, compand_curve), + RKISP1_BLOCK_TYPE_ENTRY_EXT(CompandCompress, COMPAND_COMPRESS, compand_curve), +}; + +} /* namespace */ + +RkISP1ParamsBlockBase::RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type, + const Span<uint8_t> &data) + : params_(params), type_(type) +{ + if (params_->format() == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) { + header_ = data.subspan(0, sizeof(rkisp1_ext_params_block_header)); + data_ = data.subspan(sizeof(rkisp1_ext_params_block_header)); + } else { + data_ = data; + } +} + +void RkISP1ParamsBlockBase::setEnabled(bool enabled) +{ + /* + * For the legacy fixed format, blocks are enabled in the top-level + * header. Delegate to the RkISP1Params class. + */ + if (params_->format() == V4L2_META_FMT_RK_ISP1_PARAMS) + return params_->setBlockEnabled(type_, enabled); + + /* + * For the extensible format, set the enable and disable flags in the + * block header directly. + */ + struct rkisp1_ext_params_block_header *header = + reinterpret_cast<struct rkisp1_ext_params_block_header *>(header_.data()); + header->flags &= ~(RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE | + RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE); + header->flags |= enabled ? RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE + : RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE; +} + +RkISP1Params::RkISP1Params(uint32_t format, Span<uint8_t> data) + : format_(format), data_(data), used_(0) +{ + if (format_ == V4L2_META_FMT_RK_ISP1_EXT_PARAMS) { + struct rkisp1_ext_params_cfg *cfg = + reinterpret_cast<struct rkisp1_ext_params_cfg *>(data.data()); + + cfg->version = RKISP1_EXT_PARAM_BUFFER_V1; + cfg->data_size = 0; + + used_ += offsetof(struct rkisp1_ext_params_cfg, data); + } else { + memset(data.data(), 0, data.size()); + used_ = sizeof(struct rkisp1_params_cfg); + } +} + +void RkISP1Params::setBlockEnabled(BlockType type, bool enabled) +{ + const BlockTypeInfo &info = kBlockTypeInfo.at(type); + + struct rkisp1_params_cfg *cfg = + reinterpret_cast<struct rkisp1_params_cfg *>(data_.data()); + if (enabled) + cfg->module_ens |= info.enableBit; + else + cfg->module_ens &= ~info.enableBit; +} + +Span<uint8_t> RkISP1Params::block(BlockType type) +{ + auto infoIt = kBlockTypeInfo.find(type); + if (infoIt == kBlockTypeInfo.end()) { + LOG(RkISP1Params, Error) + << "Invalid parameters block type " + << utils::to_underlying(type); + return {}; + } + + const BlockTypeInfo &info = infoIt->second; + + /* + * For the legacy format, return a block referencing the fixed location + * of the data. + */ + if (format_ == V4L2_META_FMT_RK_ISP1_PARAMS) { + /* + * Blocks available only in extended parameters have an offset + * of 0. Return nullptr in that case. 
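
For context, algorithms consume this machinery through the typed block() accessor, as the LSC changes earlier in this diff do. A minimal hypothetical use with the BLS block (enable_auto is a real field of rkisp1_cif_isp_bls_config in rkisp1-config.h; the function itself is illustrative):

    #include "params.h"

    using namespace libcamera::ipa::rkisp1;

    /*
     * Obtain a typed view of the BLS block, mark it enabled, and write a
     * field. The same code works for both the fixed and extensible buffer
     * formats; the RkISP1Params instance is assumed to wrap a mapped
     * parameters buffer.
     */
    void configureBls(RkISP1Params *params)
    {
        auto config = params->block<BlockType::Bls>();
        config.setEnabled(true);
        config->enable_auto = 0;
    }
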
+ */ + if (info.offset == 0) { + LOG(RkISP1Params, Error) + << "Block type " << utils::to_underlying(type) + << " unavailable in fixed parameters format"; + return {}; + } + + struct rkisp1_params_cfg *cfg = + reinterpret_cast<struct rkisp1_params_cfg *>(data_.data()); + + cfg->module_cfg_update |= info.enableBit; + cfg->module_en_update |= info.enableBit; + + return data_.subspan(info.offset, info.size); + } + + /* + * For the extensible format, allocate memory for the block, including + * the header. Look up the block in the cache first. If an algorithm + * requests the same block type twice, it should get the same block. + */ + auto cacheIt = blocks_.find(type); + if (cacheIt != blocks_.end()) + return cacheIt->second; + + /* Make sure we don't run out of space. */ + size_t size = sizeof(struct rkisp1_ext_params_block_header) + + ((info.size + 7) & ~7); + if (size > data_.size() - used_) { + LOG(RkISP1Params, Error) + << "Out of memory to allocate block type " + << utils::to_underlying(type); + return {}; + } + + /* Allocate a new block, clear its memory, and initialize its header. */ + Span<uint8_t> block = data_.subspan(used_, size); + used_ += size; + + struct rkisp1_ext_params_cfg *cfg = + reinterpret_cast<struct rkisp1_ext_params_cfg *>(data_.data()); + cfg->data_size += size; + + memset(block.data(), 0, block.size()); + + struct rkisp1_ext_params_block_header *header = + reinterpret_cast<struct rkisp1_ext_params_block_header *>(block.data()); + header->type = info.type; + header->size = block.size(); + + /* Update the cache. */ + blocks_[type] = block; + + return block; +} + +} /* namespace ipa::rkisp1 */ + +} /* namespace libcamera */ diff --git a/src/ipa/rkisp1/params.h b/src/ipa/rkisp1/params.h new file mode 100644 index 00000000..40450e34 --- /dev/null +++ b/src/ipa/rkisp1/params.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas On Board + * + * RkISP1 ISP Parameters + */ + +#pragma once + +#include <map> +#include <stdint.h> + +#include <linux/rkisp1-config.h> + +#include <libcamera/base/class.h> +#include <libcamera/base/span.h> + +namespace libcamera { + +namespace ipa::rkisp1 { + +enum class BlockType { + Bls, + Dpcc, + Sdg, + AwbGain, + Flt, + Bdm, + Ctk, + Goc, + Dpf, + DpfStrength, + Cproc, + Ie, + Lsc, + Awb, + Hst, + Aec, + Afc, + CompandBls, + CompandExpand, + CompandCompress, +}; + +namespace details { + +template<BlockType B> +struct block_type { +}; + +#define RKISP1_DEFINE_BLOCK_TYPE(blockType, blockStruct) \ +template<> \ +struct block_type<BlockType::blockType> { \ + using type = struct rkisp1_cif_isp_##blockStruct##_config; \ +}; + +RKISP1_DEFINE_BLOCK_TYPE(Bls, bls) +RKISP1_DEFINE_BLOCK_TYPE(Dpcc, dpcc) +RKISP1_DEFINE_BLOCK_TYPE(Sdg, sdg) +RKISP1_DEFINE_BLOCK_TYPE(AwbGain, awb_gain) +RKISP1_DEFINE_BLOCK_TYPE(Flt, flt) +RKISP1_DEFINE_BLOCK_TYPE(Bdm, bdm) +RKISP1_DEFINE_BLOCK_TYPE(Ctk, ctk) +RKISP1_DEFINE_BLOCK_TYPE(Goc, goc) +RKISP1_DEFINE_BLOCK_TYPE(Dpf, dpf) +RKISP1_DEFINE_BLOCK_TYPE(DpfStrength, dpf_strength) +RKISP1_DEFINE_BLOCK_TYPE(Cproc, cproc) +RKISP1_DEFINE_BLOCK_TYPE(Ie, ie) +RKISP1_DEFINE_BLOCK_TYPE(Lsc, lsc) +RKISP1_DEFINE_BLOCK_TYPE(Awb, awb_meas) +RKISP1_DEFINE_BLOCK_TYPE(Hst, hst) +RKISP1_DEFINE_BLOCK_TYPE(Aec, aec) +RKISP1_DEFINE_BLOCK_TYPE(Afc, afc) +RKISP1_DEFINE_BLOCK_TYPE(CompandBls, compand_bls) +RKISP1_DEFINE_BLOCK_TYPE(CompandExpand, compand_curve) +RKISP1_DEFINE_BLOCK_TYPE(CompandCompress, compand_curve) + +} /* namespace details */ + +class RkISP1Params; + +class 
RkISP1ParamsBlockBase +{ +public: + RkISP1ParamsBlockBase(RkISP1Params *params, BlockType type, + const Span<uint8_t> &data); + + Span<uint8_t> data() const { return data_; } + + void setEnabled(bool enabled); + +private: + LIBCAMERA_DISABLE_COPY(RkISP1ParamsBlockBase) + + RkISP1Params *params_; + BlockType type_; + Span<uint8_t> header_; + Span<uint8_t> data_; +}; + +template<BlockType B> +class RkISP1ParamsBlock : public RkISP1ParamsBlockBase +{ +public: + using Type = typename details::block_type<B>::type; + + RkISP1ParamsBlock(RkISP1Params *params, const Span<uint8_t> &data) + : RkISP1ParamsBlockBase(params, B, data) + { + } + + const Type *operator->() const + { + return reinterpret_cast<const Type *>(data().data()); + } + + Type *operator->() + { + return reinterpret_cast<Type *>(data().data()); + } + + const Type &operator*() const & + { + return *reinterpret_cast<const Type *>(data().data()); + } + + Type &operator*() & + { + return *reinterpret_cast<Type *>(data().data()); + } +}; + +class RkISP1Params +{ +public: + RkISP1Params(uint32_t format, Span<uint8_t> data); + + template<BlockType B> + RkISP1ParamsBlock<B> block() + { + return RkISP1ParamsBlock<B>(this, block(B)); + } + + uint32_t format() const { return format_; } + size_t size() const { return used_; } + +private: + friend class RkISP1ParamsBlockBase; + + Span<uint8_t> block(BlockType type); + void setBlockEnabled(BlockType type, bool enabled); + + uint32_t format_; + + Span<uint8_t> data_; + size_t used_; + + std::map<BlockType, Span<uint8_t>> blocks_; +}; + +} /* namespace ipa::rkisp1 */ + +} /* namespace libcamera*/ diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp index d31cdbab..2ffdd99b 100644 --- a/src/ipa/rkisp1/rkisp1.cpp +++ b/src/ipa/rkisp1/rkisp1.cpp @@ -6,8 +6,8 @@ */ #include <algorithm> -#include <math.h> -#include <queue> +#include <array> +#include <chrono> #include <stdint.h> #include <string.h> @@ -18,20 +18,22 @@ #include <libcamera/base/log.h> #include <libcamera/control_ids.h> +#include <libcamera/controls.h> #include <libcamera/framebuffer.h> +#include <libcamera/request.h> + #include <libcamera/ipa/ipa_interface.h> #include <libcamera/ipa/ipa_module_info.h> #include <libcamera/ipa/rkisp1_ipa_interface.h> -#include <libcamera/request.h> #include "libcamera/internal/formats.h" #include "libcamera/internal/mapped_framebuffer.h" #include "libcamera/internal/yaml_parser.h" #include "algorithms/algorithm.h" -#include "libipa/camera_sensor_helper.h" #include "ipa_context.h" +#include "params.h" namespace libcamera { @@ -63,9 +65,9 @@ public: void unmapBuffers(const std::vector<unsigned int> &ids) override; void queueRequest(const uint32_t frame, const ControlList &controls) override; - void fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) override; - void processStatsBuffer(const uint32_t frame, const uint32_t bufferId, - const ControlList &sensorControls) override; + void computeParams(const uint32_t frame, const uint32_t bufferId) override; + void processStats(const uint32_t frame, const uint32_t bufferId, + const ControlList &sensorControls) override; protected: std::string logPrefix() const override; @@ -81,9 +83,6 @@ private: ControlInfoMap sensorControls_; - /* Interface to the Camera Helper */ - std::unique_ptr<CameraSensorHelper> camHelper_; - /* Local parameter storage */ struct IPAContext context_; }; @@ -95,6 +94,15 @@ const IPAHwSettings ipaHwSettingsV10{ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10, RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10, 
RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10, + false, +}; + +const IPAHwSettings ipaHwSettingsIMX8MP{ + RKISP1_CIF_ISP_AE_MEAN_MAX_V10, + RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10, + RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10, + RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10, + true, }; const IPAHwSettings ipaHwSettingsV12{ @@ -102,12 +110,14 @@ const IPAHwSettings ipaHwSettingsV12{ RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12, RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12, RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12, + false, }; /* List of controls handled by the RkISP1 IPA */ const ControlInfoMap::Map rkisp1Controls{ { &controls::AwbEnable, ControlInfo(false, true) }, { &controls::ColourGains, ControlInfo(0.0f, 3.996f, 1.0f) }, + { &controls::DebugMetadataEnable, ControlInfo(false, true, false) }, { &controls::Sharpness, ControlInfo(0.0f, 10.0f, 1.0f) }, { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) }, }; @@ -115,7 +125,7 @@ const ControlInfoMap::Map rkisp1Controls{ } /* namespace */ IPARkISP1::IPARkISP1() - : context_({ {}, {}, {}, { kMaxFrameContexts }, {} }) + : context_(kMaxFrameContexts) { } @@ -132,9 +142,11 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision, /* \todo Add support for other revisions */ switch (hwRevision) { case RKISP1_V10: - case RKISP1_V_IMX8MP: context_.hw = &ipaHwSettingsV10; break; + case RKISP1_V_IMX8MP: + context_.hw = &ipaHwSettingsIMX8MP; + break; case RKISP1_V12: context_.hw = &ipaHwSettingsV12; break; @@ -147,16 +159,18 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision, LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision; - camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel); - if (!camHelper_) { + context_.sensorInfo = sensorInfo; + + context_.camHelper = CameraSensorHelperFactoryBase::create(settings.sensorModel); + if (!context_.camHelper) { LOG(IPARkISP1, Error) << "Failed to create camera sensor helper for " << settings.sensorModel; return -ENODEV; } - context_.configuration.sensor.lineDuration = sensorInfo.minLineLength - * 1.0s / sensorInfo.pixelRate; + context_.configuration.sensor.lineDuration = + sensorInfo.minLineLength * 1.0s / sensorInfo.pixelRate; /* Load the tuning data file. */ File file(settings.configurationFile); @@ -230,6 +244,8 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig, context_.activeState = {}; context_.frameContexts.clear(); + context_.configuration.paramFormat = ipaConfig.paramFormat; + const IPACameraSensorInfo &info = ipaConfig.sensorInfo; const ControlInfo vBlank = sensorControls_.find(V4L2_CID_VBLANK)->second; context_.configuration.sensor.defVBlank = vBlank.def().get<int32_t>(); @@ -241,17 +257,19 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig, /* * When the AGC computes the new exposure values for a frame, it needs - * to know the limits for shutter speed and analogue gain. - * As it depends on the sensor, update it with the controls. + * to know the limits for exposure time and analogue gain. As it depends + * on the sensor, update it with the controls. 
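
A worked example of the limit computation above, using illustrative numbers rather than values from any real sensor mode: with a 30 µs line duration and a V4L2 exposure range of 1 to 1000 lines, the limits come out to 30 µs and 30 ms.

    #include <chrono>

    /* Illustrative numbers only: 30 us per line, exposure in [1, 1000] lines. */
    int main()
    {
        using Duration = std::chrono::duration<double, std::micro>;
        Duration lineDuration{ 30.0 };
        Duration minExposureTime = 1 * lineDuration;    /* 30 us */
        Duration maxExposureTime = 1000 * lineDuration; /* 30 ms */
        return maxExposureTime > minExposureTime ? 0 : 1;
    }
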
* - * \todo take VBLANK into account for maximum shutter speed + * \todo take VBLANK into account for maximum exposure time */ - context_.configuration.sensor.minShutterSpeed = + context_.configuration.sensor.minExposureTime = minExposure * context_.configuration.sensor.lineDuration; - context_.configuration.sensor.maxShutterSpeed = + context_.configuration.sensor.maxExposureTime = maxExposure * context_.configuration.sensor.lineDuration; - context_.configuration.sensor.minAnalogueGain = camHelper_->gain(minGain); - context_.configuration.sensor.maxAnalogueGain = camHelper_->gain(maxGain); + context_.configuration.sensor.minAnalogueGain = + context_.camHelper->gain(minGain); + context_.configuration.sensor.maxAnalogueGain = + context_.camHelper->gain(maxGain); context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(), [](auto &cfg) -> bool { @@ -309,6 +327,7 @@ void IPARkISP1::unmapBuffers(const std::vector<unsigned int> &ids) void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls) { IPAFrameContext &frameContext = context_.frameContexts.alloc(frame); + context_.debugMetadata.enableByControl(controls); for (auto const &a : algorithms()) { Algorithm *algo = static_cast<Algorithm *>(a.get()); @@ -318,25 +337,21 @@ void IPARkISP1::queueRequest(const uint32_t frame, const ControlList &controls) } } -void IPARkISP1::fillParamsBuffer(const uint32_t frame, const uint32_t bufferId) +void IPARkISP1::computeParams(const uint32_t frame, const uint32_t bufferId) { IPAFrameContext &frameContext = context_.frameContexts.get(frame); - rkisp1_params_cfg *params = - reinterpret_cast<rkisp1_params_cfg *>( - mappedBuffers_.at(bufferId).planes()[0].data()); - - /* Prepare parameters buffer. */ - memset(params, 0, sizeof(*params)); + RkISP1Params params(context_.configuration.paramFormat, + mappedBuffers_.at(bufferId).planes()[0]); for (auto const &algo : algorithms()) - algo->prepare(context_, frame, frameContext, params); + algo->prepare(context_, frame, frameContext, ¶ms); - paramsBufferReady.emit(frame); + paramsComputed.emit(frame, params.size()); } -void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId, - const ControlList &sensorControls) +void IPARkISP1::processStats(const uint32_t frame, const uint32_t bufferId, + const ControlList &sensorControls) { IPAFrameContext &frameContext = context_.frameContexts.get(frame); @@ -352,7 +367,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId frameContext.sensor.exposure = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>(); frameContext.sensor.gain = - camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>()); + context_.camHelper->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>()); ControlList metadata(controls::controls); @@ -365,6 +380,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId setControls(frame); + context_.debugMetadata.moveEntries(metadata); metadataReady.emit(frame, metadata); } @@ -389,9 +405,9 @@ void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo, /* Compute the analogue gain limits. 
*/ const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second; - float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>()); - float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>()); - float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>()); + float minGain = context_.camHelper->gain(v4l2Gain.min().get<int32_t>()); + float maxGain = context_.camHelper->gain(v4l2Gain.max().get<int32_t>()); + float defGain = context_.camHelper->gain(v4l2Gain.def().get<int32_t>()); ctrlMap.emplace(std::piecewise_construct, std::forward_as_tuple(&controls::AnalogueGain), std::forward_as_tuple(minGain, maxGain, defGain)); @@ -436,7 +452,7 @@ void IPARkISP1::setControls(unsigned int frame) IPAFrameContext &frameContext = context_.frameContexts.get(frame); uint32_t exposure = frameContext.agc.exposure; - uint32_t gain = camHelper_->gainCode(frameContext.agc.gain); + uint32_t gain = context_.camHelper->gainCode(frameContext.agc.gain); ControlList ctrls(sensorControls_); ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure)); diff --git a/src/ipa/rpi/cam_helper/cam_helper.cpp b/src/ipa/rpi/cam_helper/cam_helper.cpp index ee5d011f..a78db9c1 100644 --- a/src/ipa/rpi/cam_helper/cam_helper.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper.cpp @@ -156,17 +156,9 @@ void CamHelper::setCameraMode(const CameraMode &mode) } } -void CamHelper::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const +void CamHelper::setHwConfig(const Controller::HardwareConfig &hwConfig) { - /* - * These values are correct for many sensors. Other sensors will - * need to over-ride this function. - */ - exposureDelay = 2; - gainDelay = 1; - vblankDelay = 2; - hblankDelay = 2; + hwConfig_ = hwConfig; } bool CamHelper::sensorEmbeddedDataPresent() const @@ -241,7 +233,7 @@ void CamHelper::parseEmbeddedData(Span<const uint8_t> buffer, return; } - deviceStatus.shutterSpeed = parsedDeviceStatus.shutterSpeed; + deviceStatus.exposureTime = parsedDeviceStatus.exposureTime; deviceStatus.analogueGain = parsedDeviceStatus.analogueGain; deviceStatus.frameLength = parsedDeviceStatus.frameLength; deviceStatus.lineLength = parsedDeviceStatus.lineLength; diff --git a/src/ipa/rpi/cam_helper/cam_helper.h b/src/ipa/rpi/cam_helper/cam_helper.h index 4a4ab5e6..4a826690 100644 --- a/src/ipa/rpi/cam_helper/cam_helper.h +++ b/src/ipa/rpi/cam_helper/cam_helper.h @@ -36,11 +36,6 @@ namespace RPiController { * exposure time, and to convert between the sensor's gain codes and actual * gains. * - * A function to return the number of frames of delay between updating exposure, - * analogue gain and vblanking, and for the changes to take effect. For many - * sensors these take the values 2, 1 and 2 respectively, but sensors that are - * different will need to over-ride the default function provided. - * * A function to query if the sensor outputs embedded data that can be parsed. * * A function to return the sensitivity of a given camera mode. 
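
The gain-code conversion mentioned above is sensor-specific; the imx290/imx415 helpers in this diff use a log-domain mapping whose two directions are mutual inverses (0.015 = 1 / 66.6667). A self-contained sketch of that pair:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    /* code = 66.6667 * log10(gain), clamped to the register range [0, 0xf0]. */
    uint32_t gainCode(double gain)
    {
        int code = static_cast<int>(66.6667 * std::log10(gain));
        return std::max(0, std::min(code, 0xf0));
    }

    /* gain = 10^(0.015 * code); e.g. code 100 -> gain of 10^1.5 ~= 31.6. */
    double gain(uint32_t gainCode)
    {
        return std::pow(10, 0.015 * gainCode);
    }
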
@@ -76,6 +71,7 @@ public: CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff); virtual ~CamHelper(); void setCameraMode(const CameraMode &mode); + void setHwConfig(const Controller::HardwareConfig &hwConfig); virtual void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata); virtual void process(StatisticsPtr &stats, Metadata &metadata); @@ -91,8 +87,6 @@ public: libcamera::utils::Duration lineLengthPckToDuration(uint32_t lineLengthPck) const; virtual uint32_t gainCode(double gain) const = 0; virtual double gain(uint32_t gainCode) const = 0; - virtual void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const; virtual bool sensorEmbeddedDataPresent() const; virtual double getModeSensitivity(const CameraMode &mode) const; virtual unsigned int hideFramesStartup() const; @@ -108,6 +102,7 @@ protected: std::unique_ptr<MdParser> parser_; CameraMode mode_; + Controller::HardwareConfig hwConfig_; private: /* diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp index 91461f7a..ba01153e 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx219.cpp @@ -99,7 +99,7 @@ void CamHelperImx219::populateMetadata(const MdParser::RegisterMap ®isters, deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 + registers.at(lineLengthLoReg)); - deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), + deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), deviceStatus.lineLength); deviceStatus.analogueGain = gain(registers.at(gainReg)); deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp new file mode 100644 index 00000000..efc03193 --- /dev/null +++ b/src/ipa/rpi/cam_helper/cam_helper_imx283.cpp @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2024, Raspberry Pi Ltd + * + * cam_helper_Imx283.cpp - camera information for Imx283 sensor + */ + +#include <assert.h> + +#include "cam_helper.h" + +using namespace RPiController; + +class CamHelperImx283 : public CamHelper +{ +public: + CamHelperImx283(); + uint32_t gainCode(double gain) const override; + double gain(uint32_t gainCode) const override; + unsigned int hideFramesModeSwitch() const override; + +private: + /* + * Smallest difference between the frame length and integration time, + * in units of lines. + */ + static constexpr int frameIntegrationDiff = 4; +}; + +/* + * Imx283 doesn't output metadata, so we have to use the delayed controls which + * works by counting frames. + */ + +CamHelperImx283::CamHelperImx283() + : CamHelper({}, frameIntegrationDiff) +{ +} + +uint32_t CamHelperImx283::gainCode(double gain) const +{ + return static_cast<uint32_t>(2048.0 - 2048.0 / gain); +} + +double CamHelperImx283::gain(uint32_t gainCode) const +{ + return static_cast<double>(2048.0 / (2048 - gainCode)); +} + +unsigned int CamHelperImx283::hideFramesModeSwitch() const +{ + /* After a mode switch, we seem to get 1 bad frame. 
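
The imx283 mapping above inverts cleanly; a worked check that gain() undoes gainCode() for this sensor's 2048-based encoding (standalone sketch, names illustrative):

    #include <cassert>
    #include <cstdint>

    /* imx283 encoding from this diff: code = 2048 - 2048 / gain. */
    uint32_t imx283GainCode(double gain)
    {
        return static_cast<uint32_t>(2048.0 - 2048.0 / gain);
    }

    double imx283Gain(uint32_t gainCode)
    {
        return 2048.0 / (2048 - gainCode);
    }

    int main()
    {
        /* gain 2.0 -> code 1024 -> gain 2048 / 1024 = 2.0 */
        assert(imx283GainCode(2.0) == 1024);
        assert(imx283Gain(1024) == 2.0);
        return 0;
    }
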
*/ + return 1; +} + +static CamHelper *create() +{ + return new CamHelperImx283(); +} + +static RegisterCamHelper reg("imx283", &create); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp index 24275e12..c1aa8528 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx290.cpp @@ -5,7 +5,7 @@ * camera helper for imx290 sensor */ -#include <math.h> +#include <cmath> #include "cam_helper.h" @@ -17,8 +17,6 @@ public: CamHelperImx290(); uint32_t gainCode(double gain) const override; double gain(uint32_t gainCode) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; unsigned int hideFramesStartup() const override; unsigned int hideFramesModeSwitch() const override; @@ -37,22 +35,13 @@ CamHelperImx290::CamHelperImx290() uint32_t CamHelperImx290::gainCode(double gain) const { - int code = 66.6667 * log10(gain); + int code = 66.6667 * std::log10(gain); return std::max(0, std::min(code, 0xf0)); } double CamHelperImx290::gain(uint32_t gainCode) const { - return pow(10, 0.015 * gainCode); -} - -void CamHelperImx290::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 2; - hblankDelay = 2; + return std::pow(10, 0.015 * gainCode); } unsigned int CamHelperImx290::hideFramesStartup() const @@ -73,3 +62,5 @@ static CamHelper *create() } static RegisterCamHelper reg("imx290", &create); +static RegisterCamHelper reg327("imx327", &create); +static RegisterCamHelper reg462("imx462", &create); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp index d4a4fa79..ac7ee2ea 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx296.cpp @@ -23,8 +23,6 @@ public: double gain(uint32_t gainCode) const override; uint32_t exposureLines(const Duration exposure, const Duration lineLength) const override; Duration exposure(uint32_t exposureLines, const Duration lineLength) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; private: static constexpr uint32_t minExposureLines = 1; @@ -66,15 +64,6 @@ Duration CamHelperImx296::exposure(uint32_t exposureLines, return std::max<uint32_t>(minExposureLines, exposureLines) * timePerLine + 14.26us; } -void CamHelperImx296::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 2; - hblankDelay = 2; -} - static CamHelper *create() { return new CamHelperImx296(); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp new file mode 100644 index 00000000..c0a09eee --- /dev/null +++ b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2025, Raspberry Pi Ltd + * + * camera helper for imx415 sensor + */ + +#include <cmath> + +#include "cam_helper.h" + +using namespace RPiController; + +class CamHelperImx415 : public CamHelper +{ +public: + CamHelperImx415(); + uint32_t gainCode(double gain) const override; + double gain(uint32_t gainCode) const override; + unsigned int hideFramesStartup() const override; + unsigned int hideFramesModeSwitch() const override; + +private: + /* + * Smallest difference between the frame length and integration time, + * 
in units of lines. + */ + static constexpr int frameIntegrationDiff = 8; +}; + +CamHelperImx415::CamHelperImx415() + : CamHelper({}, frameIntegrationDiff) +{ +} + +uint32_t CamHelperImx415::gainCode(double gain) const +{ + int code = 66.6667 * std::log10(gain); + return std::max(0, std::min(code, 0xf0)); +} + +double CamHelperImx415::gain(uint32_t gainCode) const +{ + return std::pow(10, 0.015 * gainCode); +} + +unsigned int CamHelperImx415::hideFramesStartup() const +{ + /* On startup, we seem to get 1 bad frame. */ + return 1; +} + +unsigned int CamHelperImx415::hideFramesModeSwitch() const +{ + /* After a mode switch, we seem to get 1 bad frame. */ + return 1; +} + +static CamHelper *create() +{ + return new CamHelperImx415(); +} + +static RegisterCamHelper reg("imx415", &create); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp index 6bd89334..a72ac67d 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx477.cpp @@ -51,8 +51,6 @@ public: void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override; std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration, Duration maxFrameDuration) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; bool sensorEmbeddedDataPresent() const override; private: @@ -112,7 +110,7 @@ void CamHelperImx477::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m DeviceStatus parsedDeviceStatus; metadata.get("device.status", parsedDeviceStatus); - parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed; + parsedDeviceStatus.exposureTime = deviceStatus.exposureTime; parsedDeviceStatus.frameLength = deviceStatus.frameLength; metadata.set("device.status", parsedDeviceStatus); @@ -159,15 +157,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx477::getBlanking(Duration &exposure, return { frameLength - mode_.height, hblank }; } -void CamHelperImx477::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 3; - hblankDelay = 3; -} - bool CamHelperImx477::sensorEmbeddedDataPresent() const { return true; @@ -180,7 +169,7 @@ void CamHelperImx477::populateMetadata(const MdParser::RegisterMap ®isters, deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 + registers.at(lineLengthLoReg)); - deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), + deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), deviceStatus.lineLength); deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg)); deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp index c2de3d40..10cbea48 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx519.cpp @@ -51,8 +51,6 @@ public: void prepare(libcamera::Span<const uint8_t> buffer, Metadata &metadata) override; std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration, Duration maxFrameDuration) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; bool sensorEmbeddedDataPresent() const override; private: @@ -112,7 
+110,7 @@ void CamHelperImx519::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m DeviceStatus parsedDeviceStatus; metadata.get("device.status", parsedDeviceStatus); - parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed; + parsedDeviceStatus.exposureTime = deviceStatus.exposureTime; parsedDeviceStatus.frameLength = deviceStatus.frameLength; metadata.set("device.status", parsedDeviceStatus); @@ -159,15 +157,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx519::getBlanking(Duration &exposure, return { frameLength - mode_.height, hblank }; } -void CamHelperImx519::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 3; - hblankDelay = 3; -} - bool CamHelperImx519::sensorEmbeddedDataPresent() const { return true; @@ -180,7 +169,7 @@ void CamHelperImx519::populateMetadata(const MdParser::RegisterMap ®isters, deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 + registers.at(lineLengthLoReg)); - deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), + deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), deviceStatus.lineLength); deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg)); deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg); diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp index 63ddb55e..6150909c 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_imx708.cpp @@ -54,8 +54,6 @@ public: void process(StatisticsPtr &stats, Metadata &metadata) override; std::pair<uint32_t, uint32_t> getBlanking(Duration &exposure, Duration minFrameDuration, Duration maxFrameDuration) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; bool sensorEmbeddedDataPresent() const override; double getModeSensitivity(const CameraMode &mode) const override; unsigned int hideFramesModeSwitch() const override; @@ -66,7 +64,7 @@ private: * Smallest difference between the frame length and integration time, * in units of lines. */ - static constexpr int frameIntegrationDiff = 22; + static constexpr int frameIntegrationDiff = 48; /* Maximum frame length allowable for long exposure calculations. */ static constexpr int frameLengthMax = 0xffdc; /* Largest long exposure scale factor given as a left shift on the frame length. 
*/ @@ -155,7 +153,7 @@ void CamHelperImx708::prepare(libcamera::Span<const uint8_t> buffer, Metadata &m DeviceStatus parsedDeviceStatus; metadata.get("device.status", parsedDeviceStatus); - parsedDeviceStatus.shutterSpeed = deviceStatus.shutterSpeed; + parsedDeviceStatus.exposureTime = deviceStatus.exposureTime; parsedDeviceStatus.frameLength = deviceStatus.frameLength; metadata.set("device.status", parsedDeviceStatus); @@ -208,15 +206,6 @@ std::pair<uint32_t, uint32_t> CamHelperImx708::getBlanking(Duration &exposure, return { frameLength - mode_.height, hblank }; } -void CamHelperImx708::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 3; - hblankDelay = 3; -} - bool CamHelperImx708::sensorEmbeddedDataPresent() const { return true; @@ -255,7 +244,7 @@ void CamHelperImx708::populateMetadata(const MdParser::RegisterMap &registers, deviceStatus.lineLength = lineLengthPckToDuration(registers.at(lineLengthHiReg) * 256 + registers.at(lineLengthLoReg)); - deviceStatus.shutterSpeed = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), + deviceStatus.exposureTime = exposure(registers.at(expHiReg) * 256 + registers.at(expLoReg), deviceStatus.lineLength); deviceStatus.analogueGain = gain(registers.at(gainHiReg) * 256 + registers.at(gainLoReg)); deviceStatus.frameLength = registers.at(frameLengthHiReg) * 256 + registers.at(frameLengthLoReg); diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp index c30b017c..40d6b6d7 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_ov5647.cpp @@ -17,8 +17,6 @@ public: CamHelperOv5647(); uint32_t gainCode(double gain) const override; double gain(uint32_t gainCode) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; unsigned int hideFramesStartup() const override; unsigned int hideFramesModeSwitch() const override; unsigned int mistrustFramesStartup() const override; @@ -52,19 +50,6 @@ double CamHelperOv5647::gain(uint32_t gainCode) const return static_cast<double>(gainCode) / 16.0; } -void CamHelperOv5647::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - /* - * We run this sensor in a mode where the gain delay is bumped up to - * 2. It seems to be the only way to make the delays "predictable".
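A side note on the gain mappings above: the OV helpers convert gain linearly (code = gain * 16), while the new IMX415 helper works in the log domain, one code step being 0.3 dB. The following standalone sketch (not part of the patch, names invented) shows that the two IMX415 conversions are inverses up to quantisation:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

/* Constants copied from CamHelperImx415 above; the rest is illustrative. */
static uint32_t imx415GainCode(double gain)
{
	int code = 66.6667 * std::log10(gain); /* one code step = 0.3 dB */
	return std::max(0, std::min(code, 0xf0));
}

static double imx415Gain(uint32_t gainCode)
{
	return std::pow(10, 0.015 * gainCode); /* 0.015 == 1 / 66.6667 */
}

int main()
{
	/* Round-trip error stays within one quantisation step. */
	for (double gain : { 1.0, 2.0, 8.0, 16.0 }) {
		uint32_t code = imx415GainCode(gain);
		std::cout << gain << " -> " << code << " -> "
			  << imx415Gain(code) << "\n";
	}
	return 0;
}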
- */ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 2; - hblankDelay = 2; -} - unsigned int CamHelperOv5647::hideFramesStartup() const { /* diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp index a8efd389..980495a8 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_ov64a40.cpp @@ -18,8 +18,6 @@ public: CamHelperOv64a40(); uint32_t gainCode(double gain) const override; double gain(uint32_t gainCode) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; double getModeSensitivity(const CameraMode &mode) const override; private: @@ -45,16 +43,6 @@ double CamHelperOv64a40::gain(uint32_t gainCode) const return static_cast<double>(gainCode) / 128.0; } -void CamHelperOv64a40::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - /* The driver appears to behave as follows: */ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 2; - hblankDelay = 2; -} - double CamHelperOv64a40::getModeSensitivity(const CameraMode &mode) const { if (mode.binX >= 2 && mode.scaleX >= 4) { diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp new file mode 100644 index 00000000..fc7b999f --- /dev/null +++ b/src/ipa/rpi/cam_helper/cam_helper_ov7251.cpp @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Copyright (C) 2021, Raspberry Pi Ltd + * + * camera information for ov7251 sensor + */ + +#include <assert.h> + +#include "cam_helper.h" + +using namespace RPiController; + +class CamHelperOv7251 : public CamHelper +{ +public: + CamHelperOv7251(); + uint32_t gainCode(double gain) const override; + double gain(uint32_t gainCode) const override; + +private: + /* + * Smallest difference between the frame length and integration time, + * in units of lines. + */ + static constexpr int frameIntegrationDiff = 4; +}; + +/* + * OV7251 doesn't output metadata, so we have to use the "unicam parser" which + * works by counting frames. + */ + +CamHelperOv7251::CamHelperOv7251() + : CamHelper({}, frameIntegrationDiff) +{ +} + +uint32_t CamHelperOv7251::gainCode(double gain) const +{ + return static_cast<uint32_t>(gain * 16.0); +} + +double CamHelperOv7251::gain(uint32_t gainCode) const +{ + return static_cast<double>(gainCode) / 16.0; +} + +static CamHelper *create() +{ + return new CamHelperOv7251(); +} + +static RegisterCamHelper reg("ov7251", &create); diff --git a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp index a65c8ac0..e93a4691 100644 --- a/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp +++ b/src/ipa/rpi/cam_helper/cam_helper_ov9281.cpp @@ -17,15 +17,13 @@ public: CamHelperOv9281(); uint32_t gainCode(double gain) const override; double gain(uint32_t gainCode) const override; - void getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const override; private: /* * Smallest difference between the frame length and integration time, * in units of lines. 
*/ - static constexpr int frameIntegrationDiff = 4; + static constexpr int frameIntegrationDiff = 25; }; /* @@ -48,16 +46,6 @@ double CamHelperOv9281::gain(uint32_t gainCode) const return static_cast<double>(gainCode) / 16.0; } -void CamHelperOv9281::getDelays(int &exposureDelay, int &gainDelay, - int &vblankDelay, int &hblankDelay) const -{ - /* The driver appears to behave as follows: */ - exposureDelay = 2; - gainDelay = 2; - vblankDelay = 2; - hblankDelay = 2; -} - static CamHelper *create() { return new CamHelperOv9281(); diff --git a/src/ipa/rpi/cam_helper/meson.build b/src/ipa/rpi/cam_helper/meson.build index 72625057..abf02147 100644 --- a/src/ipa/rpi/cam_helper/meson.build +++ b/src/ipa/rpi/cam_helper/meson.build @@ -4,12 +4,15 @@ rpi_ipa_cam_helper_sources = files([ 'cam_helper.cpp', 'cam_helper_ov5647.cpp', 'cam_helper_imx219.cpp', + 'cam_helper_imx283.cpp', 'cam_helper_imx290.cpp', 'cam_helper_imx296.cpp', + 'cam_helper_imx415.cpp', 'cam_helper_imx477.cpp', 'cam_helper_imx519.cpp', 'cam_helper_imx708.cpp', 'cam_helper_ov64a40.cpp', + 'cam_helper_ov7251.cpp', 'cam_helper_ov9281.cpp', 'md_parser_smia.cpp', ]) diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp index ee3848b5..6734c32e 100644 --- a/src/ipa/rpi/common/ipa_base.cpp +++ b/src/ipa/rpi/common/ipa_base.cpp @@ -55,9 +55,19 @@ constexpr Duration controllerMinFrameDuration = 1.0s / 30.0; /* List of controls handled by the Raspberry Pi IPA */ const ControlInfoMap::Map ipaControls{ - { &controls::AeEnable, ControlInfo(false, true) }, - { &controls::ExposureTime, ControlInfo(0, 66666) }, - { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f) }, + /* \todo Move this to the Camera class */ + { &controls::AeEnable, ControlInfo(false, true, true) }, + { &controls::ExposureTimeMode, + ControlInfo(static_cast<int32_t>(controls::ExposureTimeModeAuto), + static_cast<int32_t>(controls::ExposureTimeModeManual), + static_cast<int32_t>(controls::ExposureTimeModeAuto)) }, + { &controls::ExposureTime, + ControlInfo(1, 66666, static_cast<int32_t>(defaultExposureTime.get<std::micro>())) }, + { &controls::AnalogueGainMode, + ControlInfo(static_cast<int32_t>(controls::AnalogueGainModeAuto), + static_cast<int32_t>(controls::AnalogueGainModeManual), + static_cast<int32_t>(controls::AnalogueGainModeAuto)) }, + { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f, 1.0f) }, { &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) }, { &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) }, { &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) }, @@ -71,7 +81,9 @@ const ControlInfoMap::Map ipaControls{ { &controls::HdrMode, ControlInfo(controls::HdrModeValues) }, { &controls::Sharpness, ControlInfo(0.0f, 16.0f, 1.0f) }, { &controls::ScalerCrop, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) }, - { &controls::FrameDurationLimits, ControlInfo(INT64_C(33333), INT64_C(120000)) }, + { &controls::FrameDurationLimits, + ControlInfo(INT64_C(33333), INT64_C(120000), + static_cast<int64_t>(defaultMinFrameDuration.get<std::micro>())) }, { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) }, { &controls::rpi::StatsOutputEnable, ControlInfo(false, true, false) }, }; @@ -81,6 +93,7 @@ const ControlInfoMap::Map ipaColourControls{ { &controls::AwbEnable, ControlInfo(false, true) }, { &controls::AwbMode, ControlInfo(controls::AwbModeValues) }, { &controls::ColourGains, ControlInfo(0.0f, 32.0f) }, + { 
&controls::ColourTemperature, ControlInfo(100, 100000) }, { &controls::Saturation, ControlInfo(0.0f, 32.0f, 1.0f) }, }; @@ -96,6 +109,13 @@ const ControlInfoMap::Map ipaAfControls{ { &controls::LensPosition, ControlInfo(0.0f, 32.0f, 1.0f) } }; +/* Platform specific controls */ +const std::map<const std::string, ControlInfoMap::Map> platformControls { + { "pisp", { + { &controls::rpi::ScalerCrops, ControlInfo(Rectangle{}, Rectangle(65535, 65535, 65535, 65535), Rectangle{}) } + } }, +}; + } /* namespace */ LOG_DEFINE_CATEGORY(IPARPI) @@ -127,18 +147,8 @@ int32_t IpaBase::init(const IPASettings &settings, const InitParams ¶ms, Ini return -EINVAL; } - /* - * Pass out the sensor config to the pipeline handler in order - * to setup the staggered writer class. - */ - int gainDelay, exposureDelay, vblankDelay, hblankDelay, sensorMetadata; - helper_->getDelays(exposureDelay, gainDelay, vblankDelay, hblankDelay); - sensorMetadata = helper_->sensorEmbeddedDataPresent(); - - result->sensorConfig.gainDelay = gainDelay; - result->sensorConfig.exposureDelay = exposureDelay; - result->sensorConfig.vblankDelay = vblankDelay; - result->sensorConfig.hblankDelay = hblankDelay; + /* Pass out the sensor metadata to the pipeline handler */ + int sensorMetadata = helper_->sensorEmbeddedDataPresent(); result->sensorConfig.sensorMetadata = sensorMetadata; /* Load the tuning file for this sensor. */ @@ -153,12 +163,17 @@ int32_t IpaBase::init(const IPASettings &settings, const InitParams ¶ms, Ini lensPresent_ = params.lensPresent; controller_.initialise(); + helper_->setHwConfig(controller_.getHardwareConfig()); /* Return the controls handled by the IPA */ ControlInfoMap::Map ctrlMap = ipaControls; if (lensPresent_) ctrlMap.merge(ControlInfoMap::Map(ipaAfControls)); + auto platformCtrlsIt = platformControls.find(controller_.getTarget()); + if (platformCtrlsIt != platformControls.end()) + ctrlMap.merge(ControlInfoMap::Map(platformCtrlsIt->second)); + monoSensor_ = params.sensorInfo.cfaPattern == properties::draft::ColorFilterArrangementEnum::MONO; if (!monoSensor_) ctrlMap.merge(ControlInfoMap::Map(ipaColourControls)); @@ -213,7 +228,7 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa /* Supply initial values for gain and exposure. 
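The entries above now pass a third argument to ControlInfo, the default value, in addition to the minimum and maximum. As a hedged illustration (the helper name is invented), this is how the three values can be read back from a ControlInfoMap with the public min()/max()/def() accessors:

#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

/* Illustrative helper: print the limits and default of ExposureTime. */
void printExposureTimeInfo(const libcamera::ControlInfoMap &ctrls)
{
	auto it = ctrls.find(&libcamera::controls::ExposureTime);
	if (it == ctrls.end())
		return;

	const libcamera::ControlInfo &info = it->second;
	/* min()/max()/def() return the three values registered in ipaControls. */
	std::cout << "ExposureTime: min " << info.min().get<int32_t>()
		  << "us max " << info.max().get<int32_t>()
		  << "us default " << info.def().get<int32_t>() << "us\n";
}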
*/ AgcStatus agcStatus; - agcStatus.shutterTime = defaultExposureTime; + agcStatus.exposureTime = defaultExposureTime; agcStatus.analogueGain = defaultAnalogueGain; applyAGC(&agcStatus, ctrls); @@ -247,15 +262,18 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa ControlInfoMap::Map ctrlMap = ipaControls; ctrlMap[&controls::FrameDurationLimits] = ControlInfo(static_cast<int64_t>(mode_.minFrameDuration.get<std::micro>()), - static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>())); + static_cast<int64_t>(mode_.maxFrameDuration.get<std::micro>()), + static_cast<int64_t>(defaultMinFrameDuration.get<std::micro>())); ctrlMap[&controls::AnalogueGain] = ControlInfo(static_cast<float>(mode_.minAnalogueGain), - static_cast<float>(mode_.maxAnalogueGain)); + static_cast<float>(mode_.maxAnalogueGain), + static_cast<float>(defaultAnalogueGain)); ctrlMap[&controls::ExposureTime] = - ControlInfo(static_cast<int32_t>(mode_.minShutter.get<std::micro>()), - static_cast<int32_t>(mode_.maxShutter.get<std::micro>())); + ControlInfo(static_cast<int32_t>(mode_.minExposureTime.get<std::micro>()), + static_cast<int32_t>(mode_.maxExposureTime.get<std::micro>()), + static_cast<int32_t>(defaultExposureTime.get<std::micro>())); /* Declare colour processing related controls for non-mono sensors. */ if (!monoSensor_) @@ -288,11 +306,11 @@ void IpaBase::start(const ControlList &controls, StartResult *result) /* SwitchMode may supply updated exposure/gain values to use. */ AgcStatus agcStatus; - agcStatus.shutterTime = 0.0s; + agcStatus.exposureTime = 0.0s; agcStatus.analogueGain = 0.0; metadata.get("agc.status", agcStatus); - if (agcStatus.shutterTime && agcStatus.analogueGain) { + if (agcStatus.exposureTime && agcStatus.analogueGain) { ControlList ctrls(sensorCtrls_); applyAGC(&agcStatus, ctrls); result->controls = std::move(ctrls); @@ -588,7 +606,7 @@ void IpaBase::setMode(const IPACameraSensorInfo &sensorInfo) mode_.sensitivity = helper_->getModeSensitivity(mode_); const ControlInfo &gainCtrl = sensorCtrls_.at(V4L2_CID_ANALOGUE_GAIN); - const ControlInfo &shutterCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE); + const ControlInfo &exposureTimeCtrl = sensorCtrls_.at(V4L2_CID_EXPOSURE); mode_.minAnalogueGain = helper_->gain(gainCtrl.min().get<int32_t>()); mode_.maxAnalogueGain = helper_->gain(gainCtrl.max().get<int32_t>()); @@ -599,11 +617,15 @@ void IpaBase::setMode(const IPACameraSensorInfo &sensorInfo) */ helper_->setCameraMode(mode_); - /* Shutter speed is calculated based on the limits of the frame durations. */ - mode_.minShutter = helper_->exposure(shutterCtrl.min().get<int32_t>(), mode_.minLineLength); - mode_.maxShutter = Duration::max(); - helper_->getBlanking(mode_.maxShutter, - mode_.minFrameDuration, mode_.maxFrameDuration); + /* + * Exposure time is calculated based on the limits of the frame + * durations. + */ + mode_.minExposureTime = helper_->exposure(exposureTimeCtrl.min().get<int32_t>(), + mode_.minLineLength); + mode_.maxExposureTime = Duration::max(); + helper_->getBlanking(mode_.maxExposureTime, mode_.minFrameDuration, + mode_.maxFrameDuration); } void IpaBase::setCameraTimeoutValue() @@ -742,6 +764,42 @@ void IpaBase::applyControls(const ControlList &controls) af->setMode(mode->second); } + /* + * Because some AE controls are mode-specific, handle the AE-related + * mode changes first. 
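Since AeEnable handling moves out of the per-control loop, manual exposure is now expressed through the ExposureTimeMode and AnalogueGainMode controls introduced by this patch. An application-side sketch, not taken from the patch, of fixing the exposure time while leaving the gain in auto mode:

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

/* Illustrative request setup: manual exposure time, automatic gain. */
void fixExposureKeepAutoGain(ControlList &ctrls)
{
	ctrls.set(controls::ExposureTimeMode,
		  static_cast<int32_t>(controls::ExposureTimeModeManual));
	ctrls.set(controls::ExposureTime, 10000); /* 10 ms, in microseconds */

	ctrls.set(controls::AnalogueGainMode,
		  static_cast<int32_t>(controls::AnalogueGainModeAuto));
}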
+ */ + const auto analogueGainMode = controls.get(controls::AnalogueGainMode); + const auto exposureTimeMode = controls.get(controls::ExposureTimeMode); + + if (analogueGainMode || exposureTimeMode) { + RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( + controller_.getAlgorithm("agc")); + if (agc) { + if (analogueGainMode) { + if (*analogueGainMode == controls::AnalogueGainModeManual) + agc->disableAutoGain(); + else + agc->enableAutoGain(); + + libcameraMetadata_.set(controls::AnalogueGainMode, + *analogueGainMode); + } + + if (exposureTimeMode) { + if (*exposureTimeMode == controls::ExposureTimeModeManual) + agc->disableAutoExposure(); + else + agc->enableAutoExposure(); + + libcameraMetadata_.set(controls::ExposureTimeMode, + *exposureTimeMode); + } + } else { + LOG(IPARPI, Warning) + << "Could not set AnalogueGainMode or ExposureTimeMode - no AGC algorithm"; + } + } + /* Iterate over controls */ for (auto const &ctrl : controls) { LOG(IPARPI, Debug) << "Request ctrl: " @@ -749,23 +807,8 @@ << " = " << ctrl.second.toString(); switch (ctrl.first) { - case controls::AE_ENABLE: { - RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( - controller_.getAlgorithm("agc")); - if (!agc) { - LOG(IPARPI, Warning) - << "Could not set AE_ENABLE - no AGC algorithm"; - break; - } - - if (ctrl.second.get<bool>() == false) - agc->disableAuto(); - else - agc->enableAuto(); - - libcameraMetadata_.set(controls::AeEnable, ctrl.second.get<bool>()); - break; - } + case controls::EXPOSURE_TIME_MODE: + break; /* We already handled this one above */ case controls::EXPOSURE_TIME: { RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( @@ -776,13 +819,23 @@ break; } + /* + * Ignore manual exposure time when the auto exposure + * algorithm is running. + */ + if (agc->autoExposureEnabled()) + break; + /* The control provides units of microseconds. */ - agc->setFixedShutter(0, ctrl.second.get<int32_t>() * 1.0us); + agc->setFixedExposureTime(0, ctrl.second.get<int32_t>() * 1.0us); libcameraMetadata_.set(controls::ExposureTime, ctrl.second.get<int32_t>()); break; } + case controls::ANALOGUE_GAIN_MODE: + break; /* We already handled this one above */ + case controls::ANALOGUE_GAIN: { RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( controller_.getAlgorithm("agc")); @@ -792,6 +845,13 @@ break; } + /* + * Ignore manual analogue gain value when the auto gain + * algorithm is running. + */ + if (agc->autoGainEnabled()) + break; + agc->setFixedAnalogueGain(0, ctrl.second.get<float>()); libcameraMetadata_.set(controls::AnalogueGain, @@ -848,6 +908,13 @@ break; } + /* + * Ignore AE_EXPOSURE_MODE if the exposure time or the gain + * is in auto mode. + */ + if (agc->autoExposureEnabled() || agc->autoGainEnabled()) + break; + int32_t idx = ctrl.second.get<int32_t>(); if (ExposureModeTable.count(idx)) { agc->setExposureMode(ExposureModeTable.at(idx)); @@ -1006,6 +1073,25 @@ break; } + case controls::COLOUR_TEMPERATURE: { + /* Silently ignore this control for a mono sensor.
*/ + if (monoSensor_) + break; + + auto temperatureK = ctrl.second.get<int32_t>(); + RPiController::AwbAlgorithm *awb = dynamic_cast<RPiController::AwbAlgorithm *>( + controller_.getAlgorithm("awb")); + if (!awb) { + LOG(IPARPI, Warning) + << "Could not set COLOUR_TEMPERATURE - no AWB algorithm"; + break; + } + + awb->setColourTemperature(temperatureK); + /* This metadata will get reported back automatically. */ + break; + } + case controls::BRIGHTNESS: { RPiController::ContrastAlgorithm *contrast = dynamic_cast<RPiController::ContrastAlgorithm *>( controller_.getAlgorithm("contrast")); @@ -1070,6 +1156,7 @@ void IpaBase::applyControls(const ControlList &controls) break; } + case controls::rpi::SCALER_CROPS: case controls::SCALER_CROP: { /* We do nothing with this, but should avoid the warning below. */ break; @@ -1269,7 +1356,7 @@ void IpaBase::fillDeviceStatus(const ControlList &sensorControls, unsigned int i int32_t hblank = sensorControls.get(V4L2_CID_HBLANK).get<int32_t>(); deviceStatus.lineLength = helper_->hblankToLineLength(hblank); - deviceStatus.shutterSpeed = helper_->exposure(exposureLines, deviceStatus.lineLength); + deviceStatus.exposureTime = helper_->exposure(exposureLines, deviceStatus.lineLength); deviceStatus.analogueGain = helper_->gain(gainCode); deviceStatus.frameLength = mode_.height + vblank; @@ -1296,7 +1383,7 @@ void IpaBase::reportMetadata(unsigned int ipaContext) DeviceStatus *deviceStatus = rpiMetadata.getLocked<DeviceStatus>("device.status"); if (deviceStatus) { libcameraMetadata_.set(controls::ExposureTime, - deviceStatus->shutterSpeed.get<std::micro>()); + deviceStatus->exposureTime.get<std::micro>()); libcameraMetadata_.set(controls::AnalogueGain, deviceStatus->analogueGain); libcameraMetadata_.set(controls::FrameDuration, helper_->exposure(deviceStatus->frameLength, deviceStatus->lineLength).get<std::micro>()); @@ -1307,9 +1394,19 @@ void IpaBase::reportMetadata(unsigned int ipaContext) } AgcPrepareStatus *agcPrepareStatus = rpiMetadata.getLocked<AgcPrepareStatus>("agc.prepare_status"); - if (agcPrepareStatus) { - libcameraMetadata_.set(controls::AeLocked, agcPrepareStatus->locked); + if (agcPrepareStatus) libcameraMetadata_.set(controls::DigitalGain, agcPrepareStatus->digitalGain); + + RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( + controller_.getAlgorithm("agc")); + if (agc) { + if (!agc->autoExposureEnabled() && !agc->autoGainEnabled()) + libcameraMetadata_.set(controls::AeState, controls::AeStateIdle); + else if (agcPrepareStatus) + libcameraMetadata_.set(controls::AeState, + agcPrepareStatus->locked + ? controls::AeStateConverged + : controls::AeStateSearching); } LuxStatus *luxStatus = rpiMetadata.getLocked<LuxStatus>("lux.status"); @@ -1447,15 +1544,15 @@ void IpaBase::applyFrameDurations(Duration minFrameDuration, Duration maxFrameDu /* * Calculate the maximum exposure time possible for the AGC to use. - * getBlanking() will update maxShutter with the largest exposure + * getBlanking() will update maxExposureTime with the largest exposure * value possible. 
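The COLOUR_TEMPERATURE case above forwards the requested temperature to the AWB algorithm, which, as the awb.cpp hunk later in this patch shows, clamps it to the calibrated domain and takes the red and blue gains as reciprocals of the ctR/ctB curves. A conceptual sketch of that mapping with invented calibration numbers:

#include <algorithm>
#include <iterator>
#include <map>

/* Linear interpolation between calibration points; an illustrative
 * stand-in for the piecewise-linear curves the real AWB code uses. */
static double evalCurve(const std::map<double, double> &curve, double t)
{
	auto hi = curve.lower_bound(t);
	if (hi == curve.begin())
		return hi->second;
	if (hi == curve.end())
		return std::prev(hi)->second;
	auto lo = std::prev(hi);
	double f = (t - lo->first) / (hi->first - lo->first);
	return lo->second + f * (hi->second - lo->second);
}

void gainsForTemperature(double temperatureK, double &gainR, double &gainB)
{
	/* Hypothetical tuning data: r/g and b/g ratios versus temperature. */
	static const std::map<double, double> ctR{ { 2500, 1.6 }, { 4500, 1.0 }, { 8000, 0.7 } };
	static const std::map<double, double> ctB{ { 2500, 0.6 }, { 4500, 1.0 }, { 8000, 1.5 } };

	/* Clamp to the calibrated domain, then the gains are the reciprocals. */
	temperatureK = std::clamp(temperatureK, 2500.0, 8000.0);
	gainR = 1.0 / evalCurve(ctR, temperatureK);
	gainB = 1.0 / evalCurve(ctB, temperatureK);
}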
*/ - Duration maxShutter = Duration::max(); - helper_->getBlanking(maxShutter, minFrameDuration_, maxFrameDuration_); + Duration maxExposureTime = Duration::max(); + helper_->getBlanking(maxExposureTime, minFrameDuration_, maxFrameDuration_); RPiController::AgcAlgorithm *agc = dynamic_cast<RPiController::AgcAlgorithm *>( controller_.getAlgorithm("agc")); - agc->setMaxShutter(maxShutter); + agc->setMaxExposureTime(maxExposureTime); } void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls) @@ -1472,14 +1569,14 @@ void IpaBase::applyAGC(const struct AgcStatus *agcStatus, ControlList &ctrls) gainCode = std::clamp<int32_t>(gainCode, minGainCode, maxGainCode); /* getBlanking might clip exposure time to the fps limits. */ - Duration exposure = agcStatus->shutterTime; + Duration exposure = agcStatus->exposureTime; auto [vblank, hblank] = helper_->getBlanking(exposure, minFrameDuration_, maxFrameDuration_); int32_t exposureLines = helper_->exposureLines(exposure, helper_->hblankToLineLength(hblank)); LOG(IPARPI, Debug) << "Applying AGC Exposure: " << exposure - << " (Shutter lines: " << exposureLines << ", AGC requested " - << agcStatus->shutterTime << ") Gain: " + << " (Exposure lines: " << exposureLines << ", AGC requested " + << agcStatus->exposureTime << ") Gain: " << agcStatus->analogueGain << " (Gain Code: " << gainCode << ")"; diff --git a/src/ipa/rpi/controller/agc_algorithm.h b/src/ipa/rpi/controller/agc_algorithm.h index 1132de7e..fdaa10e6 100644 --- a/src/ipa/rpi/controller/agc_algorithm.h +++ b/src/ipa/rpi/controller/agc_algorithm.h @@ -23,15 +23,19 @@ public: virtual std::vector<double> const &getWeights() const = 0; virtual void setEv(unsigned int channel, double ev) = 0; virtual void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) = 0; - virtual void setFixedShutter(unsigned int channel, - libcamera::utils::Duration fixedShutter) = 0; - virtual void setMaxShutter(libcamera::utils::Duration maxShutter) = 0; + virtual void setFixedExposureTime(unsigned int channel, + libcamera::utils::Duration fixedExposureTime) = 0; + virtual void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) = 0; virtual void setFixedAnalogueGain(unsigned int channel, double fixedAnalogueGain) = 0; virtual void setMeteringMode(std::string const &meteringModeName) = 0; virtual void setExposureMode(std::string const &exposureModeName) = 0; virtual void setConstraintMode(std::string const &contraintModeName) = 0; - virtual void enableAuto() = 0; - virtual void disableAuto() = 0; + virtual void enableAutoExposure() = 0; + virtual void disableAutoExposure() = 0; + virtual bool autoExposureEnabled() const = 0; + virtual void enableAutoGain() = 0; + virtual void disableAutoGain() = 0; + virtual bool autoGainEnabled() const = 0; virtual void setActiveChannels(const std::vector<unsigned int> &activeChannels) = 0; }; diff --git a/src/ipa/rpi/controller/agc_status.h b/src/ipa/rpi/controller/agc_status.h index c7c87b83..9308b156 100644 --- a/src/ipa/rpi/controller/agc_status.h +++ b/src/ipa/rpi/controller/agc_status.h @@ -28,7 +28,7 @@ struct AgcStatus { libcamera::utils::Duration totalExposureValue; /* value for all exposure and gain for this image */ libcamera::utils::Duration targetExposureValue; /* (unfiltered) target total exposure AGC is aiming for */ - libcamera::utils::Duration shutterTime; + libcamera::utils::Duration exposureTime; double analogueGain; std::string exposureMode; std::string constraintMode; @@ -36,7 +36,7 @@ struct AgcStatus { double ev; 
libcamera::utils::Duration flickerPeriod; int floatingRegionEnable; - libcamera::utils::Duration fixedShutter; + libcamera::utils::Duration fixedExposureTime; double fixedAnalogueGain; unsigned int channel; HdrStatus hdr; diff --git a/src/ipa/rpi/controller/awb_algorithm.h b/src/ipa/rpi/controller/awb_algorithm.h index 1779b050..d941ed4e 100644 --- a/src/ipa/rpi/controller/awb_algorithm.h +++ b/src/ipa/rpi/controller/awb_algorithm.h @@ -19,6 +19,7 @@ public: virtual void initialValues(double &gainR, double &gainB) = 0; virtual void setMode(std::string const &modeName) = 0; virtual void setManualGains(double manualR, double manualB) = 0; + virtual void setColourTemperature(double temperatureK) = 0; virtual void enableAuto() = 0; virtual void disableAuto() = 0; }; diff --git a/src/ipa/rpi/controller/camera_mode.h b/src/ipa/rpi/controller/camera_mode.h index 4fdb5b85..61162b32 100644 --- a/src/ipa/rpi/controller/camera_mode.h +++ b/src/ipa/rpi/controller/camera_mode.h @@ -50,9 +50,9 @@ struct CameraMode { double sensitivity; /* pixel clock rate */ uint64_t pixelRate; - /* Mode specific shutter speed limits */ - libcamera::utils::Duration minShutter; - libcamera::utils::Duration maxShutter; + /* Mode specific exposure time limits */ + libcamera::utils::Duration minExposureTime; + libcamera::utils::Duration maxExposureTime; /* Mode specific analogue gain limits */ double minAnalogueGain; double maxAnalogueGain; diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp index e0131018..651fff63 100644 --- a/src/ipa/rpi/controller/controller.cpp +++ b/src/ipa/rpi/controller/controller.cpp @@ -39,6 +39,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap .pipelineWidth = 13, .statsInline = false, .minPixelProcessingTime = 0s, + .dataBufferStrided = true, } }, { @@ -71,6 +72,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap * frames wider than ~16,000 pixels. */ .minPixelProcessingTime = 1.0us / 380, + .dataBufferStrided = false, } }, }; diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h index eff520bd..fdb46557 100644 --- a/src/ipa/rpi/controller/controller.h +++ b/src/ipa/rpi/controller/controller.h @@ -49,6 +49,7 @@ public: unsigned int pipelineWidth; bool statsInline; libcamera::utils::Duration minPixelProcessingTime; + bool dataBufferStrided; }; Controller(); diff --git a/src/ipa/rpi/controller/device_status.cpp b/src/ipa/rpi/controller/device_status.cpp index 68100137..1695764d 100644 --- a/src/ipa/rpi/controller/device_status.cpp +++ b/src/ipa/rpi/controller/device_status.cpp @@ -10,7 +10,7 @@ using namespace libcamera; /* for the Duration operator<< overload */ std::ostream &operator<<(std::ostream &out, const DeviceStatus &d) { - out << "Exposure: " << d.shutterSpeed + out << "Exposure time: " << d.exposureTime << " Frame length: " << d.frameLength << " Line length: " << d.lineLength << " Gain: " << d.analogueGain; diff --git a/src/ipa/rpi/controller/device_status.h b/src/ipa/rpi/controller/device_status.h index 518f15b5..b1792035 100644 --- a/src/ipa/rpi/controller/device_status.h +++ b/src/ipa/rpi/controller/device_status.h @@ -12,21 +12,21 @@ #include <libcamera/base/utils.h> /* - * Definition of "device metadata" which stores things like shutter time and + * Definition of "device metadata" which stores things like exposure time and * analogue gain that downstream control algorithms will want to know. 
*/ struct DeviceStatus { DeviceStatus() - : shutterSpeed(std::chrono::seconds(0)), frameLength(0), + : exposureTime(std::chrono::seconds(0)), frameLength(0), lineLength(std::chrono::seconds(0)), analogueGain(0.0) { } friend std::ostream &operator<<(std::ostream &out, const DeviceStatus &d); - /* time shutter is open */ - libcamera::utils::Duration shutterSpeed; + /* time the image is exposed */ + libcamera::utils::Duration exposureTime; /* frame length given in number of lines */ uint32_t frameLength; /* line length for the current frame */ diff --git a/src/ipa/rpi/controller/histogram.cpp b/src/ipa/rpi/controller/histogram.cpp index ba5b25dd..13089839 100644 --- a/src/ipa/rpi/controller/histogram.cpp +++ b/src/ipa/rpi/controller/histogram.cpp @@ -4,7 +4,7 @@ * * histogram calculations */ -#include <math.h> +#include <cmath> #include <stdio.h> #include "histogram.h" @@ -49,9 +49,9 @@ double Histogram::interBinMean(double binLo, double binHi) const { assert(binHi >= binLo); double sumBinFreq = 0, cumulFreq = 0; - for (double binNext = floor(binLo) + 1.0; binNext <= ceil(binHi); + for (double binNext = std::floor(binLo) + 1.0; binNext <= std::ceil(binHi); binLo = binNext, binNext += 1.0) { - int bin = floor(binLo); + int bin = std::floor(binLo); double freq = (cumulative_[bin + 1] - cumulative_[bin]) * (std::min(binNext, binHi) - binLo); sumBinFreq += bin * freq; diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h index b4650d25..77d3b074 100644 --- a/src/ipa/rpi/controller/metadata.h +++ b/src/ipa/rpi/controller/metadata.h @@ -12,6 +12,7 @@ #include <map> #include <mutex> #include <string> +#include <utility> #include <libcamera/base/thread_annotations.h> @@ -36,10 +37,10 @@ public: } template<typename T> - void set(std::string const &tag, T const &value) + void set(std::string const &tag, T &&value) { std::scoped_lock lock(mutex_); - data_[tag] = value; + data_[tag] = std::forward<T>(value); } template<typename T> @@ -90,6 +91,12 @@ public: data_.insert(other.data_.begin(), other.data_.end()); } + void erase(std::string const &tag) + { + std::scoped_lock lock(mutex_); + eraseLocked(tag); + } + template<typename T> T *getLocked(std::string const &tag) { @@ -104,10 +111,18 @@ public: } template<typename T> - void setLocked(std::string const &tag, T const &value) + void setLocked(std::string const &tag, T &&value) { /* Use this only if you're holding the lock yourself. */ - data_[tag] = value; + data_[tag] = std::forward<T>(value); + } + + void eraseLocked(std::string const &tag) + { + auto it = data_.find(tag); + if (it == data_.end()) + return; + data_.erase(it); } /* diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp index 5ca76dd9..2157eb94 100644 --- a/src/ipa/rpi/controller/rpi/af.cpp +++ b/src/ipa/rpi/controller/rpi/af.cpp @@ -7,8 +7,8 @@ #include "af.h" +#include <cmath> #include <iomanip> -#include <math.h> #include <stdlib.h> #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/agc.cpp b/src/ipa/rpi/controller/rpi/agc.cpp index fcf7aec9..02bfdb4a 100644 --- a/src/ipa/rpi/controller/rpi/agc.cpp +++ b/src/ipa/rpi/controller/rpi/agc.cpp @@ -74,22 +74,62 @@ int Agc::checkChannel(unsigned int channelIndex) const return 0; } -void Agc::disableAuto() +void Agc::disableAutoExposure() { - LOG(RPiAgc, Debug) << "disableAuto"; + LOG(RPiAgc, Debug) << "disableAutoExposure"; /* All channels are enabled/disabled together. 
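The Metadata changes above turn set() into a forwarding template, so rvalue arguments are moved into the map rather than copied, and add erase() to drop a tag. A short usage sketch; the tag name is invented and get() is assumed to return 0 on success, as in the existing class:

#include <utility>
#include <vector>

#include "metadata.h" /* RPiController::Metadata, as patched above */

void metadataExample(RPiController::Metadata &metadata)
{
	std::vector<double> weights(64, 1.0);

	/* The vector is moved into the map, no copy is made. */
	metadata.set("agc.weights", std::move(weights));

	std::vector<double> fetched;
	if (metadata.get("agc.weights", fetched) == 0) {
		/* ... consume fetched ... */
	}

	/* Remove the tag once it is no longer needed. */
	metadata.erase("agc.weights");
}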
*/ for (auto &data : channelData_) - data.channel.disableAuto(); + data.channel.disableAutoExposure(); } -void Agc::enableAuto() +void Agc::enableAutoExposure() { - LOG(RPiAgc, Debug) << "enableAuto"; + LOG(RPiAgc, Debug) << "enableAutoExposure"; /* All channels are enabled/disabled together. */ for (auto &data : channelData_) - data.channel.enableAuto(); + data.channel.enableAutoExposure(); +} + +bool Agc::autoExposureEnabled() const +{ + LOG(RPiAgc, Debug) << "autoExposureEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoExposureEnabled(); +} + +void Agc::disableAutoGain() +{ + LOG(RPiAgc, Debug) << "disableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.disableAutoGain(); +} + +void Agc::enableAutoGain() +{ + LOG(RPiAgc, Debug) << "enableAutoGain"; + + /* All channels are enabled/disabled together. */ + for (auto &data : channelData_) + data.channel.enableAutoGain(); +} + +bool Agc::autoGainEnabled() const +{ + LOG(RPiAgc, Debug) << "autoGainEnabled"; + + /* + * We always have at least one channel, and since all channels are + * enabled and disabled together we can simply check the first one. + */ + return channelData_[0].channel.autoGainEnabled(); } unsigned int Agc::getConvergenceFrames() const @@ -127,21 +167,21 @@ void Agc::setFlickerPeriod(Duration flickerPeriod) data.channel.setFlickerPeriod(flickerPeriod); } -void Agc::setMaxShutter(Duration maxShutter) +void Agc::setMaxExposureTime(Duration maxExposureTime) { /* Frame durations will be the same across all channels too. */ for (auto &data : channelData_) - data.channel.setMaxShutter(maxShutter); + data.channel.setMaxExposureTime(maxExposureTime); } -void Agc::setFixedShutter(unsigned int channelIndex, Duration fixedShutter) +void Agc::setFixedExposureTime(unsigned int channelIndex, Duration fixedExposureTime) { if (checkChannel(channelIndex)) return; - LOG(RPiAgc, Debug) << "setFixedShutter " << fixedShutter + LOG(RPiAgc, Debug) << "setFixedExposureTime " << fixedExposureTime << " for channel " << channelIndex; - channelData_[channelIndex].channel.setFixedShutter(fixedShutter); + channelData_[channelIndex].channel.setFixedExposureTime(fixedExposureTime); } void Agc::setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) diff --git a/src/ipa/rpi/controller/rpi/agc.h b/src/ipa/rpi/controller/rpi/agc.h index 5d056f02..c3a940bf 100644 --- a/src/ipa/rpi/controller/rpi/agc.h +++ b/src/ipa/rpi/controller/rpi/agc.h @@ -32,16 +32,20 @@ public: std::vector<double> const &getWeights() const override; void setEv(unsigned int channel, double ev) override; void setFlickerPeriod(libcamera::utils::Duration flickerPeriod) override; - void setMaxShutter(libcamera::utils::Duration maxShutter) override; - void setFixedShutter(unsigned int channelIndex, - libcamera::utils::Duration fixedShutter) override; + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime) override; + void setFixedExposureTime(unsigned int channelIndex, + libcamera::utils::Duration fixedExposureTime) override; void setFixedAnalogueGain(unsigned int channelIndex, double fixedAnalogueGain) override; void setMeteringMode(std::string const &meteringModeName) override; void setExposureMode(std::string const &exposureModeName) override; void setConstraintMode(std::string const &contraintModeName) override; - void enableAuto() override; - void 
disableAuto() override; + void enableAutoExposure() override; + void disableAutoExposure() override; + bool autoExposureEnabled() const override; + void enableAutoGain() override; + void disableAutoGain() override; + bool autoGainEnabled() const override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; void prepare(Metadata *imageMetadata) override; void process(StatisticsPtr &stats, Metadata *imageMetadata) override; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.cpp b/src/ipa/rpi/controller/rpi/agc_channel.cpp index cf2565a8..a5562760 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.cpp +++ b/src/ipa/rpi/controller/rpi/agc_channel.cpp @@ -12,6 +12,10 @@ #include <libcamera/base/log.h> +#include "libcamera/internal/vector.h" + +#include "libipa/colours.h" + #include "../awb_status.h" #include "../device_status.h" #include "../histogram.h" @@ -65,7 +69,7 @@ int AgcExposureMode::read(const libcamera::YamlObject &params) auto value = params["shutter"].getList<double>(); if (!value) return -EINVAL; - std::transform(value->begin(), value->end(), std::back_inserter(shutter), + std::transform(value->begin(), value->end(), std::back_inserter(exposureTime), [](double v) { return v * 1us; }); value = params["gain"].getList<double>(); @@ -73,13 +77,13 @@ return -EINVAL; gain = std::move(*value); - if (shutter.size() < 2 || gain.size() < 2) { + if (exposureTime.size() < 2 || gain.size() < 2) { LOG(RPiAgc, Error) << "AgcExposureMode: must have at least two entries in exposure profile"; return -EINVAL; } - if (shutter.size() != gain.size()) { + if (exposureTime.size() != gain.size()) { LOG(RPiAgc, Error) << "AgcExposureMode: expect same number of exposure and gain entries in exposure profile"; return -EINVAL; @@ -260,7 +264,7 @@ int AgcConfig::read(const libcamera::YamlObject &params) } AgcChannel::ExposureValues::ExposureValues() - : shutter(0s), analogueGain(0), + : exposureTime(0s), analogueGain(0), totalExposure(0s), totalExposureNoDG(0s) { } @@ -269,7 +273,7 @@ AgcChannel::AgcChannel() : meteringMode_(nullptr), exposureMode_(nullptr), constraintMode_(nullptr), frameCount_(0), lockCount_(0), lastTargetExposure_(0s), ev_(1.0), flickerPeriod_(0s), - maxShutter_(0s), fixedShutter_(0s), fixedAnalogueGain_(0.0) + maxExposureTime_(0s), fixedExposureTime_(0s), fixedAnalogueGain_(0.0) { /* Set AWB default values in case early frames have no updates in metadata. */ awb_.gainR = 1.0; @@ -310,31 +314,49 @@ int AgcChannel::read(const libcamera::YamlObject &params, exposureMode_ = &config_.exposureModes[exposureModeName_]; constraintModeName_ = config_.defaultConstraintMode; constraintMode_ = &config_.constraintModes[constraintModeName_]; - /* Set up the "last shutter/gain" values, in case AGC starts "disabled". */ - status_.shutterTime = config_.defaultExposureTime; + /* Set up the "last exposure time/gain" values, in case AGC starts "disabled".
*/ + status_.exposureTime = config_.defaultExposureTime; status_.analogueGain = config_.defaultAnalogueGain; return 0; } -void AgcChannel::disableAuto() +void AgcChannel::disableAutoExposure() +{ + fixedExposureTime_ = status_.exposureTime; +} + +void AgcChannel::enableAutoExposure() +{ + fixedExposureTime_ = 0s; +} + +bool AgcChannel::autoExposureEnabled() const +{ + return fixedExposureTime_ == 0s; +} + +void AgcChannel::disableAutoGain() { - fixedShutter_ = status_.shutterTime; fixedAnalogueGain_ = status_.analogueGain; } -void AgcChannel::enableAuto() +void AgcChannel::enableAutoGain() { - fixedShutter_ = 0s; fixedAnalogueGain_ = 0; } +bool AgcChannel::autoGainEnabled() const +{ + return fixedAnalogueGain_ == 0; +} + unsigned int AgcChannel::getConvergenceFrames() const { /* - * If shutter and gain have been explicitly set, there is no + * If exposure time and gain have been explicitly set, there is no * convergence to happen, so no need to drop any frames - return zero. */ - if (fixedShutter_ && fixedAnalogueGain_) + if (fixedExposureTime_ && fixedAnalogueGain_) return 0; else return config_.convergenceFrames; @@ -362,16 +384,16 @@ void AgcChannel::setFlickerPeriod(Duration flickerPeriod) flickerPeriod_ = flickerPeriod; } -void AgcChannel::setMaxShutter(Duration maxShutter) +void AgcChannel::setMaxExposureTime(Duration maxExposureTime) { - maxShutter_ = maxShutter; + maxExposureTime_ = maxExposureTime; } -void AgcChannel::setFixedShutter(Duration fixedShutter) +void AgcChannel::setFixedExposureTime(Duration fixedExposureTime) { - fixedShutter_ = fixedShutter; + fixedExposureTime_ = fixedExposureTime; /* Set this in case someone calls disableAuto() straight after. */ - status_.shutterTime = limitShutter(fixedShutter_); + status_.exposureTime = limitExposureTime(fixedExposureTime_); } void AgcChannel::setFixedAnalogueGain(double fixedAnalogueGain) @@ -411,22 +433,22 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, double lastSensitivity = mode_.sensitivity; mode_ = cameraMode; - Duration fixedShutter = limitShutter(fixedShutter_); - if (fixedShutter && fixedAnalogueGain_) { + Duration fixedExposureTime = limitExposureTime(fixedExposureTime_); + if (fixedExposureTime && fixedAnalogueGain_) { /* We're going to reset the algorithm here with these fixed values. */ fetchAwbStatus(metadata); double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); /* This is the equivalent of computeTargetExposure and applyDigitalGain. */ - target_.totalExposureNoDG = fixedShutter_ * fixedAnalogueGain_; + target_.totalExposureNoDG = fixedExposureTime_ * fixedAnalogueGain_; target_.totalExposure = target_.totalExposureNoDG / minColourGain; /* Equivalent of filterExposure. This resets any "history". */ filtered_ = target_; /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter; + filtered_.exposureTime = fixedExposureTime; filtered_.analogueGain = fixedAnalogueGain_; } else if (status_.totalExposureValue) { /* @@ -448,14 +470,15 @@ void AgcChannel::switchMode(CameraMode const &cameraMode, divideUpExposure(); } else { /* - * We come through here on startup, when at least one of the shutter - * or gain has not been fixed. We must still write those values out so - * that they will be applied immediately. We supply some arbitrary defaults - * for any that weren't set. + * We come through here on startup, when at least one of the + * exposure time or gain has not been fixed. 
We must still + write those values out so that they will be applied + immediately. We supply some arbitrary defaults for any that + weren't set. */ /* Equivalent of divideUpExposure. */ - filtered_.shutter = fixedShutter ? fixedShutter : config_.defaultExposureTime; + filtered_.exposureTime = fixedExposureTime ? fixedExposureTime : config_.defaultExposureTime; filtered_.analogueGain = fixedAnalogueGain_ ? fixedAnalogueGain_ : config_.defaultAnalogueGain; } @@ -481,7 +504,7 @@ void AgcChannel::prepare(Metadata *imageMetadata) /* Process has run, so we have meaningful values. */ DeviceStatus deviceStatus; if (imageMetadata->get("device.status", deviceStatus) == 0) { - Duration actualExposure = deviceStatus.shutterSpeed * + Duration actualExposure = deviceStatus.exposureTime * deviceStatus.analogueGain; if (actualExposure) { double digitalGain = totalExposureValue / actualExposure; @@ -535,7 +558,7 @@ void AgcChannel::process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, */ bool desaturate = applyDigitalGain(gain, targetY, channelBound); /* - * The last thing is to divide up the exposure value into a shutter time + * The last thing is to divide up the exposure value into an exposure time * and analogue gain, according to the current exposure mode. */ divideUpExposure(); @@ -551,7 +574,7 @@ bool AgcChannel::updateLockStatus(DeviceStatus const &deviceStatus) const double resetMargin = 1.5; /* Add 200us to the exposure time error to allow for line quantisation. */ - Duration exposureError = lastDeviceStatus_.shutterSpeed * errorFactor + 200us; + Duration exposureError = lastDeviceStatus_.exposureTime * errorFactor + 200us; double gainError = lastDeviceStatus_.analogueGain * errorFactor; Duration targetError = lastTargetExposure_ * errorFactor; @@ -560,15 +583,15 @@ * the values we keep requesting may be unachievable. For this reason * we only insist that we're close to values in the past few frames. */ - if (deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed - exposureError && - deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed + exposureError && + if (deviceStatus.exposureTime > lastDeviceStatus_.exposureTime - exposureError && + deviceStatus.exposureTime < lastDeviceStatus_.exposureTime + exposureError && deviceStatus.analogueGain > lastDeviceStatus_.analogueGain - gainError && deviceStatus.analogueGain < lastDeviceStatus_.analogueGain + gainError && status_.targetExposureValue > lastTargetExposure_ - targetError && status_.targetExposureValue < lastTargetExposure_ + targetError) lockCount_ = std::min(lockCount_ + 1, maxLockCount); - else if (deviceStatus.shutterSpeed < lastDeviceStatus_.shutterSpeed - resetMargin * exposureError || - deviceStatus.shutterSpeed > lastDeviceStatus_.shutterSpeed + resetMargin * exposureError || + else if (deviceStatus.exposureTime < lastDeviceStatus_.exposureTime - resetMargin * exposureError || + deviceStatus.exposureTime > lastDeviceStatus_.exposureTime + resetMargin * exposureError || deviceStatus.analogueGain < lastDeviceStatus_.analogueGain - resetMargin * gainError || deviceStatus.analogueGain > lastDeviceStatus_.analogueGain + resetMargin * gainError || status_.targetExposureValue < lastTargetExposure_ - resetMargin * targetError || @@ -586,11 +609,11 @@ void AgcChannel::housekeepConfig() { /* First fetch all the up-to-date settings, so no one else has to do it.
*/ status_.ev = ev_; - status_.fixedShutter = limitShutter(fixedShutter_); + status_.fixedExposureTime = limitExposureTime(fixedExposureTime_); status_.fixedAnalogueGain = fixedAnalogueGain_; status_.flickerPeriod = flickerPeriod_; - LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedShutter " - << status_.fixedShutter << " fixedAnalogueGain " + LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixedExposureTime " + << status_.fixedExposureTime << " fixedAnalogueGain " << status_.fixedAnalogueGain; /* * Make sure the "mode" pointers point to the up-to-date things, if @@ -634,10 +657,10 @@ void AgcChannel::housekeepConfig() void AgcChannel::fetchCurrentExposure(DeviceStatus const &deviceStatus) { - current_.shutter = deviceStatus.shutterSpeed; + current_.exposureTime = deviceStatus.exposureTime; current_.analogueGain = deviceStatus.analogueGain; current_.totalExposure = 0s; /* this value is unused */ - current_.totalExposureNoDG = current_.shutter * current_.analogueGain; + current_.totalExposureNoDG = current_.exposureTime * current_.analogueGain; } void AgcChannel::fetchAwbStatus(Metadata *imageMetadata) @@ -678,12 +701,13 @@ static double computeInitialY(StatisticsPtr &stats, AwbStatus const &awb, * Note that the weights are applied by the IPA to the statistics directly, * before they are given to us here. */ - double rSum = 0, gSum = 0, bSum = 0, pixelSum = 0; + RGB<double> sum{ 0.0 }; + double pixelSum = 0; for (unsigned int i = 0; i < stats->agcRegions.numRegions(); i++) { auto &region = stats->agcRegions.get(i); - rSum += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); - gSum += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); - bSum += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); + sum.r() += std::min<double>(region.val.rSum * gain, (maxVal - 1) * region.counted); + sum.g() += std::min<double>(region.val.gSum * gain, (maxVal - 1) * region.counted); + sum.b() += std::min<double>(region.val.bSum * gain, (maxVal - 1) * region.counted); pixelSum += region.counted; } if (pixelSum == 0.0) { @@ -691,14 +715,11 @@ return 0; } - double ySum; /* Factor in the AWB correction if needed. */ - if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) { - ySum = rSum * awb.gainR * .299 + - gSum * awb.gainG * .587 + - bSum * awb.gainB * .114; - } else - ySum = rSum * .299 + gSum * .587 + bSum * .114; + if (stats->agcStatsPos == Statistics::AgcStatsPos::PreWb) + sum *= RGB<double>{ { awb.gainR, awb.gainG, awb.gainB } }; + + double ySum = ipa::rec601LuminanceFromRGB(sum); return ySum / pixelSum / (1 << 16); } @@ -775,17 +796,17 @@ void AgcChannel::computeGain(StatisticsPtr &statistics, Metadata *imageMetadata, void AgcChannel::computeTargetExposure(double gain) { - if (status_.fixedShutter && status_.fixedAnalogueGain) { + if (status_.fixedExposureTime && status_.fixedAnalogueGain) { /* - * When ag and shutter are both fixed, we need to drive the - * total exposure so that we end up with a digital gain of at least - * 1/minColourGain. Otherwise we'd desaturate channels causing - * white to go cyan or magenta. + * When analogue gain and exposure time are both fixed, we need + * to drive the total exposure so that we end up with a digital + * gain of at least 1/minColourGain. Otherwise we'd desaturate + * channels causing white to go cyan or magenta.
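computeInitialY() now accumulates the region sums in an RGB<double> and obtains Y from ipa::rec601LuminanceFromRGB(), which encodes the same Rec.601 weighting the removed lines spelled out. A standalone sanity check of that weighting (illustrative, not the library function):

#include <iostream>

struct Rgb {
	double r, g, b;
};

/* Rec.601 luma weighting, matching the removed .299/.587/.114 factors. */
static double rec601Luminance(const Rgb &rgb)
{
	return 0.299 * rgb.r + 0.587 * rgb.g + 0.114 * rgb.b;
}

int main()
{
	Rgb grey{ 100.0, 100.0, 100.0 };
	Rgb red{ 100.0, 0.0, 0.0 };

	/* Equal channels give Y == 100; a pure red patch only 29.9. */
	std::cout << rec601Luminance(grey) << " " << rec601Luminance(red) << "\n";
	return 0;
}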
*/ double minColourGain = std::min({ awb_.gainR, awb_.gainG, awb_.gainB, 1.0 }); ASSERT(minColourGain != 0.0); target_.totalExposure = - status_.fixedShutter * status_.fixedAnalogueGain / minColourGain; + status_.fixedExposureTime * status_.fixedAnalogueGain / minColourGain; } else { /* * The statistics reflect the image without digital gain, so the final @@ -793,12 +814,12 @@ void AgcChannel::computeTargetExposure(double gain) */ target_.totalExposure = current_.totalExposureNoDG * gain; /* The final target exposure is also limited to what the exposure mode allows. */ - Duration maxShutter = status_.fixedShutter - ? status_.fixedShutter - : exposureMode_->shutter.back(); - maxShutter = limitShutter(maxShutter); + Duration maxExposureTime = status_.fixedExposureTime + ? status_.fixedExposureTime + : exposureMode_->exposureTime.back(); + maxExposureTime = limitExposureTime(maxExposureTime); Duration maxTotalExposure = - maxShutter * + maxExposureTime * (status_.fixedAnalogueGain != 0.0 ? status_.fixedAnalogueGain : exposureMode_->gain.back()); @@ -882,12 +903,16 @@ void AgcChannel::filterExposure() double stableRegion = config_.stableRegion; /* - * AGC adapts instantly if both shutter and gain are directly specified - * or we're in the startup phase. + * AGC adapts instantly if both exposure time and gain are directly + * specified or we're in the startup phase. Also disable the stable + * region, because we want to reflect any user exposure/gain updates, + * however small. */ - if ((status_.fixedShutter && status_.fixedAnalogueGain) || - frameCount_ <= config_.startupFrames) + if ((status_.fixedExposureTime && status_.fixedAnalogueGain) || + frameCount_ <= config_.startupFrames) { speed = 1.0; + stableRegion = 0.0; + } if (!filtered_.totalExposure) { filtered_.totalExposure = target_.totalExposure; } else if (filtered_.totalExposure * (1.0 - stableRegion) < target_.totalExposure && @@ -911,34 +936,34 @@ void AgcChannel::divideUpExposure() { /* - * Sending the fixed shutter/gain cases through the same code may seem - * unnecessary, but it will make more sense when extend this to cover - * variable aperture. + * Sending the fixed exposure time/gain cases through the same code may + * seem unnecessary, but it will make more sense when we extend this to + * cover variable aperture. */ Duration exposureValue = filtered_.totalExposureNoDG; - Duration shutterTime; + Duration exposureTime; double analogueGain; - shutterTime = status_.fixedShutter ? status_.fixedShutter - : exposureMode_->shutter[0]; - shutterTime = limitShutter(shutterTime); + exposureTime = status_.fixedExposureTime ? status_.fixedExposureTime + : exposureMode_->exposureTime[0]; + exposureTime = limitExposureTime(exposureTime); analogueGain = status_.fixedAnalogueGain != 0.0 ?
status_.fixedAnalogueGain : exposureMode_->gain[0]; analogueGain = limitGain(analogueGain); - if (shutterTime * analogueGain < exposureValue) { + if (exposureTime * analogueGain < exposureValue) { for (unsigned int stage = 1; stage < exposureMode_->gain.size(); stage++) { - if (!status_.fixedShutter) { - Duration stageShutter = - limitShutter(exposureMode_->shutter[stage]); - if (stageShutter * analogueGain >= exposureValue) { - shutterTime = exposureValue / analogueGain; + if (!status_.fixedExposureTime) { + Duration stageExposureTime = + limitExposureTime(exposureMode_->exposureTime[stage]); + if (stageExposureTime * analogueGain >= exposureValue) { + exposureTime = exposureValue / analogueGain; break; } - shutterTime = stageShutter; + exposureTime = stageExposureTime; } if (status_.fixedAnalogueGain == 0.0) { - if (exposureMode_->gain[stage] * shutterTime >= exposureValue) { - analogueGain = exposureValue / shutterTime; + if (exposureMode_->gain[stage] * exposureTime >= exposureValue) { + analogueGain = exposureValue / exposureTime; break; } analogueGain = exposureMode_->gain[stage]; @@ -946,18 +971,19 @@ void AgcChannel::divideUpExposure() } } } - LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutterTime << " and " - << analogueGain; + LOG(RPiAgc, Debug) + << "Divided up exposure time and gain are " << exposureTime + << " and " << analogueGain; /* - * Finally adjust shutter time for flicker avoidance (require both - * shutter and gain not to be fixed). + * Finally adjust exposure time for flicker avoidance (require both + * exposure time and gain not to be fixed). */ - if (!status_.fixedShutter && !status_.fixedAnalogueGain && + if (!status_.fixedExposureTime && !status_.fixedAnalogueGain && status_.flickerPeriod) { - int flickerPeriods = shutterTime / status_.flickerPeriod; + int flickerPeriods = exposureTime / status_.flickerPeriod; if (flickerPeriods) { - Duration newShutterTime = flickerPeriods * status_.flickerPeriod; - analogueGain *= shutterTime / newShutterTime; + Duration newExposureTime = flickerPeriods * status_.flickerPeriod; + analogueGain *= exposureTime / newExposureTime; /* * We should still not allow the ag to go over the * largest value in the exposure mode. Note that this @@ -966,12 +992,12 @@ void AgcChannel::divideUpExposure() */ analogueGain = std::min(analogueGain, exposureMode_->gain.back()); analogueGain = limitGain(analogueGain); - shutterTime = newShutterTime; + exposureTime = newExposureTime; } - LOG(RPiAgc, Debug) << "After flicker avoidance, shutter " - << shutterTime << " gain " << analogueGain; + LOG(RPiAgc, Debug) << "After flicker avoidance, exposure time " + << exposureTime << " gain " << analogueGain; } - filtered_.shutter = shutterTime; + filtered_.exposureTime = exposureTime; filtered_.analogueGain = analogueGain; } @@ -979,7 +1005,7 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) { status_.totalExposureValue = filtered_.totalExposure; status_.targetExposureValue = desaturate ? 
0s : target_.totalExposure; - status_.shutterTime = filtered_.shutter; + status_.exposureTime = filtered_.exposureTime; status_.analogueGain = filtered_.analogueGain; /* * Write to metadata as well, in case anyone wants to update the camera @@ -988,32 +1014,32 @@ void AgcChannel::writeAndFinish(Metadata *imageMetadata, bool desaturate) imageMetadata->set("agc.status", status_); LOG(RPiAgc, Debug) << "Output written, total exposure requested is " << filtered_.totalExposure; - LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter + LOG(RPiAgc, Debug) << "Camera exposure update: exposure time " << filtered_.exposureTime << " analogue gain " << filtered_.analogueGain; } -Duration AgcChannel::limitShutter(Duration shutter) +Duration AgcChannel::limitExposureTime(Duration exposureTime) { /* - * shutter == 0 is a special case for fixed shutter values, and must pass - * through unchanged + * exposureTime == 0 is a special case for fixed exposure time values, + * and must pass through unchanged. */ - if (!shutter) - return shutter; + if (!exposureTime) + return exposureTime; - shutter = std::clamp(shutter, mode_.minShutter, maxShutter_); - return shutter; + exposureTime = std::clamp(exposureTime, mode_.minExposureTime, maxExposureTime_); + return exposureTime; } double AgcChannel::limitGain(double gain) const { /* - * Only limit the lower bounds of the gain value to what the sensor limits. - * The upper bound on analogue gain will be made up with additional digital - * gain applied by the ISP. + * Only limit the lower bounds of the gain value to what the sensor + * limits. The upper bound on analogue gain will be made up with + * additional digital gain applied by the ISP. * - * gain == 0.0 is a special case for fixed shutter values, and must pass - * through unchanged + * gain == 0.0 is a special case for fixed exposure time values, and + * must pass through unchanged. 
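Worth noting in limitExposureTime() and limitGain() above: zero acts as a sentinel meaning no fixed value was requested and must pass through unchanged, while real requests are clamped to the mode limits. A minimal standalone illustration, assuming std::chrono durations in place of utils::Duration:

#include <algorithm>
#include <chrono>
#include <iostream>

using Duration = std::chrono::duration<double>; /* stand-in for utils::Duration */

static Duration limitExposureTime(Duration exposureTime, Duration min, Duration max)
{
	if (exposureTime == Duration(0)) /* sentinel: auto, pass through */
		return exposureTime;
	return std::clamp(exposureTime, min, max);
}

int main()
{
	using namespace std::chrono_literals;

	std::cout << limitExposureTime(1ms, 100us, 66ms).count() << "\n"; /* kept */
	std::cout << limitExposureTime(1s, 100us, 66ms).count() << "\n"; /* clamped to 66ms */
	std::cout << limitExposureTime(0s, 100us, 66ms).count() << "\n"; /* sentinel kept */
	return 0;
}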
*/ if (!gain) return gain; diff --git a/src/ipa/rpi/controller/rpi/agc_channel.h b/src/ipa/rpi/controller/rpi/agc_channel.h index 58368889..fa697e6f 100644 --- a/src/ipa/rpi/controller/rpi/agc_channel.h +++ b/src/ipa/rpi/controller/rpi/agc_channel.h @@ -30,7 +30,7 @@ struct AgcMeteringMode { }; struct AgcExposureMode { - std::vector<libcamera::utils::Duration> shutter; + std::vector<libcamera::utils::Duration> exposureTime; std::vector<double> gain; int read(const libcamera::YamlObject ¶ms); }; @@ -90,14 +90,18 @@ public: std::vector<double> const &getWeights() const; void setEv(double ev); void setFlickerPeriod(libcamera::utils::Duration flickerPeriod); - void setMaxShutter(libcamera::utils::Duration maxShutter); - void setFixedShutter(libcamera::utils::Duration fixedShutter); + void setMaxExposureTime(libcamera::utils::Duration maxExposureTime); + void setFixedExposureTime(libcamera::utils::Duration fixedExposureTime); void setFixedAnalogueGain(double fixedAnalogueGain); void setMeteringMode(std::string const &meteringModeName); void setExposureMode(std::string const &exposureModeName); void setConstraintMode(std::string const &contraintModeName); - void enableAuto(); - void disableAuto(); + void enableAutoExposure(); + void disableAutoExposure(); + bool autoExposureEnabled() const; + void enableAutoGain(); + void disableAutoGain(); + bool autoGainEnabled() const; void switchMode(CameraMode const &cameraMode, Metadata *metadata); void prepare(Metadata *imageMetadata); void process(StatisticsPtr &stats, DeviceStatus const &deviceStatus, Metadata *imageMetadata, @@ -117,7 +121,7 @@ private: bool applyDigitalGain(double gain, double targetY, bool channelBound); void divideUpExposure(); void writeAndFinish(Metadata *imageMetadata, bool desaturate); - libcamera::utils::Duration limitShutter(libcamera::utils::Duration shutter); + libcamera::utils::Duration limitExposureTime(libcamera::utils::Duration exposureTime); double limitGain(double gain) const; AgcMeteringMode *meteringMode_; AgcExposureMode *exposureMode_; @@ -128,7 +132,7 @@ private: struct ExposureValues { ExposureValues(); - libcamera::utils::Duration shutter; + libcamera::utils::Duration exposureTime; double analogueGain; libcamera::utils::Duration totalExposure; libcamera::utils::Duration totalExposureNoDG; /* without digital gain */ @@ -146,8 +150,8 @@ private: std::string constraintModeName_; double ev_; libcamera::utils::Duration flickerPeriod_; - libcamera::utils::Duration maxShutter_; - libcamera::utils::Duration fixedShutter_; + libcamera::utils::Duration maxExposureTime_; + libcamera::utils::Duration fixedExposureTime_; double fixedAnalogueGain_; }; diff --git a/src/ipa/rpi/controller/rpi/alsc.cpp b/src/ipa/rpi/controller/rpi/alsc.cpp index 67029fc3..21edb819 100644 --- a/src/ipa/rpi/controller/rpi/alsc.cpp +++ b/src/ipa/rpi/controller/rpi/alsc.cpp @@ -6,9 +6,10 @@ */ #include <algorithm> +#include <cmath> #include <functional> -#include <math.h> #include <numeric> +#include <vector> #include <libcamera/base/log.h> #include <libcamera/base/span.h> @@ -251,12 +252,12 @@ static bool compareModes(CameraMode const &cm0, CameraMode const &cm1) */ if (cm0.transform != cm1.transform) return true; - int leftDiff = abs(cm0.cropX - cm1.cropX); - int topDiff = abs(cm0.cropY - cm1.cropY); - int rightDiff = fabs(cm0.cropX + cm0.scaleX * cm0.width - - cm1.cropX - cm1.scaleX * cm1.width); - int bottomDiff = fabs(cm0.cropY + cm0.scaleY * cm0.height - - cm1.cropY - cm1.scaleY * cm1.height); + int leftDiff = std::abs(cm0.cropX - cm1.cropX); 
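The alsc.cpp hunks that follow also swap runtime-sized stack arrays for std::vector: declarations such as "int xLo[X]" with a non-constant X compile only as a GNU variable-length-array extension and are not standard C++. A minimal before/after sketch (function name invented):

#include <vector>

void resampleRow(unsigned int X)
{
	/*
	 * Not standard C++ (works only as a compiler extension):
	 *     int xLo[X], xHi[X];
	 *     double xf[X];
	 */
	std::vector<int> xLo(X), xHi(X);
	std::vector<double> xf(X);

	/* ... precompute per-column sampling positions into xLo/xHi/xf ... */
}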
+ int topDiff = std::abs(cm0.cropY - cm1.cropY); + int rightDiff = std::abs(cm0.cropX + cm0.scaleX * cm0.width - + cm1.cropX - cm1.scaleX * cm1.width); + int bottomDiff = std::abs(cm0.cropY + cm0.scaleY * cm0.height - + cm1.cropY - cm1.scaleY * cm1.height); /* * These thresholds are a rather arbitrary amount chosen to trigger * when carrying on with the previously calculated tables might be @@ -496,8 +497,9 @@ void resampleCalTable(const Array2D<double> &calTableIn, * Precalculate and cache the x sampling locations and phases to save * recomputing them on every row. */ - int xLo[X], xHi[X]; - double xf[X]; + std::vector<int> xLo(X); + std::vector<int> xHi(X); + std::vector<double> xf(X); double scaleX = cameraMode.sensorWidth / (cameraMode.width * cameraMode.scaleX); double xOff = cameraMode.cropX / (double)cameraMode.sensorWidth; @@ -730,7 +732,7 @@ static double gaussSeidel2Sor(const SparseArray<double> &M, double omega, double maxDiff = 0; for (i = 0; i < XY; i++) { lambda[i] = oldLambda[i] + (lambda[i] - oldLambda[i]) * omega; - if (fabs(lambda[i] - oldLambda[i]) > fabs(maxDiff)) + if (std::abs(lambda[i] - oldLambda[i]) > std::abs(maxDiff)) maxDiff = lambda[i] - oldLambda[i]; } return maxDiff; @@ -762,7 +764,7 @@ static void runMatrixIterations(const Array2D<double> &C, constructM(C, W, M); double lastMaxDiff = std::numeric_limits<double>::max(); for (unsigned int i = 0; i < nIter; i++) { - double maxDiff = fabs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); + double maxDiff = std::abs(gaussSeidel2Sor(M, omega, lambda, lambdaBound)); if (maxDiff < threshold) { LOG(RPiAlsc, Debug) << "Stop after " << i + 1 << " iterations"; diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp index 003c8fa1..8479ae40 100644 --- a/src/ipa/rpi/controller/rpi/awb.cpp +++ b/src/ipa/rpi/controller/rpi/awb.cpp @@ -6,6 +6,7 @@ */ #include <assert.h> +#include <cmath> #include <functional> #include <libcamera/base/log.h> @@ -20,6 +21,8 @@ using namespace libcamera; LOG_DEFINE_CATEGORY(RPiAwb) +constexpr double kDefaultCT = 4500.0; + #define NAME "rpi.awb" /* @@ -122,7 +125,7 @@ int AwbConfig::read(const libcamera::YamlObject &params) } if (priors.empty()) { LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured"; - return ret; + return -EINVAL; } } if (params.contains("modes")) { @@ -167,6 +170,14 @@ int AwbConfig::read(const libcamera::YamlObject &params) whitepointB = params["whitepoint_b"].get<double>(0.0); if (bayes == false) sensitivityR = sensitivityB = 1.0; /* nor do sensitivities make any sense */ + /* + * The biasProportion parameter adds a small proportion of the counted + * pixels to a region biased to the biasCT colour temperature. + * + * A typical value for biasProportion would be between 0.05 and 0.1.
+ */ + biasProportion = params["bias_proportion"].get<double>(0.0); + biasCT = params["bias_ct"].get<double>(kDefaultCT); return 0; } @@ -214,7 +225,7 @@ void Awb::initialise() syncResults_.gainB = 1.0 / config_.ctB.eval(syncResults_.temperatureK); } else { /* random values just to stop the world blowing up */ - syncResults_.temperatureK = 4500; + syncResults_.temperatureK = kDefaultCT; syncResults_.gainR = syncResults_.gainG = syncResults_.gainB = 1.0; } prevSyncResults_ = syncResults_; @@ -282,6 +293,24 @@ void Awb::setManualGains(double manualR, double manualB) } } +void Awb::setColourTemperature(double temperatureK) +{ + if (!config_.bayes) { + LOG(RPiAwb, Warning) << "AWB uncalibrated - cannot set colour temperature"; + return; + } + + temperatureK = config_.ctR.domain().clamp(temperatureK); + manualR_ = 1 / config_.ctR.eval(temperatureK); + manualB_ = 1 / config_.ctB.eval(temperatureK); + + syncResults_.temperatureK = temperatureK; + syncResults_.gainR = manualR_; + syncResults_.gainG = 1.0; + syncResults_.gainB = manualB_; + prevSyncResults_ = syncResults_; +} + void Awb::switchMode([[maybe_unused]] CameraMode const &cameraMode, Metadata *metadata) { @@ -407,7 +436,8 @@ void Awb::asyncFunc() static void generateStats(std::vector<Awb::RGB> &zones, StatisticsPtr &stats, double minPixels, - double minG, Metadata &globalMetadata) + double minG, Metadata &globalMetadata, + double biasProportion, double biasCtR, double biasCtB) { std::scoped_lock<RPiController::Metadata> l(globalMetadata); @@ -420,6 +450,14 @@ static void generateStats(std::vector<Awb::RGB> &zones, continue; zone.R = region.val.rSum / region.counted; zone.B = region.val.bSum / region.counted; + /* + * Add some bias samples to allow the search to tend to a + * bias CT in failure cases. + */ + const unsigned int proportion = biasProportion * region.counted; + zone.R += proportion * biasCtR; + zone.B += proportion * biasCtB; + zone.G += proportion * 1.0; /* Factor in the ALSC applied colour shading correction if required. */ const AlscStatus *alscStatus = globalMetadata.getLocked<AlscStatus>("alsc.status"); if (stats->colourStatsPos == Statistics::ColourStatsPos::PreLsc && alscStatus) { @@ -439,8 +477,11 @@ void Awb::prepareStats() * LSC has already been applied to the stats in this pipeline, so stop * any LSC compensation. We also ignore config_.fast in this version. */ + const double biasCtR = config_.bayes ? config_.ctR.eval(config_.biasCT) : 0; + const double biasCtB = config_.bayes ? config_.ctB.eval(config_.biasCT) : 0; generateStats(zones_, statistics_, config_.minPixels, - config_.minG, getGlobalMetadata()); + config_.minG, getGlobalMetadata(), + config_.biasProportion, biasCtR, biasCtB); /* * apply sensitivities, so values appear to come from our "canonical" * sensor. */ @@ -505,7 +546,7 @@ static double interpolateQuadatric(ipa::Pwl::Point const &a, ipa::Pwl::Point con const double eps = 1e-3; ipa::Pwl::Point ca = c - a, ba = b - a; double denominator = 2 * (ba.y() * ca.x() - ca.y() * ba.x()); - if (abs(denominator) > eps) { + if (std::abs(denominator) > eps) { double numerator = ba.y() * ca.x() * ca.x() - ca.y() * ba.x() * ba.x(); double result = numerator / denominator + a.x(); return std::max(a.x(), std::min(c.x(), result)); @@ -716,7 +757,11 @@ void Awb::awbGrey() sumR += *ri, sumB += *bi; double gainR = sumR.G / (sumR.R + 1), gainB = sumB.G / (sumB.B + 1); - asyncResults_.temperatureK = 4500; /* don't know what it is */ + /* + * The grey world model can't estimate the colour temperature, so use a + * default value.
+ */ + asyncResults_.temperatureK = kDefaultCT; asyncResults_.gainR = gainR; asyncResults_.gainG = 1.0; asyncResults_.gainB = gainB; diff --git a/src/ipa/rpi/controller/rpi/awb.h b/src/ipa/rpi/controller/rpi/awb.h index ab30f4fa..86640f8f 100644 --- a/src/ipa/rpi/controller/rpi/awb.h +++ b/src/ipa/rpi/controller/rpi/awb.h @@ -87,6 +87,10 @@ struct AwbConfig { double whitepointR; double whitepointB; bool bayes; /* use Bayesian algorithm */ + /* proportion of counted samples to add for the search bias */ + double biasProportion; + /* CT target for the search bias */ + double biasCT; }; class Awb : public AwbAlgorithm @@ -101,6 +105,7 @@ public: void initialValues(double &gainR, double &gainB) override; void setMode(std::string const &name) override; void setManualGains(double manualR, double manualB) override; + void setColourTemperature(double temperatureK) override; void enableAuto() override; void disableAuto() override; void switchMode(CameraMode const &cameraMode, Metadata *metadata) override; diff --git a/src/ipa/rpi/controller/rpi/black_level.cpp b/src/ipa/rpi/controller/rpi/black_level.cpp index ea991df9..4c968f14 100644 --- a/src/ipa/rpi/controller/rpi/black_level.cpp +++ b/src/ipa/rpi/controller/rpi/black_level.cpp @@ -5,7 +5,6 @@ * black level control algorithm */ -#include <math.h> #include <stdint.h> #include <libcamera/base/log.h> diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp index e673964c..8607f152 100644 --- a/src/ipa/rpi/controller/rpi/ccm.cpp +++ b/src/ipa/rpi/controller/rpi/ccm.cpp @@ -29,34 +29,7 @@ LOG_DEFINE_CATEGORY(RPiCcm) #define NAME "rpi.ccm" -Matrix::Matrix() -{ - memset(m, 0, sizeof(m)); -} -Matrix::Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8) -{ - m[0][0] = m0, m[0][1] = m1, m[0][2] = m2, m[1][0] = m3, m[1][1] = m4, - m[1][2] = m5, m[2][0] = m6, m[2][1] = m7, m[2][2] = m8; -} -int Matrix::read(const libcamera::YamlObject &params) -{ - double *ptr = (double *)m; - - if (params.size() != 9) { - LOG(RPiCcm, Error) << "Wrong number of values in CCM"; - return -EINVAL; - } - - for (const auto &param : params.asList()) { - auto value = param.get<double>(); - if (!value) - return -EINVAL; - *ptr++ = *value; - } - - return 0; -} +using Matrix3x3 = Matrix<double, 3, 3>; Ccm::Ccm(Controller *controller) : CcmAlgorithm(controller), saturation_(1.0) {} @@ -68,8 +41,6 @@ char const *Ccm::name() const int Ccm::read(const libcamera::YamlObject &params) { - int ret; - if (params.contains("saturation")) { config_.saturation = params["saturation"].get<ipa::Pwl>(ipa::Pwl{}); if (config_.saturation.empty()) @@ -83,9 +54,12 @@ int Ccm::read(const libcamera::YamlObject &params) CtCcm ctCcm; ctCcm.ct = *value; - ret = ctCcm.ccm.read(p["ccm"]); - if (ret) - return ret; + + auto ccm = p["ccm"].get<Matrix3x3>(); + if (!ccm) + return -EINVAL; + + ctCcm.ccm = *ccm; if (!config_.ccms.empty() && ctCcm.ct <= config_.ccms.back().ct) { LOG(RPiCcm, Error) @@ -113,8 +87,10 @@ void Ccm::initialise() { } +namespace { + template<typename T> -static bool getLocked(Metadata *metadata, std::string const &tag, T &value) +bool getLocked(Metadata *metadata, std::string const &tag, T &value) { T *ptr = metadata->getLocked<T>(tag); if (ptr == nullptr) @@ -123,7 +99,7 @@ static bool getLocked(Metadata *metadata, std::string const &tag, T &value) return true; } -Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) +Matrix3x3 calculateCcm(std::vector<CtCcm> const &ccms, double ct) { if (ct <= ccms.front().ct)
return ccms.front().ccm; @@ -139,16 +115,25 @@ Matrix calculateCcm(std::vector<CtCcm> const &ccms, double ct) } } -Matrix applySaturation(Matrix const &ccm, double saturation) +Matrix3x3 applySaturation(Matrix3x3 const &ccm, double saturation) { - Matrix RGB2Y(0.299, 0.587, 0.114, -0.169, -0.331, 0.500, 0.500, -0.419, - -0.081); - Matrix Y2RGB(1.000, 0.000, 1.402, 1.000, -0.345, -0.714, 1.000, 1.771, - 0.000); - Matrix S(1, 0, 0, 0, saturation, 0, 0, 0, saturation); + static const Matrix3x3 RGB2Y({ 0.299, 0.587, 0.114, + -0.169, -0.331, 0.500, + 0.500, -0.419, -0.081 }); + + static const Matrix3x3 Y2RGB({ 1.000, 0.000, 1.402, + 1.000, -0.345, -0.714, + 1.000, 1.771, 0.000 }); + + Matrix3x3 S({ 1, 0, 0, + 0, saturation, 0, + 0, 0, saturation }); + return Y2RGB * S * RGB2Y * ccm; } +} /* namespace */ + void Ccm::prepare(Metadata *imageMetadata) { bool awbOk = false, luxOk = false; @@ -166,7 +151,7 @@ void Ccm::prepare(Metadata *imageMetadata) LOG(RPiCcm, Warning) << "no colour temperature found"; if (!luxOk) LOG(RPiCcm, Warning) << "no lux value found"; - Matrix ccm = calculateCcm(config_.ccms, awb.temperatureK); + Matrix3x3 ccm = calculateCcm(config_.ccms, awb.temperatureK); double saturation = saturation_; struct CcmStatus ccmStatus; ccmStatus.saturation = saturation; @@ -177,7 +162,7 @@ void Ccm::prepare(Metadata *imageMetadata) for (int j = 0; j < 3; j++) for (int i = 0; i < 3; i++) ccmStatus.matrix[j * 3 + i] = - std::max(-8.0, std::min(7.9999, ccm.m[j][i])); + std::max(-8.0, std::min(7.9999, ccm[j][i])); LOG(RPiCcm, Debug) << "colour temperature " << awb.temperatureK << "K"; LOG(RPiCcm, Debug) diff --git a/src/ipa/rpi/controller/rpi/ccm.h b/src/ipa/rpi/controller/rpi/ccm.h index 4e5b33fe..c05dbb17 100644 --- a/src/ipa/rpi/controller/rpi/ccm.h +++ b/src/ipa/rpi/controller/rpi/ccm.h @@ -8,6 +8,7 @@ #include <vector> +#include "libcamera/internal/matrix.h" #include <libipa/pwl.h> #include "../ccm_algorithm.h" @@ -16,41 +17,9 @@ namespace RPiController { /* Algorithm to calculate colour matrix. Should be placed after AWB. 
*/ -struct Matrix { - Matrix(double m0, double m1, double m2, double m3, double m4, double m5, - double m6, double m7, double m8); - Matrix(); - double m[3][3]; - int read(const libcamera::YamlObject &params); -}; -static inline Matrix operator*(double d, Matrix const &m) -{ - return Matrix(m.m[0][0] * d, m.m[0][1] * d, m.m[0][2] * d, - m.m[1][0] * d, m.m[1][1] * d, m.m[1][2] * d, - m.m[2][0] * d, m.m[2][1] * d, m.m[2][2] * d); -} -static inline Matrix operator*(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][0] * m2.m[0][j] + - m1.m[i][1] * m2.m[1][j] + - m1.m[i][2] * m2.m[2][j]; - return m; -} -static inline Matrix operator+(Matrix const &m1, Matrix const &m2) -{ - Matrix m; - for (int i = 0; i < 3; i++) - for (int j = 0; j < 3; j++) - m.m[i][j] = m1.m[i][j] + m2.m[i][j]; - return m; -} - struct CtCcm { double ct; - Matrix ccm; + libcamera::Matrix<double, 3, 3> ccm; }; struct CcmConfig { diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp index 9b37943a..fe866a54 100644 --- a/src/ipa/rpi/controller/rpi/contrast.cpp +++ b/src/ipa/rpi/controller/rpi/contrast.cpp @@ -94,6 +94,8 @@ void Contrast::prepare(Metadata *imageMetadata) imageMetadata->set("contrast.status", status_); } +namespace { + ipa::Pwl computeStretchCurve(Histogram const &histogram, ContrastConfig const &config) { @@ -153,6 +155,8 @@ ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness, return newGammaCurve; } +} /* namespace */ + void Contrast::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata) { diff --git a/src/ipa/rpi/controller/rpi/lux.cpp b/src/ipa/rpi/controller/rpi/lux.cpp index 7b31faab..27b89a8f 100644 --- a/src/ipa/rpi/controller/rpi/lux.cpp +++ b/src/ipa/rpi/controller/rpi/lux.cpp @@ -4,7 +4,6 @@ * * Lux control algorithm */ -#include <math.h> #include <libcamera/base/log.h> @@ -41,7 +40,7 @@ int Lux::read(const libcamera::YamlObject &params) auto value = params["reference_shutter_speed"].get<double>(); if (!value) return -EINVAL; - referenceShutterSpeed_ = *value * 1.0us; + referenceExposureTime_ = *value * 1.0us; value = params["reference_gain"].get<double>(); if (!value) @@ -83,11 +82,11 @@ void Lux::process(StatisticsPtr &stats, Metadata *imageMetadata) double currentAperture = deviceStatus.aperture.value_or(currentAperture_); double currentY = stats->yHist.interQuantileMean(0, 1); double gainRatio = referenceGain_ / currentGain; - double shutterSpeedRatio = - referenceShutterSpeed_ / deviceStatus.shutterSpeed; + double exposureTimeRatio = + referenceExposureTime_ / deviceStatus.exposureTime; double apertureRatio = referenceAperture_ / currentAperture; double yRatio = currentY * (65536 / stats->yHist.bins()) / referenceY_; - double estimatedLux = shutterSpeedRatio * gainRatio * + double estimatedLux = exposureTimeRatio * gainRatio * apertureRatio * apertureRatio * yRatio * referenceLux_; LuxStatus status; diff --git a/src/ipa/rpi/controller/rpi/lux.h b/src/ipa/rpi/controller/rpi/lux.h index 89f441fc..da007fe9 100644 --- a/src/ipa/rpi/controller/rpi/lux.h +++ b/src/ipa/rpi/controller/rpi/lux.h @@ -32,7 +32,7 @@ private: * These values define the conditions of the reference image, against * which we compare the new image.
*/ - libcamera::utils::Duration referenceShutterSpeed_; + libcamera::utils::Duration referenceExposureTime_; double referenceGain_; double referenceAperture_; /* units of 1/f */ double referenceY_; /* out of 65536 */ diff --git a/src/ipa/rpi/controller/rpi/noise.cpp b/src/ipa/rpi/controller/rpi/noise.cpp index 3f1c62cf..145175fb 100644 --- a/src/ipa/rpi/controller/rpi/noise.cpp +++ b/src/ipa/rpi/controller/rpi/noise.cpp @@ -5,7 +5,7 @@ * Noise control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -69,7 +69,7 @@ void Noise::prepare(Metadata *imageMetadata) * make some adjustments based on the camera mode (such as * binning), if we knew how to discover it... */ - double factor = sqrt(deviceStatus.analogueGain) / modeFactor_; + double factor = std::sqrt(deviceStatus.analogueGain) / modeFactor_; struct NoiseStatus status; status.noiseConstant = referenceConstant_ * factor; status.noiseSlope = referenceSlope_ * factor; diff --git a/src/ipa/rpi/controller/rpi/sharpen.cpp b/src/ipa/rpi/controller/rpi/sharpen.cpp index 39537f4a..1d143ff5 100644 --- a/src/ipa/rpi/controller/rpi/sharpen.cpp +++ b/src/ipa/rpi/controller/rpi/sharpen.cpp @@ -5,7 +5,7 @@ * sharpening control algorithm */ -#include <math.h> +#include <cmath> #include <libcamera/base/log.h> @@ -68,7 +68,7 @@ void Sharpen::prepare(Metadata *imageMetadata) * we adjust the limit and threshold less aggressively. Using a sqrt * function is an arbitrary but gentle way of accomplishing this. */ - double userStrengthSqrt = sqrt(userStrength_); + double userStrengthSqrt = std::sqrt(userStrength_); struct SharpenStatus status; /* * Binned modes seem to need the sharpening toned down with this diff --git a/src/ipa/rpi/vc4/data/imx283.json b/src/ipa/rpi/vc4/data/imx283.json new file mode 100644 index 00000000..bfacecc8 --- /dev/null +++ b/src/ipa/rpi/vc4/data/imx283.json @@ -0,0 +1,313 @@ +{ + "version": 2.0, + "target": "bcm2835", + "algorithms": [ + { + "rpi.black_level": + { + "black_level": 3200 + } + }, + { + "rpi.dpc": { } + }, + { + "rpi.lux": + { + "reference_shutter_speed": 2461, + "reference_gain": 1.0, + "reference_aperture": 1.0, + "reference_lux": 1148, + "reference_Y": 13314 + } + }, + { + "rpi.noise": + { + "reference_constant": 0, + "reference_slope": 2.204 + } + }, + { + "rpi.geq": + { + "offset": 199, + "slope": 0.01947 + } + }, + { + "rpi.sdn": { } + }, + { + "rpi.awb": + { + "priors": [ + { + "lux": 0, + "prior": + [ + 2000, 1.0, + 3000, 0.0, + 13000, 0.0 + ] + }, + { + "lux": 800, + "prior": + [ + 2000, 0.0, + 6000, 2.0, + 13000, 2.0 + ] + }, + { + "lux": 1500, + "prior": + [ + 2000, 0.0, + 4000, 1.0, + 6000, 6.0, + 6500, 7.0, + 7000, 1.0, + 13000, 1.0 + ] + } + ], + "modes": + { + "auto": + { + "lo": 2500, + "hi": 8000 + }, + "incandescent": + { + "lo": 2500, + "hi": 3000 + }, + "tungsten": + { + "lo": 3000, + "hi": 3500 + }, + "fluorescent": + { + "lo": 4000, + "hi": 4700 + }, + "indoor": + { + "lo": 3000, + "hi": 5000 + }, + "daylight": + { + "lo": 5500, + "hi": 6500 + } + }, + "bayes": 1, + "ct_curve": + [ + 2213.0, 0.9607, 0.2593, + 5313.0, 0.4822, 0.5909, + 6237.0, 0.4739, 0.6308 + ], + "sensitivity_r": 1.0, + "sensitivity_b": 1.0, + "transverse_pos": 0.0144, + "transverse_neg": 0.01 + } + }, + { + "rpi.agc": + { + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 10000, 30000, 60000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ] + }, + "short": + { + "shutter": [ 100, 5000, 10000, 20000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + } + ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + } + }, + { + "rpi.alsc": + { + "omega": 1.3, + "n_iter": 100, + "luminance_strength": 0.7 + } + }, + { + "rpi.contrast": + { + "ce_enable": 1, + "gamma_curve": + [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + } + }, + { + "rpi.ccm": + { + "ccms": [ + { + "ct": 2213, + "ccm": + [ + 1.91264, -0.27609, -0.63655, + -0.65708, 2.11718, -0.46009, + 0.03629, -1.38441, 2.34811 + ] + }, + { + "ct": 2255, + "ccm": + [ + 1.90369, -0.29309, -0.61059, + -0.64693, 2.08169, -0.43476, + 0.04086, -1.29999, 2.25914 + ] + }, + { + "ct": 2259, + "ccm": + [ + 1.92762, -0.35134, -0.57628, + -0.63523, 2.08481, -0.44958, + 0.06754, -1.32953, 2.26199 + ] + }, + { + "ct": 5313, + "ccm": + [ + 1.75924, -0.54053, -0.21871, + -0.38159, 1.88671, -0.50511, + -0.00747, -0.53492, 1.54239 + ] + }, + { + "ct": 6237, + "ccm": + [ + 2.19299, -0.74764, -0.44536, + -0.51678, 2.27651, -0.75972, + -0.06498, -0.74269, 1.80767 + ] + } + ] + } + }, + { + "rpi.sharpen": { } + } + ] +} diff --git a/src/ipa/rpi/vc4/data/imx327.json b/src/ipa/rpi/vc4/data/imx327.json new file mode 100644 index 00000000..40a56842 --- /dev/null +++ b/src/ipa/rpi/vc4/data/imx327.json @@ -0,0 +1,215 @@ +{ + "version": 2.0, + "target": "bcm2835", + "description": "This is an interim tuning only. 
Please consider doing a more formal tuning for your application.", + "algorithms": [ + { + "rpi.black_level": + { + "black_level": 3840 + } + }, + { + "rpi.dpc": { } + }, + { + "rpi.lux": + { + "reference_shutter_speed": 6813, + "reference_gain": 1.0, + "reference_aperture": 1.0, + "reference_lux": 890, + "reference_Y": 12900 + } + }, + { + "rpi.noise": + { + "reference_constant": 0, + "reference_slope": 2.67 + } + }, + { + "rpi.geq": + { + "offset": 187, + "slope": 0.00842 + } + }, + { + "rpi.sdn": { } + }, + { + "rpi.awb": + { + "bayes": 0 + } + }, + { + "rpi.agc": + { + "speed": 0.2, + "metering_modes": + { + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + }, + "centre-weighted": + { + "weights": + [ + 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 10, 30000, 60000 ], + "gain": [ 1.0, 2.0, 8.0 ] + }, + "short": + { + "shutter": [ 10, 5000, 10000, 20000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ] + }, + "long": + { + "shutter": [ 1000, 30000, 60000, 90000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ] + } + }, + "constraint_modes": + { + "normal": [ ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.16, + 10000, 0.16 + ] + } + }, + { + "rpi.alsc": + { + "omega": 1.3, + "n_iter": 100, + "luminance_strength": 0.7, + "luminance_lut": + [ + 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221, + 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067, + 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943, + 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861, + 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801, + 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766, + 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766, + 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795, + 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 1.125, 1.184, 1.273, 1.423, 1.622, 1.846, + 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933, + 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052, + 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227 + ], + "sigma": 0.005, + "sigma_Cb": 0.005 + } + }, + { + "rpi.contrast": + { + "ce_enable": 1, + "gamma_curve": + [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 
57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + } + }, + { + "rpi.sharpen": { } + }, + { + "rpi.ccm": + { + "ccms": [ + { + "ct": 3900, + "ccm": + [ + 1.54659, -0.17707, -0.36953, + -0.51471, 1.72733, -0.21262, + 0.06667, -0.92279, 1.85612 + ] + } + ] + } + } + ] +}
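For reference, the "rpi.lux" reference values in these tuning files feed the estimator in src/ipa/rpi/controller/rpi/lux.cpp above: each ratio compares the reference image against the current frame, with the aperture ratio applied twice. A minimal standalone sketch of that calculation, using the imx327 reference values and made-up numbers for the current frame:

#include <iostream>

int main()
{
	/* Reference conditions from the imx327 tuning file above. */
	const double referenceExposureTime = 6813.0; /* us */
	const double referenceGain = 1.0;
	const double referenceAperture = 1.0;
	const double referenceY = 12900.0; /* out of 65536 */
	const double referenceLux = 890.0;

	/* Hypothetical current frame, not taken from the document. */
	const double exposureTime = 20000.0; /* us */
	const double gain = 2.0;
	const double aperture = 1.0;
	const double currentY = 11000.0; /* mean Y, already scaled to 65536 */

	/*
	 * Same shape as Lux::process(): exposure time, gain and aperture
	 * ratios scale the reference lux, with aperture counted twice.
	 */
	const double estimatedLux = (referenceExposureTime / exposureTime) *
				    (referenceGain / gain) *
				    (referenceAperture / aperture) *
				    (referenceAperture / aperture) *
				    (currentY / referenceY) * referenceLux;

	std::cout << "estimated lux: " << estimatedLux << "\n"; /* ~129 */
	return 0;
}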
\ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/imx415.json b/src/ipa/rpi/vc4/data/imx415.json new file mode 100755 index 00000000..6ed16b17 --- /dev/null +++ b/src/ipa/rpi/vc4/data/imx415.json @@ -0,0 +1,413 @@ +{ + "version": 2.0, + "target": "bcm2835", + "algorithms": [ + { + "rpi.black_level": + { + "black_level": 3840 + } + }, + { + "rpi.dpc": { } + }, + { + "rpi.lux": + { + "reference_shutter_speed": 19230, + "reference_gain": 1.0, + "reference_aperture": 1.0, + "reference_lux": 1198, + "reference_Y": 14876 + } + }, + { + "rpi.noise": + { + "reference_constant": 17, + "reference_slope": 3.439 + } + }, + { + "rpi.geq": + { + "offset": 193, + "slope": 0.00902 + } + }, + { + "rpi.sdn": { } + }, + { + "rpi.awb": + { + "priors": [ + { + "lux": 0, + "prior": + [ + 2000, 1.0, + 3000, 0.0, + 13000, 0.0 + ] + }, + { + "lux": 800, + "prior": + [ + 2000, 0.0, + 6000, 2.0, + 13000, 2.0 + ] + }, + { + "lux": 1500, + "prior": + [ + 2000, 0.0, + 4000, 1.0, + 6000, 6.0, + 6500, 7.0, + 7000, 1.0, + 13000, 1.0 + ] + } + ], + "modes": + { + "auto": + { + "lo": 2500, + "hi": 8000 + }, + "incandescent": + { + "lo": 2500, + "hi": 3000 + }, + "tungsten": + { + "lo": 3000, + "hi": 3500 + }, + "fluorescent": + { + "lo": 4000, + "hi": 4700 + }, + "indoor": + { + "lo": 3000, + "hi": 5000 + }, + "daylight": + { + "lo": 5500, + "hi": 6500 + }, + "cloudy": + { + "lo": 7000, + "hi": 8600 + } + }, + "bayes": 1, + "ct_curve": + [ + 2698.0, 0.7681, 0.2026, + 2930.0, 0.7515, 0.2116, + 3643.0, 0.6355, 0.2858, + 4605.0, 0.4992, 0.4041, + 5658.0, 0.4498, 0.4574 + ], + "sensitivity_r": 1.0, + "sensitivity_b": 1.0, + "transverse_pos": 0.0112, + "transverse_neg": 0.01424 + } + }, + { + "rpi.agc": + { + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + }, + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 10000, 30000, 60000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ] + }, + "short": + { + "shutter": [ 100, 5000, 10000, 20000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + } + ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + } + }, + { + "rpi.alsc": + { + "omega": 1.3, + "n_iter": 100, + "luminance_strength": 0.8, + "calibrations_Cr": [ + { + "ct": 3000, + "table": + [ + 1.025, 1.016, 1.013, 1.011, 1.008, 1.005, 1.003, 1.001, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.027, 1.035, + 1.025, 1.017, 1.013, 1.011, 1.008, 1.005, 1.003, 1.003, 1.004, 1.005, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035, + 1.022, 1.017, 1.013, 1.009, 1.007, 1.005, 1.003, 1.003, 1.004, 1.006, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035, + 1.019, 1.015, 1.011, 1.007, 1.005, 1.003, 1.001, 1.001, 1.003, 1.004, 1.007, 1.009, 1.015, 1.022, 1.028, 1.035, + 1.018, 1.014, 1.009, 1.006, 1.004, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.021, 1.028, 1.035, + 1.018, 1.013, 1.011, 1.006, 1.003, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.022, 1.028, 1.036, + 1.018, 1.014, 1.011, 
1.007, 1.004, 1.002, 1.001, 1.001, 1.001, 1.004, 1.007, 1.009, 1.015, 1.023, 1.029, 1.036, + 1.019, 1.014, 1.012, 1.008, 1.005, 1.003, 1.002, 1.001, 1.003, 1.005, 1.008, 1.012, 1.016, 1.024, 1.031, 1.037, + 1.021, 1.016, 1.013, 1.009, 1.008, 1.005, 1.003, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.026, 1.033, 1.039, + 1.025, 1.021, 1.016, 1.013, 1.009, 1.008, 1.006, 1.006, 1.008, 1.011, 1.014, 1.019, 1.024, 1.031, 1.038, 1.046, + 1.029, 1.025, 1.021, 1.018, 1.014, 1.013, 1.011, 1.011, 1.012, 1.015, 1.019, 1.023, 1.028, 1.035, 1.046, 1.051, + 1.032, 1.029, 1.023, 1.021, 1.018, 1.015, 1.014, 1.014, 1.015, 1.018, 1.022, 1.027, 1.033, 1.041, 1.051, 1.054 + ] + }, + { + "ct": 5000, + "table": + [ + 1.025, 1.011, 1.009, 1.005, 1.004, 1.003, 1.001, 1.001, 1.002, 1.006, 1.009, 1.012, 1.016, 1.021, 1.031, 1.041, + 1.025, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.004, 1.007, 1.009, 1.013, 1.021, 1.028, 1.037, 1.041, + 1.023, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.005, 1.007, 1.011, 1.014, 1.021, 1.028, 1.037, 1.048, + 1.022, 1.012, 1.007, 1.005, 1.002, 1.001, 1.001, 1.001, 1.003, 1.005, 1.009, 1.014, 1.019, 1.028, 1.039, 1.048, + 1.022, 1.011, 1.006, 1.003, 1.001, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.014, 1.021, 1.029, 1.039, 1.051, + 1.022, 1.012, 1.007, 1.003, 1.002, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.015, 1.021, 1.031, 1.041, 1.053, + 1.023, 1.013, 1.009, 1.005, 1.003, 1.003, 1.001, 1.002, 1.004, 1.006, 1.011, 1.015, 1.022, 1.031, 1.042, 1.056, + 1.024, 1.015, 1.012, 1.008, 1.005, 1.004, 1.004, 1.004, 1.006, 1.009, 1.013, 1.018, 1.024, 1.034, 1.045, 1.057, + 1.027, 1.017, 1.015, 1.012, 1.009, 1.007, 1.007, 1.008, 1.009, 1.013, 1.018, 1.023, 1.029, 1.038, 1.051, 1.061, + 1.029, 1.023, 1.017, 1.015, 1.014, 1.012, 1.011, 1.011, 1.014, 1.018, 1.024, 1.029, 1.036, 1.044, 1.056, 1.066, + 1.034, 1.028, 1.023, 1.022, 1.019, 1.019, 1.018, 1.018, 1.021, 1.025, 1.031, 1.035, 1.042, 1.053, 1.066, 1.074, + 1.041, 1.034, 1.027, 1.025, 1.025, 1.023, 1.023, 1.023, 1.025, 1.031, 1.035, 1.041, 1.049, 1.059, 1.074, 1.079 + ] + } + ], + "calibrations_Cb": [ + { + "ct": 3000, + "table": + [ + 1.001, 1.001, 1.007, 1.015, 1.027, 1.034, 1.038, 1.041, 1.042, 1.043, 1.043, 1.043, 1.041, 1.039, 1.049, 1.054, + 1.011, 1.011, 1.013, 1.023, 1.032, 1.039, 1.044, 1.047, 1.052, 1.056, 1.059, 1.059, 1.055, 1.051, 1.054, 1.056, + 1.015, 1.015, 1.019, 1.032, 1.039, 1.044, 1.047, 1.052, 1.055, 1.059, 1.061, 1.066, 1.063, 1.058, 1.061, 1.064, + 1.016, 1.017, 1.023, 1.032, 1.041, 1.045, 1.048, 1.053, 1.056, 1.061, 1.066, 1.069, 1.067, 1.064, 1.065, 1.068, + 1.018, 1.019, 1.025, 1.033, 1.042, 1.045, 1.049, 1.054, 1.058, 1.063, 1.071, 1.072, 1.071, 1.068, 1.069, 1.071, + 1.023, 1.024, 1.029, 1.035, 1.043, 1.048, 1.052, 1.057, 1.061, 1.065, 1.074, 1.075, 1.075, 1.072, 1.072, 1.075, + 1.027, 1.028, 1.031, 1.038, 1.045, 1.051, 1.054, 1.059, 1.064, 1.068, 1.075, 1.079, 1.078, 1.075, 1.076, 1.081, + 1.029, 1.031, 1.033, 1.044, 1.048, 1.054, 1.059, 1.064, 1.067, 1.073, 1.079, 1.082, 1.082, 1.079, 1.081, 1.085, + 1.033, 1.033, 1.035, 1.047, 1.053, 1.058, 1.064, 1.067, 1.073, 1.079, 1.084, 1.086, 1.086, 1.084, 1.089, 1.091, + 1.037, 1.037, 1.038, 1.049, 1.057, 1.062, 1.068, 1.073, 1.079, 1.084, 1.089, 1.092, 1.092, 1.092, 1.096, 1.104, + 1.041, 1.041, 1.043, 1.051, 1.061, 1.068, 1.073, 1.079, 1.083, 1.089, 1.092, 1.094, 1.097, 1.099, 1.105, 1.115, + 1.048, 1.044, 1.044, 1.051, 1.063, 1.071, 1.076, 1.082, 1.088, 1.091, 1.094, 1.097, 1.099, 1.104, 1.115, 1.126 + ] + }, + { + "ct": 5000, + "table": + [ + 
1.001, 1.001, 1.005, 1.011, 1.014, 1.018, 1.019, 1.019, 1.019, 1.021, 1.021, 1.021, 1.019, 1.017, 1.014, 1.014, + 1.009, 1.009, 1.011, 1.014, 1.019, 1.024, 1.026, 1.029, 1.031, 1.032, 1.032, 1.031, 1.027, 1.023, 1.022, 1.022, + 1.011, 1.012, 1.015, 1.018, 1.024, 1.026, 1.029, 1.032, 1.035, 1.036, 1.036, 1.034, 1.031, 1.027, 1.025, 1.025, + 1.012, 1.013, 1.015, 1.019, 1.025, 1.029, 1.032, 1.035, 1.036, 1.038, 1.038, 1.036, 1.034, 1.029, 1.026, 1.026, + 1.013, 1.014, 1.016, 1.019, 1.027, 1.031, 1.034, 1.037, 1.039, 1.039, 1.041, 1.039, 1.036, 1.031, 1.028, 1.027, + 1.014, 1.014, 1.017, 1.021, 1.027, 1.033, 1.037, 1.039, 1.041, 1.041, 1.042, 1.042, 1.039, 1.033, 1.029, 1.028, + 1.015, 1.015, 1.018, 1.021, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.042, 1.042, 1.039, 1.034, 1.029, 1.029, + 1.015, 1.016, 1.018, 1.022, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.043, 1.043, 1.041, 1.035, 1.031, 1.031, + 1.015, 1.016, 1.018, 1.022, 1.027, 1.032, 1.037, 1.041, 1.042, 1.042, 1.044, 1.043, 1.041, 1.036, 1.034, 1.033, + 1.016, 1.017, 1.017, 1.022, 1.027, 1.032, 1.036, 1.039, 1.042, 1.042, 1.043, 1.043, 1.041, 1.039, 1.036, 1.034, + 1.017, 1.017, 1.018, 1.022, 1.027, 1.031, 1.035, 1.039, 1.041, 1.042, 1.042, 1.042, 1.042, 1.039, 1.039, 1.039, + 1.018, 1.017, 1.017, 1.021, 1.027, 1.031, 1.033, 1.038, 1.041, 1.041, 1.042, 1.042, 1.041, 1.041, 1.041, 1.041 + ] + } + ], + "luminance_lut": + [ + 2.102, 1.903, 1.658, 1.483, 1.358, 1.267, 1.202, 1.202, 1.202, 1.242, 1.323, 1.431, 1.585, 1.797, 2.096, 2.351, + 1.996, 1.776, 1.549, 1.385, 1.273, 1.204, 1.138, 1.133, 1.133, 1.185, 1.252, 1.343, 1.484, 1.679, 1.954, 2.228, + 1.923, 1.689, 1.474, 1.318, 1.204, 1.138, 1.079, 1.071, 1.071, 1.133, 1.185, 1.284, 1.415, 1.597, 1.854, 2.146, + 1.881, 1.631, 1.423, 1.272, 1.159, 1.079, 1.051, 1.026, 1.046, 1.071, 1.144, 1.245, 1.369, 1.543, 1.801, 2.095, + 1.867, 1.595, 1.391, 1.242, 1.131, 1.051, 1.013, 1.002, 1.013, 1.046, 1.121, 1.219, 1.343, 1.511, 1.752, 2.079, + 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.001, 1.001, 1.003, 1.045, 1.118, 1.217, 1.342, 1.511, 1.746, 2.079, + 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.011, 1.003, 1.011, 1.046, 1.118, 1.217, 1.343, 1.511, 1.746, 2.079, + 1.884, 1.621, 1.411, 1.261, 1.149, 1.071, 1.048, 1.024, 1.046, 1.069, 1.141, 1.239, 1.369, 1.541, 1.781, 2.093, + 1.913, 1.675, 1.459, 1.304, 1.191, 1.125, 1.071, 1.065, 1.069, 1.124, 1.181, 1.278, 1.413, 1.592, 1.842, 2.133, + 1.981, 1.755, 1.529, 1.368, 1.251, 1.191, 1.125, 1.124, 1.124, 1.181, 1.242, 1.337, 1.479, 1.669, 1.935, 2.207, + 2.078, 1.867, 1.625, 1.453, 1.344, 1.251, 1.202, 1.201, 1.201, 1.242, 1.333, 1.418, 1.571, 1.776, 2.063, 2.321, + 2.217, 2.011, 1.747, 1.562, 1.431, 1.331, 1.278, 1.278, 1.278, 1.313, 1.407, 1.523, 1.686, 1.911, 2.226, 2.484 + ], + "sigma": 0.00135, + "sigma_Cb": 0.00279 + } + }, + { + "rpi.contrast": + { + "ce_enable": 1, + "gamma_curve": + [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + } + }, + { + "rpi.ccm": + { + "ccms": [ + { + "ct": 2698, + "ccm": + [ + 1.57227, -0.32596, -0.24631, + -0.61264, 1.70791, -0.09526, + -0.43254, 
0.48489, 0.94765 + ] + }, + { + "ct": 2930, + "ccm": + [ + 1.69455, -0.52724, -0.16731, + -0.67131, 1.78468, -0.11338, + -0.41609, 0.54693, 0.86916 + ] + }, + { + "ct": 3643, + "ccm": + [ + 1.74041, -0.77553, 0.03512, + -0.44073, 1.34131, 0.09943, + -0.11035, -0.93919, 2.04954 + ] + }, + { + "ct": 4605, + "ccm": + [ + 1.49865, -0.41638, -0.08227, + -0.39445, 1.70114, -0.30669, + 0.01319, -0.88009, 1.86689 + ] + }, + { + "ct": 5658, + "ccm": + [ + 1.38601, -0.23128, -0.15472, + -0.37641, 1.70444, -0.32803, + -0.01575, -0.71466, 1.73041 + ] + } + ] + } + }, + { + "rpi.sharpen": { } + } + ] +} diff --git a/src/ipa/rpi/vc4/data/imx462.json b/src/ipa/rpi/vc4/data/imx462.json new file mode 100644 index 00000000..40a56842 --- /dev/null +++ b/src/ipa/rpi/vc4/data/imx462.json @@ -0,0 +1,215 @@ +{ + "version": 2.0, + "target": "bcm2835", + "description": "This is an interim tuning only. Please consider doing a more formal tuning for your application.", + "algorithms": [ + { + "rpi.black_level": + { + "black_level": 3840 + } + }, + { + "rpi.dpc": { } + }, + { + "rpi.lux": + { + "reference_shutter_speed": 6813, + "reference_gain": 1.0, + "reference_aperture": 1.0, + "reference_lux": 890, + "reference_Y": 12900 + } + }, + { + "rpi.noise": + { + "reference_constant": 0, + "reference_slope": 2.67 + } + }, + { + "rpi.geq": + { + "offset": 187, + "slope": 0.00842 + } + }, + { + "rpi.sdn": { } + }, + { + "rpi.awb": + { + "bayes": 0 + } + }, + { + "rpi.agc": + { + "speed": 0.2, + "metering_modes": + { + "matrix": + { + "weights": + [ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + }, + "centre-weighted": + { + "weights": + [ + 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 + ] + }, + "spot": + { + "weights": + [ + 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 10, 30000, 60000 ], + "gain": [ 1.0, 2.0, 8.0 ] + }, + "short": + { + "shutter": [ 10, 5000, 10000, 20000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ] + }, + "long": + { + "shutter": [ 1000, 30000, 60000, 90000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ] + } + }, + "constraint_modes": + { + "normal": [ ], + "highlight": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.5, + 1000, 0.5 + ] + }, + { + "bound": "UPPER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.8, + 1000, 0.8 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.16, + 10000, 0.16 + ] + } + }, + { + "rpi.alsc": + { + "omega": 1.3, + "n_iter": 100, + "luminance_strength": 0.7, + "luminance_lut": + [ + 2.844, 2.349, 2.018, 1.775, 1.599, 1.466, 1.371, 1.321, 1.306, 1.316, 1.357, 1.439, 1.552, 1.705, 1.915, 2.221, + 2.576, 2.151, 1.851, 1.639, 1.478, 1.358, 1.272, 1.231, 1.218, 1.226, 1.262, 1.335, 1.438, 1.571, 1.766, 2.067, + 2.381, 2.005, 1.739, 1.545, 1.389, 1.278, 1.204, 1.166, 1.153, 1.161, 1.194, 1.263, 1.356, 1.489, 1.671, 1.943, + 2.242, 1.899, 1.658, 1.481, 1.329, 1.225, 1.156, 1.113, 1.096, 1.107, 1.143, 1.201, 1.289, 1.423, 1.607, 1.861, + 2.152, 1.831, 1.602, 1.436, 1.291, 1.193, 1.121, 1.069, 1.047, 1.062, 1.107, 1.166, 1.249, 1.384, 1.562, 1.801, + 2.104, 1.795, 1.572, 1.407, 1.269, 1.174, 1.099, 1.041, 1.008, 1.029, 1.083, 1.146, 1.232, 1.364, 1.547, 1.766, + 2.104, 1.796, 1.572, 1.403, 1.264, 1.171, 1.097, 1.036, 1.001, 1.025, 1.077, 1.142, 1.231, 1.363, 1.549, 1.766, + 2.148, 1.827, 1.594, 1.413, 1.276, 1.184, 1.114, 1.062, 1.033, 1.049, 1.092, 1.153, 1.242, 1.383, 1.577, 1.795, + 2.211, 1.881, 1.636, 1.455, 1.309, 1.214, 1.149, 1.104, 1.081, 1.089, 
1.125, 1.184, 1.273, 1.423, 1.622, 1.846, + 2.319, 1.958, 1.698, 1.516, 1.362, 1.262, 1.203, 1.156, 1.137, 1.142, 1.171, 1.229, 1.331, 1.484, 1.682, 1.933, + 2.459, 2.072, 1.789, 1.594, 1.441, 1.331, 1.261, 1.219, 1.199, 1.205, 1.232, 1.301, 1.414, 1.571, 1.773, 2.052, + 2.645, 2.206, 1.928, 1.728, 1.559, 1.451, 1.352, 1.301, 1.282, 1.289, 1.319, 1.395, 1.519, 1.685, 1.904, 2.227 + ], + "sigma": 0.005, + "sigma_Cb": 0.005 + } + }, + { + "rpi.contrast": + { + "ce_enable": 1, + "gamma_curve": + [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + } + }, + { + "rpi.sharpen": { } + }, + { + "rpi.ccm": + { + "ccms": [ + { + "ct": 3900, + "ccm": + [ + 1.54659, -0.17707, -0.36953, + -0.51471, 1.72733, -0.21262, + 0.06667, -0.92279, 1.85612 + ] + } + ] + } + } + ] +}
\ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/meson.build b/src/ipa/rpi/vc4/data/meson.build index afbf875a..7a8001ee 100644 --- a/src/ipa/rpi/vc4/data/meson.build +++ b/src/ipa/rpi/vc4/data/meson.build @@ -3,10 +3,14 @@ conf_files = files([ 'imx219.json', 'imx219_noir.json', + 'imx283.json', 'imx290.json', 'imx296.json', 'imx296_mono.json', + 'imx327.json', 'imx378.json', + 'imx415.json', + 'imx462.json', 'imx477.json', 'imx477_noir.json', 'imx477_scientific.json', @@ -18,6 +22,7 @@ conf_files = files([ 'ov5647.json', 'ov5647_noir.json', 'ov64a40.json', + 'ov7251_mono.json', 'ov9281_mono.json', 'se327m12.json', 'uncalibrated.json', diff --git a/src/ipa/rpi/vc4/data/ov7251_mono.json b/src/ipa/rpi/vc4/data/ov7251_mono.json new file mode 100644 index 00000000..a9d05a01 --- /dev/null +++ b/src/ipa/rpi/vc4/data/ov7251_mono.json @@ -0,0 +1,136 @@ +{ + "version": 2.0, + "target": "bcm2835", + "algorithms": [ + { + "rpi.black_level": + { + "black_level": 4096 + } + }, + { + "rpi.lux": + { + "reference_shutter_speed": 2000, + "reference_gain": 1.0, + "reference_aperture": 1.0, + "reference_lux": 800, + "reference_Y": 20000 + } + }, + { + "rpi.noise": + { + "reference_constant": 0, + "reference_slope": 2.5 + } + }, + { + "rpi.sdn": { } + }, + { + "rpi.agc": + { + "metering_modes": + { + "centre-weighted": + { + "weights": + [ + 4, 4, 4, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 + ] + } + }, + "exposure_modes": + { + "normal": + { + "shutter": [ 100, 15000, 30000, 60000, 120000 ], + "gain": [ 1.0, 2.0, 3.0, 4.0, 8.0 ] + }, + "short": + { + "shutter": [ 100, 5000, 10000, 20000, 30000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 8.0 ] + }, + "long": + { + "shutter": [ 1000, 30000, 60000, 90000, 120000 ], + "gain": [ 1.0, 2.0, 4.0, 6.0, 12.0 ] + } + }, + "constraint_modes": + { + "normal": [ + { + "bound": "LOWER", + "q_lo": 0.98, + "q_hi": 1.0, + "y_target": + [ + 0, 0.4, + 1000, 0.4 + ] + } + ] + }, + "y_target": + [ + 0, 0.16, + 1000, 0.165, + 10000, 0.17 + ] + } + }, + { + "rpi.alsc": + { + "n_iter": 0, + "luminance_strength": 1.0, + "corner_strength": 1.5 + } + }, + { + "rpi.contrast": + { + "ce_enable": 0, + "gamma_curve": + [ + 0, 0, + 1024, 5040, + 2048, 9338, + 3072, 12356, + 4096, 15312, + 5120, 18051, + 6144, 20790, + 7168, 23193, + 8192, 25744, + 9216, 27942, + 10240, 30035, + 11264, 32005, + 12288, 33975, + 13312, 35815, + 14336, 37600, + 15360, 39168, + 16384, 40642, + 18432, 43379, + 20480, 45749, + 22528, 47753, + 24576, 49621, + 26624, 51253, + 28672, 52698, + 30720, 53796, + 32768, 54876, + 36864, 57012, + 40960, 58656, + 45056, 59954, + 49152, 61183, + 53248, 62355, + 57344, 63419, + 61440, 64476, + 65535, 65535 + ] + } + } + ] +}
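The gamma_curve tables that appear in each of these tuning files list (input, output) breakpoints on a 0-65535 range, with intermediate values obtained by linear interpolation. A small sketch of such an evaluation, as an illustration rather than the actual rpi.contrast code:

#include <iostream>
#include <stdint.h>
#include <utility>
#include <vector>

/* Evaluate a gamma_curve given as sorted (input, output) breakpoints. */
uint16_t evalGammaCurve(const std::vector<std::pair<uint16_t, uint16_t>> &curve,
			uint16_t x)
{
	if (x <= curve.front().first)
		return curve.front().second;

	for (std::size_t i = 1; i < curve.size(); i++) {
		if (x <= curve[i].first) {
			const auto &[x0, y0] = curve[i - 1];
			const auto &[x1, y1] = curve[i];
			/* Linear interpolation between the two breakpoints. */
			return y0 + (uint32_t)(y1 - y0) * (x - x0) / (x1 - x0);
		}
	}

	return curve.back().second;
}

int main()
{
	/* A truncated excerpt of the tables above. */
	const std::vector<std::pair<uint16_t, uint16_t>> curve = {
		{ 0, 0 }, { 1024, 5040 }, { 2048, 9338 }, { 65535, 65535 }
	};

	/* Halfway between (1024, 5040) and (2048, 9338): prints 7189. */
	std::cout << evalGammaCurve(curve, 1536) << "\n";
	return 0;
}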
\ No newline at end of file diff --git a/src/ipa/rpi/vc4/meson.build b/src/ipa/rpi/vc4/meson.build index 63fc5925..c10fa17e 100644 --- a/src/ipa/rpi/vc4/meson.build +++ b/src/ipa/rpi/vc4/meson.build @@ -23,8 +23,7 @@ vc4_ipa_sources = files([ vc4_ipa_includes += include_directories('..') -mod = shared_module(ipa_name, - [vc4_ipa_sources, libcamera_generated_ipa_headers], +mod = shared_module(ipa_name, vc4_ipa_sources, name_prefix : '', include_directories : vc4_ipa_includes, dependencies : [vc4_ipa_deps, libipa_dep], diff --git a/src/ipa/simple/algorithms/agc.cpp b/src/ipa/simple/algorithms/agc.cpp new file mode 100644 index 00000000..72aade14 --- /dev/null +++ b/src/ipa/simple/algorithms/agc.cpp @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Exposure and gain + */ + +#include "agc.h" + +#include <stdint.h> + +#include <libcamera/base/log.h> + +namespace libcamera { + +LOG_DEFINE_CATEGORY(IPASoftExposure) + +namespace ipa::soft::algorithms { + +/* + * The number of bins to use for the optimal exposure calculations. + */ +static constexpr unsigned int kExposureBinsCount = 5; + +/* + * The exposure is optimal when the mean sample value of the histogram is + * in the middle of the range. + */ +static constexpr float kExposureOptimal = kExposureBinsCount / 2.0; + +/* + * This implements the hysteresis for the exposure adjustment. + * It is small enough to have the exposure close to the optimal, and is big + * enough to prevent the exposure from wobbling around the optimal value. + */ +static constexpr float kExposureSatisfactory = 0.2; + +Agc::Agc() +{ +} + +void Agc::updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV) +{ + /* + * kExpDenominator of 10 gives ~10% increment/decrement; + * kExpDenominator of 5 gives ~20% + */ + static constexpr uint8_t kExpDenominator = 10; + static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1; + static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1; + + double next; + int32_t &exposure = frameContext.sensor.exposure; + double &again = frameContext.sensor.gain; + + if (exposureMSV < kExposureOptimal - kExposureSatisfactory) { + next = exposure * kExpNumeratorUp / kExpDenominator; + if (next - exposure < 1) + exposure += 1; + else + exposure = next; + if (exposure >= context.configuration.agc.exposureMax) { + next = again * kExpNumeratorUp / kExpDenominator; + if (next - again < context.configuration.agc.againMinStep) + again += context.configuration.agc.againMinStep; + else + again = next; + } + } + + if (exposureMSV > kExposureOptimal + kExposureSatisfactory) { + if (exposure == context.configuration.agc.exposureMax && + again > context.configuration.agc.againMin) { + next = again * kExpNumeratorDown / kExpDenominator; + if (again - next < context.configuration.agc.againMinStep) + again -= context.configuration.agc.againMinStep; + else + again = next; + } else { + next = exposure * kExpNumeratorDown / kExpDenominator; + if (exposure - next < 1) + exposure -= 1; + else + exposure = next; + } + } + + exposure = std::clamp(exposure, context.configuration.agc.exposureMin, + context.configuration.agc.exposureMax); + again = std::clamp(again, context.configuration.agc.againMin, + context.configuration.agc.againMax); + + LOG(IPASoftExposure, Debug) + << "exposureMSV " << exposureMSV + << " exp " << exposure << " again " << again; +} + +void Agc::process(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]]
IPAFrameContext &frameContext, + const SwIspStats *stats, + [[maybe_unused]] ControlList &metadata) +{ + /* + * Calculate Mean Sample Value (MSV) according to formula from: + * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf + */ + const auto &histogram = stats->yHistogram; + const unsigned int blackLevelHistIdx = + context.activeState.blc.level / (256 / SwIspStats::kYHistogramSize); + const unsigned int histogramSize = + SwIspStats::kYHistogramSize - blackLevelHistIdx; + const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount; + const unsigned int yHistValsPerBinMod = + histogramSize / (histogramSize % kExposureBinsCount + 1); + int exposureBins[kExposureBinsCount] = {}; + unsigned int denom = 0; + unsigned int num = 0; + + for (unsigned int i = 0; i < histogramSize; i++) { + unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin; + exposureBins[idx] += histogram[blackLevelHistIdx + i]; + } + + for (unsigned int i = 0; i < kExposureBinsCount; i++) { + LOG(IPASoftExposure, Debug) << i << ": " << exposureBins[i]; + denom += exposureBins[i]; + num += exposureBins[i] * (i + 1); + } + + float exposureMSV = (denom == 0 ? 0 : static_cast<float>(num) / denom); + updateExposure(context, frameContext, exposureMSV); +} + +REGISTER_IPA_ALGORITHM(Agc, "Agc") + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/agc.h b/src/ipa/simple/algorithms/agc.h new file mode 100644 index 00000000..112d9f5a --- /dev/null +++ b/src/ipa/simple/algorithms/agc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Exposure and gain + */ + +#pragma once + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::soft::algorithms { + +class Agc : public Algorithm +{ +public: + Agc(); + ~Agc() = default; + + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const SwIspStats *stats, + ControlList &metadata) override; + +private: + void updateExposure(IPAContext &context, IPAFrameContext &frameContext, double exposureMSV); +}; + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/algorithm.h b/src/ipa/simple/algorithms/algorithm.h new file mode 100644 index 00000000..41f63170 --- /dev/null +++ b/src/ipa/simple/algorithms/algorithm.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024 Red Hat, Inc. + * + * Software ISP control algorithm interface + */ + +#pragma once + +#include <libipa/algorithm.h> + +#include "module.h" + +namespace libcamera { + +namespace ipa::soft { + +using Algorithm = libcamera::ipa::Algorithm<Module>; + +} /* namespace ipa::soft */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/awb.cpp b/src/ipa/simple/algorithms/awb.cpp new file mode 100644 index 00000000..195de41d --- /dev/null +++ b/src/ipa/simple/algorithms/awb.cpp @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. 
+ * + * Auto white balance + */ + +#include "awb.h" + +#include <numeric> +#include <stdint.h> + +#include <libcamera/base/log.h> + +#include "simple/ipa_context.h" + +namespace libcamera { + +LOG_DEFINE_CATEGORY(IPASoftAwb) + +namespace ipa::soft::algorithms { + +int Awb::configure(IPAContext &context, + [[maybe_unused]] const IPAConfigInfo &configInfo) +{ + auto &gains = context.activeState.gains; + gains.red = gains.green = gains.blue = 1.0; + + return 0; +} + +void Awb::process(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + const SwIspStats *stats, + [[maybe_unused]] ControlList &metadata) +{ + const SwIspStats::Histogram &histogram = stats->yHistogram; + const uint8_t blackLevel = context.activeState.blc.level; + + /* + * Black level must be subtracted to get the correct AWB ratios; they + * would be off if they were computed from the whole brightness range + * rather than from the sensor range. + */ + const uint64_t nPixels = std::accumulate( + histogram.begin(), histogram.end(), 0); + const uint64_t offset = blackLevel * nPixels; + const uint64_t sumR = stats->sumR_ - offset / 4; + const uint64_t sumG = stats->sumG_ - offset / 2; + const uint64_t sumB = stats->sumB_ - offset / 4; + + /* + * Calculate red and blue gains for AWB. + * Clamp max gain at 4.0; this also avoids division by zero. + */ + auto &gains = context.activeState.gains; + gains.red = sumR <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumR; + gains.blue = sumB <= sumG / 4 ? 4.0 : static_cast<double>(sumG) / sumB; + /* Green gain is fixed to 1.0 */ + + LOG(IPASoftAwb, Debug) << "gain R/B " << gains.red << "/" << gains.blue; +} + +REGISTER_IPA_ALGORITHM(Awb, "Awb") + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/awb.h b/src/ipa/simple/algorithms/awb.h new file mode 100644 index 00000000..db1496cd --- /dev/null +++ b/src/ipa/simple/algorithms/awb.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Auto white balance + */ + +#pragma once + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::soft::algorithms { + +class Awb : public Algorithm +{ +public: + Awb() = default; + ~Awb() = default; + + int configure(IPAContext &context, const IPAConfigInfo &configInfo) override; + void process(IPAContext &context, + const uint32_t frame, + IPAFrameContext &frameContext, + const SwIspStats *stats, + ControlList &metadata) override; +}; + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/blc.cpp b/src/ipa/simple/algorithms/blc.cpp new file mode 100644 index 00000000..1d7d370b --- /dev/null +++ b/src/ipa/simple/algorithms/blc.cpp @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Black level handling + */ + +#include "blc.h" + +#include <numeric> + +#include <libcamera/base/log.h> + +namespace libcamera { + +namespace ipa::soft::algorithms { + +LOG_DEFINE_CATEGORY(IPASoftBL) + +BlackLevel::BlackLevel() +{ +} + +int BlackLevel::init([[maybe_unused]] IPAContext &context, + const YamlObject &tuningData) +{ + auto blackLevel = tuningData["blackLevel"].get<int16_t>(); + if (blackLevel.has_value()) { + /* + * Convert 16 bit values from the tuning file to 8 bit black + * level for the SoftISP.
+ */ + definedLevel_ = blackLevel.value() >> 8; + } + return 0; +} + +int BlackLevel::configure(IPAContext &context, + [[maybe_unused]] const IPAConfigInfo &configInfo) +{ + if (definedLevel_.has_value()) + context.configuration.black.level = definedLevel_; + context.activeState.blc.level = + context.configuration.black.level.value_or(255); + return 0; +} + +void BlackLevel::process(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + IPAFrameContext &frameContext, + const SwIspStats *stats, + [[maybe_unused]] ControlList &metadata) +{ + if (context.configuration.black.level.has_value()) + return; + + if (frameContext.sensor.exposure == exposure_ && + frameContext.sensor.gain == gain_) { + return; + } + + const SwIspStats::Histogram &histogram = stats->yHistogram; + + /* + * The constant is selected to be "good enough", not overly + * conservative or aggressive. There is no magic about the given value. + */ + constexpr float ignoredPercentage = 0.02; + const unsigned int total = + std::accumulate(begin(histogram), end(histogram), 0); + const unsigned int pixelThreshold = ignoredPercentage * total; + const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize; + const unsigned int currentBlackIdx = + context.activeState.blc.level / histogramRatio; + + for (unsigned int i = 0, seen = 0; + i < currentBlackIdx && i < SwIspStats::kYHistogramSize; + i++) { + seen += histogram[i]; + if (seen >= pixelThreshold) { + context.activeState.blc.level = i * histogramRatio; + exposure_ = frameContext.sensor.exposure; + gain_ = frameContext.sensor.gain; + LOG(IPASoftBL, Debug) + << "Auto-set black level: " + << i << "/" << SwIspStats::kYHistogramSize + << " (" << 100 * (seen - histogram[i]) / total << "% below, " + << 100 * seen / total << "% at or below)"; + break; + } + }; +} + +REGISTER_IPA_ALGORITHM(BlackLevel, "BlackLevel") + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/blc.h b/src/ipa/simple/algorithms/blc.h new file mode 100644 index 00000000..52d59cab --- /dev/null +++ b/src/ipa/simple/algorithms/blc.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Black level handling + */ + +#pragma once + +#include <optional> +#include <stdint.h> + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::soft::algorithms { + +class BlackLevel : public Algorithm +{ +public: + BlackLevel(); + ~BlackLevel() = default; + + int init(IPAContext &context, const YamlObject &tuningData) override; + int configure(IPAContext &context, const IPAConfigInfo &configInfo) override; + void process(IPAContext &context, const uint32_t frame, + IPAFrameContext &frameContext, + const SwIspStats *stats, + ControlList &metadata) override; + +private: + int32_t exposure_; + double gain_; + std::optional<uint8_t> definedLevel_; +}; + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/lut.cpp b/src/ipa/simple/algorithms/lut.cpp new file mode 100644 index 00000000..0ba2391f --- /dev/null +++ b/src/ipa/simple/algorithms/lut.cpp @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. 
+ * + * Color lookup tables construction + */ + +#include "lut.h" + +#include <algorithm> +#include <cmath> +#include <optional> +#include <stdint.h> + +#include <libcamera/base/log.h> + +#include "simple/ipa_context.h" + +#include "control_ids.h" + +namespace libcamera { + +LOG_DEFINE_CATEGORY(IPASoftLut) + +namespace ipa::soft::algorithms { + +int Lut::init(IPAContext &context, + [[maybe_unused]] const YamlObject &tuningData) +{ + context.ctrlMap[&controls::Contrast] = ControlInfo(0.0f, 2.0f, 1.0f); + return 0; +} + +int Lut::configure(IPAContext &context, + [[maybe_unused]] const IPAConfigInfo &configInfo) +{ + /* Gamma value is fixed */ + context.configuration.gamma = 0.5; + context.activeState.knobs.contrast = std::optional<double>(); + updateGammaTable(context); + + return 0; +} + +void Lut::queueRequest(typename Module::Context &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] typename Module::FrameContext &frameContext, + const ControlList &controls) +{ + const auto &contrast = controls.get(controls::Contrast); + if (contrast.has_value()) { + context.activeState.knobs.contrast = contrast; + LOG(IPASoftLut, Debug) << "Setting contrast to " << contrast.value(); + } +} + +void Lut::updateGammaTable(IPAContext &context) +{ + auto &gammaTable = context.activeState.gamma.gammaTable; + const auto blackLevel = context.activeState.blc.level; + const unsigned int blackIndex = blackLevel * gammaTable.size() / 256; + const auto contrast = context.activeState.knobs.contrast.value_or(1.0); + + std::fill(gammaTable.begin(), gammaTable.begin() + blackIndex, 0); + const float divisor = gammaTable.size() - blackIndex - 1.0; + for (unsigned int i = blackIndex; i < gammaTable.size(); i++) { + double normalized = (i - blackIndex) / divisor; + /* Convert 0..2 to 0..infinity; avoid actual infinity at tan(pi/2) */ + double contrastExp = tan(std::clamp(contrast * M_PI_4, 0.0, M_PI_2 - 0.00001)); + /* Apply simple S-curve */ + if (normalized < 0.5) + normalized = 0.5 * std::pow(normalized / 0.5, contrastExp); + else + normalized = 1.0 - 0.5 * std::pow((1.0 - normalized) / 0.5, contrastExp); + gammaTable[i] = UINT8_MAX * + std::pow(normalized, context.configuration.gamma); + } + + context.activeState.gamma.blackLevel = blackLevel; + context.activeState.gamma.contrast = contrast; +} + +void Lut::prepare(IPAContext &context, + [[maybe_unused]] const uint32_t frame, + [[maybe_unused]] IPAFrameContext &frameContext, + [[maybe_unused]] DebayerParams *params) +{ + /* + * Update the gamma table if needed, i.e. when the black level or the + * contrast changes. Since the black level gets updated only when a + * lower value is observed, the table is not permanently prone to + * minor fluctuations or rounding errors. + */ + if (context.activeState.gamma.blackLevel != context.activeState.blc.level || + context.activeState.gamma.contrast != context.activeState.knobs.contrast) + updateGammaTable(context); + + auto &gains = context.activeState.gains; + auto &gammaTable = context.activeState.gamma.gammaTable; + const unsigned int gammaTableSize = gammaTable.size(); + + for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) { + const double div = static_cast<double>(DebayerParams::kRGBLookupSize) / + gammaTableSize; + /* Apply gamma after gain!
*/ + unsigned int idx; + idx = std::min({ static_cast<unsigned int>(i * gains.red / div), + gammaTableSize - 1 }); + params->red[i] = gammaTable[idx]; + idx = std::min({ static_cast<unsigned int>(i * gains.green / div), + gammaTableSize - 1 }); + params->green[i] = gammaTable[idx]; + idx = std::min({ static_cast<unsigned int>(i * gains.blue / div), + gammaTableSize - 1 }); + params->blue[i] = gammaTable[idx]; + } +} + +REGISTER_IPA_ALGORITHM(Lut, "Lut") + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/lut.h b/src/ipa/simple/algorithms/lut.h new file mode 100644 index 00000000..889f864b --- /dev/null +++ b/src/ipa/simple/algorithms/lut.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Color lookup tables construction + */ + +#pragma once + +#include "algorithm.h" + +namespace libcamera { + +namespace ipa::soft::algorithms { + +class Lut : public Algorithm +{ +public: + Lut() = default; + ~Lut() = default; + + int init(IPAContext &context, const YamlObject &tuningData) override; + int configure(IPAContext &context, const IPAConfigInfo &configInfo) override; + void queueRequest(typename Module::Context &context, + const uint32_t frame, + typename Module::FrameContext &frameContext, + const ControlList &controls) + override; + void prepare(IPAContext &context, + const uint32_t frame, + IPAFrameContext &frameContext, + DebayerParams *params) override; + +private: + void updateGammaTable(IPAContext &context); +}; + +} /* namespace ipa::soft::algorithms */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/meson.build b/src/ipa/simple/algorithms/meson.build new file mode 100644 index 00000000..37a2eb53 --- /dev/null +++ b/src/ipa/simple/algorithms/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: CC0-1.0 + +soft_simple_ipa_algorithms = files([ + 'awb.cpp', + 'agc.cpp', + 'blc.cpp', + 'lut.cpp', +]) diff --git a/src/ipa/simple/black_level.cpp b/src/ipa/simple/black_level.cpp deleted file mode 100644 index cc490eb5..00000000 --- a/src/ipa/simple/black_level.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Red Hat Inc. - * - * black level handling - */ - -#include "black_level.h" - -#include <numeric> - -#include <libcamera/base/log.h> - -namespace libcamera { - -LOG_DEFINE_CATEGORY(IPASoftBL) - -/** - * \class BlackLevel - * \brief Object providing black point level for software ISP - * - * Black level can be provided in hardware tuning files or, if no tuning file is - * available for the given hardware, guessed automatically, with less accuracy. - * As tuning files are not yet implemented for software ISP, BlackLevel - * currently provides only guessed black levels. - * - * This class serves for tracking black level as a property of the underlying - * hardware, not as means of enhancing a particular scene or image. - * - * The class is supposed to be instantiated for the given camera stream. - * The black level can be retrieved using BlackLevel::get() method. It is - * initially 0 and may change when updated using BlackLevel::update() method. - */ - -BlackLevel::BlackLevel() - : blackLevel_(255), blackLevelSet_(false) -{ -} - -/** - * \brief Return the current black level - * - * \return The black level, in the range from 0 (minimum) to 255 (maximum). - * If the black level couldn't be determined yet, return 0. - */ -uint8_t BlackLevel::get() const -{ - return blackLevelSet_ ? 
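Taken out of the IPA context, the table construction in Lut::updateGammaTable() reduces to the sketch below. It assumes an 8-bit output range and a caller-chosen table size; gamma = 0.5 mirrors the fixed value used above, and the contrast knob in 0..2 is mapped through tan() to an S-curve exponent in 0..infinity.

\code{.cpp}
#include <algorithm>
#include <array>
#include <cmath>
#include <stdint.h>

constexpr unsigned int kTableSize = 1024;

std::array<uint8_t, kTableSize> computeGammaTable(uint8_t blackLevel,
						  double contrast,
						  double gamma = 0.5)
{
	/* Entries below the black level stay at zero. */
	std::array<uint8_t, kTableSize> table{};
	const unsigned int blackIndex = blackLevel * kTableSize / 256;
	/* Map contrast 0..2 to an exponent 0..infinity via tan(). */
	const double sCurveExp = std::tan(std::clamp(contrast * M_PI_4,
						     0.0, M_PI_2 - 0.00001));
	const double divisor = kTableSize - blackIndex - 1.0;

	for (unsigned int i = blackIndex; i < kTableSize; i++) {
		double n = (i - blackIndex) / divisor;
		/* S-curve around the mid-point, then gamma encoding. */
		n = n < 0.5 ? 0.5 * std::pow(n / 0.5, sCurveExp)
			    : 1.0 - 0.5 * std::pow((1.0 - n) / 0.5, sCurveExp);
		table[i] = UINT8_MAX * std::pow(n, gamma);
	}

	return table;
}
\endcode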
blackLevel_ : 0; -} - -/** - * \brief Update black level from the provided histogram - * \param[in] yHistogram The histogram to be used for updating black level - * - * The black level is property of the given hardware, not image. It is updated - * only if it has not been yet set or if it is lower than the lowest value seen - * so far. - */ -void BlackLevel::update(SwIspStats::Histogram &yHistogram) -{ - /* - * The constant is selected to be "good enough", not overly conservative or - * aggressive. There is no magic about the given value. - */ - constexpr float ignoredPercentage_ = 0.02; - const unsigned int total = - std::accumulate(begin(yHistogram), end(yHistogram), 0); - const unsigned int pixelThreshold = ignoredPercentage_ * total; - const unsigned int histogramRatio = 256 / SwIspStats::kYHistogramSize; - const unsigned int currentBlackIdx = blackLevel_ / histogramRatio; - - for (unsigned int i = 0, seen = 0; - i < currentBlackIdx && i < SwIspStats::kYHistogramSize; - i++) { - seen += yHistogram[i]; - if (seen >= pixelThreshold) { - blackLevel_ = i * histogramRatio; - blackLevelSet_ = true; - LOG(IPASoftBL, Debug) - << "Auto-set black level: " - << i << "/" << SwIspStats::kYHistogramSize - << " (" << 100 * (seen - yHistogram[i]) / total << "% below, " - << 100 * seen / total << "% at or below)"; - break; - } - }; -} -} /* namespace libcamera */ diff --git a/src/ipa/simple/black_level.h b/src/ipa/simple/black_level.h deleted file mode 100644 index 5e032f9f..00000000 --- a/src/ipa/simple/black_level.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Red Hat Inc. - * - * black level handling - */ - -#pragma once - -#include <array> -#include <stdint.h> - -#include "libcamera/internal/software_isp/swisp_stats.h" - -namespace libcamera { - -class BlackLevel -{ -public: - BlackLevel(); - uint8_t get() const; - void update(SwIspStats::Histogram &yHistogram); - -private: - uint8_t blackLevel_; - bool blackLevelSet_; -}; - -} /* namespace libcamera */ diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml index ff981a1a..3f147112 100644 --- a/src/ipa/simple/data/uncalibrated.yaml +++ b/src/ipa/simple/data/uncalibrated.yaml @@ -2,4 +2,9 @@ %YAML 1.1 --- version: 1 +algorithms: + - BlackLevel: + - Awb: + - Lut: + - Agc: ... diff --git a/src/ipa/simple/ipa_context.cpp b/src/ipa/simple/ipa_context.cpp new file mode 100644 index 00000000..3f94bbeb --- /dev/null +++ b/src/ipa/simple/ipa_context.cpp @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Google Inc. + * Copyright (C) 2024 Red Hat Inc. + * + * Software ISP IPA Context + */ + +#include "ipa_context.h" + +/** + * \file ipa_context.h + * \brief Context and state information shared between the algorithms + */ + +namespace libcamera::ipa::soft { + +/** + * \struct IPASessionConfiguration + * \brief Session configuration for the IPA module + * + * The session configuration contains all IPA configuration parameters that + * remain constant during the capture session, from IPA module start to stop. + * It is typically set during the configure() operation of the IPA module, but + * may also be updated in the start() operation. + */ + +/** + * \struct IPAActiveState + * \brief The active state of the IPA algorithms + * + * The IPA is fed with the statistics generated from the latest frame processed. 
+ * The statistics are then processed by the IPA algorithms to compute parameters + * required for the next frame capture and processing. The current state of the + * algorithms is reflected through the IPAActiveState to store the values most + * recently computed by the IPA algorithms. + */ + +/** + * \struct IPAContext + * \brief Global IPA context data shared between all algorithms + * + * \var IPAContext::configuration + * \brief The IPA session configuration, immutable during the session + * + * \var IPAContext::frameContexts + * \brief Ring buffer of the IPAFrameContext(s) + * + * \var IPAContext::activeState + * \brief The current state of IPA algorithms + */ + +/** + * \var IPASessionConfiguration::gamma + * \brief Gamma value to be used in the raw image processing + */ + +/** + * \var IPAActiveState::blc + * \brief Context for the Black Level algorithm + * + * \var IPAActiveState::blc.level + * \brief Current determined black level + */ + +/** + * \var IPAActiveState::gains + * \brief Context for gains in the Colors algorithm + * + * \var IPAActiveState::gains.red + * \brief Gain of red color + * + * \var IPAActiveState::gains.green + * \brief Gain of green color + * + * \var IPAActiveState::gains.blue + * \brief Gain of blue color + */ + +/** + * \var IPAFrameContext::sensor + * \brief Effective sensor values used for the frame + * + * \var IPAFrameContext::sensor.exposure + * \brief Exposure time of the frame + * + * \var IPAFrameContext::sensor.gain + * \brief Analogue gain of the frame + */ + +/** + * \var IPAActiveState::gamma + * \brief Context for gamma in the Colors algorithm + * + * \var IPAActiveState::gamma.gammaTable + * \brief Computed gamma table + * + * \var IPAActiveState::gamma.blackLevel + * \brief Black level used for the gamma table computation + * + * \var IPAActiveState::gamma.contrast + * \brief Contrast value used for the gamma table computation + */ + +} /* namespace libcamera::ipa::soft */ diff --git a/src/ipa/simple/ipa_context.h b/src/ipa/simple/ipa_context.h new file mode 100644 index 00000000..4af51306 --- /dev/null +++ b/src/ipa/simple/ipa_context.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024 Red Hat, Inc.
+ * + * Simple pipeline IPA Context + */ + +#pragma once + +#include <array> +#include <optional> +#include <stdint.h> + +#include <libcamera/controls.h> + +#include <libipa/fc_queue.h> + +namespace libcamera { + +namespace ipa::soft { + +struct IPASessionConfiguration { + float gamma; + struct { + int32_t exposureMin, exposureMax; + double againMin, againMax, againMinStep; + } agc; + struct { + std::optional<uint8_t> level; + } black; +}; + +struct IPAActiveState { + struct { + uint8_t level; + } blc; + + struct { + double red; + double green; + double blue; + } gains; + + static constexpr unsigned int kGammaLookupSize = 1024; + struct { + std::array<double, kGammaLookupSize> gammaTable; + uint8_t blackLevel; + double contrast; + } gamma; + struct { + /* 0..2 range, 1.0 = normal */ + std::optional<double> contrast; + } knobs; +}; + +struct IPAFrameContext : public FrameContext { + struct { + int32_t exposure; + double gain; + } sensor; +}; + +struct IPAContext { + IPAContext(unsigned int frameContextSize) + : frameContexts(frameContextSize) + { + } + + IPASessionConfiguration configuration; + IPAActiveState activeState; + FCQueue<IPAFrameContext> frameContexts; + ControlInfoMap::Map ctrlMap; +}; + +} /* namespace ipa::soft */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/meson.build b/src/ipa/simple/meson.build index 33d1c96a..2f9f15f4 100644 --- a/src/ipa/simple/meson.build +++ b/src/ipa/simple/meson.build @@ -1,14 +1,18 @@ # SPDX-License-Identifier: CC0-1.0 +subdir('algorithms') +subdir('data') + ipa_name = 'ipa_soft_simple' soft_simple_sources = files([ + 'ipa_context.cpp', 'soft_simple.cpp', - 'black_level.cpp', ]) -mod = shared_module(ipa_name, - [soft_simple_sources, libcamera_generated_ipa_headers], +soft_simple_sources += soft_simple_ipa_algorithms + +mod = shared_module(ipa_name, soft_simple_sources, name_prefix : '', include_directories : [ipa_includes], dependencies : [libcamera_private, libipa_dep], @@ -24,6 +28,4 @@ if ipa_sign_module build_by_default : true) endif -subdir('data') - ipa_names += ipa_name diff --git a/src/ipa/simple/module.h b/src/ipa/simple/module.h new file mode 100644 index 00000000..8d4d53fb --- /dev/null +++ b/src/ipa/simple/module.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024 Red Hat, Inc. + * + * Software ISP IPA Module + */ + +#pragma once + +#include <libcamera/controls.h> + +#include <libcamera/ipa/soft_ipa_interface.h> + +#include "libcamera/internal/software_isp/debayer_params.h" +#include "libcamera/internal/software_isp/swisp_stats.h" + +#include <libipa/module.h> + +#include "ipa_context.h" + +namespace libcamera { + +namespace ipa::soft { + +using Module = ipa::Module<IPAContext, IPAFrameContext, IPAConfigInfo, + DebayerParams, SwIspStats>; + +} /* namespace ipa::soft */ + +} /* namespace libcamera */ diff --git a/src/ipa/simple/soft_simple.cpp b/src/ipa/simple/soft_simple.cpp index b7746ce0..b26e4e15 100644 --- a/src/ipa/simple/soft_simple.cpp +++ b/src/ipa/simple/soft_simple.cpp @@ -5,8 +5,6 @@ * Simple Software Image Processing Algorithm module */ -#include <cmath> -#include <numeric> #include <stdint.h> #include <sys/mman.h> @@ -29,37 +27,21 @@ #include "libipa/camera_sensor_helper.h" -#include "black_level.h" +#include "module.h" namespace libcamera { LOG_DEFINE_CATEGORY(IPASoft) namespace ipa::soft { -/* - * The number of bins to use for the optimal exposure calculations. 
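The Module typedef above is what lets new algorithms plug in with nothing more than a header, a source file, and an entry in the tuning file. A hedged sketch of the extension pattern, not code from the tree: controls::Sharpness is a real libcamera control, but the knobs.sharpness field is hypothetical and would have to be added to IPAActiveState first.

\code{.cpp}
#include "algorithm.h"
#include "control_ids.h"

namespace libcamera::ipa::soft::algorithms {

class Sharpen : public Algorithm
{
public:
	int init(IPAContext &context,
		 [[maybe_unused]] const YamlObject &tuningData) override
	{
		/* Advertise the control to applications. */
		context.ctrlMap[&controls::Sharpness] =
			ControlInfo(0.0f, 16.0f, 1.0f);
		return 0;
	}

	void queueRequest(typename Module::Context &context,
			  [[maybe_unused]] const uint32_t frame,
			  [[maybe_unused]] typename Module::FrameContext &frameContext,
			  const ControlList &controls) override
	{
		/* Latch the knob value for later prepare() calls. */
		const auto &sharpness = controls.get(controls::Sharpness);
		if (sharpness)
			context.activeState.knobs.sharpness = sharpness;
	}
};

REGISTER_IPA_ALGORITHM(Sharpen, "Sharpen")

} /* namespace libcamera::ipa::soft::algorithms */
\endcode

Together with a "- Sharpen:" entry in the tuning file's algorithms list, this would be enough for createAlgorithms() to instantiate it.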
- */ -static constexpr unsigned int kExposureBinsCount = 5; - -/* - * The exposure is optimal when the mean sample value of the histogram is - * in the middle of the range. - */ -static constexpr float kExposureOptimal = kExposureBinsCount / 2.0; - -/* - * The below value implements the hysteresis for the exposure adjustment. - * It is small enough to have the exposure close to the optimal, and is big - * enough to prevent the exposure from wobbling around the optimal value. - */ -static constexpr float kExposureSatisfactory = 0.2; +/* Maximum number of frame contexts to be held */ +static constexpr uint32_t kMaxFrameContexts = 16; -class IPASoftSimple : public ipa::soft::IPASoftInterface +class IPASoftSimple : public ipa::soft::IPASoftInterface, public Module { public: IPASoftSimple() - : params_(nullptr), stats_(nullptr), blackLevel_(BlackLevel()), - ignoreUpdates_(0) + : context_(kMaxFrameContexts) { } @@ -68,13 +50,20 @@ public: int init(const IPASettings &settings, const SharedFD &fdStats, const SharedFD &fdParams, - const ControlInfoMap &sensorInfoMap) override; - int configure(const ControlInfoMap &sensorInfoMap) override; + const ControlInfoMap &sensorInfoMap, + ControlInfoMap *ipaControls) override; + int configure(const IPAConfigInfo &configInfo) override; int start() override; void stop() override; - void processStats(const ControlList &sensorControls) override; + void queueRequest(const uint32_t frame, const ControlList &controls) override; + void computeParams(const uint32_t frame) override; + void processStats(const uint32_t frame, const uint32_t bufferId, + const ControlList &sensorControls) override; + +protected: + std::string logPrefix() const override; private: void updateExposure(double exposureMSV); @@ -83,17 +72,9 @@ private: SwIspStats *stats_; std::unique_ptr<CameraSensorHelper> camHelper_; ControlInfoMap sensorInfoMap_; - BlackLevel blackLevel_; - - static constexpr unsigned int kGammaLookupSize = 1024; - std::array<uint8_t, kGammaLookupSize> gammaTable_; - int lastBlackLevel_ = -1; - int32_t exposureMin_, exposureMax_; - int32_t exposure_; - double againMin_, againMax_, againMinStep_; - double again_; - unsigned int ignoreUpdates_; + /* Local parameter storage */ + struct IPAContext context_; }; IPASoftSimple::~IPASoftSimple() @@ -107,7 +88,8 @@ IPASoftSimple::~IPASoftSimple() int IPASoftSimple::init(const IPASettings &settings, const SharedFD &fdStats, const SharedFD &fdParams, - const ControlInfoMap &sensorInfoMap) + const ControlInfoMap &sensorInfoMap, + ControlInfoMap *ipaControls) { camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel); if (!camHelper_) { @@ -134,6 +116,15 @@ int IPASoftSimple::init(const IPASettings &settings, unsigned int version = (*data)["version"].get<uint32_t>(0); LOG(IPASoft, Debug) << "Tuning file version " << version; + if (!data->contains("algorithms")) { + LOG(IPASoft, Error) << "Tuning file doesn't contain algorithms"; + return -EINVAL; + } + + int ret = createAlgorithms(context_, (*data)["algorithms"]); + if (ret) + return ret; + params_ = nullptr; stats_ = nullptr; @@ -169,6 +160,9 @@ int IPASoftSimple::init(const IPASettings &settings, stats_ = static_cast<SwIspStats *>(mem); } + ControlInfoMap::Map ctrlMap = context_.ctrlMap; + *ipaControls = ControlInfoMap(std::move(ctrlMap), controls::controls); + /* * Check if the sensor driver supports the controls required by the * Soft IPA. 
@@ -188,27 +182,45 @@ int IPASoftSimple::init(const IPASettings &settings, return 0; } -int IPASoftSimple::configure(const ControlInfoMap &sensorInfoMap) +int IPASoftSimple::configure(const IPAConfigInfo &configInfo) { - sensorInfoMap_ = sensorInfoMap; + sensorInfoMap_ = configInfo.sensorControls; const ControlInfo &exposureInfo = sensorInfoMap_.find(V4L2_CID_EXPOSURE)->second; const ControlInfo &gainInfo = sensorInfoMap_.find(V4L2_CID_ANALOGUE_GAIN)->second; - exposureMin_ = exposureInfo.min().get<int32_t>(); - exposureMax_ = exposureInfo.max().get<int32_t>(); - if (!exposureMin_) { + /* Clear the IPA context before the streaming session. */ + context_.configuration = {}; + context_.activeState = {}; + context_.frameContexts.clear(); + + context_.configuration.agc.exposureMin = exposureInfo.min().get<int32_t>(); + context_.configuration.agc.exposureMax = exposureInfo.max().get<int32_t>(); + if (!context_.configuration.agc.exposureMin) { LOG(IPASoft, Warning) << "Minimum exposure is zero, that can't be linear"; - exposureMin_ = 1; + context_.configuration.agc.exposureMin = 1; } int32_t againMin = gainInfo.min().get<int32_t>(); int32_t againMax = gainInfo.max().get<int32_t>(); if (camHelper_) { - againMin_ = camHelper_->gain(againMin); - againMax_ = camHelper_->gain(againMax); - againMinStep_ = (againMax_ - againMin_) / 100.0; + context_.configuration.agc.againMin = camHelper_->gain(againMin); + context_.configuration.agc.againMax = camHelper_->gain(againMax); + context_.configuration.agc.againMinStep = + (context_.configuration.agc.againMax - + context_.configuration.agc.againMin) / + 100.0; + if (camHelper_->blackLevel().has_value()) { + /* + * The black level from camHelper_ is a 16 bit value, while the + * software ISP works with 8 bit pixel values, both regardless + * of the actual sensor pixel width. Hence we obtain the + * pixel-based black value by dividing the value from the + * helper by 256. + */ + context_.configuration.black.level = + camHelper_->blackLevel().value() / 256; + } } else { /* * The camera sensor gain (g) is usually not equal to the value written @@ -220,18 +232,28 @@ int IPASoftSimple::configure(const ControlInfoMap &sensorInfoMap) * the AGC algorithm (abrupt near one edge, and very small near the * other) we limit the range of the gain values used.
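The scaling in the branch above is worth spelling out with numbers. Sensor helpers report the black level normalised to a 16-bit scale while the software ISP operates on 8-bit pixels, so the value is divided by 256. For instance, a 10-bit sensor with a black level of 64 normalises to 64 * 65536 / 1024 = 4096 on the 16-bit scale, which becomes 4096 / 256 = 16 on the 8-bit scale. A one-line sketch:

\code{.cpp}
#include <stdint.h>

/* 16-bit helper black level to the software ISP's 8-bit scale. */
uint8_t toPixelBlackLevel(uint16_t helperBlackLevel)
{
	return helperBlackLevel / 256; /* Equivalent to >> 8. */
}
\endcode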
*/ - againMax_ = againMax; + context_.configuration.agc.againMax = againMax; if (!againMin) { LOG(IPASoft, Warning) << "Minimum gain is zero, that can't be linear"; - againMin_ = std::min(100, againMin / 2 + againMax / 2); + context_.configuration.agc.againMin = + std::min(100, againMin / 2 + againMax / 2); } - againMinStep_ = 1.0; + context_.configuration.agc.againMinStep = 1.0; } - LOG(IPASoft, Info) << "Exposure " << exposureMin_ << "-" << exposureMax_ - << ", gain " << againMin_ << "-" << againMax_ - << " (" << againMinStep_ << ")"; + for (auto const &algo : algorithms()) { + int ret = algo->configure(context_, configInfo); + if (ret) + return ret; + } + + LOG(IPASoft, Info) + << "Exposure " << context_.configuration.agc.exposureMin << "-" + << context_.configuration.agc.exposureMax + << ", gain " << context_.configuration.agc.againMin << "-" + << context_.configuration.agc.againMax + << " (" << context_.configuration.agc.againMinStep << ")"; return 0; } @@ -243,107 +265,45 @@ int IPASoftSimple::start() void IPASoftSimple::stop() { + context_.frameContexts.clear(); } -void IPASoftSimple::processStats(const ControlList &sensorControls) +void IPASoftSimple::queueRequest(const uint32_t frame, const ControlList &controls) { - SwIspStats::Histogram histogram = stats_->yHistogram; - if (ignoreUpdates_ > 0) - blackLevel_.update(histogram); - const uint8_t blackLevel = blackLevel_.get(); - - /* - * Black level must be subtracted to get the correct AWB ratios, they - * would be off if they were computed from the whole brightness range - * rather than from the sensor range. - */ - const uint64_t nPixels = std::accumulate( - histogram.begin(), histogram.end(), 0); - const uint64_t offset = blackLevel * nPixels; - const uint64_t sumR = stats_->sumR_ - offset / 4; - const uint64_t sumG = stats_->sumG_ - offset / 2; - const uint64_t sumB = stats_->sumB_ - offset / 4; - - /* - * Calculate red and blue gains for AWB. - * Clamp max gain at 4.0, this also avoids 0 division. - * Gain: 128 = 0.5, 256 = 1.0, 512 = 2.0, etc. - */ - const unsigned int gainR = sumR <= sumG / 4 ? 1024 : 256 * sumG / sumR; - const unsigned int gainB = sumB <= sumG / 4 ? 1024 : 256 * sumG / sumB; - /* Green gain and gamma values are fixed */ - constexpr unsigned int gainG = 256; - - /* Update the gamma table if needed */ - if (blackLevel != lastBlackLevel_) { - constexpr float gamma = 0.5; - const unsigned int blackIndex = blackLevel * kGammaLookupSize / 256; - std::fill(gammaTable_.begin(), gammaTable_.begin() + blackIndex, 0); - const float divisor = kGammaLookupSize - blackIndex - 1.0; - for (unsigned int i = blackIndex; i < kGammaLookupSize; i++) - gammaTable_[i] = UINT8_MAX * - std::pow((i - blackIndex) / divisor, gamma); - - lastBlackLevel_ = blackLevel; - } - - for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) { - constexpr unsigned int div = - DebayerParams::kRGBLookupSize * 256 / kGammaLookupSize; - unsigned int idx; - - /* Apply gamma after gain! 
*/ - idx = std::min({ i * gainR / div, (kGammaLookupSize - 1) }); - params_->red[i] = gammaTable_[idx]; + IPAFrameContext &frameContext = context_.frameContexts.alloc(frame); - idx = std::min({ i * gainG / div, (kGammaLookupSize - 1) }); - params_->green[i] = gammaTable_[idx]; - - idx = std::min({ i * gainB / div, (kGammaLookupSize - 1) }); - params_->blue[i] = gammaTable_[idx]; - } + for (auto const &algo : algorithms()) + algo->queueRequest(context_, frame, frameContext, controls); +} +void IPASoftSimple::computeParams(const uint32_t frame) +{ + IPAFrameContext &frameContext = context_.frameContexts.get(frame); + for (auto const &algo : algorithms()) + algo->prepare(context_, frame, frameContext, params_); setIspParams.emit(); +} - /* \todo Switch to the libipa/algorithm.h API someday. */ +void IPASoftSimple::processStats(const uint32_t frame, + [[maybe_unused]] const uint32_t bufferId, + const ControlList &sensorControls) +{ + IPAFrameContext &frameContext = context_.frameContexts.get(frame); - /* - * AE / AGC, use 2 frames delay to make sure that the exposure and - * the gain set have applied to the camera sensor. - * \todo This could be handled better with DelayedControls. - */ - if (ignoreUpdates_ > 0) { - --ignoreUpdates_; - return; - } + frameContext.sensor.exposure = + sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>(); + int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>(); + frameContext.sensor.gain = camHelper_ ? camHelper_->gain(again) : again; /* - * Calculate Mean Sample Value (MSV) according to formula from: - * https://www.araa.asn.au/acra/acra2007/papers/paper84final.pdf + * Software ISP currently does not produce any metadata. Use an empty + * ControlList for now. + * + * \todo Implement proper metadata handling */ - const unsigned int blackLevelHistIdx = - blackLevel / (256 / SwIspStats::kYHistogramSize); - const unsigned int histogramSize = - SwIspStats::kYHistogramSize - blackLevelHistIdx; - const unsigned int yHistValsPerBin = histogramSize / kExposureBinsCount; - const unsigned int yHistValsPerBinMod = - histogramSize / (histogramSize % kExposureBinsCount + 1); - int exposureBins[kExposureBinsCount] = {}; - unsigned int denom = 0; - unsigned int num = 0; - - for (unsigned int i = 0; i < histogramSize; i++) { - unsigned int idx = (i - (i / yHistValsPerBinMod)) / yHistValsPerBin; - exposureBins[idx] += stats_->yHistogram[blackLevelHistIdx + i]; - } - - for (unsigned int i = 0; i < kExposureBinsCount; i++) { - LOG(IPASoft, Debug) << i << ": " << exposureBins[i]; - denom += exposureBins[i]; - num += exposureBins[i] * (i + 1); - } - - float exposureMSV = static_cast<float>(num) / denom; + ControlList metadata(controls::controls); + for (auto const &algo : algorithms()) + algo->process(context_, frame, frameContext, stats_, metadata); /* Sanity check */ if (!sensorControls.contains(V4L2_CID_EXPOSURE) || @@ -352,73 +312,19 @@ void IPASoftSimple::processStats(const ControlList &sensorControls) return; } - exposure_ = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>(); - int32_t again = sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>(); - again_ = camHelper_ ? camHelper_->gain(again) : again; - - updateExposure(exposureMSV); - ControlList ctrls(sensorInfoMap_); - ctrls.set(V4L2_CID_EXPOSURE, exposure_); + auto &againNew = frameContext.sensor.gain; + ctrls.set(V4L2_CID_EXPOSURE, frameContext.sensor.exposure); ctrls.set(V4L2_CID_ANALOGUE_GAIN, - static_cast<int32_t>(camHelper_ ? 
camHelper_->gainCode(again_) : again_)); - - ignoreUpdates_ = 2; + static_cast<int32_t>(camHelper_ ? camHelper_->gainCode(againNew) : againNew)); setSensorControls.emit(ctrls); - - LOG(IPASoft, Debug) << "exposureMSV " << exposureMSV - << " exp " << exposure_ << " again " << again_ - << " gain R/B " << gainR << "/" << gainB - << " black level " << static_cast<unsigned int>(blackLevel); } -void IPASoftSimple::updateExposure(double exposureMSV) +std::string IPASoftSimple::logPrefix() const { - /* - * kExpDenominator of 10 gives ~10% increment/decrement; - * kExpDenominator of 5 - about ~20% - */ - static constexpr uint8_t kExpDenominator = 10; - static constexpr uint8_t kExpNumeratorUp = kExpDenominator + 1; - static constexpr uint8_t kExpNumeratorDown = kExpDenominator - 1; - - double next; - - if (exposureMSV < kExposureOptimal - kExposureSatisfactory) { - next = exposure_ * kExpNumeratorUp / kExpDenominator; - if (next - exposure_ < 1) - exposure_ += 1; - else - exposure_ = next; - if (exposure_ >= exposureMax_) { - next = again_ * kExpNumeratorUp / kExpDenominator; - if (next - again_ < againMinStep_) - again_ += againMinStep_; - else - again_ = next; - } - } - - if (exposureMSV > kExposureOptimal + kExposureSatisfactory) { - if (exposure_ == exposureMax_ && again_ > againMin_) { - next = again_ * kExpNumeratorDown / kExpDenominator; - if (again_ - next < againMinStep_) - again_ -= againMinStep_; - else - again_ = next; - } else { - next = exposure_ * kExpNumeratorDown / kExpDenominator; - if (exposure_ - next < 1) - exposure_ -= 1; - else - exposure_ = next; - } - } - - exposure_ = std::clamp(exposure_, exposureMin_, exposureMax_); - again_ = std::clamp(again_, againMin_, againMax_); + return "IPASoft"; } } /* namespace ipa::soft */ diff --git a/src/ipa/vimc/meson.build b/src/ipa/vimc/meson.build index d0b63edd..2cc5f80b 100644 --- a/src/ipa/vimc/meson.build +++ b/src/ipa/vimc/meson.build @@ -2,8 +2,7 @@ ipa_name = 'ipa_vimc' -mod = shared_module(ipa_name, - ['vimc.cpp', libcamera_generated_ipa_headers], +mod = shared_module(ipa_name, 'vimc.cpp', name_prefix : '', include_directories : [ipa_includes], dependencies : [libcamera_private, libipa_dep], diff --git a/src/ipa/vimc/vimc.cpp b/src/ipa/vimc/vimc.cpp index ebd63fa6..a1351a0f 100644 --- a/src/ipa/vimc/vimc.cpp +++ b/src/ipa/vimc/vimc.cpp @@ -14,6 +14,7 @@ #include <iostream> #include <libcamera/base/file.h> +#include <libcamera/base/flags.h> #include <libcamera/base/log.h> #include <libcamera/ipa/ipa_interface.h> @@ -47,7 +48,7 @@ public: void unmapBuffers(const std::vector<unsigned int> &ids) override; void queueRequest(uint32_t frame, const ControlList &controls) override; - void fillParamsBuffer(uint32_t frame, uint32_t bufferId) override; + void computeParams(uint32_t frame, uint32_t bufferId) override; private: void initTrace(); @@ -149,7 +150,7 @@ void IPAVimc::queueRequest([[maybe_unused]] uint32_t frame, { } -void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferId) +void IPAVimc::computeParams([[maybe_unused]] uint32_t frame, uint32_t bufferId) { auto it = buffers_.find(bufferId); if (it == buffers_.end()) { @@ -158,7 +159,7 @@ void IPAVimc::fillParamsBuffer([[maybe_unused]] uint32_t frame, uint32_t bufferI } Flags<ipa::vimc::TestFlag> flags; - paramsBufferReady.emit(bufferId, flags); + paramsComputed.emit(bufferId, flags); } void IPAVimc::initTrace() diff --git a/src/libcamera/base/event_dispatcher_poll.cpp b/src/libcamera/base/event_dispatcher_poll.cpp index b737ca7a..52bfb34e 100644 --- 
a/src/libcamera/base/event_dispatcher_poll.cpp +++ b/src/libcamera/base/event_dispatcher_poll.cpp @@ -7,14 +7,13 @@ #include <libcamera/base/event_dispatcher_poll.h> -#include <algorithm> -#include <chrono> #include <iomanip> #include <poll.h> #include <stdint.h> #include <string.h> #include <sys/eventfd.h> #include <unistd.h> +#include <vector> #include <libcamera/base/event_notifier.h> #include <libcamera/base/log.h> diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp index 3a656b8f..72e0db85 100644 --- a/src/libcamera/base/log.cpp +++ b/src/libcamera/base/log.cpp @@ -8,6 +8,7 @@ #include <libcamera/base/log.h> #include <array> +#include <fnmatch.h> #include <fstream> #include <iostream> #include <list> @@ -38,8 +39,8 @@ * The levels are configurable through the LIBCAMERA_LOG_LEVELS environment * variable that contains a comma-separated list of 'category:level' pairs. * - * The category names are strings and can include a wildcard ('*') character at - * the end to match multiple categories. + * The category names are strings and can include a wildcard ('*') character to + * match multiple categories. * * The level are either numeric values, or strings containing the log level * name. The available log levels are DEBUG, INFO, WARN, ERROR and FATAL. Log @@ -717,24 +718,9 @@ void Logger::registerCategory(LogCategory *category) categories_.push_back(category); const std::string &name = category->name(); - for (const std::pair<std::string, LogSeverity> &level : levels_) { - bool match = true; - - for (unsigned int i = 0; i < level.first.size(); ++i) { - if (level.first[i] == '*') - break; - - if (i >= name.size() || - name[i] != level.first[i]) { - match = false; - break; - } - } - - if (match) { - category->setSeverity(level.second); - break; - } + for (const auto &[pattern, severity] : levels_) { + if (fnmatch(pattern.c_str(), name.c_str(), FNM_NOESCAPE) == 0) + category->setSeverity(severity); } } diff --git a/src/libcamera/base/memfd.cpp b/src/libcamera/base/memfd.cpp new file mode 100644 index 00000000..ed0b299b --- /dev/null +++ b/src/libcamera/base/memfd.cpp @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas on Board Oy + * + * Anonymous file creation + */ + +#include <libcamera/base/memfd.h> + +#include <fcntl.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/syscall.h> +#include <unistd.h> + +#include <libcamera/base/log.h> + +/** + * \file base/memfd.h + * \brief Anonymous file creation + */ + +#ifndef __DOXYGEN__ +namespace { + +/* uClibc doesn't provide the file sealing API. */ +#if not HAVE_FILE_SEALS +#define F_ADD_SEALS 1033 +#define F_SEAL_SHRINK 0x0002 +#define F_SEAL_GROW 0x0004 +#endif + +#if not HAVE_MEMFD_CREATE +int memfd_create(const char *name, unsigned int flags) +{ + return syscall(SYS_memfd_create, name, flags); +} +#endif + +} /* namespace */ +#endif /* __DOXYGEN__ */ + +namespace libcamera { + +LOG_DECLARE_CATEGORY(File) + +/** + * \class MemFd + * \brief Helper class to create anonymous files + * + * Anonymous files behave like regular files, and can be modified, truncated, + * memory-mapped and so on. Unlike regular files, they however live in RAM and + * don't have permanent backing storage. 
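The switch to fnmatch() above changes LIBCAMERA_LOG_LEVELS matching in two ways: the wildcard may now appear anywhere in the pattern, and since the loop no longer stops at the first hit, every matching pattern applies, so the last matching entry wins. A small sketch of the matching call, using standard POSIX fnmatch():

\code{.cpp}
#include <fnmatch.h>
#include <iostream>

int main()
{
	const char *patterns[] = { "IPA*", "*Soft*", "Camera" };
	const char *category = "IPASoftBL";

	for (const char *pattern : patterns) {
		/* FNM_NOESCAPE treats backslashes literally. */
		if (fnmatch(pattern, category, FNM_NOESCAPE) == 0)
			std::cout << pattern << " matches " << category << '\n';
	}

	return 0;
}
\endcode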
+ */ + +/** + * \enum MemFd::Seal + * \brief Seals for the MemFd::create() function + * \var MemFd::Seal::None + * \brief No seals (used as default value) + * \var MemFd::Seal::Shrink + * \brief Prevent the memfd from shrinking + * \var MemFd::Seal::Grow + * \brief Prevent the memfd from growing + */ + +/** + * \typedef MemFd::Seals + * \brief A bitwise combination of MemFd::Seal values + */ + +/** + * \brief Create an anonymous file + * \param[in] name The file name (displayed in symbolic links in /proc/self/fd/) + * \param[in] size The file size + * \param[in] seals The file seals + * + * This function is a helper that wraps anonymous file (memfd) creation and + * sets the file size and optional seals. + * + * \return The descriptor of the anonymous file if creation succeeded, or an + * invalid UniqueFD otherwise + */ +UniqueFD MemFd::create(const char *name, std::size_t size, Seals seals) +{ + int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC); + if (ret < 0) { + ret = errno; + LOG(File, Error) + << "Failed to allocate memfd storage for " << name + << ": " << strerror(ret); + return {}; + } + + UniqueFD memfd(ret); + + ret = ftruncate(memfd.get(), size); + if (ret < 0) { + ret = errno; + LOG(File, Error) + << "Failed to set memfd size for " << name + << ": " << strerror(ret); + return {}; + } + + if (seals) { + int fileSeals = (seals & Seal::Shrink ? F_SEAL_SHRINK : 0) + | (seals & Seal::Grow ? F_SEAL_GROW : 0); + + ret = fcntl(memfd.get(), F_ADD_SEALS, fileSeals); + if (ret < 0) { + ret = errno; + LOG(File, Error) + << "Failed to seal the memfd for " << name + << ": " << strerror(ret); + return {}; + } + } + + return memfd; +} + +} /* namespace libcamera */ diff --git a/src/libcamera/base/meson.build b/src/libcamera/base/meson.build index 7a7fd7e4..a742dfdf 100644 --- a/src/libcamera/base/meson.build +++ b/src/libcamera/base/meson.build @@ -1,24 +1,28 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_base_sources = files([ - 'backtrace.cpp', - 'class.cpp', +libcamera_base_public_sources = files([ 'bound_method.cpp', + 'class.cpp', + 'flags.cpp', + 'object.cpp', + 'shared_fd.cpp', + 'signal.cpp', + 'unique_fd.cpp', +]) + +libcamera_base_internal_sources = files([ + 'backtrace.cpp', 'event_dispatcher.cpp', 'event_dispatcher_poll.cpp', 'event_notifier.cpp', 'file.cpp', - 'flags.cpp', 'log.cpp', + 'memfd.cpp', 'message.cpp', 'mutex.cpp', - 'object.cpp', 'semaphore.cpp', - 'shared_fd.cpp', - 'signal.cpp', 'thread.cpp', 'timer.cpp', - 'unique_fd.cpp', 'utils.cpp', ]) @@ -49,7 +53,11 @@ libcamera_base_deps = [ libcamera_base_args = [ '-DLIBCAMERA_BASE_PRIVATE' ] libcamera_base_lib = shared_library('libcamera-base', - [libcamera_base_sources, libcamera_base_headers], + [ + libcamera_base_public_sources, + libcamera_base_internal_sources, + libcamera_base_headers, + ], version : libcamera_version, soversion : libcamera_soversion, name_prefix : '', diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp index 72733431..319bfda9 100644 --- a/src/libcamera/base/thread.cpp +++ b/src/libcamera/base/thread.cpp @@ -9,6 +9,7 @@ #include <atomic> #include <list> +#include <optional> #include <sys/syscall.h> #include <sys/types.h> #include <unistd.h> @@ -64,42 +65,6 @@ * receiver's event loop, running in the receiver's thread. This mechanism can * be overridden by selecting a different connection type when calling * Signal::connect(). 
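A possible use of the helper above, not taken from the tree: backing a fixed-size shared buffer with a sealed memfd. This assumes the bitwise operators are enabled for MemFd::Seal, as the Flags-based seals parameter and the `if (seals)` test above suggest.

\code{.cpp}
#include <libcamera/base/memfd.h>
#include <libcamera/base/unique_fd.h>

using namespace libcamera;

UniqueFD allocateSharedBuffer(std::size_t size)
{
	/* Seal the size so peers can't shrink or grow the mapping. */
	UniqueFD fd = MemFd::create("shared-buffer", size,
				    MemFd::Seal::Shrink | MemFd::Seal::Grow);
	if (!fd.isValid())
		return {};

	/* The file now has a fixed size and can be mmap()ed and shared. */
	return fd;
}
\endcode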
- * - * \section thread-reentrancy Reentrancy and Thread-Safety - * - * Through the documentation, several terms are used to define how classes and - * their member functions can be used from multiple threads. - * - * - A **reentrant** function may be called simultaneously from multiple - * threads if and only if each invocation uses a different instance of the - * class. This is the default for all member functions not explictly marked - * otherwise. - * - * - \anchor thread-safe A **thread-safe** function may be called - * simultaneously from multiple threads on the same instance of a class. A - * thread-safe function is thus reentrant. Thread-safe functions may also be - * called simultaneously with any other reentrant function of the same class - * on the same instance. - * - * - \anchor thread-bound A **thread-bound** function may be called only from - * the thread that the class instances lives in (see section \ref - * thread-objects). For instances of classes that do not derive from the - * Object class, this is the thread in which the instance was created. A - * thread-bound function is not thread-safe, and may or may not be reentrant. - * - * Neither reentrancy nor thread-safety, in this context, mean that a function - * may be called simultaneously from the same thread, for instance from a - * callback invoked by the function. This may deadlock and isn't allowed unless - * separately documented. - * - * A class is defined as reentrant, thread-safe or thread-bound if all its - * member functions are reentrant, thread-safe or thread-bound respectively. - * Some member functions may additionally be documented as having additional - * thread-related attributes. - * - * Most classes are reentrant but not thread-safe, as making them fully - * thread-safe would incur locking costs considered prohibitive for the - * expected use cases. */ /** @@ -164,6 +129,8 @@ private: int exitCode_; MessageQueue messages_; + + std::optional<cpu_set_t> cpuset_; }; /** @@ -290,6 +257,8 @@ void Thread::start() data_->exit_.store(false, std::memory_order_relaxed); thread_ = std::thread(&Thread::startThread, this); + + setThreadAffinityInternal(); } void Thread::startThread() @@ -447,6 +416,48 @@ bool Thread::wait(utils::duration duration) } /** + * \brief Set the CPU affinity mask of the thread + * \param[in] cpus The list of CPU indices that the thread is set affinity to + * + * The CPU indices should be within [0, std::thread::hardware_concurrency()). + * If any index is invalid, this function won't modify the thread affinity and + * will return an error. 
+ * + * \return 0 if all indices are valid, -EINVAL otherwise + */ +int Thread::setThreadAffinity(const Span<const unsigned int> &cpus) +{ + const unsigned int numCpus = std::thread::hardware_concurrency(); + + MutexLocker locker(data_->mutex_); + data_->cpuset_ = cpu_set_t(); + CPU_ZERO(&data_->cpuset_.value()); + + for (const unsigned int &cpu : cpus) { + if (cpu >= numCpus) { + LOG(Thread, Error) << "Invalid CPU " << cpu << " for thread affinity"; + return -EINVAL; + } + + CPU_SET(cpu, &data_->cpuset_.value()); + } + + if (data_->running_) + setThreadAffinityInternal(); + + return 0; +} + +void Thread::setThreadAffinityInternal() +{ + if (!data_->cpuset_) + return; + + const cpu_set_t &cpuset = data_->cpuset_.value(); + pthread_setaffinity_np(thread_.native_handle(), sizeof(cpuset), &cpuset); +} + +/** * \brief Check if the thread is running * * A Thread instance is considered as running once the underlying thread has diff --git a/src/libcamera/base/utils.cpp b/src/libcamera/base/utils.cpp index ccb31063..bcfc1941 100644 --- a/src/libcamera/base/utils.cpp +++ b/src/libcamera/base/utils.cpp @@ -276,21 +276,6 @@ std::string details::StringSplitter::iterator::operator*() const return ss_->str_.substr(pos_, count); } -bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const -{ - return pos_ != other.pos_; -} - -details::StringSplitter::iterator details::StringSplitter::begin() const -{ - return iterator(this, 0); -} - -details::StringSplitter::iterator details::StringSplitter::end() const -{ - return iterator(this, std::string::npos); -} - /** * \fn template<typename Container, typename UnaryOp> \ * std::string utils::join(const Container &items, const std::string &sep, UnaryOp op) @@ -531,6 +516,138 @@ double strtod(const char *__restrict nptr, char **__restrict endptr) * \return The value of e converted to its underlying type */ +/** + * \class ScopeExitActions + * \brief An object that performs actions upon destruction + * + * The ScopeExitActions class is a simple object that performs user-provided + * actions upon destruction. It is meant to simplify cleanup tasks in error + * handling paths. + * + * When the code flow performs multiple sequential actions that each need a + * corresponding cleanup action, error handling quickly becomes tedious: + * + * \code{.cpp} + * { + * int ret = allocateMemory(); + * if (ret) + * return ret; + * + * ret = startProducer(); + * if (ret) { + * freeMemory(); + * return ret; + * } + * + * ret = startConsumer(); + * if (ret) { + * stopProducer(); + * freeMemory(); + * return ret; + * } + * + * return 0; + * } + * \endcode + * + * This is prone to programming mistakes, as cleanup actions can easily be + * forgotten or ordered incorrectly. One strategy to simplify error handling is + * to use goto statements: + * + * \code{.cpp} + * { + * int ret = allocateMemory(); + * if (ret) + * return ret; + * + * ret = startProducer(); + * if (ret) + * goto error_free; + * + * ret = startConsumer(); + * if (ret) + * goto error_stop; + * + * return 0; + * + * error_stop: + * stopProducer(); + * error_free: + * freeMemory(); + * return ret; + * } + * \endcode + * + * While this may be considered better, this solution is still quite + * error-prone. Besides the risk of picking the wrong error label, the error + * handling logic is separated from the normal code flow, which increases the + * risk of error when refactoring the code.
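A possible use of the new affinity API added above, as a sketch: the stored mask can be set before start(), in which case it is applied when the thread starts, or while the thread is running, in which case it takes effect immediately.

\code{.cpp}
#include <libcamera/base/thread.h>

using namespace libcamera;

void startPinnedWorker(Thread &worker)
{
	/* Pin the worker to CPU 0; invalid indices are rejected. */
	const unsigned int cpus[] = { 0 };
	if (worker.setThreadAffinity(cpus) < 0)
		return;

	/* The stored mask is applied when the thread starts. */
	worker.start();
}
\endcode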
Additionally, C++ doesn't allow + * goto statements to jump over local variable declarations, which can make + * usage of this pattern more difficult. + * + * The ScopeExitActions class solves these issues by allowing code that + * requires cleanup actions to be grouped with its corresponding error handling + * code: + * + * \code{.cpp} + * { + * ScopeExitActions actions; + * + * int ret = allocateMemory(); + * if (ret) + * return ret; + * + * actions += [&]() { freeMemory(); }; + * + * ret = startProducer(); + * if (ret) + * return ret; + * + * actions += [&]() { stopProducer(); }; + * + * ret = startConsumer(); + * if (ret) + * return ret; + * + * actions.release(); + * return 0; + * } + * \endcode + * + * Error handlers are executed when the ScopeExitActions instance is destroyed, + * in the reverse order of their addition. + */ + +ScopeExitActions::~ScopeExitActions() +{ + for (const auto &action : utils::reverse(actions_)) + action(); +} + +/** + * \brief Add an exit action + * \param[in] action The action + * + * Add an exit action to the ScopeExitActions. Actions will be called upon + * destruction in the reverse order of their addition. + */ +void ScopeExitActions::operator+=(std::function<void()> &&action) +{ + actions_.push_back(std::move(action)); +} + +/** + * \brief Remove all exit actions + * + * This function should be called in scope exit paths that don't need the + * actions to be executed, such as success return paths from a function when + * the ScopeExitActions is used for error cleanup. + */ +void ScopeExitActions::release() +{ + actions_.clear(); +} + } /* namespace utils */ #ifndef __DOXYGEN__ diff --git a/src/libcamera/bayer_format.cpp b/src/libcamera/bayer_format.cpp index 014f716d..3dab91fc 100644 --- a/src/libcamera/bayer_format.cpp +++ b/src/libcamera/bayer_format.cpp @@ -184,6 +184,8 @@ const std::map<BayerFormat, Formats, BayerFormatComparator> bayerToFormat{ { formats::R10_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y10P) } }, { { BayerFormat::MONO, 12, BayerFormat::Packing::None }, { formats::R12, V4L2PixelFormat(V4L2_PIX_FMT_Y12) } }, + { { BayerFormat::MONO, 12, BayerFormat::Packing::CSI2 }, + { formats::R12_CSI2P, V4L2PixelFormat(V4L2_PIX_FMT_Y12P) } }, { { BayerFormat::MONO, 16, BayerFormat::Packing::None }, { formats::R16, V4L2PixelFormat(V4L2_PIX_FMT_Y16) } }, { { BayerFormat::MONO, 16, BayerFormat::Packing::PISP1 }, @@ -223,6 +225,10 @@ const std::unordered_map<unsigned int, BayerFormat> mbusCodeToBayer{ { MEDIA_BUS_FMT_SGBRG16_1X16, { BayerFormat::GBRG, 16, BayerFormat::Packing::None } }, { MEDIA_BUS_FMT_SGRBG16_1X16, { BayerFormat::GRBG, 16, BayerFormat::Packing::None } }, { MEDIA_BUS_FMT_SRGGB16_1X16, { BayerFormat::RGGB, 16, BayerFormat::Packing::None } }, + { MEDIA_BUS_FMT_SBGGR20_1X20, { BayerFormat::BGGR, 20, BayerFormat::Packing::None } }, + { MEDIA_BUS_FMT_SGBRG20_1X20, { BayerFormat::GBRG, 20, BayerFormat::Packing::None } }, + { MEDIA_BUS_FMT_SGRBG20_1X20, { BayerFormat::GRBG, 20, BayerFormat::Packing::None } }, + { MEDIA_BUS_FMT_SRGGB20_1X20, { BayerFormat::RGGB, 20, BayerFormat::Packing::None } }, { MEDIA_BUS_FMT_Y8_1X8, { BayerFormat::MONO, 8, BayerFormat::Packing::None } }, { MEDIA_BUS_FMT_Y10_1X10, { BayerFormat::MONO, 10, BayerFormat::Packing::None } }, { MEDIA_BUS_FMT_Y12_1X12, { BayerFormat::MONO, 12, BayerFormat::Packing::None } }, diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp index 67f34901..56c58519 100644 --- a/src/libcamera/camera.cpp +++ b/src/libcamera/camera.cpp @@ -9,19 +9,23 @@ #include <array> #include <atomic> 
-#include <iomanip> +#include <ios> +#include <memory> +#include <optional> +#include <set> +#include <sstream> #include <libcamera/base/log.h> #include <libcamera/base/thread.h> #include <libcamera/color_space.h> +#include <libcamera/control_ids.h> #include <libcamera/framebuffer_allocator.h> #include <libcamera/request.h> #include <libcamera/stream.h> #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_controls.h" -#include "libcamera/internal/formats.h" #include "libcamera/internal/pipeline_handler.h" #include "libcamera/internal/request.h" @@ -117,6 +121,12 @@ * of view is affected by the pipeline. */ +/** + * \internal + * \file libcamera/internal/camera.h + * \brief Internal camera device handling + */ + namespace libcamera { LOG_DECLARE_CATEGORY(Camera) @@ -327,7 +337,7 @@ void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg) * \retval CameraConfiguration::Invalid The configuration is invalid and can't * be adjusted. This may only occur in extreme cases such as when the * configuration is empty. - * \retval CameraConfigutation::Adjusted The configuration has been adjusted + * \retval CameraConfiguration::Adjusted The configuration has been adjusted * and is now valid. Parameters may have changed for any stream, and stream * configurations may have been removed. The caller shall check the * configuration carefully. @@ -559,6 +569,7 @@ CameraConfiguration::Status CameraConfiguration::validateColorSpaces(ColorSpaceF * \brief The vector of stream configurations */ +#ifndef __DOXYGEN_PUBLIC__ /** * \class Camera::Private * \brief Base class for camera private data @@ -594,6 +605,11 @@ Camera::Private::~Private() */ /** + * \fn Camera::Private::pipe() const + * \copydoc Camera::Private::pipe() + */ + +/** * \fn Camera::Private::validator() * \brief Retrieve the control validator related to this camera * \return The control validator associated with this camera @@ -719,6 +735,7 @@ void Camera::Private::setState(State state) { state_.store(state, std::memory_order_release); } +#endif /* __DOXYGEN_PUBLIC__ */ /** * \class Camera @@ -813,6 +830,7 @@ void Camera::Private::setState(State state) */ /** + * \internal * \brief Create a camera instance * \param[in] d Camera private data * \param[in] id The ID of the camera device @@ -986,7 +1004,8 @@ int Camera::acquire() if (ret < 0) return ret == -EACCES ? -EBUSY : ret; - if (!d->pipe_->acquire()) { + if (!d->pipe_->invokeMethod(&PipelineHandler::acquire, + ConnectionTypeBlocking, this)) { LOG(Camera, Info) << "Pipeline handler in use by another process"; return -EBUSY; @@ -1021,7 +1040,8 @@ int Camera::release() return ret == -EACCES ? -EBUSY : ret; if (d->isAcquired()) - d->pipe_->release(this); + d->pipe_->invokeMethod(&PipelineHandler::release, + ConnectionTypeBlocking, this); d->setState(Private::CameraAvailable); @@ -1164,8 +1184,8 @@ int Camera::configure(CameraConfiguration *config) if (ret < 0) return ret; - for (auto it : *config) - it.setStream(nullptr); + for (auto &cfg : *config) + cfg.setStream(nullptr); if (config->validate() != CameraConfiguration::Valid) { LOG(Camera, Error) @@ -1306,6 +1326,25 @@ int Camera::queueRequest(Request *request) } } + /* Pre-process AeEnable. */ + ControlList &controls = request->controls(); + const auto &aeEnable = controls.get(controls::AeEnable); + if (aeEnable) { + if (_d()->controlInfo_.count(controls::AnalogueGainMode.id()) && + !controls.contains(controls::AnalogueGainMode.id())) { + controls.set(controls::AnalogueGainMode, + *aeEnable ? 
controls::AnalogueGainModeAuto + : controls::AnalogueGainModeManual); + } + + if (_d()->controlInfo_.count(controls::ExposureTimeMode.id()) && + !controls.contains(controls::ExposureTimeMode.id())) { + controls.set(controls::ExposureTimeMode, + *aeEnable ? controls::ExposureTimeModeAuto + : controls::ExposureTimeModeManual); + } + } + d->pipe_->invokeMethod(&PipelineHandler::queueRequest, ConnectionTypeQueued, request); diff --git a/src/libcamera/camera_manager.cpp b/src/libcamera/camera_manager.cpp index 95a9e326..87e6717e 100644 --- a/src/libcamera/camera_manager.cpp +++ b/src/libcamera/camera_manager.cpp @@ -15,6 +15,7 @@ #include "libcamera/internal/camera.h" #include "libcamera/internal/device_enumerator.h" +#include "libcamera/internal/ipa_manager.h" #include "libcamera/internal/pipeline_handler.h" /** @@ -23,6 +24,7 @@ */ /** + * \internal * \file libcamera/internal/camera_manager.h * \brief Internal camera manager support */ @@ -34,9 +36,11 @@ namespace libcamera { LOG_DEFINE_CATEGORY(Camera) +#ifndef __DOXYGEN_PUBLIC__ CameraManager::Private::Private() : initialized_(false) { + ipaManager_ = std::make_unique<IPAManager>(); } int CameraManager::Private::start() @@ -76,8 +80,10 @@ void CameraManager::Private::run() mutex_.unlock(); cv_.notify_one(); - if (ret < 0) + if (ret < 0) { + cleanup(); return; + } /* Now start processing events and messages. */ exec(); @@ -250,6 +256,14 @@ void CameraManager::Private::removeCamera(std::shared_ptr<Camera> camera) } /** + * \fn CameraManager::Private::ipaManager() const + * \brief Retrieve the IPAManager + * \context This function is \threadsafe. + * \return The IPAManager for this CameraManager + */ +#endif /* __DOXYGEN_PUBLIC__ */ + +/** * \class CameraManager * \brief Provide access and manage all cameras in the system * @@ -374,7 +388,7 @@ std::shared_ptr<Camera> CameraManager::get(const std::string &id) MutexLocker locker(d->mutex_); - for (std::shared_ptr<Camera> camera : d->cameras_) { + for (const std::shared_ptr<Camera> &camera : d->cameras_) { if (camera->id() == id) return camera; } diff --git a/src/libcamera/control_ids.cpp.in b/src/libcamera/control_ids.cpp.in index d283c1c1..65668d48 100644 --- a/src/libcamera/control_ids.cpp.in +++ b/src/libcamera/control_ids.cpp.in @@ -2,51 +2,122 @@ /* * Copyright (C) 2019, Google Inc. * - * control_ids.cpp : Control ID list + * {{mode}} ID list * * This file is auto-generated. Do not edit. 
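From the application side, the pre-processing above makes AeEnable shorthand for setting both mode controls, with explicitly supplied modes taking precedence. An illustrative sketch, not code from the tree:

\code{.cpp}
#include <libcamera/control_ids.h>
#include <libcamera/request.h>

using namespace libcamera;

void disableAgc(Request *request)
{
	ControlList &controls = request->controls();

	/* This single boolean control... */
	controls.set(controls::AeEnable, false);

	/*
	 * ...is expanded by Camera::queueRequest() into the two mode
	 * controls, unless the request already contains them:
	 *
	 *   controls.set(controls::AnalogueGainMode,
	 *                controls::AnalogueGainModeManual);
	 *   controls.set(controls::ExposureTimeMode,
	 *                controls::ExposureTimeModeManual);
	 */
}
\endcode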
*/ -#include <libcamera/control_ids.h> +#include <libcamera/{{filename}}.h> #include <libcamera/controls.h> /** - * \file control_ids.h - * \brief Camera control identifiers + * \file {{filename}}.h + * \brief Camera {{mode}} identifiers */ namespace libcamera { /** - * \brief Namespace for libcamera controls + * \brief Namespace for libcamera {{mode}} */ -namespace controls { +namespace {{mode}} { -${controls_doc} +{%- for vendor, ctrls in controls -%} -${vendor_controls_doc} +{%- if vendor != 'libcamera' %} +/** + * \brief Namespace for {{vendor}} {{mode}} + */ +namespace {{vendor}} { +{%- endif -%} + +{% for ctrl in ctrls %} + +{% if ctrl.is_enum -%} +/** + * \enum {{ctrl.name}}Enum + * \brief Supported {{ctrl.name}} values +{%- for enum in ctrl.enum_values %} + * + * \var {{enum.name}} + * \brief {{enum.description|format_description}} +{%- endfor %} + */ + +/** + * \var {{ctrl.name}}Values + * \brief List of all {{ctrl.name}} supported values + */ + +/** + * \var {{ctrl.name}}NameValueMap + * \brief Map of all {{ctrl.name}} supported value names (in std::string format) to value + */ + +{% endif -%} +/** + * \var {{ctrl.name}} + * \brief {{ctrl.description|format_description}} + */ +{%- endfor %} +{% if vendor != 'libcamera' %} +} /* namespace {{vendor}} */ +{% endif -%} + +{%- endfor %} #ifndef __DOXYGEN__ /* - * Keep the controls definitions hidden from doxygen as it incorrectly parses + * Keep the {{mode}} definitions hidden from doxygen as it incorrectly parses * them as functions. */ -${controls_def} +{% for vendor, ctrls in controls -%} + +{% if vendor != 'libcamera' %} +namespace {{vendor}} { +{% endif %} + +{%- for ctrl in ctrls %} +{% if ctrl.is_enum -%} +extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values = { +{%- for enum in ctrl.enum_values %} + static_cast<{{ctrl.type}}>({{enum.name}}), +{%- endfor %} +}; +extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap = { +{%- for enum in ctrl.enum_values %} + { "{{enum.name}}", {{enum.name}} }, +{%- endfor %} +}; +extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}, {{ctrl.name}}NameValueMap); +{% else -%} +extern const Control<{{ctrl.type}}> {{ctrl.name}}({{ctrl.name|snake_case|upper}}, "{{ctrl.name}}", "{{vendor}}", {{ctrl.direction}}); +{% endif -%} +{%- endfor %} -${vendor_controls_def} +{% if vendor != 'libcamera' %} +} /* namespace {{vendor}} */ +{% endif -%} -#endif +{%- endfor %} +#endif /* __DOXYGEN__ */ /** - * \brief List of all supported libcamera controls + * \brief List of all supported libcamera {{mode}} +{%- if mode == 'controls' %} * * Unless otherwise stated, all controls are bi-directional, i.e. they can be * set through Request::controls() and returned out through Request::metadata(). 
+{%- endif %} */ -extern const ControlIdMap controls { -${controls_map} +extern const ControlIdMap {{mode}} { +{%- for vendor, ctrls in controls -%} +{%- for ctrl in ctrls %} + { {{ctrl.namespace}}{{ctrl.name|snake_case|upper}}, &{{ctrl.namespace}}{{ctrl.name}} }, +{%- endfor -%} +{%- endfor %} }; -} /* namespace controls */ +} /* namespace {{mode}} */ } /* namespace libcamera */ diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml index 9d413a94..aa744864 100644 --- a/src/libcamera/control_ids_core.yaml +++ b/src/libcamera/control_ids_core.yaml @@ -10,32 +10,95 @@ vendor: libcamera controls: - AeEnable: type: bool + direction: in description: | - Enable or disable the AE. + Enable or disable the AEGC algorithm. When this control is set to true, + both ExposureTimeMode and AnalogueGainMode are set to auto, and if this + control is set to false then both are set to manual. - \sa ExposureTime AnalogueGain + If ExposureTimeMode or AnalogueGainMode are also set in the same + request as AeEnable, then the modes supplied by ExposureTimeMode or + AnalogueGainMode will take precedence. - - AeLocked: - type: bool + \sa ExposureTimeMode AnalogueGainMode + + - AeState: + type: int32_t + direction: out description: | - Report the lock status of a running AE algorithm. + Report the AEGC algorithm state. - If the AE algorithm is locked the value shall be set to true, if it's - converging it shall be set to false. If the AE algorithm is not - running the control shall not be present in the metadata control list. + The AEGC algorithm computes the exposure time and the analogue gain + to be applied to the image sensor. + + The AEGC algorithm behaviour is controlled by the ExposureTimeMode and + AnalogueGainMode controls, which allow applications to decide how + the exposure time and gain are computed, in Auto or Manual mode, + independently from one another. + + The AeState control reports the AEGC algorithm state through a single + value and describes it as a single computation block which computes + both the exposure time and the analogue gain values. + + When both the exposure time and analogue gain values are configured to + be in Manual mode, the AEGC algorithm is quiescent and does not actively + compute any value and the AeState control will report AeStateIdle. + + When at least the exposure time or analogue gain are configured to be + computed by the AEGC algorithm, the AeState control will report if the + algorithm has converged to stable values for all of the controls set + to be computed in Auto mode. + + \sa AnalogueGainMode + \sa ExposureTimeMode + + enum: + - name: AeStateIdle + value: 0 + description: | + The AEGC algorithm is inactive. + + This state is returned when both AnalogueGainMode and + ExposureTimeMode are set to Manual and the algorithm is not + actively computing any value. + - name: AeStateSearching + value: 1 + description: | + The AEGC algorithm is actively computing new values, for either the + exposure time or the analogue gain, but has not converged to a + stable result yet. + + This state is returned if at least one of AnalogueGainMode or + ExposureTimeMode is auto and the algorithm hasn't converged yet. + + The AEGC algorithm converges once stable values are computed for + all of the controls set to be computed in Auto mode. Once the + algorithm converges the state is moved to AeStateConverged. + - name: AeStateConverged + value: 2 + description: | + The AEGC algorithm has converged. 
+ + This state is returned if at least one of AnalogueGainMode or + ExposureTimeMode is Auto, and the AEGC algorithm has converged to a + stable value. - \sa AeEnable + If the measurements move too far away from the convergence point + then the AEGC algorithm might start adjusting again, in which case + the state is moved to AeStateSearching. # AeMeteringMode needs further attention: # - Auto-generate max enum value. # - Better handling of custom types. - AeMeteringMode: type: int32_t + direction: inout description: | - Specify a metering mode for the AE algorithm to use. The metering - modes determine which parts of the image are used to determine the - scene brightness. Metering modes may be platform specific and not - all metering modes may be supported. + Specify a metering mode for the AE algorithm to use. + + The metering modes determine which parts of the image are used to + determine the scene brightness. Metering modes may be platform specific + and not all metering modes may be supported. enum: - name: MeteringCentreWeighted value: 0 @@ -55,45 +118,63 @@ controls: # - Better handling of custom types. - AeConstraintMode: type: int32_t + direction: inout description: | - Specify a constraint mode for the AE algorithm to use. These determine - how the measured scene brightness is adjusted to reach the desired - target exposure. Constraint modes may be platform specific, and not - all constraint modes may be supported. + Specify a constraint mode for the AE algorithm to use. + + The constraint modes determine how the measured scene brightness is + adjusted to reach the desired target exposure. Constraint modes may be + platform specific, and not all constraint modes may be supported. enum: - name: ConstraintNormal value: 0 - description: Default constraint mode. + description: | + Default constraint mode. + This mode aims to balance the exposure of different parts of the image so as to reach a reasonable average level. However, highlights in the image may appear over-exposed and lowlights may appear under-exposed. - name: ConstraintHighlight value: 1 - description: Highlight constraint mode. + description: | + Highlight constraint mode. + This mode adjusts the exposure levels in order to try and avoid over-exposing the brightest parts (highlights) of an image. Other non-highlight parts of the image may appear under-exposed. - name: ConstraintShadows value: 2 - description: Shadows constraint mode. + description: | + Shadows constraint mode. + This mode adjusts the exposure levels in order to try and avoid under-exposing the dark parts (shadows) of an image. Other normally exposed parts of the image may appear over-exposed. - name: ConstraintCustom value: 3 - description: Custom constraint mode. + description: | + Custom constraint mode. # AeExposureMode needs further attention: # - Auto-generate max enum value. # - Better handling of custom types. - AeExposureMode: type: int32_t + direction: inout description: | - Specify an exposure mode for the AE algorithm to use. These specify - how the desired total exposure is divided between the shutter time - and the sensor's analogue gain. The exposure modes are platform - specific, and not all exposure modes may be supported. + Specify an exposure mode for the AE algorithm to use. + + The exposure modes specify how the desired total exposure is divided + between the exposure time and the sensor's analogue gain. They are + platform specific, and not all exposure modes may be supported. 
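As an illustration of the AeState reporting described above, an application might gate its capture logic on convergence. A minimal sketch, not part of the patch; the helper name is hypothetical and it assumes the controls introduced by this series are available:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    /*
     * AeState reports AeStateIdle while both ExposureTimeMode and
     * AnalogueGainMode are manual; otherwise it reports Searching until
     * the AEGC algorithm settles on stable values.
     */
    static bool aegcConverged(Request *request)
    {
        const auto state = request->metadata().get(controls::AeState);

        return state && *state == controls::AeStateConverged;
    }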
+ + When one of AnalogueGainMode or ExposureTimeMode is set to Manual, + the fixed values will override any choices made by AeExposureMode. + + \sa AnalogueGainMode + \sa ExposureTimeMode + enum: - name: ExposureNormal value: 0 @@ -110,59 +191,222 @@ controls: - ExposureValue: type: float + direction: inout description: | - Specify an Exposure Value (EV) parameter. The EV parameter will only be - applied if the AE algorithm is currently enabled. + Specify an Exposure Value (EV) parameter. + + The EV parameter will only be applied if the AE algorithm is currently + enabled, that is, at least one of AnalogueGainMode and ExposureTimeMode + are in Auto mode. By convention EV adjusts the exposure as log2. For example - EV = [-2, -1, 0.5, 0, 0.5, 1, 2] results in an exposure adjustment + EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x]. - \sa AeEnable + \sa AnalogueGainMode + \sa ExposureTimeMode - ExposureTime: type: int32_t + direction: inout description: | - Exposure time (shutter speed) for the frame applied in the sensor - device. This value is specified in micro-seconds. + Exposure time for the frame applied in the sensor device. + + This value is specified in micro-seconds. - Setting this value means that it is now fixed and the AE algorithm may - not change it. Setting it back to zero returns it to the control of the - AE algorithm. + This control will only take effect if ExposureTimeMode is Manual. If + this control is set when ExposureTimeMode is Auto, the value will be + ignored and will not be retained. - \sa AnalogueGain AeEnable + When reported in metadata, this control indicates what exposure time + was used for the current frame, regardless of ExposureTimeMode. + ExposureTimeMode will indicate the source of the exposure time value, + whether it came from the AE algorithm or not. - \todo Document the interactions between AeEnable and setting a fixed - value for this control. Consider interactions with other AE features, - such as aperture and aperture/shutter priority mode, and decide if - control of which features should be automatically adjusted shouldn't - better be handled through a separate AE mode control. + \sa AnalogueGain + \sa ExposureTimeMode + + - ExposureTimeMode: + type: int32_t + direction: inout + description: | + Controls the source of the exposure time that is applied to the image + sensor. + + When set to Auto, the AE algorithm computes the exposure time and + configures the image sensor accordingly. When set to Manual, the value + of the ExposureTime control is used. + + When transitioning from Auto to Manual mode and no ExposureTime control + is provided by the application, the last value computed by the AE + algorithm when the mode was Auto will be used. If the ExposureTimeMode + was never set to Auto (either because the camera started in Manual mode, + or Auto is not supported by the camera), the camera should use a + best-effort default value. + + If ExposureTimeModeManual is supported, the ExposureTime control must + also be supported. + + Cameras that support manual control of the sensor shall support manual + mode for both ExposureTimeMode and AnalogueGainMode, and shall expose + the ExposureTime and AnalogueGain controls. If the camera also has an + AEGC implementation, both ExposureTimeMode and AnalogueGainMode shall + support both manual and auto mode. If auto mode is available, it shall + be the default mode. 
These rules do not apply to black box cameras + such as UVC cameras, where the available gain and exposure modes are + completely dependent on what the device exposes. + + \par Flickerless exposure mode transitions + + Applications that wish to transition from ExposureTimeModeAuto to direct + control of the exposure time without causing extra flicker can do so by + selecting an ExposureTime value as close as possible to the last value + computed by the auto exposure algorithm in order to avoid any visible + flickering. + + To select the correct value to use as ExposureTime value, applications + should accommodate the natural delay in applying controls caused by the + capture pipeline frame depth. + + When switching to manual exposure mode, applications should not + immediately specify an ExposureTime value in the same request where + ExposureTimeMode is set to Manual. They should instead wait for the + first Request where ExposureTimeMode is reported as + ExposureTimeModeManual in the Request metadata, and use the reported + ExposureTime to populate the control value in the next Request to be + queued to the Camera. + + The implementation of the auto-exposure algorithm should equally try to + minimize flickering and when transitioning from manual exposure mode to + auto exposure use the last value provided by the application as starting + point. + + 1. Start with ExposureTimeMode set to Auto + + 2. Set ExposureTimeMode to Manual + + 3. Wait for the first completed request that has ExposureTimeMode + set to Manual + + 4. Copy the value reported in ExposureTime into a new request, and + submit it + + 5. Proceed to run manual exposure time as desired + + \sa ExposureTime + enum: + - name: ExposureTimeModeAuto + value: 0 + description: | + The exposure time will be calculated automatically and set by the + AE algorithm. + + If ExposureTime is set while this mode is active, it will be + ignored, and its value will not be retained. + + When transitioning from Manual to Auto mode, the AEGC should start + its adjustments based on the last set manual ExposureTime value. + - name: ExposureTimeModeManual + value: 1 + description: | + The exposure time will not be updated by the AE algorithm. + + When transitioning from Auto to Manual mode, the last computed + exposure value is used until a new value is specified through the + ExposureTime control. If an ExposureTime value is specified in the + same request where the ExposureTimeMode is changed from Auto to + Manual, the provided ExposureTime is applied immediately. - AnalogueGain: type: float + direction: inout description: | Analogue gain value applied in the sensor device. + The value of the control specifies the gain multiplier applied to all colour channels. This value cannot be lower than 1.0. - Setting this value means that it is now fixed and the AE algorithm may - not change it. Setting it back to zero returns it to the control of the - AE algorithm. + This control will only take effect if AnalogueGainMode is Manual. If + this control is set when AnalogueGainMode is Auto, the value will be + ignored and will not be retained. + + When reported in metadata, this control indicates what analogue gain + was used for the current request, regardless of AnalogueGainMode. + AnalogueGainMode will indicate the source of the analogue gain value, + whether it came from the AEGC algorithm or not. 
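The five-step flickerless transition procedure above maps directly onto an application's request loop. A minimal sketch, not part of the patch; helper names are hypothetical and error handling is omitted:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    /* Step 2: switch to manual mode, deliberately leaving ExposureTime unset. */
    static void requestManualExposure(Request *request)
    {
        request->controls().set(controls::ExposureTimeMode,
                                controls::ExposureTimeModeManual);
    }

    /*
     * Steps 3 and 4: once a completed request reports manual mode, copy the
     * exposure time it reports into the next request to pin the value
     * without a visible exposure jump.
     */
    static void pinExposure(Request *completed, Request *next)
    {
        const auto mode = completed->metadata().get(controls::ExposureTimeMode);
        const auto exposure = completed->metadata().get(controls::ExposureTime);

        if (mode && exposure && *mode == controls::ExposureTimeModeManual)
            next->controls().set(controls::ExposureTime, *exposure);
    }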
+ + \sa ExposureTime + \sa AnalogueGainMode - \sa ExposureTime AeEnable + - AnalogueGainMode: + type: int32_t + direction: inout + description: | + Controls the source of the analogue gain that is applied to the image + sensor. + + When set to Auto, the AEGC algorithm computes the analogue gain and + configures the image sensor accordingly. When set to Manual, the value + of the AnalogueGain control is used. + + When transitioning from Auto to Manual mode and no AnalogueGain control + is provided by the application, the last value computed by the AEGC + algorithm when the mode was Auto will be used. If the AnalogueGainMode + was never set to Auto (either because the camera started in Manual mode, + or Auto is not supported by the camera), the camera should use a + best-effort default value. + + If AnalogueGainModeManual is supported, the AnalogueGain control must + also be supported. + + For cameras where we have control over the ISP, both ExposureTimeMode + and AnalogueGainMode are expected to support manual mode, and both + controls (as well as ExposureTime and AnalogueGain) are expected to + be present. If the camera also has an AEGC implementation, both + ExposureTimeMode and AnalogueGainMode shall support both manual and + auto mode. If auto mode is available, it shall be the default mode. + These rules do not apply to black box cameras such as UVC cameras, + where the available gain and exposure modes are completely dependent on + what the hardware exposes. + + The same procedure described for performing flickerless transitions in + the ExposureTimeMode control documentation can be applied to analogue + gain. + + \sa ExposureTimeMode + \sa AnalogueGain + enum: + - name: AnalogueGainModeAuto + value: 0 + description: | + The analogue gain will be calculated automatically and set by the + AEGC algorithm. + + If AnalogueGain is set while this mode is active, it will be + ignored, and it will also not be retained. + + When transitioning from Manual to Auto mode, the AEGC should start + its adjustments based on the last set manual AnalogueGain value. + - name: AnalogueGainModeManual + value: 1 + description: | + The analogue gain will not be updated by the AEGC algorithm. - \todo Document the interactions between AeEnable and setting a fixed - value for this control. Consider interactions with other AE features, - such as aperture and aperture/shutter priority mode, and decide if - control of which features should be automatically adjusted shouldn't - better be handled through a separate AE mode control. + When transitioning from Auto to Manual mode, the last computed + gain value is used until a new value is specified through the + AnalogueGain control. If an AnalogueGain value is specified in the + same request where the AnalogueGainMode is changed from Auto to + Manual, the provided AnalogueGain is applied immediately. - AeFlickerMode: type: int32_t + direction: inout description: | - Set the flicker mode, which determines whether, and how, the AGC/AEC - algorithm attempts to hide flicker effects caused by the duty cycle of - artificial lighting. + Set the flicker avoidance mode for AGC/AEC. + + The flicker mode determines whether, and how, the AGC/AEC algorithm + attempts to hide flicker effects caused by the duty cycle of artificial + lighting.
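For example, suppressing flicker from 50 Hz mains lighting with the manual mode and the AeFlickerPeriod control described below could look as follows. An illustrative sketch, not part of the patch; the helper name is hypothetical:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    static void suppressMainsFlicker(Request *request)
    {
        ControlList &ctrls = request->controls();

        /* 50 Hz mains lighting flickers at 100 Hz, i.e. a 10000 us period. */
        ctrls.set(controls::AeFlickerMode, controls::FlickerManual);
        ctrls.set(controls::AeFlickerPeriod, 10000);
    }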
Although implementation dependent, many algorithms for "flicker avoidance" work by restricting this exposure time to integer multiples @@ -176,16 +420,21 @@ controls: enum: - name: FlickerOff value: 0 - description: No flicker avoidance is performed. + description: | + No flicker avoidance is performed. - name: FlickerManual value: 1 - description: Manual flicker avoidance. + description: | + Manual flicker avoidance. + Suppress flicker effects caused by lighting running with a period specified by the AeFlickerPeriod control. \sa AeFlickerPeriod - name: FlickerAuto value: 2 - description: Automatic flicker period detection and avoidance. + description: | + Automatic flicker period detection and avoidance. + The system will automatically determine the most likely value of flicker period, and avoid flicker of this frequency. Once flicker is being corrected, it is implementation dependent whether the @@ -194,7 +443,10 @@ controls: - AeFlickerPeriod: type: int32_t - description: Manual flicker period in microseconds. + direction: inout + description: | + Manual flicker period in microseconds. + This value sets the current flicker period to avoid. It is used when AeFlickerMode is set to FlickerManual. @@ -212,7 +464,10 @@ controls: - AeFlickerDetected: type: int32_t - description: Flicker period detected in microseconds. + direction: out + description: | + Flicker period detected in microseconds. + The value reported here indicates the currently detected flicker period, or zero if no flicker at all is detected. @@ -232,38 +487,61 @@ controls: - Brightness: type: float + direction: inout description: | - Specify a fixed brightness parameter. Positive values (up to 1.0) - produce brighter images; negative values (up to -1.0) produce darker - images and 0.0 leaves pixels unchanged. + Specify a fixed brightness parameter. + + Positive values (up to 1.0) produce brighter images; negative values + (up to -1.0) produce darker images and 0.0 leaves pixels unchanged. - Contrast: type: float + direction: inout description: | - Specify a fixed contrast parameter. Normal contrast is given by the - value 1.0; larger values produce images with more contrast. + Specify a fixed contrast parameter. + + Normal contrast is given by the value 1.0; larger values produce images + with more contrast. - Lux: type: float + direction: out description: | - Report an estimate of the current illuminance level in lux. The Lux - control can only be returned in metadata. + Report an estimate of the current illuminance level in lux. + + The Lux control can only be returned in metadata. - AwbEnable: type: bool + direction: inout description: | Enable or disable the AWB. + When AWB is enabled, the algorithm estimates the colour temperature of + the scene and computes colour gains and the colour correction matrix + automatically. The computed colour temperature, gains and correction + matrix are reported in metadata. The corresponding controls are ignored + if set in a request. + + When AWB is disabled, the colour temperature, gains and correction + matrix are not updated automatically and can be set manually in + requests. + + \sa ColourCorrectionMatrix \sa ColourGains + \sa ColourTemperature # AwbMode needs further attention: # - Auto-generate max enum value. # - Better handling of custom types. - AwbMode: type: int32_t + direction: inout description: | - Specify the range of illuminants to use for the AWB algorithm. The modes - supported are platform specific, and not all modes may be supported. 
+ Specify the range of illuminants to use for the AWB algorithm. + + The modes supported are platform specific, and not all modes may be + supported. enum: - name: AwbAuto value: 0 @@ -292,6 +570,7 @@ controls: - AwbLocked: type: bool + direction: out description: | Report the lock status of a running AWB algorithm. @@ -303,39 +582,67 @@ controls: - ColourGains: type: float + direction: inout description: | Pair of gain values for the Red and Blue colour channels, in that - order. ColourGains can only be applied in a Request when the AWB is - disabled. + order. + + ColourGains can only be applied in a Request when the AWB is disabled. + If ColourGains is set in a request but ColourTemperature is not, the + implementation shall calculate and set the ColourTemperature based on + the ColourGains. \sa AwbEnable + \sa ColourTemperature size: [2] - ColourTemperature: type: int32_t - description: Report the current estimate of the colour temperature, in - kelvin, for this frame. The ColourTemperature control can only be - returned in metadata. + direction: out + description: | + ColourTemperature of the frame, in kelvin. + + ColourTemperature can only be applied in a Request when the AWB is + disabled. + + If ColourTemperature is set in a request but ColourGains is not, the + implementation shall calculate and set the ColourGains based on the + given ColourTemperature. If ColourTemperature is set (either directly, + or indirectly by setting ColourGains) but ColourCorrectionMatrix is not, + the ColourCorrectionMatrix is updated based on the ColourTemperature. + + The ColourTemperature used to process the frame is reported in metadata. + + \sa AwbEnable + \sa ColourCorrectionMatrix + \sa ColourGains - Saturation: type: float + direction: inout description: | - Specify a fixed saturation parameter. Normal saturation is given by - the value 1.0; larger values produce more saturated colours; 0.0 - produces a greyscale image. + Specify a fixed saturation parameter. + + Normal saturation is given by the value 1.0; larger values produce more + saturated colours; 0.0 produces a greyscale image. - SensorBlackLevels: type: int32_t + direction: out description: | - Reports the sensor black levels used for processing a frame, in the - order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit - pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels - control can only be returned in metadata. + Reports the sensor black levels used for processing a frame. + + The values are in the order R, Gr, Gb, B. They are returned as numbers + out of a 16-bit pixel range (as if pixels ranged from 0 to 65535). The + SensorBlackLevels control can only be returned in metadata. size: [4] - Sharpness: type: float + direction: inout description: | + Intensity of the sharpening applied to the image. + A value of 0.0 means no sharpening. The minimum value means minimal sharpening, and shall be 0.0 unless the camera can't disable sharpening completely. The default value shall give a @@ -347,8 +654,10 @@ controls: - FocusFoM: type: int32_t + direction: out description: | Reports a Figure of Merit (FoM) to indicate how in-focus the frame is. + A larger FocusFoM value indicates a more in-focus frame. This singular value may be based on a combination of statistics gathered from multiple focus regions within an image. 
The number of focus regions and @@ -358,23 +667,34 @@ controls: - ColourCorrectionMatrix: type: float + direction: inout description: | - The 3x3 matrix that converts camera RGB to sRGB within the - imaging pipeline. This should describe the matrix that is used - after pixels have been white-balanced, but before any gamma - transformation. The 3x3 matrix is stored in conventional reading - order in an array of 9 floating point values. + The 3x3 matrix that converts camera RGB to sRGB within the imaging + pipeline. + + This should describe the matrix that is used after pixels have been + white-balanced, but before any gamma transformation. The 3x3 matrix is + stored in conventional reading order in an array of 9 floating point + values. + + ColourCorrectionMatrix can only be applied in a Request when the AWB is + disabled. + \sa AwbEnable + \sa ColourTemperature size: [3,3] - ScalerCrop: type: Rectangle + direction: inout description: | Sets the image portion that will be scaled to form the whole of - the final output image. The (x,y) location of this rectangle is - relative to the PixelArrayActiveAreas that is being used. The units - remain native sensor pixels, even if the sensor is being used in - a binning or skipping mode. + the final output image. + + The (x,y) location of this rectangle is relative to the + PixelArrayActiveAreas that is being used. The units remain native + sensor pixels, even if the sensor is being used in a binning or + skipping mode. This control is only present when the pipeline supports scaling. Its maximum valid value is given by the properties::ScalerCropMaximum @@ -382,6 +702,7 @@ controls: - DigitalGain: type: float + direction: inout description: | Digital gain value applied during the processing steps applied to the image as captured from the sensor. @@ -399,13 +720,16 @@ controls: - FrameDuration: type: int64_t + direction: out description: | The instantaneous frame duration from start of frame exposure to start - of next exposure, expressed in microseconds. This control is meant to - be returned in metadata. + of next exposure, expressed in microseconds. + + This control is meant to be returned in metadata. - FrameDurationLimits: type: int64_t + direction: inout description: | The minimum and maximum (in that order) frame duration, expressed in microseconds. @@ -419,14 +743,13 @@ controls: values to be the same. Setting both values to 0 reverts to using the camera defaults. - The maximum frame duration provides the absolute limit to the shutter - speed computed by the AE algorithm and it overrides any exposure mode + The maximum frame duration provides the absolute limit to the exposure + time computed by the AE algorithm and it overrides any exposure mode setting specified with controls::AeExposureMode. Similarly, when a manual exposure time is set through controls::ExposureTime, it also gets clipped to the limits set by this control. When reported in - metadata, the control expresses the minimum and maximum frame - durations used after being clipped to the sensor provided frame - duration limits. + metadata, the control expresses the minimum and maximum frame durations + used after being clipped to the sensor provided frame duration limits. \sa AeExposureMode \sa ExposureTime @@ -443,16 +766,20 @@ controls: - SensorTemperature: type: float + direction: out description: | - Temperature measure from the camera sensor in Celsius. This is typically - obtained by a thermal sensor present on-die or in the camera module. 
The - range of reported temperatures is device dependent. + Temperature measure from the camera sensor in Celsius. + + This value is typically obtained by a thermal sensor present on-die or + in the camera module. The range of reported temperatures is device + dependent. The SensorTemperature control will only be returned in metadata if a thermal sensor is present. - SensorTimestamp: type: int64_t + direction: out description: | The time when the first row of the image sensor active array is exposed. @@ -467,8 +794,9 @@ controls: - AfMode: type: int32_t + direction: inout description: | - Control to set the mode of the AF (autofocus) algorithm. + The mode of the AF (autofocus) algorithm. An implementation may choose not to implement all the modes. @@ -476,12 +804,12 @@ controls: - name: AfModeManual value: 0 description: | - The AF algorithm is in manual mode. In this mode it will never - perform any action nor move the lens of its own accord, but an - application can specify the desired lens position using the - LensPosition control. + The AF algorithm is in manual mode. - In this mode the AfState will always report AfStateIdle. + In this mode it will never perform any action nor move the lens of + its own accord, but an application can specify the desired lens + position using the LensPosition control. The AfState will always + report AfStateIdle. If the camera is started in AfModeManual, it will move the focus lens to the position specified by the LensPosition control. @@ -492,31 +820,34 @@ controls: - name: AfModeAuto value: 1 description: | - The AF algorithm is in auto mode. This means that the algorithm - will never move the lens or change state unless the AfTrigger - control is used. The AfTrigger control can be used to initiate a - focus scan, the results of which will be reported by AfState. - - If the autofocus algorithm is moved from AfModeAuto to another - mode while a scan is in progress, the scan is cancelled - immediately, without waiting for the scan to finish. - - When first entering this mode the AfState will report - AfStateIdle. When a trigger control is sent, AfState will - report AfStateScanning for a period before spontaneously - changing to AfStateFocused or AfStateFailed, depending on - the outcome of the scan. It will remain in this state until - another scan is initiated by the AfTrigger control. If a scan is - cancelled (without changing to another mode), AfState will return - to AfStateIdle. + The AF algorithm is in auto mode. + + In this mode the algorithm will never move the lens or change state + unless the AfTrigger control is used. The AfTrigger control can be + used to initiate a focus scan, the results of which will be + reported by AfState. + + If the autofocus algorithm is moved from AfModeAuto to another mode + while a scan is in progress, the scan is cancelled immediately, + without waiting for the scan to finish. + + When first entering this mode the AfState will report AfStateIdle. + When a trigger control is sent, AfState will report AfStateScanning + for a period before spontaneously changing to AfStateFocused or + AfStateFailed, depending on the outcome of the scan. It will remain + in this state until another scan is initiated by the AfTrigger + control. If a scan is cancelled (without changing to another mode), + AfState will return to AfStateIdle. - name: AfModeContinuous value: 2 description: | - The AF algorithm is in continuous mode. 
This means that the lens can - re-start a scan spontaneously at any moment, without any user - intervention. The AfState still reports whether the algorithm is - currently scanning or not, though the application has no ability to - initiate or cancel scans, nor to move the lens for itself. + The AF algorithm is in continuous mode. + + In this mode the lens can re-start a scan spontaneously at any + moment, without any user intervention. The AfState still reports + whether the algorithm is currently scanning or not, though the + application has no ability to initiate or cancel scans, nor to move + the lens for itself. However, applications can pause the AF algorithm from continuously scanning by using the AfPause control. This allows video or still @@ -528,35 +859,43 @@ controls: - AfRange: type: int32_t + direction: inout description: | - Control to set the range of focus distances that is scanned. An - implementation may choose not to implement all the options here. + The range of focus distances that is scanned. + + An implementation may choose not to implement all the options here. enum: - name: AfRangeNormal value: 0 description: | - A wide range of focus distances is scanned, all the way from - infinity down to close distances, though depending on the - implementation, possibly not including the very closest macro - positions. + A wide range of focus distances is scanned. + + Scanned distances cover all the way from infinity down to close + distances, though depending on the implementation, possibly not + including the very closest macro positions. - name: AfRangeMacro value: 1 - description: Only close distances are scanned. + description: | + Only close distances are scanned. - name: AfRangeFull value: 2 description: | - The full range of focus distances is scanned just as with - AfRangeNormal but this time including the very closest macro - positions. + The full range of focus distances is scanned. + + This range is similar to AfRangeNormal but includes the very + closest macro positions. - AfSpeed: type: int32_t + direction: inout description: | - Control that determines whether the AF algorithm is to move the lens - as quickly as possible or more steadily. For example, during video - recording it may be desirable not to move the lens too abruptly, but - when in a preview mode (waiting for a still capture) it may be - helpful to move the lens as quickly as is reasonably possible. + Determine whether the AF is to move the lens as quickly as possible or + more steadily. + + For example, during video recording it may be desirable not to move the + lens too abruptly, but when in a preview mode (waiting for a still + capture) it may be helpful to move the lens as quickly as is reasonably + possible. enum: - name: AfSpeedNormal value: 0 @@ -567,26 +906,30 @@ controls: - AfMetering: type: int32_t + direction: inout description: | - Instruct the AF algorithm how it should decide which parts of the image - should be used to measure focus. + The parts of the image used by the AF algorithm to measure focus. enum: - name: AfMeteringAuto value: 0 - description: The AF algorithm should decide for itself where it will - measure focus. + description: | + Let the AF algorithm decide for itself where it will measure focus. - name: AfMeteringWindows value: 1 - description: The AF algorithm should use the rectangles defined by - the AfWindows control to measure focus. If no windows are specified - the behaviour is platform dependent. 
+ description: | + Use the rectangles defined by the AfWindows control to measure focus. + + If no windows are specified the behaviour is platform dependent. - AfWindows: type: Rectangle + direction: inout description: | - Sets the focus windows used by the AF algorithm when AfMetering is set - to AfMeteringWindows. The units used are pixels within the rectangle - returned by the ScalerCropMaximum property. + The focus windows used by the AF algorithm when AfMetering is set to + AfMeteringWindows. + + The units used are pixels within the rectangle returned by the + ScalerCropMaximum property. In order to be activated, a rectangle must be programmed with non-zero width and height. Internally, these rectangles are intersected with the @@ -610,24 +953,36 @@ controls: - AfTrigger: type: int32_t + direction: in description: | - This control starts an autofocus scan when AfMode is set to AfModeAuto, - and can also be used to terminate a scan early. + Start an autofocus scan. - It is ignored if AfMode is set to AfModeManual or AfModeContinuous. + This control starts an autofocus scan when AfMode is set to AfModeAuto, + and is ignored if AfMode is set to AfModeManual or AfModeContinuous. It + can also be used to terminate a scan early. enum: - name: AfTriggerStart value: 0 - description: Start an AF scan. Ignored if a scan is in progress. + description: | + Start an AF scan. + + Setting the control to AfTriggerStart is ignored if a scan is in + progress. - name: AfTriggerCancel value: 1 - description: Cancel an AF scan. This does not cause the lens to move - anywhere else. Ignored if no scan is in progress. + description: | + Cancel an AF scan. + + This does not cause the lens to move anywhere else. Ignored if no + scan is in progress. - AfPause: type: int32_t + direction: in description: | + Pause lens movements when in continuous autofocus mode. + This control has no effect except when in continuous autofocus mode (AfModeContinuous). It can be used to pause any lens movements while (for example) images are captured. The algorithm remains inactive @@ -637,17 +992,21 @@ controls: - name: AfPauseImmediate value: 0 description: | - Pause the continuous autofocus algorithm immediately, whether or not - any kind of scan is underway. AfPauseState will subsequently report + Pause the continuous autofocus algorithm immediately. + + The autofocus algorithm is paused whether or not any kind of scan + is underway. AfPauseState will subsequently report AfPauseStatePaused. AfState may report any of AfStateScanning, AfStateFocused or AfStateFailed, depending on the algorithm's state when it received this control. - name: AfPauseDeferred value: 1 description: | - This is similar to AfPauseImmediate, and if the AfState is currently - reporting AfStateFocused or AfStateFailed it will remain in that - state and AfPauseState will report AfPauseStatePaused. + Pause the continuous autofocus algorithm at the end of the scan. + + This is similar to AfPauseImmediate, and if the AfState is + currently reporting AfStateFocused or AfStateFailed it will remain + in that state and AfPauseState will report AfPauseStatePaused. However, if the algorithm is scanning (AfStateScanning), AfPauseState will report AfPauseStatePausing until the scan is @@ -658,15 +1017,19 @@ controls: - name: AfPauseResume value: 2 description: | - Resume continuous autofocus operation. The algorithm starts again - from exactly where it left off, and AfPauseState will report - AfPauseStateRunning. + Resume continuous autofocus operation. 
+ + The algorithm starts again from exactly where it left off, and + AfPauseState will report AfPauseStateRunning. - LensPosition: type: float + direction: inout description: | - Acts as a control to instruct the lens to move to a particular position - and also reports back the position of the lens for each frame. + Set and report the focus lens position. + + This control instructs the lens to move to a particular position and + also reports back the position of the lens for each frame. The LensPosition control is ignored unless the AfMode is set to AfModeManual, though the value is reported back unconditionally in all @@ -680,39 +1043,43 @@ controls: For example: - 0 moves the lens to infinity. - 0.5 moves the lens to focus on objects 2m away. - 2 moves the lens to focus on objects 50cm away. - And larger values will focus the lens closer. + - 0 moves the lens to infinity. + - 0.5 moves the lens to focus on objects 2m away. + - 2 moves the lens to focus on objects 50cm away. + - And larger values will focus the lens closer. - The default value of the control should indicate a good general position - for the lens, often corresponding to the hyperfocal distance (the - closest position for which objects at infinity are still acceptably - sharp). The minimum will often be zero (meaning infinity), and the - maximum value defines the closest focus position. + The default value of the control should indicate a good general + position for the lens, often corresponding to the hyperfocal distance + (the closest position for which objects at infinity are still + acceptably sharp). The minimum will often be zero (meaning infinity), + and the maximum value defines the closest focus position. \todo Define a property to report the Hyperfocal distance of calibrated lenses. - AfState: type: int32_t + direction: out description: | - Reports the current state of the AF algorithm in conjunction with the - reported AfMode value and (in continuous AF mode) the AfPauseState - value. The possible state changes are described below, though we note - the following state transitions that occur when the AfMode is changed. + The current state of the AF algorithm. + + This control reports the current state of the AF algorithm in + conjunction with the reported AfMode value and (in continuous AF mode) + the AfPauseState value. The possible state changes are described below, + though we note the following state transitions that occur when the + AfMode is changed. If the AfMode is set to AfModeManual, then the AfState will always - report AfStateIdle (even if the lens is subsequently moved). Changing to - the AfModeManual state does not initiate any lens movement. + report AfStateIdle (even if the lens is subsequently moved). Changing + to the AfModeManual state does not initiate any lens movement. If the AfMode is set to AfModeAuto then the AfState will report - AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent together - then AfState will omit AfStateIdle and move straight to AfStateScanning - (and start a scan). + AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent + together then AfState will omit AfStateIdle and move straight to + AfStateScanning (and start a scan). - If the AfMode is set to AfModeContinuous then the AfState will initially - report AfStateScanning. + If the AfMode is set to AfModeContinuous then the AfState will + initially report AfStateScanning. 
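A one-shot scan in AfModeAuto then reduces to sending a trigger and watching AfState in the completed requests. An illustrative sketch, not part of the patch; helper names are hypothetical:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    /* Kick off a one-shot AF scan; the outcome arrives later via AfState. */
    static void startAfScan(Request *request)
    {
        ControlList &ctrls = request->controls();

        ctrls.set(controls::AfMode, controls::AfModeAuto);
        ctrls.set(controls::AfTrigger, controls::AfTriggerStart);
    }

    /* True once the scan has finished, whether it succeeded or failed. */
    static bool afScanDone(Request *completed)
    {
        const auto state = completed->metadata().get(controls::AfState);

        return state && (*state == controls::AfStateFocused ||
                         *state == controls::AfStateFailed);
    }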
enum: - name: AfStateIdle @@ -725,11 +1092,12 @@ controls: value: 1 description: | The AF algorithm is in auto mode (AfModeAuto), and a scan has been - started using the AfTrigger control. The scan can be cancelled by - sending AfTriggerCancel at which point the algorithm will either - move back to AfStateIdle or, if the scan actually completes before - the cancel request is processed, to one of AfStateFocused or - AfStateFailed. + started using the AfTrigger control. + + The scan can be cancelled by sending AfTriggerCancel at which point + the algorithm will either move back to AfStateIdle or, if the scan + actually completes before the cancel request is processed, to one + of AfStateFocused or AfStateFailed. Alternatively the AF algorithm could be in continuous mode (AfModeContinuous) at which point it may enter this state @@ -749,10 +1117,14 @@ controls: - AfPauseState: type: int32_t + direction: out description: | - Only applicable in continuous (AfModeContinuous) mode, this reports - whether the algorithm is currently running, paused or pausing (that is, - will pause as soon as any in-progress scan completes). + Report whether the autofocus is currently running, paused or pausing. + + This control is only applicable in continuous (AfModeContinuous) mode, + and reports whether the algorithm is currently running, paused or + pausing (that is, will pause as soon as any in-progress scan + completes). Any change to AfMode will cause AfPauseStateRunning to be reported. @@ -766,22 +1138,28 @@ controls: value: 1 description: | Continuous AF has been sent an AfPauseDeferred control, and will - pause as soon as any in-progress scan completes (and then report - AfPauseStatePaused). No new scans will be start spontaneously until + pause as soon as any in-progress scan completes. + + When the scan completes, the AfPauseState control will report + AfPauseStatePaused. No new scans will start spontaneously until the AfPauseResume control is sent. - name: AfPauseStatePaused value: 2 description: | - Continuous AF is paused. No further state changes or lens movements - will occur until the AfPauseResume control is sent. + Continuous AF is paused. + + No further state changes or lens movements will occur until the + AfPauseResume control is sent. - HdrMode: type: int32_t + direction: inout description: | - Control to set the mode to be used for High Dynamic Range (HDR) - imaging. HDR techniques typically include multiple exposure, image - fusion and tone mapping techniques to improve the dynamic range of the - resulting images. + Set the mode to be used for High Dynamic Range (HDR) imaging. + + HDR techniques typically include multiple exposure, image fusion and + tone mapping techniques to improve the dynamic range of the resulting + images. When using an HDR mode, images are captured with different sets of AGC settings called HDR channels. Channels indicate in particular the type @@ -795,16 +1173,18 @@ controls: - name: HdrModeOff value: 0 description: | - HDR is disabled. Metadata for this frame will not include the - HdrChannel control. + HDR is disabled. + + Metadata for this frame will not include the HdrChannel control. - name: HdrModeMultiExposureUnmerged value: 1 description: | Multiple exposures will be generated in an alternating fashion. - However, they will not be merged together and will be returned to - the application as they are. Each image will be tagged with the - correct HDR channel, indicating what kind of exposure it is.
The - tag should be the same as in the HdrModeMultiExposure case. + + The multiple exposures will not be merged together and will be + returned to the application as they are. Each image will be tagged + with the correct HDR channel, indicating what kind of exposure it + is. The tag should be the same as in the HdrModeMultiExposure case. The expectation is that an application using this mode would merge the frames to create HDR images for itself if it requires them. @@ -812,8 +1192,10 @@ controls: value: 2 description: | Multiple exposures will be generated and merged to create HDR - images. Each image will be tagged with the HDR channel (long, medium - or short) that arrived and which caused this image to be output. + images. + + Each image will be tagged with the HDR channel (long, medium or + short) that arrived and which caused this image to be output. Systems that use two channels for HDR will return images tagged alternately as the short and long channel. Systems that use three @@ -823,24 +1205,30 @@ controls: value: 3 description: | Multiple frames all at a single exposure will be used to create HDR - images. These images should be reported as all corresponding to the - HDR short channel. + images. + + These images should be reported as all corresponding to the HDR + short channel. - name: HdrModeNight value: 4 description: | - Multiple frames will be combined to produce "night mode" images. It - is up to the implementation exactly which HDR channels it uses, and - the images will all be tagged accordingly with the correct HDR + Multiple frames will be combined to produce "night mode" images. + + It is up to the implementation exactly which HDR channels it uses, + and the images will all be tagged accordingly with the correct HDR channel information. - HdrChannel: type: int32_t + direction: out description: | + The HDR channel used to capture the frame. + This value is reported back to the application so that it can discover - whether this capture corresponds to the short or long exposure image (or - any other image used by the HDR procedure). An application can monitor - the HDR channel to discover when the differently exposed images have - arrived. + whether this capture corresponds to the short or long exposure image + (or any other image used by the HDR procedure). An application can + monitor the HDR channel to discover when the differently exposed images + have arrived. This metadata is only available when an HDR mode has been enabled. @@ -867,9 +1255,17 @@ controls: - Gamma: type: float + direction: inout description: | - Specify a fixed gamma value. Default must be 2.2 which closely mimics - sRGB gamma. Note that this is camera gamma, so it is applied as - 1.0/gamma. + Specify a fixed gamma value. + + The default gamma value must be 2.2 which closely mimics sRGB gamma. + Note that this is camera gamma, so it is applied as 1.0/gamma. + + - DebugMetadataEnable: + type: bool + direction: inout + description: | + Enable or disable the debug metadata. ... 
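Closing out the core control updates, the HdrMode/HdrChannel pairing documented above might be exercised as follows. An illustrative sketch, not part of the patch; helper names are hypothetical, and the HdrChannelShort/Medium/Long enumerators are the upstream HdrChannel values, which this listing elides:

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    /* Request merged multi-exposure HDR frames. */
    static void enableHdr(Request *request)
    {
        request->controls().set(controls::HdrMode,
                                controls::HdrModeMultiExposure);
    }

    /* Map the HDR channel tag of a completed frame to a printable name. */
    static const char *hdrChannelName(Request *completed)
    {
        const auto channel = completed->metadata().get(controls::HdrChannel);

        if (channel && *channel == controls::HdrChannelShort)
            return "short";
        if (channel && *channel == controls::HdrChannelMedium)
            return "medium";
        if (channel && *channel == controls::HdrChannelLong)
            return "long";
        return "none";
    }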
diff --git a/src/libcamera/control_ids_debug.yaml b/src/libcamera/control_ids_debug.yaml new file mode 100644 index 00000000..79753271 --- /dev/null +++ b/src/libcamera/control_ids_debug.yaml @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: LGPL-2.1-or-later +# +%YAML 1.1 +--- +vendor: debug +controls: [] diff --git a/src/libcamera/control_ids_draft.yaml b/src/libcamera/control_ids_draft.yaml index 9bef5bf1..03309eea 100644 --- a/src/libcamera/control_ids_draft.yaml +++ b/src/libcamera/control_ids_draft.yaml @@ -10,6 +10,7 @@ vendor: draft controls: - AePrecaptureTrigger: type: int32_t + direction: inout description: | Control for AE metering trigger. Currently identical to ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER. @@ -31,6 +32,7 @@ controls: - NoiseReductionMode: type: int32_t + direction: inout description: | Control to select the noise reduction algorithm mode. Currently identical to ANDROID_NOISE_REDUCTION_MODE. @@ -59,6 +61,7 @@ controls: - ColorCorrectionAberrationMode: type: int32_t + direction: inout description: | Control to select the color correction aberration mode. Currently identical to ANDROID_COLOR_CORRECTION_ABERRATION_MODE. @@ -77,37 +80,9 @@ controls: High quality aberration correction which might reduce the frame rate. - - AeState: - type: int32_t - description: | - Control to report the current AE algorithm state. Currently identical to - ANDROID_CONTROL_AE_STATE. - - Current state of the AE algorithm. - enum: - - name: AeStateInactive - value: 0 - description: The AE algorithm is inactive. - - name: AeStateSearching - value: 1 - description: The AE algorithm has not converged yet. - - name: AeStateConverged - value: 2 - description: The AE algorithm has converged. - - name: AeStateLocked - value: 3 - description: The AE algorithm is locked. - - name: AeStateFlashRequired - value: 4 - description: The AE algorithm would need a flash for good results - - name: AeStatePrecapture - value: 5 - description: | - The AE algorithm has started a pre-capture metering session. - \sa AePrecaptureTrigger - - AwbState: type: int32_t + direction: out description: | Control to report the current AWB algorithm state. Currently identical to ANDROID_CONTROL_AWB_STATE. @@ -129,6 +104,7 @@ controls: - SensorRollingShutterSkew: type: int64_t + direction: out description: | Control to report the time between the start of exposure of the first row and the start of exposure of the last row. Currently identical to @@ -136,6 +112,7 @@ controls: - LensShadingMapMode: type: int32_t + direction: inout description: | Control to report if the lens shading map is available. Currently identical to ANDROID_STATISTICS_LENS_SHADING_MAP_MODE. @@ -149,6 +126,7 @@ controls: - PipelineDepth: type: int32_t + direction: out description: | Specifies the number of pipeline stages the frame went through from when it was exposed to when the final completed result was available to the @@ -163,6 +141,7 @@ controls: - MaxLatency: type: int32_t + direction: out description: | The maximum number of frames that can occur after a request (different than the previous) has been submitted, and before the result's state @@ -172,6 +151,7 @@ controls: - TestPatternMode: type: int32_t + direction: inout description: | Control to select the test pattern mode. Currently identical to ANDROID_SENSOR_TEST_PATTERN_MODE. @@ -227,4 +207,91 @@ controls: value. All of the custom test patterns will be static (that is the raw image must not vary from frame to frame). 
+ - FaceDetectMode: + type: int32_t + direction: inout + description: | + Control to select the face detection mode used by the pipeline. + + Currently identical to ANDROID_STATISTICS_FACE_DETECT_MODE. + + \sa FaceDetectFaceRectangles + \sa FaceDetectFaceScores + \sa FaceDetectFaceLandmarks + \sa FaceDetectFaceIds + + enum: + - name: FaceDetectModeOff + value: 0 + description: | + Pipeline doesn't perform face detection and doesn't report any + control related to face detection. + - name: FaceDetectModeSimple + value: 1 + description: | + Pipeline performs face detection and reports the + FaceDetectFaceRectangles and FaceDetectFaceScores controls for each + detected face. FaceDetectFaceLandmarks and FaceDetectFaceIds are + optional. + - name: FaceDetectModeFull + value: 2 + description: | + Pipeline performs face detection and reports all the controls + related to face detection including FaceDetectFaceRectangles, + FaceDetectFaceScores, FaceDetectFaceLandmarks, and + FaceDetectFaceIds for each detected face. + + - FaceDetectFaceRectangles: + type: Rectangle + direction: out + description: | + Boundary rectangles of the detected faces. The number of values is + the number of detected faces. + + The FaceDetectFaceRectangles control can only be returned in metadata. + + Currently identical to ANDROID_STATISTICS_FACE_RECTANGLES. + size: [n] + + - FaceDetectFaceScores: + type: uint8_t + direction: out + description: | + Confidence score of each of the detected faces. The range of score is + [0, 100]. The number of values should be the number of faces reported + in FaceDetectFaceRectangles. + + The FaceDetectFaceScores control can only be returned in metadata. + + Currently identical to ANDROID_STATISTICS_FACE_SCORES. + size: [n] + + - FaceDetectFaceLandmarks: + type: Point + direction: out + description: | + Array of human face landmark coordinates in format [..., left_eye_i, + right_eye_i, mouth_i, left_eye_i+1, ...], with i = index of face. The + number of values should be 3 * the number of faces reported in + FaceDetectFaceRectangles. + + The FaceDetectFaceLandmarks control can only be returned in metadata. + + Currently identical to ANDROID_STATISTICS_FACE_LANDMARKS. + size: [n] + + - FaceDetectFaceIds: + type: int32_t + direction: out + description: | + Each detected face is given a unique ID that is valid for as long as the + face is visible to the camera device. A face that leaves the field of + view and later returns may be assigned a new ID. The number of values + should be the number of faces reported in FaceDetectFaceRectangles. + + The FaceDetectFaceIds control can only be returned in metadata. + + Currently identical to ANDROID_STATISTICS_FACE_IDS. + size: [n] + ... diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml index cb097f88..7524c5d2 100644 --- a/src/libcamera/control_ids_rpi.yaml +++ b/src/libcamera/control_ids_rpi.yaml @@ -9,21 +9,53 @@ vendor: rpi controls: - StatsOutputEnable: type: bool + direction: inout description: | - Toggles the Raspberry Pi IPA to output a binary dump of the hardware - generated statistics through the Request metadata in the Bcm2835StatsOutput - control. + Toggles the Raspberry Pi IPA to output the hardware generated statistics. + + When this control is set to true, the IPA outputs a binary dump of the + hardware generated statistics through the Request metadata in the + Bcm2835StatsOutput control.
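Returning to the draft face detection controls added above, an application might exercise them as follows. An illustrative sketch, not part of the patch; helper names are hypothetical:

    #include <iostream>

    #include <libcamera/libcamera.h>

    using namespace libcamera;

    /* Ask the pipeline to run face detection for this request. */
    static void enableFaceDetection(Request *request)
    {
        request->controls().set(controls::draft::FaceDetectMode,
                                controls::draft::FaceDetectModeSimple);
    }

    /* Print the per-face rectangles and confidence scores, if reported. */
    static void printFaces(Request *completed)
    {
        const ControlList &metadata = completed->metadata();

        const auto rects = metadata.get(controls::draft::FaceDetectFaceRectangles);
        const auto scores = metadata.get(controls::draft::FaceDetectFaceScores);
        if (!rects || !scores)
            return;

        for (std::size_t i = 0; i < rects->size(); ++i)
            std::cout << "face " << i << ": " << (*rects)[i].toString()
                      << ", score " << int((*scores)[i]) << std::endl;
    }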
\sa Bcm2835StatsOutput - Bcm2835StatsOutput: type: uint8_t size: [n] + direction: out description: | - Span of the BCM2835 ISP generated statistics for the current frame. This - is sent in the Request metadata if the StatsOutputEnable is set to true. - The statistics struct definition can be found in include/linux/bcm2835-isp.h. + Span of the BCM2835 ISP generated statistics for the current frame. + + This is sent in the Request metadata if the StatsOutputEnable is set to + true. The statistics struct definition can be found in + include/linux/bcm2835-isp.h. \sa StatsOutputEnable + - ScalerCrops: + type: Rectangle + size: [n] + direction: out + description: | + An array of rectangles, where each singular value has identical + functionality to the ScalerCrop control. This control allows the + Raspberry Pi pipeline handler to control individual scaler crops per + output stream. + + The order of rectangles passed into the control must match the order of + streams configured by the application. The pipeline handler will only + configure crop rectangles up to the number of output streams configured. + All subsequent rectangles passed into this control are ignored by the + pipeline handler. + + If both rpi::ScalerCrops and ScalerCrop controls are present in a + ControlList, the latter is discarded, and crops are obtained from this + control. + + Note that using different crop rectangles for each output stream with + this control is only applicable on the Pi5/PiSP platform. This control + should also be considered temporary/draft and will be replaced with + official libcamera API support for per-stream controls in the future. + + \sa ScalerCrop ... diff --git a/src/libcamera/control_ranges.yaml b/src/libcamera/control_ranges.yaml index d42447d0..6752eb98 100644 --- a/src/libcamera/control_ranges.yaml +++ b/src/libcamera/control_ranges.yaml @@ -13,6 +13,8 @@ ranges: draft: 10000 # Raspberry Pi vendor controls rpi: 20000 - # Next range starts at 30000 + # Controls for debug metadata + debug: 30000 + # Next range starts at 40000 ... diff --git a/src/libcamera/control_serializer.cpp b/src/libcamera/control_serializer.cpp index 52fd714f..17834648 100644 --- a/src/libcamera/control_serializer.cpp +++ b/src/libcamera/control_serializer.cpp @@ -281,6 +281,7 @@ int ControlSerializer::serialize(const ControlInfoMap &infoMap, entry.id = id->id(); entry.type = id->type(); entry.offset = values.offset(); + entry.direction = static_cast<ControlId::DirectionFlags::Type>(id->direction()); entries.write(&entry); store(info, values); @@ -493,12 +494,17 @@ ControlInfoMap ControlSerializer::deserialize<ControlInfoMap>(ByteStreamBuffer & /* If we're using a local id map, populate it. */ if (localIdMap) { + ControlId::DirectionFlags flags{ + static_cast<ControlId::Direction>(entry->direction) + }; + /** * \todo Find a way to preserve the control name for * debugging purpose.
*/ controlIds_.emplace_back(std::make_unique<ControlId>(entry->id, - "", type)); + "", "local", type, + flags)); (*localIdMap)[entry->id] = controlIds_.back().get(); } diff --git a/src/libcamera/controls.cpp b/src/libcamera/controls.cpp index 11d35321..70f6f609 100644 --- a/src/libcamera/controls.cpp +++ b/src/libcamera/controls.cpp @@ -7,10 +7,9 @@ #include <libcamera/controls.h> -#include <iomanip> #include <sstream> -#include <string> #include <string.h> +#include <string> #include <libcamera/base/log.h> #include <libcamera/base/utils.h> @@ -55,12 +54,15 @@ static constexpr size_t ControlValueSize[] = { [ControlTypeNone] = 0, [ControlTypeBool] = sizeof(bool), [ControlTypeByte] = sizeof(uint8_t), + [ControlTypeUnsigned16] = sizeof(uint16_t), + [ControlTypeUnsigned32] = sizeof(uint32_t), [ControlTypeInteger32] = sizeof(int32_t), [ControlTypeInteger64] = sizeof(int64_t), [ControlTypeFloat] = sizeof(float), [ControlTypeString] = sizeof(char), [ControlTypeRectangle] = sizeof(Rectangle), [ControlTypeSize] = sizeof(Size), + [ControlTypePoint] = sizeof(Point), }; } /* namespace */ @@ -74,10 +76,14 @@ static constexpr size_t ControlValueSize[] = { * The control stores a boolean value * \var ControlTypeByte * The control stores a byte value as an unsigned 8-bit integer + * \var ControlTypeUnsigned16 + * The control stores an unsigned 16-bit integer value + * \var ControlTypeUnsigned32 + * The control stores an unsigned 32-bit integer value * \var ControlTypeInteger32 - * The control stores a 32-bit integer value + * The control stores a signed 32-bit integer value * \var ControlTypeInteger64 - * The control stores a 64-bit integer value + * The control stores a signed 64-bit integer value * \var ControlTypeFloat * The control stores a 32-bit floating point value * \var ControlTypeString @@ -230,6 +236,16 @@ std::string ControlValue::toString() const str += std::to_string(*value); break; } + case ControlTypeUnsigned16: { + const uint16_t *value = reinterpret_cast<const uint16_t *>(data); + str += std::to_string(*value); + break; + } + case ControlTypeUnsigned32: { + const uint32_t *value = reinterpret_cast<const uint32_t *>(data); + str += std::to_string(*value); + break; + } case ControlTypeInteger32: { const int32_t *value = reinterpret_cast<const int32_t *>(data); str += std::to_string(*value); @@ -255,6 +271,11 @@ std::string ControlValue::toString() const str += value->toString(); break; } + case ControlTypePoint: { + const Point *value = reinterpret_cast<const Point *>(data); + str += value->toString(); + break; + } case ControlTypeNone: case ControlTypeString: break; @@ -389,8 +410,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen * \brief Construct a ControlId instance * \param[in] id The control numerical ID * \param[in] name The control name + * \param[in] vendor The vendor name * \param[in] type The control data type - */ + * \param[in] direction The direction of the control, if it can be used in Controls or Metadata + * \param[in] size The size of the array control, or 0 if scalar control + * \param[in] enumStrMap The map from enum names to values (optional) + */ +ControlId::ControlId(unsigned int id, const std::string &name, + const std::string &vendor, ControlType type, + DirectionFlags direction, std::size_t size, + const std::map<std::string, int32_t> &enumStrMap) + : id_(id), name_(name), vendor_(vendor), type_(type), + direction_(direction), size_(size), enumStrMap_(enumStrMap) +{ + for (const auto &pair : enumStrMap_) + 
reverseMap_[pair.second] = pair.first; +} /** * \fn unsigned int ControlId::id() const @@ -405,12 +440,68 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen */ /** + * \fn const std::string &ControlId::vendor() const + * \brief Retrieve the vendor name + * \return The vendor name, as a string + */ + +/** * \fn ControlType ControlId::type() const * \brief Retrieve the control data type * \return The control data type */ /** + * \fn DirectionFlags ControlId::direction() const + * \brief Return the direction that the control can be used in + * + * This is similar to isInput() and isOutput(), but returns the direction + * flags instead of a boolean for each direction. + * + * \return The direction flags indicating whether the control can be used as + * an input control or as output metadata + */ + +/** + * \fn bool ControlId::isInput() const + * \brief Determine if the control is available to be used as an input control + * + * Controls can be used either as input in controls, or as output in metadata. + * This function checks if the control is allowed to be used as the former. + * + * \return True if the control can be used as an input control, false otherwise + */ + +/** + * \fn bool ControlId::isOutput() const + * \brief Determine if the control is available to be used in output metadata + * + * Controls can be used either as input in controls, or as output in metadata. + * This function checks if the control is allowed to be used as the latter. + * + * \return True if the control can be returned in output metadata, false otherwise + */ + +/** + * \fn bool ControlId::isArray() const + * \brief Determine if the control is an array control + * \return True if the control is an array control, false otherwise + */ + +/** + * \fn std::size_t ControlId::size() const + * \brief Retrieve the size of the control if it is an array control + * \return The size of the array control, size_t::max for dynamic extent, or 0 + * for non-array + */ + +/** + * \fn const std::map<int32_t, std::string> &ControlId::enumerators() const + * \brief Retrieve the map of enum values to enum names + * \return The map of enum values to enum names + */ + +/** * \fn bool operator==(unsigned int lhs, const ControlId &rhs) * \brief Compare a ControlId with a control numerical ID * \param[in] lhs Left-hand side numerical ID @@ -429,6 +520,22 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen */ /** + * \enum ControlId::Direction + * \brief The direction the control is capable of being passed from/to + * + * \var ControlId::Direction::In + * \brief The control can be passed as input in controls + * + * \var ControlId::Direction::Out + * \brief The control can be returned as output in metadata + */ + +/** + * \typedef ControlId::DirectionFlags + * \brief A wrapper for ControlId::Direction so that it can be used as flags + */ + +/** * \class Control * \brief Describe a control and its intrinsic properties * @@ -456,10 +563,14 @@ void ControlValue::reserve(ControlType type, bool isArray, std::size_t numElemen */ /** - * \fn Control::Control(unsigned int id, const char *name) + * \fn Control::Control(unsigned int id, const char *name, const char *vendor) * \brief Construct a Control instance * \param[in] id The control numerical ID * \param[in] name The control name + * \param[in] vendor The vendor name + * \param[in] direction The direction of the control, if it can be used in + * Controls or Metadata + * \param[in] enumStrMap The map from enum names to 
values (optional) * * The control data type is automatically deduced from the template type T. */ diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp index d3d38c1b..d551b908 100644 --- a/src/libcamera/converter.cpp +++ b/src/libcamera/converter.cpp @@ -11,10 +11,12 @@ #include <libcamera/base/log.h> +#include <libcamera/stream.h> + #include "libcamera/internal/media_device.h" /** - * \file internal/converter.h + * \file converter.h * \brief Abstract converter */ @@ -35,13 +37,38 @@ LOG_DEFINE_CATEGORY(Converter) */ /** + * \enum Converter::Feature + * \brief Specify the features supported by the converter + * \var Converter::Feature::None + * \brief No extra features supported by the converter + * \var Converter::Feature::InputCrop + * \brief Cropping capability at input is supported by the converter + */ + +/** + * \typedef Converter::Features + * \brief A bitwise combination of features supported by the converter + */ + +/** + * \enum Converter::Alignment + * \brief The alignment mode specified when adjusting the converter input or + * output sizes + * \var Converter::Alignment::Down + * \brief Adjust the Converter sizes to a smaller valid size + * \var Converter::Alignment::Up + * \brief Adjust the Converter sizes to a larger valid size + */ + +/** * \brief Construct a Converter instance * \param[in] media The media device implementing the converter + * \param[in] features Features flags representing supported features * * This searches for the entity implementing the data streaming function in the * media graph entities and uses its device node as the converter device node. */ -Converter::Converter(MediaDevice *media) +Converter::Converter(MediaDevice *media, Features features) { const std::vector<MediaEntity *> &entities = media->entities(); auto it = std::find_if(entities.begin(), entities.end(), @@ -56,6 +83,7 @@ Converter::Converter(MediaDevice *media) } deviceNode_ = (*it)->deviceNode(); + features_ = features; } Converter::~Converter() @@ -93,6 +121,26 @@ Converter::~Converter() */ /** + * \fn Converter::adjustInputSize() + * \brief Adjust the converter input \a size to a valid value + * \param[in] pixFmt The pixel format of the converter input stream + * \param[in] size The converter input size to adjust to a valid value + * \param[in] align The desired alignment + * \return The adjusted converter input size or a null Size if \a size cannot + * be adjusted + */ + +/** + * \fn Converter::adjustOutputSize() + * \brief Adjust the converter output \a size to a valid value + * \param[in] pixFmt The pixel format of the converter output stream + * \param[in] size The converter output size to adjust to a valid value + * \param[in] align The desired alignment + * \return The adjusted converter output size or a null Size if \a size cannot + * be adjusted + */ + +/** * \fn Converter::strideAndFrameSize() * \brief Retrieve the output stride and frame size for an input configuration * \param[in] pixelFormat Input stream pixel format @@ -101,6 +149,16 @@ Converter::~Converter() */ /** + * \fn Converter::validateOutput() + * \brief Validate and possibly adjust \a cfg to a valid converter output + * \param[inout] cfg The StreamConfiguration to validate and adjust + * \param[out] adjusted Set to true if \a cfg has been adjusted + * \param[in] align The desired alignment + * \return 0 if \a cfg is valid or has been adjusted, a negative error code + * otherwise if \a cfg cannot be adjusted + */ + +/** * \fn Converter::configure() * \brief Configure a set of output stream 
conversion from an input stream * \param[in] inputCfg Input stream configuration @@ -109,14 +167,21 @@ Converter::~Converter() */ /** + * \fn Converter::isConfigured() + * \brief Check if a given stream is configured + * \param[in] stream The output stream + * \return True if the \a stream is configured or false otherwise + */ + +/** * \fn Converter::exportBuffers() * \brief Export buffers from the converter device - * \param[in] output Output stream index exporting the buffers + * \param[in] stream Output stream pointer exporting the buffers * \param[in] count Number of buffers to allocate * \param[out] buffers Vector to store allocated buffers * * This function operates similarly to V4L2VideoDevice::exportBuffers() on the - * output stream indicated by the \a output index. + * output stream indicated by \a stream. * * \return The number of allocated buffers on success or a negative error code * otherwise @@ -137,7 +202,7 @@ Converter::~Converter() * \fn Converter::queueBuffers() * \brief Queue buffers to converter device * \param[in] input The frame buffer to apply the conversion - * \param[out] outputs The container holding the output stream indexes and + * \param[out] outputs The container holding the output stream pointers and * their respective frame buffer outputs. * * This function queues the \a input frame buffer on the output streams of the @@ -148,6 +213,52 @@ Converter::~Converter() */ /** + * \fn Converter::setInputCrop() + * \brief Set the crop rectangle \a rect for \a stream + * \param[in] stream The output stream + * \param[inout] rect The crop rectangle to apply and return the rectangle + * that is actually applied + * + * Set the crop rectangle \a rect for \a stream provided the converter supports + * cropping. The converter has the Feature::InputCrop flag in this case. + * + * The underlying hardware can adjust the rectangle supplied by the user + * due to hardware constraints. The caller can inspect \a rect to determine the + * actual rectangle that has been applied by the converter, after this function + * returns. + * + * \return 0 on success or a negative error code otherwise + */ + +/** + * \fn Converter::inputCropBounds() + * \brief Retrieve the crop bounds of the converter + * + * Retrieve the minimum and maximum crop bounds of the converter. This can be + * used to query the crop bounds before configuring a stream. + * + * \return A pair containing the minimum and maximum crop bound in that order + */ + +/** + * \fn Converter::inputCropBounds(const Stream *stream) + * \brief Retrieve the crop bounds for \a stream + * \param[in] stream The output stream + * + * Retrieve the minimum and maximum crop bounds for \a stream. The converter + * should support cropping (Feature::InputCrop). + * + * The crop bounds depend on the configuration of the output stream and hence + * this function should be called after the \a stream has been configured using + * configure(). + * + * When called with an unconfigured \a stream, this function returns a pair of + * null rectangles. 
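To make the crop workflow documented above concrete, here is a small hypothetical sketch, not taken from the patch: 'converter' stands in for a configured Converter, 'stream' for one of its configured output streams, and the centered half-size crop is an arbitrary choice.

#include <libcamera/geometry.h>
#include <libcamera/stream.h>

#include "libcamera/internal/converter.h"

using namespace libcamera;

static void applyHalfCrop(Converter *converter, const Stream *stream)
{
	if (converter->features() & Converter::Feature::InputCrop) {
		/* Only valid after configure(), as documented above. */
		Rectangle maxCrop = converter->inputCropBounds(stream).second;

		/* Request a centered rectangle of half the maximum size. */
		Rectangle crop(Size(maxCrop.width / 2, maxCrop.height / 2));
		crop = crop.centeredTo(maxCrop.center());

		/* The converter may adjust the rectangle to hardware limits. */
		if (converter->setInputCrop(stream, &crop) == 0) {
			/* 'crop' now holds the rectangle actually applied. */
		}
	}
}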
+ * + * \return A pair containing the minimum and maximum crop bound in that order + */ + +/** * \var Converter::inputBufferReady * \brief A signal emitted when the input frame buffer completes */ @@ -158,12 +269,23 @@ Converter::~Converter() */ /** + * \var Converter::features_ + * \brief Stores the features supported by the converter + */ + +/** * \fn Converter::deviceNode() * \brief The converter device node attribute accessor * \return The converter device node string */ /** + * \fn Converter::features() + * \brief Retrieve the features supported by the converter + * \return The converter Features flags + */ + +/** * \class ConverterFactoryBase * \brief Base class for converter factories * @@ -263,8 +385,9 @@ std::vector<std::string> ConverterFactoryBase::names() for (ConverterFactoryBase *factory : factories) { list.push_back(factory->name_); - for (auto alias : factory->compatibles()) - list.push_back(alias); + + const auto &compatibles = factory->compatibles(); + list.insert(list.end(), compatibles.begin(), compatibles.end()); } return list; diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp index d8929fc5..566f18ce 100644 --- a/src/libcamera/converter/converter_v4l2_m2m.cpp +++ b/src/libcamera/converter/converter_v4l2_m2m.cpp @@ -23,7 +23,7 @@ #include "libcamera/internal/v4l2_videodevice.h" /** - * \file internal/converter/converter_v4l2_m2m.h + * \file converter/converter_v4l2_m2m.h * \brief V4L2 M2M based converter */ @@ -31,25 +31,71 @@ namespace libcamera { LOG_DECLARE_CATEGORY(Converter) +namespace { + +int getCropBounds(V4L2VideoDevice *device, Rectangle &minCrop, + Rectangle &maxCrop) +{ + Rectangle minC; + Rectangle maxC; + + /* Find crop bounds */ + minC.width = 1; + minC.height = 1; + maxC.width = UINT_MAX; + maxC.height = UINT_MAX; + + int ret = device->setSelection(V4L2_SEL_TGT_CROP, &minC); + if (ret) { + LOG(Converter, Error) + << "Could not query minimum selection crop: " + << strerror(-ret); + return ret; + } + + ret = device->getSelection(V4L2_SEL_TGT_CROP_BOUNDS, &maxC); + if (ret) { + LOG(Converter, Error) + << "Could not query maximum selection crop: " + << strerror(-ret); + return ret; + } + + /* Reset the crop to its maximum */ + ret = device->setSelection(V4L2_SEL_TGT_CROP, &maxC); + if (ret) { + LOG(Converter, Error) + << "Could not reset selection crop: " + << strerror(-ret); + return ret; + } + + minCrop = minC; + maxCrop = maxC; + return 0; +} + +} /* namespace */ + /* ----------------------------------------------------------------------------- - * V4L2M2MConverter::Stream + * V4L2M2MConverter::V4L2M2MStream */ -V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index) - : converter_(converter), index_(index) +V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream) + : converter_(converter), stream_(stream) { m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode()); - m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady); - m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady); + m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady); + m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady); int ret = m2m_->open(); if (ret < 0) m2m_.reset(); } -int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg, - const StreamConfiguration &outputCfg) +int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg, + 
const StreamConfiguration &outputCfg) { V4L2PixelFormat videoFormat = m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat); @@ -98,16 +144,23 @@ int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg, inputBufferCount_ = inputCfg.bufferCount; outputBufferCount_ = outputCfg.bufferCount; + if (converter_->features() & Feature::InputCrop) { + ret = getCropBounds(m2m_->output(), inputCropBounds_.first, + inputCropBounds_.second); + if (ret) + return ret; + } + return 0; } -int V4L2M2MConverter::Stream::exportBuffers(unsigned int count, - std::vector<std::unique_ptr<FrameBuffer>> *buffers) +int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) { return m2m_->capture()->exportBuffers(count, buffers); } -int V4L2M2MConverter::Stream::start() +int V4L2M2MConverter::V4L2M2MStream::start() { int ret = m2m_->output()->importBuffers(inputBufferCount_); if (ret < 0) @@ -134,7 +187,7 @@ int V4L2M2MConverter::Stream::start() return 0; } -void V4L2M2MConverter::Stream::stop() +void V4L2M2MConverter::V4L2M2MStream::stop() { m2m_->capture()->streamOff(); m2m_->output()->streamOff(); @@ -142,7 +195,7 @@ void V4L2M2MConverter::Stream::stop() m2m_->output()->releaseBuffers(); } -int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output) +int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output) { int ret = m2m_->output()->queueBuffer(input); if (ret < 0) @@ -155,12 +208,27 @@ int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *outp return 0; } -std::string V4L2M2MConverter::Stream::logPrefix() const +int V4L2M2MConverter::V4L2M2MStream::getInputSelection(unsigned int target, Rectangle *rect) +{ + return m2m_->output()->getSelection(target, rect); +} + +int V4L2M2MConverter::V4L2M2MStream::setInputSelection(unsigned int target, Rectangle *rect) +{ + return m2m_->output()->setSelection(target, rect); +} + +std::pair<Rectangle, Rectangle> V4L2M2MConverter::V4L2M2MStream::inputCropBounds() { - return "stream" + std::to_string(index_); + return inputCropBounds_; } -void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer) +std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const +{ + return stream_->configuration().toString(); +} + +void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer) { auto it = converter_->queue_.find(buffer); if (it == converter_->queue_.end()) @@ -172,7 +240,7 @@ void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer) } } -void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer) +void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer) { converter_->outputBufferReady.emit(buffer); } @@ -205,6 +273,15 @@ V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media) m2m_.reset(); return; } + + ret = getCropBounds(m2m_->output(), inputCropBounds_.first, + inputCropBounds_.second); + if (!ret && inputCropBounds_.first != inputCropBounds_.second) { + features_ |= Feature::InputCrop; + + LOG(Converter, Info) + << "Converter supports cropping on its input"; + } } /** @@ -325,6 +402,127 @@ V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat, } /** + * \copydoc libcamera::Converter::adjustInputSize + */ +Size V4L2M2MConverter::adjustInputSize(const PixelFormat &pixFmt, + const Size &size, Alignment align) +{ + auto formats = m2m_->output()->formats(); + V4L2PixelFormat v4l2PixFmt = m2m_->output()->toV4L2PixelFormat(pixFmt); + + 
auto it = formats.find(v4l2PixFmt); + if (it == formats.end()) { + LOG(Converter, Info) + << "Unsupported pixel format " << pixFmt; + return {}; + } + + return adjustSizes(size, it->second, align); +} + +/** + * \copydoc libcamera::Converter::adjustOutputSize + */ +Size V4L2M2MConverter::adjustOutputSize(const PixelFormat &pixFmt, + const Size &size, Alignment align) +{ + auto formats = m2m_->capture()->formats(); + V4L2PixelFormat v4l2PixFmt = m2m_->capture()->toV4L2PixelFormat(pixFmt); + + auto it = formats.find(v4l2PixFmt); + if (it == formats.end()) { + LOG(Converter, Info) + << "Unsupported pixel format " << pixFmt; + return {}; + } + + return adjustSizes(size, it->second, align); +} + +Size V4L2M2MConverter::adjustSizes(const Size &cfgSize, + const std::vector<SizeRange> &ranges, + Alignment align) +{ + Size size = cfgSize; + + if (ranges.size() == 1) { + /* + * The device supports either V4L2_FRMSIZE_TYPE_CONTINUOUS or + * V4L2_FRMSIZE_TYPE_STEPWISE. + */ + const SizeRange &range = *ranges.begin(); + + size.width = std::clamp(size.width, range.min.width, + range.max.width); + size.height = std::clamp(size.height, range.min.height, + range.max.height); + + /* + * Check if any alignment is needed. If the sizes are already + * aligned, or the device supports V4L2_FRMSIZE_TYPE_CONTINUOUS + * with hStep and vStep equal to 1, we're done here. + */ + int widthR = size.width % range.hStep; + int heightR = size.height % range.vStep; + + /* Align up or down according to the caller request. */ + + if (widthR != 0) + size.width = size.width - widthR + + ((align == Alignment::Up) ? range.hStep : 0); + + if (heightR != 0) + size.height = size.height - heightR + + ((align == Alignment::Up) ? range.vStep : 0); + } else { + /* + * The device supports V4L2_FRMSIZE_TYPE_DISCRETE, find the + * size closest to the requested output configuration. + * + * The size ranges vector is not ordered, so we sort it first. + * If we align up, start from the largest element. + */ + std::vector<Size> sizes; + sizes.reserve(ranges.size()); + std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes), + [](const SizeRange &range) { return range.max; }); + std::sort(sizes.begin(), sizes.end()); + + if (align == Alignment::Up) + std::reverse(sizes.begin(), sizes.end()); + + /* + * Return true if s2 is valid according to the desired + * alignment: smaller than s1 if we align down, larger than s1 + * if we align up. + */ + auto nextSizeValid = [](const Size &s1, const Size &s2, Alignment a) { + return a == Alignment::Down + ? 
(s1.width > s2.width && s1.height > s2.height) + : (s1.width < s2.width && s1.height < s2.height); + }; + + Size newSize; + for (const Size &sz : sizes) { + if (!nextSizeValid(size, sz, align)) + break; + + newSize = sz; + } + + if (newSize.isNull()) { + LOG(Converter, Error) + << "Cannot adjust " << cfgSize + << " to a supported converter size"; + return {}; + } + + size = newSize; + } + + return size; +} + +/** * \copydoc libcamera::Converter::configure */ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg, @@ -333,21 +531,24 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg, int ret = 0; streams_.clear(); - streams_.reserve(outputCfgs.size()); for (unsigned int i = 0; i < outputCfgs.size(); ++i) { - Stream &stream = streams_.emplace_back(this, i); + const StreamConfiguration &cfg = outputCfgs[i]; + std::unique_ptr<V4L2M2MStream> stream = + std::make_unique<V4L2M2MStream>(this, cfg.stream()); - if (!stream.isValid()) { + if (!stream->isValid()) { LOG(Converter, Error) << "Failed to create stream " << i; ret = -EINVAL; break; } - ret = stream.configure(inputCfg, outputCfgs[i]); + ret = stream->configure(inputCfg, cfg); if (ret < 0) break; + + streams_.emplace(cfg.stream(), std::move(stream)); } if (ret < 0) { @@ -359,15 +560,61 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg, } /** + * \copydoc libcamera::Converter::isConfigured + */ +bool V4L2M2MConverter::isConfigured(const Stream *stream) const +{ + return streams_.find(stream) != streams_.end(); +} + +/** * \copydoc libcamera::Converter::exportBuffers */ -int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count, +int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { - if (output >= streams_.size()) + auto iter = streams_.find(stream); + if (iter == streams_.end()) + return -EINVAL; + + return iter->second->exportBuffers(count, buffers); +} + +/** + * \copydoc libcamera::Converter::setInputCrop + */ +int V4L2M2MConverter::setInputCrop(const Stream *stream, Rectangle *rect) +{ + if (!(features_ & Feature::InputCrop)) + return -ENOTSUP; + + auto iter = streams_.find(stream); + if (iter == streams_.end()) { + LOG(Converter, Error) << "Invalid output stream"; return -EINVAL; + } + + return iter->second->setInputSelection(V4L2_SEL_TGT_CROP, rect); +} + +/** + * \fn libcamera::V4L2M2MConverter::inputCropBounds() + * \copydoc libcamera::Converter::inputCropBounds() + */ + +/** + * \copydoc libcamera::Converter::inputCropBounds(const Stream *stream) + */ +std::pair<Rectangle, Rectangle> +V4L2M2MConverter::inputCropBounds(const Stream *stream) +{ + auto iter = streams_.find(stream); + if (iter == streams_.end()) { + LOG(Converter, Error) << "Invalid output stream"; + return {}; + } - return streams_[output].exportBuffers(count, buffers); + return iter->second->inputCropBounds(); } /** @@ -377,8 +624,8 @@ int V4L2M2MConverter::start() { int ret; - for (Stream &stream : streams_) { - ret = stream.start(); + for (auto &iter : streams_) { + ret = iter.second->start(); if (ret < 0) { stop(); return ret; @@ -393,41 +640,87 @@ int V4L2M2MConverter::start() */ void V4L2M2MConverter::stop() { - for (Stream &stream : utils::reverse(streams_)) - stream.stop(); + for (auto &iter : streams_) + iter.second->stop(); +} + +/** + * \copydoc libcamera::Converter::validateOutput + */ +int V4L2M2MConverter::validateOutput(StreamConfiguration *cfg, bool *adjusted, + Alignment align) +{ + V4L2VideoDevice 
*capture = m2m_->capture(); + V4L2VideoDevice::Formats fmts = capture->formats(); + + if (adjusted) + *adjusted = false; + + PixelFormat fmt = cfg->pixelFormat; + V4L2PixelFormat v4l2PixFmt = capture->toV4L2PixelFormat(fmt); + + auto it = fmts.find(v4l2PixFmt); + if (it == fmts.end()) { + it = fmts.begin(); + v4l2PixFmt = it->first; + cfg->pixelFormat = v4l2PixFmt.toPixelFormat(); + + if (adjusted) + *adjusted = true; + + LOG(Converter, Info) + << "Converter output pixel format adjusted to " + << cfg->pixelFormat; + } + + const Size cfgSize = cfg->size; + cfg->size = adjustSizes(cfgSize, it->second, align); + + if (cfg->size.isNull()) + return -EINVAL; + + if (cfg->size.width != cfgSize.width || + cfg->size.height != cfgSize.height) { + LOG(Converter, Info) + << "Converter size adjusted to " + << cfg->size; + if (adjusted) + *adjusted = true; + } + + return 0; } /** * \copydoc libcamera::Converter::queueBuffers */ int V4L2M2MConverter::queueBuffers(FrameBuffer *input, - const std::map<unsigned int, FrameBuffer *> &outputs) + const std::map<const Stream *, FrameBuffer *> &outputs) { - unsigned int mask = 0; + std::set<FrameBuffer *> outputBufs; int ret; /* * Validate the outputs as a sanity check: at least one output is * required, all outputs must reference a valid stream and no two - * outputs can reference the same stream. + * streams can reference the same output framebuffer. */ if (outputs.empty()) return -EINVAL; - for (auto [index, buffer] : outputs) { + for (auto [stream, buffer] : outputs) { if (!buffer) return -EINVAL; - if (index >= streams_.size()) - return -EINVAL; - if (mask & (1 << index)) - return -EINVAL; - mask |= 1 << index; + outputBufs.insert(buffer); } + if (outputBufs.size() != streams_.size()) + return -EINVAL; + /* Queue the input and output buffers to all the streams. */ - for (auto [index, buffer] : outputs) { - ret = streams_[index].queueBuffers(input, buffer); + for (auto [stream, buffer] : outputs) { + ret = streams_.at(stream)->queueBuffers(input, buffer); if (ret < 0) return ret; } @@ -444,6 +737,10 @@ int V4L2M2MConverter::queueBuffers(FrameBuffer *input, return 0; } +/* + * \todo This should be extended to include Feature::Flag to denote + * what each converter supports feature-wise. + */ static std::initializer_list<std::string> compatibles = { "mtk-mdp", "pxp", diff --git a/src/libcamera/converter/meson.build b/src/libcamera/converter/meson.build index 2aa72fe4..af1a80fe 100644 --- a/src/libcamera/converter/meson.build +++ b/src/libcamera/converter/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'converter_v4l2_m2m.cpp' ]) diff --git a/src/libcamera/debug_controls.cpp b/src/libcamera/debug_controls.cpp new file mode 100644 index 00000000..33960231 --- /dev/null +++ b/src/libcamera/debug_controls.cpp @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. + * + * Helper to easily record debug metadata inside libcamera. + */ + +#include "libcamera/internal/debug_controls.h" + +#include <libcamera/base/log.h> + +namespace libcamera { + +LOG_DEFINE_CATEGORY(DebugControls) + +/** + * \file debug_controls.h + * \brief Helper to easily record debug metadata inside libcamera + */ + +/** + * \class DebugMetadata + * \brief Helper to record metadata for later use + * + * Metadata is a useful tool for debugging the internal state of libcamera. It + * has the benefit that it is easy to use and related tooling is readily + * available. 
The difficulty is that the metadata control list is often not + * directly available (either because the variable to debug lives inside + * process() of an IPA or inside a closed algorithm class with no direct access + * to the IPA and therefore the metadata list). + * + * This class helps in both cases. It allows forwarding the data to a parent, + * or alternatively recording the data and copying it to the metadata list at a + * later point in time, when that list becomes available. Both mechanisms allow + * easy reuse and loose coupling. + * + * Typical usage is to instantiate a DebugMetadata object in every + * class/algorithm where debug metadata shall be recorded (the inner object). If + * the IPA doesn't support debug metadata, the object is still usable, but the + * debug data gets dropped. If the IPA supports debug metadata, it will either + * register a parent DebugMetadata object on the inner object or manually + * retrieve the data using enable()/moveEntries(). + * + * The concepts of forwarding to a parent and recording for later retrieval are + * mutually exclusive and the parent takes precedence. For example, it is not + * allowed to enable a DebugMetadata object, log entries to it and later set the + * parent. + * + * This is done to keep the path open for using other means of data transport + * (like tracing). For every tracing event a corresponding context needs to be + * available at set() time. The parent can be treated as such; the top-level + * object (the one on which enable() gets called) also lives in a place where + * that information is available. + */ + +/** + * \fn DebugMetadata::enableByControl() + * \brief Enable based on controls::DebugMetadataEnable in the supplied + * ControlList + * \param[in] controls The supplied ControlList + * + * This function looks for controls::DebugMetadataEnable and enables or disables + * debug metadata handling accordingly. + */ +void DebugMetadata::enableByControl(const ControlList &controls) +{ + const auto &ctrl = controls.get(controls::DebugMetadataEnable); + if (ctrl) + enable(*ctrl); +} + +/** + * \fn DebugMetadata::enable() + * \brief Enable or disable metadata handling + * \param[in] enable The enable state + * + * When \a enable is true, all calls to set() get cached and can later be + * retrieved using moveEntries(). When \a enable is false, the cache gets + * cleared and no further metadata is recorded. + * + * Forwarding to a parent is independent of the enabled state. + */ +void DebugMetadata::enable(bool enable) +{ + enabled_ = enable; + if (!enabled_) + cache_.clear(); +} + +/** + * \fn DebugMetadata::setParent() + * \brief Set the parent metadata handler to \a parent + * \param[in] parent The parent handler + * + * When a \a parent is set, all further calls to set() are unconditionally + * forwarded to that instance. + * + * The parent can be reset by passing a nullptr. + */ +void DebugMetadata::setParent(DebugMetadata *parent) +{ + parent_ = parent; + + if (!parent_) + return; + + if (!cache_.empty()) + LOG(DebugControls, Error) + << "Controls were recorded before setting a parent." + << " These are dropped."; + + cache_.clear(); +} + +/** + * \fn DebugMetadata::moveEntries() + * \brief Move all cached entries into control list \a list + * \param[in] list The control list + * + * This function moves all entries into the list specified by \a list. Duplicate + * entries in \a list get overwritten. 
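A brief illustrative sketch of the recording and forwarding flow described above; the class names and the numeric control id are hypothetical and not part of the patch.

#include <libcamera/controls.h>

#include "libcamera/internal/debug_controls.h"

using namespace libcamera;

/* The inner object, embedded in an algorithm class. */
struct Agc {
	DebugMetadata debugMetadata_;

	void process(float gain)
	{
		/* Forwarded, cached or dropped depending on the setup. */
		debugMetadata_.set(12345 /* hypothetical id */,
				   ControlValue(gain));
	}
};

/* The top-level IPA object owning the algorithm. */
struct Ipa {
	DebugMetadata debugMetadata_;
	Agc agc_;

	void init(const ControlList &sessionControls)
	{
		/* Forward everything the algorithm records to this object. */
		agc_.debugMetadata_.setParent(&debugMetadata_);
		/* Cache entries if controls::DebugMetadataEnable is set. */
		debugMetadata_.enableByControl(sessionControls);
	}

	void fillMetadata(ControlList &metadata)
	{
		debugMetadata_.moveEntries(metadata);
	}
};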
+ */ +void DebugMetadata::moveEntries(ControlList &list) +{ + list.merge(std::move(cache_), ControlList::MergePolicy::OverwriteExisting); + cache_.clear(); +} + +/** + * \fn DebugMetadata::set(const Control<T> &ctrl, const V &value) + * \brief Set the value of \a ctrl to \a value + * \param[in] ctrl The control to set + * \param[in] value The control value + * + * If a parent is set, the value gets passed there unconditionally. Otherwise it + * gets cached if the instance is enabled or dropped silently when disabled. + * + * \sa enable() + */ + +/** + * \fn DebugMetadata::set(unsigned int id, const ControlValue &value) + * \brief Set the value of control \a id to \a value + * \param[in] id The id of the control + * \param[in] value The control value + * + * If a parent is set, the value gets passed there unconditionally. Otherwise it + * gets cached if the instance is enabled or dropped silently when disabled. + * + * \sa enable() + */ +void DebugMetadata::set(unsigned int id, const ControlValue &value) +{ + if (parent_) { + parent_->set(id, value); + return; + } + + if (!enabled_) + return; + + cache_.set(id, value); +} + +} /* namespace libcamera */ diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp index fc33ba52..7866885c 100644 --- a/src/libcamera/device_enumerator_sysfs.cpp +++ b/src/libcamera/device_enumerator_sysfs.cpp @@ -33,7 +33,7 @@ int DeviceEnumeratorSysfs::init() int DeviceEnumeratorSysfs::enumerate() { struct dirent *ent; - DIR *dir; + DIR *dir = nullptr; static const char * const sysfs_dirs[] = { "/sys/subsystem/media/devices", diff --git a/src/libcamera/device_enumerator_udev.cpp b/src/libcamera/device_enumerator_udev.cpp index 01c70b6d..4e20a3cc 100644 --- a/src/libcamera/device_enumerator_udev.cpp +++ b/src/libcamera/device_enumerator_udev.cpp @@ -332,6 +332,14 @@ int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum) void DeviceEnumeratorUdev::udevNotify() { struct udev_device *dev = udev_monitor_receive_device(monitor_); + if (!dev) { + int err = errno; + LOG(DeviceEnumerator, Warning) + << "Ignoring notification received without a device: " + << strerror(err); + return; + } + std::string_view action(udev_device_get_action(dev)); std::string_view deviceNode(udev_device_get_devnode(dev)); diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp index c06eca7d..d8c62dd6 100644 --- a/src/libcamera/dma_buf_allocator.cpp +++ b/src/libcamera/dma_buf_allocator.cpp @@ -13,7 +13,6 @@ #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> -#include <sys/syscall.h> #include <sys/types.h> #include <unistd.h> @@ -22,6 +21,10 @@ #include <linux/udmabuf.h> #include <libcamera/base/log.h> +#include <libcamera/base/memfd.h> +#include <libcamera/base/shared_fd.h> + +#include <libcamera/framebuffer.h> /** * \file dma_buf_allocator.cpp @@ -126,54 +129,16 @@ DmaBufAllocator::~DmaBufAllocator() = default; * \brief Check if the DmaBufAllocator instance is valid * \return True if the DmaBufAllocator is valid, false otherwise */ - -/* uClibc doesn't provide the file sealing API. */ -#ifndef __DOXYGEN__ -#if not HAVE_FILE_SEALS -#define F_ADD_SEALS 1033 -#define F_SEAL_SHRINK 0x0002 -#endif -#endif - UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size) { /* Size must be a multiple of the page size. Round it up. 
*/ std::size_t pageMask = sysconf(_SC_PAGESIZE) - 1; size = (size + pageMask) & ~pageMask; -#if HAVE_MEMFD_CREATE - int ret = memfd_create(name, MFD_ALLOW_SEALING | MFD_CLOEXEC); -#else - int ret = syscall(SYS_memfd_create, name, MFD_ALLOW_SEALING | MFD_CLOEXEC); -#endif - if (ret < 0) { - ret = errno; - LOG(DmaBufAllocator, Error) - << "Failed to allocate memfd storage for " << name - << ": " << strerror(ret); - return {}; - } - - UniqueFD memfd(ret); - - ret = ftruncate(memfd.get(), size); - if (ret < 0) { - ret = errno; - LOG(DmaBufAllocator, Error) - << "Failed to set memfd size for " << name - << ": " << strerror(ret); - return {}; - } - /* udmabuf dma-buffers *must* have the F_SEAL_SHRINK seal. */ - ret = fcntl(memfd.get(), F_ADD_SEALS, F_SEAL_SHRINK); - if (ret < 0) { - ret = errno; - LOG(DmaBufAllocator, Error) - << "Failed to seal the memfd for " << name - << ": " << strerror(ret); + UniqueFD memfd = MemFd::create(name, size, MemFd::Seal::Shrink); + if (!memfd.isValid()) return {}; - } struct udmabuf_create create; @@ -182,7 +147,7 @@ UniqueFD DmaBufAllocator::allocFromUDmaBuf(const char *name, std::size_t size) create.offset = 0; create.size = size; - ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create); + int ret = ::ioctl(providerHandle_.get(), UDMABUF_CREATE, &create); if (ret < 0) { ret = errno; LOG(DmaBufAllocator, Error) @@ -243,4 +208,149 @@ UniqueFD DmaBufAllocator::alloc(const char *name, std::size_t size) return allocFromHeap(name, size); } +/** + * \brief Allocate and export buffers from the DmaBufAllocator + * \param[in] count The number of requested FrameBuffers + * \param[in] planeSizes The sizes of planes in each FrameBuffer + * \param[out] buffers Array of buffers successfully allocated + * + * Planes in a FrameBuffer are allocated with a single dma buf. + * \todo Add the option to allocate each plane with its own dma buf. + * + * \return The number of allocated buffers on success or a negative error code + * otherwise + */ +int DmaBufAllocator::exportBuffers(unsigned int count, + const std::vector<unsigned int> &planeSizes, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) +{ + for (unsigned int i = 0; i < count; ++i) { + std::unique_ptr<FrameBuffer> buffer = + createBuffer("frame-" + std::to_string(i), planeSizes); + if (!buffer) { + LOG(DmaBufAllocator, Error) << "Unable to create buffer"; + + buffers->clear(); + return -EINVAL; + } + + buffers->push_back(std::move(buffer)); + } + + return count; +} + +std::unique_ptr<FrameBuffer> +DmaBufAllocator::createBuffer(std::string name, + const std::vector<unsigned int> &planeSizes) +{ + std::vector<FrameBuffer::Plane> planes; + + unsigned int frameSize = 0, offset = 0; + for (auto planeSize : planeSizes) + frameSize += planeSize; + + SharedFD fd(alloc(name.c_str(), frameSize)); + if (!fd.isValid()) + return nullptr; + + for (auto planeSize : planeSizes) { + planes.emplace_back(FrameBuffer::Plane{ fd, offset, planeSize }); + offset += planeSize; + } + + return std::make_unique<FrameBuffer>(planes); +} + +/** + * \class DmaSyncer + * \brief Helper class for dma-buf's synchronization + * + * This class wraps a userspace dma-buf's synchronization process in an + * object's lifetime. + * + * It's used when the user needs to access a dma-buf with the CPU, mostly + * mapped with MappedFrameBuffer, so that the buffer is synchronized between + * CPU and ISP. 
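As a usage sketch for exportBuffers() above and the DmaSyncer documented next, consider the following hypothetical snippet; the heap choice and the buffer size are illustrative, not prescribed by the patch.

#include <memory>
#include <vector>

#include <libcamera/framebuffer.h>

#include "libcamera/internal/dma_buf_allocator.h"

using namespace libcamera;

static void allocateAndWrite()
{
	DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf);
	std::vector<std::unique_ptr<FrameBuffer>> buffers;

	/* Two single-plane buffers of an illustrative size. */
	if (allocator.exportBuffers(2, { 1920 * 1080 * 2 }, &buffers) < 0)
		return;

	const FrameBuffer::Plane &plane = buffers[0]->planes()[0];
	{
		/* Issues DMA_BUF_SYNC_START for a CPU write access. */
		DmaSyncer sync(plane.fd, DmaSyncer::SyncType::Write);
		/* ... write through a CPU mapping of plane.fd ... */
	} /* DMA_BUF_SYNC_END is issued when 'sync' goes out of scope. */
}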
+ */ + +/** + * \enum DmaSyncer::SyncType + * \brief Read and/or write access via the CPU map + * \var DmaSyncer::Read + * \brief Indicates that the mapped dma-buf will be read by the client via the + * CPU map + * \var DmaSyncer::Write + * \brief Indicates that the mapped dma-buf will be written by the client via + * the CPU map + * \var DmaSyncer::ReadWrite + * \brief Indicates that the mapped dma-buf will be read and written by the + * client via the CPU map + */ + +/** + * \brief Construct a DmaSyncer with a dma-buf's fd and the access type + * \param[in] fd The dma-buf's file descriptor to synchronize + * \param[in] type Read and/or write access via the CPU map + */ +DmaSyncer::DmaSyncer(SharedFD fd, SyncType type) + : fd_(fd) +{ + switch (type) { + case SyncType::Read: + flags_ = DMA_BUF_SYNC_READ; + break; + case SyncType::Write: + flags_ = DMA_BUF_SYNC_WRITE; + break; + case SyncType::ReadWrite: + flags_ = DMA_BUF_SYNC_RW; + break; + } + + sync(DMA_BUF_SYNC_START); +} + +/** + * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other); + * \brief Enable move on class DmaSyncer + * \param[in] other The other instance + */ + +/** + * \fn DmaSyncer::operator=(DmaSyncer &&other); + * \brief Enable move on class DmaSyncer + * \param[in] other The other instance + */ + +DmaSyncer::~DmaSyncer() +{ + /* + * DmaSyncer might be moved and left with an empty SharedFD. + * Avoid syncing with an invalid file descriptor in this case. + */ + if (fd_.isValid()) + sync(DMA_BUF_SYNC_END); +} + +void DmaSyncer::sync(uint64_t step) +{ + struct dma_buf_sync sync = { + .flags = flags_ | step + }; + + int ret; + do { + ret = ioctl(fd_.get(), DMA_BUF_IOCTL_SYNC, &sync); + } while (ret && (errno == EINTR || errno == EAGAIN)); + + if (ret) { + ret = errno; + LOG(DmaBufAllocator, Error) + << "Unable to sync dma fd: " << fd_.get() + << ", err: " << strerror(ret) + << ", flags: " << sync.flags; + } +} + } /* namespace libcamera */ diff --git a/src/libcamera/fence.cpp b/src/libcamera/fence.cpp index 634c74f8..73299b40 100644 --- a/src/libcamera/fence.cpp +++ b/src/libcamera/fence.cpp @@ -11,7 +11,7 @@ namespace libcamera { /** * - * \file libcamera/fence.h + * \file fence.h * \brief Definition of the Fence class */ diff --git a/src/libcamera/formats.cpp b/src/libcamera/formats.cpp index cf41f2c2..bfcdfc08 100644 --- a/src/libcamera/formats.cpp +++ b/src/libcamera/formats.cpp @@ -7,8 +7,7 @@ #include "libcamera/internal/formats.h" -#include <algorithm> -#include <errno.h> +#include <map> #include <libcamera/base/log.h> #include <libcamera/base/utils.h> @@ -16,7 +15,7 @@ #include <libcamera/formats.h> /** - * \file internal/formats.h + * \file libcamera/internal/formats.h * \brief Types and helper functions to handle libcamera image formats */ @@ -158,7 +157,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{ .colourEncoding = PixelFormatInfo::ColourEncodingRGB, .packed = false, .pixelsPerGroup = 1, - .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }}, + .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }}, } }, { formats::RGB565_BE, { .name = "RGB565_BE", @@ -168,7 +167,7 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{ .colourEncoding = PixelFormatInfo::ColourEncodingRGB, .packed = false, .pixelsPerGroup = 1, - .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }}, + .planes = {{ { 2, 1 }, { 0, 0 }, { 0, 0 } }}, } }, { formats::BGR888, { .name = "BGR888", @@ -527,6 +526,16 @@ const std::map<PixelFormat, PixelFormatInfo> pixelFormatInfo{ .pixelsPerGroup = 4, .planes = {{ { 5, 1 }, { 0, 0 }, { 0, 0 } }}, } }, + { 
formats::R12_CSI2P, { + .name = "R12_CSI2P", + .format = formats::R12_CSI2P, + .v4l2Formats = { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), }, + .bitsPerPixel = 12, + .colourEncoding = PixelFormatInfo::ColourEncodingYUV, + .packed = true, + .pixelsPerGroup = 2, + .planes = {{ { 3, 1 }, { 0, 0 }, { 0, 0 } }}, + } }, { formats::R12, { .name = "R12", .format = formats::R12, diff --git a/src/libcamera/formats.yaml b/src/libcamera/formats.yaml index fe027a7c..2d54d391 100644 --- a/src/libcamera/formats.yaml +++ b/src/libcamera/formats.yaml @@ -138,6 +138,9 @@ formats: - R10_CSI2P: fourcc: DRM_FORMAT_R10 mod: MIPI_FORMAT_MOD_CSI2_PACKED + - R12_CSI2P: + fourcc: DRM_FORMAT_R12 + mod: MIPI_FORMAT_MOD_CSI2_PACKED - SRGGB10_CSI2P: fourcc: DRM_FORMAT_SRGGB10 diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp index 63d679cb..826848f7 100644 --- a/src/libcamera/framebuffer.cpp +++ b/src/libcamera/framebuffer.cpp @@ -16,7 +16,10 @@ /** * \file libcamera/framebuffer.h * \brief Frame buffer handling - * + */ + +/** + * \internal * \file libcamera/internal/framebuffer.h * \brief Internal frame buffer handling support */ @@ -104,6 +107,7 @@ LOG_DEFINE_CATEGORY(Buffer) * \return The array of per-plane metadata */ +#ifndef __DOXYGEN_PUBLIC__ /** * \class FrameBuffer::Private * \brief Base class for FrameBuffer private data @@ -206,6 +210,7 @@ FrameBuffer::Private::~Private() * \brief Retrieve the dynamic metadata * \return Dynamic metadata for the frame contained in the buffer */ +#endif /* __DOXYGEN_PUBLIC__ */ /** * \class FrameBuffer diff --git a/src/libcamera/geometry.cpp b/src/libcamera/geometry.cpp index 00015136..81cc8cd5 100644 --- a/src/libcamera/geometry.cpp +++ b/src/libcamera/geometry.cpp @@ -594,6 +594,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr) * * Rectangles are used to identify an area of an image. They are specified by * the coordinates of top-left corner and their horizontal and vertical size. + * By convention, the top-left corner is defined as the corner with the lowest + * x and y coordinates, regardless of the origin and direction of the axes. * * The measure unit of the rectangle coordinates and size, as well as the * reference point from which the Rectangle::x and Rectangle::y displacements @@ -611,6 +613,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr) * \param[in] x The horizontal coordinate of the top-left corner * \param[in] y The vertical coordinate of the top-left corner * \param[in] size The size + * + * The rectangle's top-left corner is the point with the smaller x and y values. */ /** @@ -620,6 +624,8 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr) * \param[in] y The vertical coordinate of the top-left corner * \param[in] width The width * \param[in] height The height + * + * The rectangle's top-left corner is the point with the smaller x and y values. */ /** @@ -630,13 +636,24 @@ std::ostream &operator<<(std::ostream &out, const SizeRange &sr) */ /** + * \fn Rectangle::Rectangle(const Point &point1, const Point &point2) + * \brief Construct a Rectangle from two opposite corners + * \param[in] point1 One of the corners of the rectangle + * \param[in] point2 The opposite corner of \a point1 + */ + +/** * \var Rectangle::x * \brief The horizontal coordinate of the rectangle's top-left corner + * + * The rectangle's top-left corner is the point with the smaller x and y values. 
*/ /** * \var Rectangle::y * \brief The vertical coordinate of the rectangle's top-left corner + * + * The rectangle's top-left corner is the point with the smaller x and y values. */ /** @@ -685,6 +702,9 @@ Point Rectangle::center() const /** * \fn Point Rectangle::topLeft() const * \brief Retrieve the coordinates of the top left corner of this Rectangle + * + * The rectangle's top-left corner is the point with the smaller x and y values. + * * \return The Rectangle's top left corner */ @@ -818,6 +838,55 @@ Rectangle Rectangle::translatedBy(const Point &point) const } /** + * \brief Transform a Rectangle from one reference rectangle to another + * \param[in] source The \a source reference rectangle + * \param[in] destination The \a destination reference rectangle + * + * The \a source and \a destination parameters describe two rectangles defined + * in different reference systems. The Rectangle is translated from the source + * reference system into the destination reference system. + * + * The typical use case for this function is to translate a selection rectangle + * specified in a reference system, for example the sensor's pixel array, into + * the same rectangle re-scaled and translated into a different reference + * system, for example the output frame to which the selection rectangle is + * applied. + * + * For example, consider a sensor with a resolution of 4040x2360 pixels and + * assume a rectangle of (100, 100)/3840x2160 (sensorReference) in sensor + * coordinates is mapped to a rectangle (0, 0)/1920x1080 (displayReference) in + * display coordinates. This function can be used to transform an arbitrary + * rectangle from display coordinates to sensor coordinates or vice versa: + * + * \code{.cpp} + * Rectangle sensorReference(100, 100, 3840, 2160); + * Rectangle displayReference(0, 0, 1920, 1080); + * + * // Bottom right quarter in sensor coordinates + * Rectangle sensorRect(2020, 1180, 1920, 1080); + * Rectangle displayRect = sensorRect.transformedBetween(sensorReference, displayReference); + * // displayRect is now (960, 540)/960x540 + * + * // Transformation back to sensor coordinates + * sensorRect = displayRect.transformedBetween(displayReference, sensorReference); + * \endcode + */ +Rectangle Rectangle::transformedBetween(const Rectangle &source, + const Rectangle &destination) const +{ + Rectangle r; + double sx = static_cast<double>(destination.width) / source.width; + double sy = static_cast<double>(destination.height) / source.height; + + r.x = static_cast<int>((x - source.x) * sx) + destination.x; + r.y = static_cast<int>((y - source.y) * sy) + destination.y; + r.width = static_cast<int>(width * sx); + r.height = static_cast<int>(height * sy); + + return r; +} + +/** * \brief Compare rectangles for equality * \return True if the two rectangles are equal, false otherwise */ diff --git a/src/libcamera/ipa_controls.cpp b/src/libcamera/ipa_controls.cpp index 9420c889..12d92ebe 100644 --- a/src/libcamera/ipa_controls.cpp +++ b/src/libcamera/ipa_controls.cpp @@ -220,6 +220,10 @@ static_assert(sizeof(ipa_control_value_entry) == 16, * \var ipa_control_info_entry::offset * The offset in bytes from the beginning of the data section to the control * info data (shall be a multiple of 8 bytes) + * \var ipa_control_info_entry::direction + * The directions in which the control is allowed to be sent. This is a flags + * value, where 0x1 signifies input (as controls), and 0x2 signifies output (as + * metadata). 
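A hedged sketch of how the serialized direction flags line up with the ControlId accessors documented earlier; the helper function itself is hypothetical.

#include <cassert>
#include <iostream>

#include <libcamera/controls.h>

using namespace libcamera;

static void printDirections(const ControlId *id)
{
	if (id->isInput())
		std::cout << id->name() << " may be set in a request\n";
	if (id->isOutput())
		std::cout << id->name() << " may appear in metadata\n";

	/* Equivalent tests on the flag values 0x1 (In) and 0x2 (Out). */
	auto raw = static_cast<ControlId::DirectionFlags::Type>(id->direction());
	bool in = raw & 0x1;  /* ControlId::Direction::In */
	bool out = raw & 0x2; /* ControlId::Direction::Out */
	assert(in == id->isInput() && out == id->isOutput());
}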
\sa ControlId::Direction * \var ipa_control_info_entry::padding * Padding bytes (shall be set to 0) */ diff --git a/src/libcamera/ipa_data_serializer.cpp b/src/libcamera/ipa_data_serializer.cpp index 3e9bef08..2189a246 100644 --- a/src/libcamera/ipa_data_serializer.cpp +++ b/src/libcamera/ipa_data_serializer.cpp @@ -11,6 +11,8 @@ #include <libcamera/base/log.h> +#include "libcamera/internal/byte_stream_buffer.h" + /** * \file ipa_data_serializer.h * \brief IPA Data Serializer @@ -537,7 +539,6 @@ IPADataSerializer<SharedFD>::serialize(const SharedFD &data, if (data.isValid()) fdVec.push_back(data); - return { dataVec, fdVec }; } @@ -604,7 +605,7 @@ IPADataSerializer<FrameBuffer::Plane>::deserialize(std::vector<uint8_t>::const_i FrameBuffer::Plane ret; ret.fd = IPADataSerializer<SharedFD>::deserialize(dataBegin, dataBegin + 4, - fdsBegin, fdsBegin + 1); + fdsBegin, fdsBegin + 1); ret.offset = readPOD<uint32_t>(dataBegin, 4, dataEnd); ret.length = readPOD<uint32_t>(dataBegin, 8, dataEnd); diff --git a/src/libcamera/ipa_manager.cpp b/src/libcamera/ipa_manager.cpp index f4e0b633..cfc24d38 100644 --- a/src/libcamera/ipa_manager.cpp +++ b/src/libcamera/ipa_manager.cpp @@ -95,8 +95,6 @@ LOG_DEFINE_CATEGORY(IPAManager) * IPC. */ -IPAManager *IPAManager::self_ = nullptr; - /** * \brief Construct an IPAManager instance * @@ -105,10 +103,6 @@ IPAManager *IPAManager::self_ = nullptr; */ IPAManager::IPAManager() { - if (self_) - LOG(IPAManager, Fatal) - << "Multiple IPAManager objects are not allowed"; - #if HAVE_IPA_PUBKEY if (!pubKey_.isValid()) LOG(IPAManager, Warning) << "Public key not valid"; @@ -153,16 +147,12 @@ IPAManager::IPAManager() if (!ipaCount) LOG(IPAManager, Warning) << "No IPA found in '" IPA_MODULE_DIR "'"; - - self_ = this; } IPAManager::~IPAManager() { for (IPAModule *module : modules_) delete module; - - self_ = nullptr; } /** diff --git a/src/libcamera/ipa_module.cpp b/src/libcamera/ipa_module.cpp index 0756b691..9ca74be6 100644 --- a/src/libcamera/ipa_module.cpp +++ b/src/libcamera/ipa_module.cpp @@ -8,7 +8,6 @@ #include "libcamera/internal/ipa_module.h" #include <algorithm> -#include <array> #include <ctype.h> #include <dlfcn.h> #include <elf.h> @@ -51,8 +50,8 @@ typename std::remove_extent_t<T> *elfPointer(Span<const uint8_t> elf, if (size > elf.size() || size < objSize) return nullptr; - return reinterpret_cast<typename std::remove_extent_t<T> *> - (reinterpret_cast<const char *>(elf.data()) + offset); + return reinterpret_cast<typename std::remove_extent_t<T> *>( + reinterpret_cast<const char *>(elf.data()) + offset); } template<typename T> diff --git a/src/libcamera/ipa_proxy.cpp b/src/libcamera/ipa_proxy.cpp index 6c17c456..25f772a4 100644 --- a/src/libcamera/ipa_proxy.cpp +++ b/src/libcamera/ipa_proxy.cpp @@ -7,7 +7,6 @@ #include "libcamera/internal/ipa_proxy.h" -#include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> @@ -72,6 +71,7 @@ IPAProxy::~IPAProxy() /** * \brief Retrieve the absolute path to an IPA configuration file * \param[in] name The configuration file name + * \param[in] fallbackName The name of a fallback configuration file * * This function locates the configuration file for an IPA and returns its * absolute path. It searches the following directories, in order: @@ -89,21 +89,42 @@ IPAProxy::~IPAProxy() * named after the IPA module name, as reported in IPAModuleInfo::name, and for * a file named \a name within that directory. The \a name is IPA-specific. 
* + * If the file named \a name is not found and \a fallbackName is non-empty then + * the whole search is repeated for \a fallbackName. + * * \return The full path to the IPA configuration file, or an empty string if * no configuration file can be found */ -std::string IPAProxy::configurationFile(const std::string &name) const +std::string IPAProxy::configurationFile(const std::string &name, + const std::string &fallbackName) const { - struct stat statbuf; - int ret; - /* * The IPA module name can be used as-is to build directory names as it * has been validated when loading the module. */ - std::string ipaName = ipam_->info().name; + const std::string ipaName = ipam_->info().name; + + /* + * Start with any user override through the module-specific environment + * variable. Use the name of the IPA module up to the first '/' to + * construct the variable name. + */ + std::string ipaEnvName = ipaName.substr(0, ipaName.find('/')); + std::transform(ipaEnvName.begin(), ipaEnvName.end(), ipaEnvName.begin(), + [](unsigned char c) { return std::toupper(c); }); + ipaEnvName = "LIBCAMERA_" + ipaEnvName + "_TUNING_FILE"; + + char const *configFromEnv = utils::secure_getenv(ipaEnvName.c_str()); + if (configFromEnv && *configFromEnv != '\0') + return { configFromEnv }; + + struct stat statbuf; + int ret; - /* Check the environment variable first. */ + /* + * Check the directory pointed to by the IPA config path environment + * variable next. + */ const char *confPaths = utils::secure_getenv("LIBCAMERA_IPA_CONFIG_PATH"); if (confPaths) { for (const auto &dir : utils::split(confPaths, ":")) { @@ -146,11 +167,18 @@ std::string IPAProxy::configurationFile(const std::string &name) const } } - LOG(IPAProxy, Error) - << "Configuration file '" << name - << "' not found for IPA module '" << ipaName << "'"; + if (fallbackName.empty()) { + LOG(IPAProxy, Error) + << "Configuration file '" << name + << "' not found for IPA module '" << ipaName << "'"; + return std::string(); + } - return std::string(); + LOG(IPAProxy, Warning) + << "Configuration file '" << name + << "' not found for IPA module '" << ipaName + << "', falling back to '" << fallbackName << "'"; + return configurationFile(fallbackName); } /** diff --git a/src/libcamera/ipc_unixsocket.cpp b/src/libcamera/ipc_unixsocket.cpp index 75285b67..002053e3 100644 --- a/src/libcamera/ipc_unixsocket.cpp +++ b/src/libcamera/ipc_unixsocket.cpp @@ -12,6 +12,7 @@ #include <string.h> #include <sys/socket.h> #include <unistd.h> +#include <vector> #include <libcamera/base/event_notifier.h> #include <libcamera/base/log.h> @@ -247,10 +248,9 @@ int IPCUnixSocket::sendData(const void *buffer, size_t length, iov[0].iov_base = const_cast<void *>(buffer); iov[0].iov_len = length; - char buf[CMSG_SPACE(num * sizeof(uint32_t))]; - memset(buf, 0, sizeof(buf)); + std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t))); - struct cmsghdr *cmsg = (struct cmsghdr *)buf; + struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data()); cmsg->cmsg_len = CMSG_LEN(num * sizeof(uint32_t)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; @@ -283,10 +283,9 @@ int IPCUnixSocket::recvData(void *buffer, size_t length, iov[0].iov_base = buffer; iov[0].iov_len = length; - char buf[CMSG_SPACE(num * sizeof(uint32_t))]; - memset(buf, 0, sizeof(buf)); + std::vector<uint8_t> buf(CMSG_SPACE(num * sizeof(uint32_t))); - struct cmsghdr *cmsg = (struct cmsghdr *)buf; + struct cmsghdr *cmsg = reinterpret_cast<struct cmsghdr *>(buf.data()); cmsg->cmsg_len = CMSG_LEN(num * 
sizeof(uint32_t)); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp index b3104e05..f54bbf21 100644 --- a/src/libcamera/mapped_framebuffer.cpp +++ b/src/libcamera/mapped_framebuffer.cpp @@ -16,7 +16,7 @@ #include <libcamera/base/log.h> /** - * \file libcamera/internal/mapped_framebuffer.h + * \file mapped_framebuffer.h * \brief Frame buffer memory mapping support */ @@ -72,7 +72,7 @@ MappedBuffer::MappedBuffer(MappedBuffer &&other) /** * \brief Move assignment operator, replace the mappings with those of \a other -* \param[in] other The other MappedBuffer + * \param[in] other The other MappedBuffer * * Moving a MappedBuffer moves the mappings contained in the \a other to the new * MappedBuffer and invalidates the \a other. diff --git a/src/ipa/libipa/matrix.cpp b/src/libcamera/matrix.cpp index 8346f0d3..e7e02722 100644 --- a/src/ipa/libipa/matrix.cpp +++ b/src/libcamera/matrix.cpp @@ -5,7 +5,7 @@ * Matrix and related operations */ -#include "matrix.h" +#include "libcamera/internal/matrix.h" #include <libcamera/base/log.h> @@ -18,8 +18,6 @@ namespace libcamera { LOG_DEFINE_CATEGORY(Matrix) -namespace ipa { - /** * \class Matrix * \brief Matrix class @@ -34,7 +32,7 @@ namespace ipa { */ /** - * \fn Matrix::Matrix(const std::vector<T> &data) + * \fn Matrix::Matrix(const std::array<T, Rows * Cols> &data) * \brief Construct a matrix from supplied data * \param[in] data Data from which to construct a matrix * @@ -55,6 +53,17 @@ namespace ipa { */ /** + * \fn Matrix::data() + * \brief Access the matrix data as a linear array + * + * Access the contents of the matrix as a one-dimensional linear array of + * values in row-major order. The size of the array is equal to the product of + * the number of rows and columns of the matrix (Rows x Cols). 
+ * + * \return A span referencing the matrix data as a linear array + */ + +/** * \fn Span<const T, Cols> Matrix::operator[](size_t i) const * \brief Index to a row in the matrix * \param[in] i Index of row to retrieve @@ -144,6 +153,4 @@ bool matrixValidateYaml(const YamlObject &obj, unsigned int size) } #endif /* __DOXYGEN__ */ -} /* namespace ipa */ - } /* namespace libcamera */ diff --git a/src/libcamera/media_device.cpp b/src/libcamera/media_device.cpp index bd054552..d71dad74 100644 --- a/src/libcamera/media_device.cpp +++ b/src/libcamera/media_device.cpp @@ -818,20 +818,12 @@ int MediaDevice::setupLink(const MediaLink *link, unsigned int flags) if (ret) { ret = -errno; LOG(MediaDevice, Error) - << "Failed to setup link " - << source->entity()->name() << "[" - << source->index() << "] -> " - << sink->entity()->name() << "[" - << sink->index() << "]: " + << "Failed to setup link " << *link << ": " << strerror(-ret); return ret; } - LOG(MediaDevice, Debug) - << source->entity()->name() << "[" - << source->index() << "] -> " - << sink->entity()->name() << "[" - << sink->index() << "]: " << flags; + LOG(MediaDevice, Debug) << *link << ": " << flags; return 0; } diff --git a/src/libcamera/media_object.cpp b/src/libcamera/media_object.cpp index 1b191a1e..3e3772a6 100644 --- a/src/libcamera/media_object.cpp +++ b/src/libcamera/media_object.cpp @@ -147,6 +147,31 @@ MediaLink::MediaLink(const struct media_v2_link *link, MediaPad *source, } /** + * \brief Generate a string representation of the MediaLink + * \return A string representing the MediaLink + */ +std::string MediaLink::toString() const +{ + std::stringstream ss; + ss << *this; + + return ss.str(); +} + +/** + * \brief Insert a text representation of a Link into an output stream + * \param[in] out The output stream + * \param[in] link The MediaLink + * \return The output stream \a out + */ +std::ostream &operator<<(std::ostream &out, const MediaLink &link) +{ + out << *link.source() << " -> " << *link.sink(); + + return out; +} + +/** * \fn MediaLink::source() * \brief Retrieve the link's source pad * \return The source pad at the origin of the link @@ -236,6 +261,31 @@ void MediaPad::addLink(MediaLink *link) } /** + * \brief Generate a string representation of the MediaPad + * \return A string representing the MediaPad + */ +std::string MediaPad::toString() const +{ + std::stringstream ss; + ss << *this; + + return ss.str(); +} + +/** + * \brief Insert a text representation of a MediaPad into an output stream + * \param[in] out The output stream + * \param[in] pad The MediaPad + * \return The output stream \a out + */ +std::ostream &operator<<(std::ostream &out, const MediaPad &pad) +{ + out << "'" << pad.entity()->name() << "'[" << pad.index() << "]"; + + return out; +} + +/** * \class MediaEntity * \brief The MediaEntity represents an entity in the media graph * diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build index 89504cee..de22b8e6 100644 --- a/src/libcamera/meson.build +++ b/src/libcamera/meson.build @@ -1,26 +1,35 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources = files([ - 'bayer_format.cpp', - 'byte_stream_buffer.cpp', +libcamera_public_sources = files([ 'camera.cpp', - 'camera_controls.cpp', - 'camera_lens.cpp', 'camera_manager.cpp', 'color_space.cpp', 'controls.cpp', + 'fence.cpp', + 'framebuffer.cpp', + 'framebuffer_allocator.cpp', + 'geometry.cpp', + 'orientation.cpp', + 'pixel_format.cpp', + 'request.cpp', + 'stream.cpp', + 'transform.cpp', +]) + +libcamera_internal_sources = files([ + 
'bayer_format.cpp', + 'byte_stream_buffer.cpp', + 'camera_controls.cpp', + 'camera_lens.cpp', 'control_serializer.cpp', 'control_validator.cpp', 'converter.cpp', + 'debug_controls.cpp', 'delayed_controls.cpp', 'device_enumerator.cpp', 'device_enumerator_sysfs.cpp', 'dma_buf_allocator.cpp', - 'fence.cpp', 'formats.cpp', - 'framebuffer.cpp', - 'framebuffer_allocator.cpp', - 'geometry.cpp', 'ipa_controls.cpp', 'ipa_data_serializer.cpp', 'ipa_interface.cpp', @@ -31,30 +40,23 @@ libcamera_sources = files([ 'ipc_pipe_unixsocket.cpp', 'ipc_unixsocket.cpp', 'mapped_framebuffer.cpp', + 'matrix.cpp', 'media_device.cpp', 'media_object.cpp', - 'orientation.cpp', 'pipeline_handler.cpp', - 'pixel_format.cpp', 'process.cpp', 'pub_key.cpp', - 'request.cpp', 'shared_mem_object.cpp', 'source_paths.cpp', - 'stream.cpp', 'sysfs.cpp', - 'transform.cpp', 'v4l2_device.cpp', 'v4l2_pixelformat.cpp', 'v4l2_subdevice.cpp', 'v4l2_videodevice.cpp', + 'vector.cpp', 'yaml_parser.cpp', ]) -libcamera_sources += libcamera_public_headers -libcamera_sources += libcamera_generated_ipa_headers -libcamera_sources += libcamera_tracepoint_header - includes = [ libcamera_includes, ] @@ -104,14 +106,14 @@ endif if liblttng.found() tracing_enabled = true config_h.set('HAVE_TRACING', 1) - libcamera_sources += files(['tracepoints.cpp']) + libcamera_internal_sources += files(['tracepoints.cpp']) else tracing_enabled = false endif if libudev.found() config_h.set('HAVE_LIBUDEV', 1) - libcamera_sources += files([ + libcamera_internal_sources += files([ 'device_enumerator_udev.cpp', ]) endif @@ -130,29 +132,33 @@ endif control_sources = [] controls_mode_files = { - 'controls' : controls_files, - 'properties' : properties_files, + 'controls': [ + controls_files, + 'control_ids.cpp', + ], + 'properties': [ + properties_files, + 'property_ids.cpp', + ], } -foreach mode, input_files : controls_mode_files - input_files = files(input_files) - - if mode == 'controls' - template_file = files('control_ids.cpp.in') - else - template_file = files('property_ids.cpp.in') - endif +foreach mode, inout_files : controls_mode_files + input_files = inout_files[0] + output_file = inout_files[1] + template_file = files('control_ids.cpp.in') ranges_file = files('control_ranges.yaml') - control_sources += custom_target(mode + '_cpp', + + control_sources += custom_target(mode + '_ids_cpp', input : input_files, - output : mode + '_ids.cpp', + output : output_file, command : [gen_controls, '-o', '@OUTPUT@', '--mode', mode, '-t', template_file, - '-r', ranges_file, '@INPUT@']) + '-r', ranges_file, '@INPUT@'], + env : py_build_env) endforeach -libcamera_sources += control_sources +libcamera_public_sources += control_sources gen_version = meson.project_source_root() / 'utils' / 'gen-version.sh' @@ -163,7 +169,7 @@ version_cpp = vcs_tag(command : [gen_version, meson.project_build_root(), meson. output : 'version.cpp', fallback : meson.project_version()) -libcamera_sources += version_cpp +libcamera_public_sources += version_cpp if ipa_sign_module ipa_pub_key_cpp = custom_target('ipa_pub_key_cpp', @@ -171,7 +177,7 @@ if ipa_sign_module output : 'ipa_pub_key.cpp', command : [gen_ipa_pub_key, '@INPUT@', '@OUTPUT@']) - libcamera_sources += ipa_pub_key_cpp + libcamera_internal_sources += ipa_pub_key_cpp endif libcamera_deps += [ @@ -191,7 +197,13 @@ libcamera_deps += [ # for the presence or abscence of the dynamic tag. 
libcamera = shared_library('libcamera', - libcamera_sources, + [ + libcamera_public_headers, + libcamera_public_sources, + libcamera_ipa_headers, + libcamera_internal_headers, + libcamera_internal_sources, + ], version : libcamera_version, soversion : libcamera_soversion, name_prefix : '', @@ -201,7 +213,6 @@ libcamera = shared_library('libcamera', dependencies : libcamera_deps) libcamera_public = declare_dependency(sources : [ - libcamera_ipa_headers, libcamera_public_headers, ], include_directories : libcamera_includes, @@ -210,7 +221,7 @@ libcamera_public = declare_dependency(sources : [ # Internal dependency for components and plugins which can use private APIs libcamera_private = declare_dependency(sources : [ - libcamera_generated_ipa_headers, + libcamera_ipa_headers, ], dependencies : [ libcamera_public, diff --git a/src/libcamera/orientation.cpp b/src/libcamera/orientation.cpp index 47fd6a32..7d7d21ae 100644 --- a/src/libcamera/orientation.cpp +++ b/src/libcamera/orientation.cpp @@ -8,10 +8,9 @@ #include <libcamera/orientation.h> #include <array> -#include <string> /** - * \file libcamera/orientation.h + * \file orientation.h * \brief Image orientation definition */ @@ -102,10 +101,14 @@ std::ostream &operator<<(std::ostream &out, const Orientation &orientation) { constexpr std::array<const char *, 9> orientationNames = { "", /* Orientation starts counting from 1. */ - "Rotate0", "Rotate0Mirror", - "Rotate180", "Rotate180Mirror", - "Rotate90Mirror", "Rotate270", - "Rotate270Mirror", "Rotate90", + "Rotate0", + "Rotate0Mirror", + "Rotate180", + "Rotate180Mirror", + "Rotate90Mirror", + "Rotate270", + "Rotate270Mirror", + "Rotate90", }; out << orientationNames[static_cast<unsigned int>(orientation)]; diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp index 72aa6c75..4e66b336 100644 --- a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp +++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp @@ -157,11 +157,10 @@ PipelineHandlerISI *ISICameraData::pipe() /* Open and initialize pipe components. 
*/ int ISICameraData::init() { - int ret = sensor_->init(); - if (ret) - return ret; + if (!sensor_) + return -ENODEV; - ret = csis_->open(); + int ret = csis_->open(); if (ret) return ret; @@ -1057,7 +1056,7 @@ bool PipelineHandlerISI::match(DeviceEnumerator *enumerator) std::unique_ptr<ISICameraData> data = std::make_unique<ISICameraData>(this); - data->sensor_ = std::make_unique<CameraSensor>(sensor); + data->sensor_ = CameraSensorFactoryBase::create(sensor); data->csis_ = std::make_unique<V4L2Subdevice>(csi); data->xbarSink_ = sink; diff --git a/src/libcamera/pipeline/imx8-isi/meson.build b/src/libcamera/pipeline/imx8-isi/meson.build index ffd0ce54..b369b031 100644 --- a/src/libcamera/pipeline/imx8-isi/meson.build +++ b/src/libcamera/pipeline/imx8-isi/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'imx8-isi.cpp' ]) diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp index 81a7a8ab..aa544d7b 100644 --- a/src/libcamera/pipeline/ipu3/cio2.cpp +++ b/src/libcamera/pipeline/ipu3/cio2.cpp @@ -7,8 +7,8 @@ #include "cio2.h" +#include <cmath> #include <limits> -#include <math.h> #include <linux/media-bus-format.h> @@ -134,10 +134,9 @@ int CIO2Device::init(const MediaDevice *media, unsigned int index) MediaLink *link = links[0]; MediaEntity *sensorEntity = link->source()->entity(); - sensor_ = std::make_unique<CameraSensor>(sensorEntity); - ret = sensor_->init(); - if (ret) - return ret; + sensor_ = CameraSensorFactoryBase::create(sensorEntity); + if (!sensor_) + return -ENODEV; ret = link->setEnabled(true); if (ret) @@ -304,7 +303,7 @@ V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int> * comparing it with a single precision digit is enough. 
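The truncation to a single decimal digit mentioned in the comment above can be shown with a standalone snippet (the sizes are hypothetical):

	#include <iostream>

	int main()
	{
		/* A 16:9 target and a slightly different sensor ratio... */
		float desired = 1920.0f / 1080.0f; /* 1.777... */
		float sensor = 1936.0f / 1096.0f;  /* 1.766... */

		/* ...both truncate to 1.7, so they compare as equivalent. */
		desired = static_cast<unsigned int>(desired * 10) / 10.0f;
		sensor = static_cast<unsigned int>(sensor * 10) / 10.0f;

		std::cout << (desired == sensor ? "match" : "differ") << std::endl;
		return 0;
	}

Multiplying by ten and casting to an unsigned integer drops everything past the first decimal digit, which is the precision the pipeline deems sufficient for aspect-ratio matching.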
*/ ratio = static_cast<unsigned int>(ratio * 10) / 10.0; - float ratioDiff = fabsf(ratio - desiredRatio); + float ratioDiff = std::abs(ratio - desiredRatio); unsigned int area = sz.width * sz.height; unsigned int areaDiff = area - desiredArea; diff --git a/src/libcamera/pipeline/ipu3/frames.cpp b/src/libcamera/pipeline/ipu3/frames.cpp index 88eb9d05..bc0526a7 100644 --- a/src/libcamera/pipeline/ipu3/frames.cpp +++ b/src/libcamera/pipeline/ipu3/frames.cpp @@ -7,12 +7,13 @@ #include "frames.h" +#include <libcamera/base/log.h> + #include <libcamera/framebuffer.h> #include <libcamera/request.h> #include "libcamera/internal/framebuffer.h" #include "libcamera/internal/pipeline_handler.h" -#include "libcamera/internal/v4l2_videodevice.h" namespace libcamera { diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp index 066fd4a2..e31e3879 100644 --- a/src/libcamera/pipeline/ipu3/ipu3.cpp +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp @@ -6,7 +6,6 @@ */ #include <algorithm> -#include <iomanip> #include <memory> #include <queue> #include <vector> @@ -19,15 +18,17 @@ #include <libcamera/camera.h> #include <libcamera/control_ids.h> #include <libcamera/formats.h> -#include <libcamera/ipa/ipu3_ipa_interface.h> -#include <libcamera/ipa/ipu3_ipa_proxy.h> #include <libcamera/property_ids.h> #include <libcamera/request.h> #include <libcamera/stream.h> +#include <libcamera/ipa/ipu3_ipa_interface.h> +#include <libcamera/ipa/ipu3_ipa_proxy.h> + #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_lens.h" #include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" #include "libcamera/internal/delayed_controls.h" #include "libcamera/internal/device_enumerator.h" #include "libcamera/internal/framebuffer.h" @@ -88,7 +89,7 @@ public: private: void metadataReady(unsigned int id, const ControlList &metadata); - void paramsBufferReady(unsigned int id); + void paramsComputed(unsigned int id); void setSensorControls(unsigned int id, const ControlList &sensorControls, const ControlList &lensControls); }; @@ -958,7 +959,7 @@ int PipelineHandlerIPU3::updateControls(IPU3CameraData *data) values.reserve(testPatternModes.size()); for (auto pattern : testPatternModes) - values.emplace_back(static_cast<int32_t>(pattern)); + values.emplace_back(pattern); controls[&controls::draft::TestPatternMode] = ControlInfo(values); } @@ -1077,14 +1078,10 @@ int PipelineHandlerIPU3::registerCameras() if (ret) continue; - /* - * \todo Read delay values from the sensor itself or from a - * a sensor database. For now use generic values taken from - * the Raspberry Pi and listed as 'generic values'. - */ + const CameraSensorProperties::SensorDelays &delays = cio2->sensor()->sensorDelays(); std::unordered_map<uint32_t, DelayedControls::ControlParams> params = { - { V4L2_CID_ANALOGUE_GAIN, { 1, false } }, - { V4L2_CID_EXPOSURE, { 2, false } }, + { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } }, + { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } }, }; data->delayedCtrls_ = @@ -1117,19 +1114,19 @@ int PipelineHandlerIPU3::registerCameras() * returned through the ImgU main and secondary outputs. 
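The gain and exposure delays read from the sensor properties above express how many frames a control write takes to become effective on the sensor. A standalone sketch of the underlying idea (illustrative types, not libcamera's DelayedControls):

	#include <cstddef>
	#include <cstdint>
	#include <deque>
	#include <iostream>

	/* A value written at frame F only takes effect at frame F + delay. */
	struct DelayedValue {
		unsigned int delay;
		std::deque<int32_t> writes; /* one entry per frame, oldest first */

		void write(int32_t value) { writes.push_back(value); }

		int32_t effectiveAt(unsigned int frame) const
		{
			std::size_t idx = frame >= delay ? frame - delay : 0;
			if (idx >= writes.size())
				idx = writes.size() - 1;
			return writes[idx];
		}
	};

	int main()
	{
		DelayedValue exposure{ 2, {} }; /* exposure takes 2 frames to apply */
		exposure.write(100);            /* written at frame 0 */
		exposure.write(200);            /* written at frame 1 */
		exposure.write(300);            /* written at frame 2 */

		std::cout << exposure.effectiveAt(2) << std::endl; /* 100 */
		std::cout << exposure.effectiveAt(3) << std::endl; /* 200 */
		return 0;
	}

This is why per-sensor delay values matter: with a wrong delay, metadata and statistics get associated with the wrong sensor settings.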
*/ data->cio2_.bufferReady().connect(data.get(), - &IPU3CameraData::cio2BufferReady); + &IPU3CameraData::cio2BufferReady); data->cio2_.bufferAvailable.connect( data.get(), &IPU3CameraData::queuePendingRequests); data->imgu_->input_->bufferReady.connect(&data->cio2_, - &CIO2Device::tryReturnBuffer); + &CIO2Device::tryReturnBuffer); data->imgu_->output_->bufferReady.connect(data.get(), - &IPU3CameraData::imguOutputBufferReady); + &IPU3CameraData::imguOutputBufferReady); data->imgu_->viewfinder_->bufferReady.connect(data.get(), - &IPU3CameraData::imguOutputBufferReady); + &IPU3CameraData::imguOutputBufferReady); data->imgu_->param_->bufferReady.connect(data.get(), - &IPU3CameraData::paramBufferReady); + &IPU3CameraData::paramBufferReady); data->imgu_->stat_->bufferReady.connect(data.get(), - &IPU3CameraData::statBufferReady); + &IPU3CameraData::statBufferReady); /* Create and register the Camera instance. */ const std::string &cameraId = cio2->sensor()->id(); @@ -1156,7 +1153,7 @@ int IPU3CameraData::loadIPA() return -ENOENT; ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls); - ipa_->paramsBufferReady.connect(this, &IPU3CameraData::paramsBufferReady); + ipa_->paramsComputed.connect(this, &IPU3CameraData::paramsComputed); ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady); /* @@ -1186,9 +1183,8 @@ int IPU3CameraData::loadIPA() * The API tuning file is made from the sensor name. If the tuning file * isn't found, fall back to the 'uncalibrated' file. */ - std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml"); - if (ipaTuningFile.empty()) - ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml"); + std::string ipaTuningFile = + ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml"); ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() }, sensorInfo, sensor->controls(), &ipaControls_); @@ -1218,7 +1214,7 @@ void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id, focusLens->setFocusPosition(focusValue.get<int32_t>()); } -void IPU3CameraData::paramsBufferReady(unsigned int id) +void IPU3CameraData::paramsComputed(unsigned int id) { IPU3Frames::Info *info = frameInfos_.find(id); if (!info) @@ -1329,7 +1325,7 @@ void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer) if (request->findBuffer(&rawStream_)) pipe()->completeBuffer(request, buffer); - ipa_->fillParamsBuffer(info->id, info->paramBuffer->cookie()); + ipa_->computeParams(info->id, info->paramBuffer->cookie()); } void IPU3CameraData::paramBufferReady(FrameBuffer *buffer) @@ -1373,8 +1369,8 @@ void IPU3CameraData::statBufferReady(FrameBuffer *buffer) return; } - ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0), - info->statBuffer->cookie(), info->effectiveSensorControls); + ipa_->processStats(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0), + info->statBuffer->cookie(), info->effectiveSensorControls); } /* diff --git a/src/libcamera/pipeline/ipu3/meson.build b/src/libcamera/pipeline/ipu3/meson.build index a1b0b31a..f2904b4a 100644 --- a/src/libcamera/pipeline/ipu3/meson.build +++ b/src/libcamera/pipeline/ipu3/meson.build @@ -1,6 +1,6 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'cio2.cpp', 'frames.cpp', 'imgu.cpp', diff --git a/src/libcamera/pipeline/mali-c55/mali-c55.cpp b/src/libcamera/pipeline/mali-c55/mali-c55.cpp index 45c71c1d..5abd6b20 100644 --- a/src/libcamera/pipeline/mali-c55/mali-c55.cpp 
+++ b/src/libcamera/pipeline/mali-c55/mali-c55.cpp @@ -12,6 +12,7 @@ #include <set> #include <string> +#include <linux/mali-c55-config.h> #include <linux/media-bus-format.h> #include <linux/media.h> @@ -20,14 +21,24 @@ #include <libcamera/camera.h> #include <libcamera/formats.h> #include <libcamera/geometry.h> +#include <libcamera/property_ids.h> #include <libcamera/stream.h> +#include <libcamera/ipa/core_ipa_interface.h> +#include <libcamera/ipa/mali-c55_ipa_interface.h> +#include <libcamera/ipa/mali-c55_ipa_proxy.h> + #include "libcamera/internal/bayer_format.h" #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" +#include "libcamera/internal/delayed_controls.h" #include "libcamera/internal/device_enumerator.h" +#include "libcamera/internal/framebuffer.h" +#include "libcamera/internal/ipa_manager.h" #include "libcamera/internal/media_device.h" #include "libcamera/internal/pipeline_handler.h" +#include "libcamera/internal/request.h" #include "libcamera/internal/v4l2_subdevice.h" #include "libcamera/internal/v4l2_videodevice.h" @@ -57,32 +68,27 @@ const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = { { formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 }, /* RAW formats, FR pipe only. */ - { formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 }, - { formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 }, - { formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 }, - { formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 }, - { formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 }, - { formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 }, - { formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 }, - { formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 }, - { formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 }, - { formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 }, - { formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 }, - { formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 }, - { formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 }, - { formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 }, - { formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 }, - { formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 }, { formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 }, { formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 }, { formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 }, { formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 }, }; +constexpr Size kMaliC55MinInputSize = { 640, 480 }; constexpr Size kMaliC55MinSize = { 128, 128 }; constexpr Size kMaliC55MaxSize = { 8192, 8192 }; constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36; +struct MaliC55FrameInfo { + Request *request; + + FrameBuffer *paramBuffer; + FrameBuffer *statBuffer; + + bool paramsDone; + bool statsDone; +}; + class MaliC55CameraData : public Camera::Private { public: @@ -92,13 +98,16 @@ public: } int init(); + int loadIPA(); /* Deflect these functionalities to either TPG or CameraSensor. 
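The MaliC55FrameInfo structure introduced above drives a completion pattern that recurs throughout this series: a request completes only once its image buffers and both ISP side channels (parameters and statistics) are done. A condensed standalone sketch (types and names are illustrative):

	#include <iostream>
	#include <map>

	struct Request;
	struct FrameBuffer;

	/*
	 * Per-request bookkeeping: image buffers complete through the usual
	 * capture path, while params and stats complete through their own
	 * video devices, in any order.
	 */
	struct FrameInfo {
		Request *request;
		FrameBuffer *paramBuffer; /* ISP parameters for this frame */
		FrameBuffer *statBuffer;  /* 3A statistics for this frame */
		bool paramsDone;
		bool statsDone;
	};

	/* The request may complete only when all three conditions hold. */
	bool canComplete(const FrameInfo &info, bool imageBuffersDone)
	{
		return info.paramsDone && info.statsDone && imageBuffersDone;
	}

	int main()
	{
		std::map<unsigned int, FrameInfo> frameInfoMap; /* keyed by sequence */
		frameInfoMap[0] = { nullptr, nullptr, nullptr, true, false };

		std::cout << canComplete(frameInfoMap[0], true) << std::endl; /* 0 */
		return 0;
	}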
*/ - const std::vector<unsigned int> mbusCodes() const; const std::vector<Size> sizes(unsigned int mbusCode) const; const Size resolution() const; - PixelFormat bestRawFormat() const; + int pixfmtToMbusCode(const PixelFormat &pixFmt) const; + const PixelFormat &bestRawFormat() const; + + void updateControls(const ControlInfoMap &ipaControls); PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const; Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const; @@ -111,8 +120,15 @@ public: Stream frStream_; Stream dsStream_; + std::unique_ptr<ipa::mali_c55::IPAProxyMaliC55> ipa_; + std::vector<IPABuffer> ipaStatBuffers_; + std::vector<IPABuffer> ipaParamBuffers_; + + std::unique_ptr<DelayedControls> delayedCtrls_; + private: void initTPGData(); + void setSensorControls(const ControlList &sensorControls); std::string id_; std::vector<unsigned int> tpgCodes_; @@ -141,9 +157,8 @@ int MaliC55CameraData::init() * Register a CameraSensor if we connect to a sensor and create * an entity for the connected CSI-2 receiver. */ - sensor_ = std::make_unique<CameraSensor>(entity_); - ret = sensor_->init(); - if (ret) - return ret; + sensor_ = CameraSensorFactoryBase::create(entity_); + if (!sensor_) + return -ENODEV; const MediaPad *sourcePad = entity_->getPadByIndex(0); @@ -177,12 +192,9 @@ void MaliC55CameraData::initTPGData() tpgResolution_ = tpgSizes_.back(); } -const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const +void MaliC55CameraData::setSensorControls(const ControlList &sensorControls) { - if (sensor_) - return sensor_->mbusCodes(); - - return tpgCodes_; + delayedCtrls_->push(sensorControls); } const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const @@ -216,33 +228,102 @@ const Size MaliC55CameraData::resolution() const return tpgResolution_; } -PixelFormat MaliC55CameraData::bestRawFormat() const +/* + * The Mali C55 ISP can only produce 16-bit RAW output in bypass modes, but the + * sensors connected to it might produce 8/10/12/16 bits. We simply search the + * sensor's supported formats for the one with a matching bayer order and the + * greatest bitdepth. + */ +int MaliC55CameraData::pixfmtToMbusCode(const PixelFormat &pixFmt) const { + auto it = maliC55FmtToCode.find(pixFmt); + if (it == maliC55FmtToCode.end()) + return -EINVAL; + + BayerFormat bayerFormat = BayerFormat::fromMbusCode(it->second); + if (!bayerFormat.isValid()) + return -EINVAL; + + V4L2Subdevice::Formats formats = sd_->formats(0); + unsigned int sensorMbusCode = 0; unsigned int bitDepth = 0; - PixelFormat rawFormat; - /* - * Iterate over all the supported PixelFormat and find the one - * supported by the camera with the largest bitdepth. - */ - for (const auto &maliFormat : maliC55FmtToCode) { - PixelFormat pixFmt = maliFormat.first; - if (!isFormatRaw(pixFmt)) + for (const auto &[code, sizes] : formats) { + BayerFormat sdBayerFormat = BayerFormat::fromMbusCode(code); + if (!sdBayerFormat.isValid()) continue; - unsigned int rawCode = maliFormat.second; - const auto rawSizes = sizes(rawCode); - if (rawSizes.empty()) + if (sdBayerFormat.order != bayerFormat.order) continue; - BayerFormat bayer = BayerFormat::fromMbusCode(rawCode); - if (bayer.bitDepth > bitDepth) { - bitDepth = bayer.bitDepth; - rawFormat = pixFmt; + if (sdBayerFormat.bitDepth > bitDepth) { + bitDepth = sdBayerFormat.bitDepth; + sensorMbusCode = code; } } - return rawFormat; + if (!sensorMbusCode) + return -EINVAL; + + return sensorMbusCode; +} + +/* + * Find a RAW PixelFormat supported by both the ISP and the sensor.
+ * + * The situation is mildly complicated by the fact that we expect the sensor to + * output something like RAW8/10/12/16, but the ISP can only accept as input + * RAW20 and can only produce as output RAW16. The one constant in that is the + * bayer order of the data, so we'll simply check that the sensor produces a + * format with a bayer order that matches that of one of the formats we support, + * and select that. + */ +const PixelFormat &MaliC55CameraData::bestRawFormat() const +{ + static const PixelFormat invalidPixFmt = {}; + + for (const auto &fmt : sd_->formats(0)) { + BayerFormat sensorBayer = BayerFormat::fromMbusCode(fmt.first); + + if (!sensorBayer.isValid()) + continue; + + for (const auto &[pixFmt, rawCode] : maliC55FmtToCode) { + if (!isFormatRaw(pixFmt)) + continue; + + BayerFormat bayer = BayerFormat::fromMbusCode(rawCode); + if (bayer.order == sensorBayer.order) + return pixFmt; + } + } + + LOG(MaliC55, Error) << "Sensor doesn't provide a compatible format"; + return invalidPixFmt; +} + +void MaliC55CameraData::updateControls(const ControlInfoMap &ipaControls) +{ + if (!sensor_) + return; + + IPACameraSensorInfo sensorInfo; + int ret = sensor_->sensorInfo(&sensorInfo); + if (ret) { + LOG(MaliC55, Error) << "Failed to retrieve sensor info"; + return; + } + + ControlInfoMap::Map controls; + Rectangle ispMinCrop{ 0, 0, 640, 480 }; + controls[&controls::ScalerCrop] = + ControlInfo(ispMinCrop, sensorInfo.analogCrop, + sensorInfo.analogCrop); + + for (auto const &c : ipaControls) + controls.emplace(c.first, c.second); + + controlInfo_ = ControlInfoMap(std::move(controls), controls::controls); } /* @@ -251,13 +332,11 @@ PixelFormat MaliC55CameraData::bestRawFormat() const */ PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const { - /* Make sure the provided raw format is supported by the pipeline. */ - auto it = maliC55FmtToCode.find(rawFmt); - if (it == maliC55FmtToCode.end()) + /* Make sure the RAW mbus code is supported by the image source. */ + int rawCode = pixfmtToMbusCode(rawFmt); + if (rawCode < 0) return bestRawFormat(); - /* Now make sure the RAW mbus code is supported by the image source. */ - unsigned int rawCode = it->second; const auto rawSizes = sizes(rawCode); if (rawSizes.empty()) return bestRawFormat(); @@ -265,15 +344,16 @@ PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const return rawFmt; } -Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const +Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &size) const { - /* Just make sure the format is supported. */ - auto it = maliC55FmtToCode.find(rawFmt); - if (it == maliC55FmtToCode.end()) - return {}; + /* Expand the RAW size to the minimum ISP input size. */ + Size rawSize = size.expandedTo(kMaliC55MinInputSize); /* Check if the size is natively supported. */ - unsigned int rawCode = it->second; + int rawCode = pixfmtToMbusCode(rawFmt); + if (rawCode < 0) + return {}; + const auto rawSizes = sizes(rawCode); auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize); if (sizeIt != rawSizes.end()) @@ -282,20 +362,59 @@ Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &ra /* Or adjust it to the closest supported size. 
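The closest-size search that follows picks the candidate with the smallest Manhattan distance; note that the running minimum has to be kept current (distance = dist) on every improvement for the loop to converge on the nearest size. A standalone sketch with hypothetical sizes:

	#include <cstdlib>
	#include <iostream>
	#include <limits>
	#include <vector>

	struct Sz {
		int width;
		int height;
	};

	int main()
	{
		std::vector<Sz> sizes = { { 640, 480 }, { 1280, 720 }, { 1920, 1080 } };
		Sz target = { 1600, 900 };

		unsigned int distance = std::numeric_limits<unsigned int>::max();
		Sz best = {};

		for (const Sz &sz : sizes) {
			unsigned int dist = std::abs(target.width - sz.width) +
					    std::abs(target.height - sz.height);
			if (dist < distance) {
				distance = dist; /* keep the running minimum current */
				best = sz;
			}
		}

		std::cout << best.width << "x" << best.height << std::endl; /* 1280x720 */
		return 0;
	}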
*/ uint16_t distance = std::numeric_limits<uint16_t>::max(); Size bestSize; - for (const Size &size : rawSizes) { + for (const Size &sz : rawSizes) { uint16_t dist = std::abs(static_cast<int>(rawSize.width) - - static_cast<int>(size.width)) + + static_cast<int>(sz.width)) + std::abs(static_cast<int>(rawSize.height) - - static_cast<int>(size.height)); + static_cast<int>(sz.height)); if (dist < distance) { distance = dist; - bestSize = size; + bestSize = sz; } } return bestSize; } +int MaliC55CameraData::loadIPA() +{ + int ret; + + /* Do not initialize IPA for TPG. */ + if (!sensor_) + return 0; + + ipa_ = IPAManager::createIPA<ipa::mali_c55::IPAProxyMaliC55>(pipe(), 1, 1); + if (!ipa_) + return -ENOENT; + + ipa_->setSensorControls.connect(this, &MaliC55CameraData::setSensorControls); + + std::string ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml", + "uncalibrated.yaml"); + + /* We need to inform the IPA of the sensor configuration */ + ipa::mali_c55::IPAConfigInfo ipaConfig{}; + + ret = sensor_->sensorInfo(&ipaConfig.sensorInfo); + if (ret) + return ret; + + ipaConfig.sensorControls = sensor_->controls(); + + ControlInfoMap ipaControls; + ret = ipa_->init({ ipaTuningFile, sensor_->model() }, ipaConfig, + &ipaControls); + if (ret) { + LOG(MaliC55, Error) << "Failed to initialise the Mali-C55 IPA"; + return ret; + } + + updateControls(ipaControls); + + return 0; +} + class MaliC55CameraConfiguration : public CameraConfiguration { public: @@ -305,6 +424,7 @@ public: } Status validate() override; + const Transform &combinedTransform() { return combinedTransform_; } V4L2SubdeviceFormat sensorFormat_; @@ -312,6 +432,7 @@ private: static constexpr unsigned int kMaxStreams = 2; const MaliC55CameraData *data_; + Transform combinedTransform_; }; CameraConfiguration::Status MaliC55CameraConfiguration::validate() { @@ -321,6 +442,19 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() if (config_.empty()) return Invalid; + /* + * The TPG doesn't support flips, so we only need to calculate a + * transform if we have a sensor. + */ + if (data_->sensor_) { + Orientation requestedOrientation = orientation; + combinedTransform_ = data_->sensor_->computeTransform(&orientation); + if (orientation != requestedOrientation) + status = Adjusted; + } else { + combinedTransform_ = Transform::Rot0; + } + /* Only 2 streams available. */ if (config_.size() > kMaxStreams) { config_.resize(kMaxStreams); @@ -342,7 +476,11 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() rawConfig = &config; } - Size maxSize = kMaliC55MaxSize; + /* + * The C55 cannot upscale. Limit the configuration to the ISP + * capabilities and the sensor resolution.
+ */ + Size maxSize = kMaliC55MaxSize.boundedTo(data_->resolution()); if (rawConfig) { /* * \todo Take into account the Bayer components ordering once @@ -350,6 +488,10 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() */ PixelFormat rawFormat = data_->adjustRawFormat(rawConfig->pixelFormat); + + if (!rawFormat.isValid()) + return Invalid; + if (rawFormat != rawConfig->pixelFormat) { LOG(MaliC55, Debug) << "RAW format adjusted to " << rawFormat; @@ -368,12 +510,21 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() maxSize = rawSize; + const PixelFormatInfo &info = PixelFormatInfo::info(rawConfig->pixelFormat); + rawConfig->stride = info.stride(rawConfig->size.width, 0, 4); + rawConfig->frameSize = info.frameSize(rawConfig->size, 4); + rawConfig->setStream(const_cast<Stream *>(&data_->frStream_)); frPipeAvailable = false; } - /* Adjust processed streams. */ - Size maxYuvSize; + /* + * Adjust processed streams. + * + * Compute the minimum sensor size to be later used to select the + * sensor configuration. + */ + Size minSensorSize = kMaliC55MinInputSize; for (StreamConfiguration &config : config_) { if (isFormatRaw(config.pixelFormat)) continue; @@ -395,8 +546,8 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() status = Adjusted; } - if (maxYuvSize < size) - maxYuvSize = size; + if (minSensorSize < size) + minSensorSize = size; if (frPipeAvailable) { config.setStream(const_cast<Stream *>(&data_->frStream_)); @@ -410,30 +561,30 @@ CameraConfiguration::Status MaliC55CameraConfiguration::validate() /* If there's a RAW config, sensor configuration follows it. */ if (rawConfig) { - const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat); - sensorFormat_.code = it->second; - sensorFormat_.size = rawConfig->size; + sensorFormat_.code = data_->pixfmtToMbusCode(rawConfig->pixelFormat); + sensorFormat_.size = rawConfig->size.expandedTo(minSensorSize); return status; } /* If there's no RAW config, compute the sensor configuration here. */ PixelFormat rawFormat = data_->bestRawFormat(); - const auto it = maliC55FmtToCode.find(rawFormat); - sensorFormat_.code = it->second; + if (!rawFormat.isValid()) + return Invalid; + + sensorFormat_.code = data_->pixfmtToMbusCode(rawFormat); uint16_t distance = std::numeric_limits<uint16_t>::max(); - const auto sizes = data_->sizes(it->second); + const auto sizes = data_->sizes(sensorFormat_.code); Size bestSize; for (const auto &size : sizes) { - /* Skip sensor sizes that are smaller than the max YUV size. 
*/ - if (maxYuvSize.width > size.width || - maxYuvSize.height > size.height) + if (minSensorSize.width > size.width || + minSensorSize.height > size.height) continue; - uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) - + uint16_t dist = std::abs(static_cast<int>(minSensorSize.width) - static_cast<int>(size.width)) + - std::abs(static_cast<int>(maxYuvSize.height) - + std::abs(static_cast<int>(minSensorSize.height) - static_cast<int>(size.height)); if (dist < distance) { distance = dist; @@ -458,13 +609,19 @@ public: int exportFrameBuffers(Camera *camera, Stream *stream, std::vector<std::unique_ptr<FrameBuffer>> *buffers) override; + int allocateBuffers(Camera *camera); + void freeBuffers(Camera *camera); int start(Camera *camera, const ControlList *controls) override; void stopDevice(Camera *camera) override; int queueRequestDevice(Camera *camera, Request *request) override; - void bufferReady(FrameBuffer *buffer); + void imageBufferReady(FrameBuffer *buffer); + void paramsBufferReady(FrameBuffer *buffer); + void statsBufferReady(FrameBuffer *buffer); + void paramsComputed(unsigned int requestId); + void statsProcessed(unsigned int requestId, const ControlList &metadata); bool match(DeviceEnumerator *enumerator) override; @@ -472,6 +629,7 @@ private: struct MaliC55Pipe { std::unique_ptr<V4L2Subdevice> resizer; std::unique_ptr<V4L2VideoDevice> cap; + MediaLink *link; Stream *stream; }; @@ -508,6 +666,10 @@ private: pipe.stream = nullptr; } + MaliC55FrameInfo *findFrameInfo(FrameBuffer *buffer); + MaliC55FrameInfo *findFrameInfo(Request *request); + void tryComplete(MaliC55FrameInfo *info); + int configureRawStream(MaliC55CameraData *data, const StreamConfiguration &config, V4L2SubdeviceFormat &subdevFormat); @@ -515,13 +677,25 @@ private: const StreamConfiguration &config, V4L2SubdeviceFormat &subdevFormat); - void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data, + void applyScalerCrop(Camera *camera, const ControlList &controls); + + bool registerMaliCamera(std::unique_ptr<MaliC55CameraData> data, const std::string &name); bool registerTPGCamera(MediaLink *link); bool registerSensorCamera(MediaLink *link); MediaDevice *media_; std::unique_ptr<V4L2Subdevice> isp_; + std::unique_ptr<V4L2VideoDevice> stats_; + std::unique_ptr<V4L2VideoDevice> params_; + + std::vector<std::unique_ptr<FrameBuffer>> statsBuffers_; + std::queue<FrameBuffer *> availableStatsBuffers_; + + std::vector<std::unique_ptr<FrameBuffer>> paramsBuffers_; + std::queue<FrameBuffer *> availableParamsBuffers_; + + std::map<unsigned int, MaliC55FrameInfo> frameInfoMap_; std::array<MaliC55Pipe, MaliC55NumPipes> pipes_; @@ -607,7 +781,10 @@ PipelineHandlerMaliC55::generateConfiguration(Camera *camera, if (isRaw) { /* Make sure the mbus code is supported. */ - unsigned int rawCode = maliFormat.second; + int rawCode = data->pixfmtToMbusCode(pixFmt); + if (rawCode < 0) + continue; + const auto sizes = data->sizes(rawCode); if (sizes.empty()) continue; @@ -715,16 +892,28 @@ int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data, if (ret) return ret; - /* \todo Configure the resizer crop/compose rectangles. */ - Rectangle ispCrop = { 0, 0, config.size }; + /* + * Compute the scaler-in to scaler-out ratio: first center-crop to align + * the FOV to the desired resolution, then scale to the desired size.
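A worked numeric example of that center-crop step, with hypothetical sizes (plain C++ mirroring, rather than using, libcamera's Size helpers): a 1920x1080 scaler input cropped for a 4:3 output keeps the full height and trims 240 columns on each side.

	#include <cstdint>
	#include <iostream>

	struct Sz {
		unsigned int width;
		unsigned int height;
	};

	/* Shrink one dimension of 'in' so it matches the aspect ratio of 'ratio'. */
	Sz boundedToAspectRatio(Sz in, Sz ratio)
	{
		uint64_t r1 = static_cast<uint64_t>(in.width) * ratio.height;
		uint64_t r2 = static_cast<uint64_t>(in.height) * ratio.width;
		if (r1 > r2)
			return { static_cast<unsigned int>(r2 / ratio.height), in.height };
		return { in.width, static_cast<unsigned int>(r1 / ratio.width) };
	}

	int main()
	{
		Sz sensor = { 1920, 1080 };
		Sz output = { 1440, 1080 }; /* 4:3 target */

		Sz scalerIn = boundedToAspectRatio(sensor, output);
		unsigned int xCrop = (sensor.width - scalerIn.width) / 2;
		unsigned int yCrop = (sensor.height - scalerIn.height) / 2;

		/* Prints 1440x1080+240+0: a centered 4:3 window in the 16:9 frame. */
		std::cout << scalerIn.width << "x" << scalerIn.height
			  << "+" << xCrop << "+" << yCrop << std::endl;
		return 0;
	}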
+ */ + Size scalerIn = subdevFormat.size.boundedToAspectRatio(config.size); + int xCrop = (subdevFormat.size.width - scalerIn.width) / 2; + int yCrop = (subdevFormat.size.height - scalerIn.height) / 2; + Rectangle ispCrop = { xCrop, yCrop, scalerIn }; ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop); if (ret) return ret; - ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop); + Rectangle ispCompose = { 0, 0, config.size }; + ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCompose); if (ret) return ret; + /* + * The source pad format size comes directly from the sink + * compose rectangle. + */ + subdevFormat.size = ispCompose.size(); subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second; return pipe->resizer->setFormat(1, &subdevFormat); } @@ -756,16 +945,33 @@ int PipelineHandlerMaliC55::configure(Camera *camera, if (ret) return ret; + if (data->sensor_) { + ret = data->sensor_->setFormat(&subdevFormat, + maliConfig->combinedTransform()); + if (ret) + return ret; + } + if (data->csi_) { ret = data->csi_->setFormat(0, &subdevFormat); if (ret) return ret; - ret = data->csi_->setFormat(1, &subdevFormat); + ret = data->csi_->getFormat(1, &subdevFormat); if (ret) return ret; } + V4L2DeviceFormat statsFormat; + ret = stats_->getFormat(&statsFormat); + if (ret) + return ret; + + if (statsFormat.planes[0].size != sizeof(struct mali_c55_stats_buffer)) { + LOG(MaliC55, Error) << "3a stats buffer size invalid"; + return -EINVAL; + } + /* * Propagate the format to the ISP sink pad and configure the input * crop rectangle (no crop at the moment). @@ -795,6 +1001,17 @@ int PipelineHandlerMaliC55::configure(Camera *camera, Stream *stream = streamConfig.stream(); MaliC55Pipe *pipe = pipeFromStream(data, stream); + /* + * Enable the media link between the pipe's resizer and the + * capture video device + */ + + ret = pipe->link->setEnabled(true); + if (ret) { + LOG(MaliC55, Error) << "Couldn't enable resizer's link"; + return ret; + } + if (isFormatRaw(streamConfig.pixelFormat)) ret = configureRawStream(data, streamConfig, subdevFormat); else @@ -816,6 +1033,56 @@ int PipelineHandlerMaliC55::configure(Camera *camera, pipe->stream = stream; } + if (!data->ipa_) + return 0; + + /* + * Enable the media link between the ISP subdevice and the statistics + * video device. + */ + const MediaEntity *ispEntity = isp_->entity(); + ret = ispEntity->getPadByIndex(3)->links()[0]->setEnabled(true); + if (ret) { + LOG(MaliC55, Error) << "Couldn't enable statistics link"; + return ret; + } + + /* + * Enable the media link between the ISP subdevice and the parameters + * video device. 
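The ordering the resizer configuration follows above, sink crop, then sink compose, then a source format inherited from the compose rectangle, can be sketched standalone (illustrative types, not the V4L2Subdevice API):

	#include <iostream>

	struct Size {
		unsigned int width;
		unsigned int height;
	};

	struct Rect {
		int x;
		int y;
		Size size;
	};

	struct Resizer {
		Rect sinkCrop;    /* selects the input field of view */
		Rect sinkCompose; /* sets the scaler output size */
		Size sourceFmt;   /* propagated from the compose rectangle */

		void configure(const Rect &crop, const Size &output)
		{
			sinkCrop = crop;
			sinkCompose = { 0, 0, output };
			sourceFmt = sinkCompose.size; /* not independently chosen */
		}
	};

	int main()
	{
		Resizer r;
		r.configure({ 240, 0, { 1440, 1080 } }, { 640, 480 });
		std::cout << r.sourceFmt.width << "x" << r.sourceFmt.height
			  << std::endl; /* 640x480 */
		return 0;
	}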
+ */ + ret = ispEntity->getPadByIndex(4)->links()[0]->setEnabled(true); + if (ret) { + LOG(MaliC55, Error) << "Couldn't enable parameters link"; + return ret; + } + + /* We need to inform the IPA of the sensor configuration */ + ipa::mali_c55::IPAConfigInfo ipaConfig{}; + + ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo); + if (ret) + return ret; + + ipaConfig.sensorControls = data->sensor_->controls(); + + /* + * And we also need to tell the IPA the bayerOrder of the data (as + * affected by any flips that we've configured) + */ + const Transform &combinedTransform = maliConfig->combinedTransform(); + BayerFormat::Order bayerOrder = data->sensor_->bayerOrder(combinedTransform); + + ControlInfoMap ipaControls; + ret = data->ipa_->configure(ipaConfig, utils::to_underlying(bayerOrder), + &ipaControls); + if (ret) { + LOG(MaliC55, Error) << "Failed to configure IPA"; + return ret; + } + + data->updateControls(ipaControls); + return 0; } @@ -828,32 +1095,166 @@ int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream, return pipe->cap->exportBuffers(count, buffers); } -int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls) +void PipelineHandlerMaliC55::freeBuffers(Camera *camera) { + MaliC55CameraData *data = cameraData(camera); + + while (!availableStatsBuffers_.empty()) + availableStatsBuffers_.pop(); + while (!availableParamsBuffers_.empty()) + availableParamsBuffers_.pop(); + + statsBuffers_.clear(); + paramsBuffers_.clear(); + + if (data->ipa_) { + data->ipa_->unmapBuffers(data->ipaStatBuffers_); + data->ipa_->unmapBuffers(data->ipaParamBuffers_); + } + data->ipaStatBuffers_.clear(); + data->ipaParamBuffers_.clear(); + + if (stats_->releaseBuffers()) + LOG(MaliC55, Error) << "Failed to release stats buffers"; + + if (params_->releaseBuffers()) + LOG(MaliC55, Error) << "Failed to release params buffers"; + + return; +} + +int PipelineHandlerMaliC55::allocateBuffers(Camera *camera) +{ + MaliC55CameraData *data = cameraData(camera); + unsigned int ipaBufferId = 1; + unsigned int bufferCount; + int ret; + + bufferCount = std::max({ + data->frStream_.configuration().bufferCount, + data->dsStream_.configuration().bufferCount, + }); + + ret = stats_->allocateBuffers(bufferCount, &statsBuffers_); + if (ret < 0) + return ret; + + for (std::unique_ptr<FrameBuffer> &buffer : statsBuffers_) { + buffer->setCookie(ipaBufferId++); + data->ipaStatBuffers_.emplace_back(buffer->cookie(), + buffer->planes()); + availableStatsBuffers_.push(buffer.get()); + } + + ret = params_->allocateBuffers(bufferCount, ¶msBuffers_); + if (ret < 0) + return ret; + + for (std::unique_ptr<FrameBuffer> &buffer : paramsBuffers_) { + buffer->setCookie(ipaBufferId++); + data->ipaParamBuffers_.emplace_back(buffer->cookie(), + buffer->planes()); + availableParamsBuffers_.push(buffer.get()); + } + + if (data->ipa_) { + data->ipa_->mapBuffers(data->ipaStatBuffers_, true); + data->ipa_->mapBuffers(data->ipaParamBuffers_, false); + } + + return 0; +} + +int PipelineHandlerMaliC55::start(Camera *camera, [[maybe_unused]] const ControlList *controls) +{ + MaliC55CameraData *data = cameraData(camera); + int ret; + + ret = allocateBuffers(camera); + if (ret) + return ret; + + if (data->ipa_) { + ret = data->ipa_->start(); + if (ret) { + LOG(MaliC55, Error) + << "Failed to start IPA" << camera->id(); + freeBuffers(camera); + return ret; + } + } + for (MaliC55Pipe &pipe : pipes_) { if (!pipe.stream) continue; Stream *stream = pipe.stream; - int ret = 
pipe.cap->importBuffers(stream->configuration().bufferCount); + ret = pipe.cap->importBuffers(stream->configuration().bufferCount); if (ret) { LOG(MaliC55, Error) << "Failed to import buffers"; + if (data->ipa_) + data->ipa_->stop(); + freeBuffers(camera); return ret; } ret = pipe.cap->streamOn(); if (ret) { LOG(MaliC55, Error) << "Failed to start stream"; + if (data->ipa_) + data->ipa_->stop(); + freeBuffers(camera); return ret; } } + ret = stats_->streamOn(); + if (ret) { + LOG(MaliC55, Error) << "Failed to start stats stream"; + + if (data->ipa_) + data->ipa_->stop(); + + for (MaliC55Pipe &pipe : pipes_) { + if (pipe.stream) + pipe.cap->streamOff(); + } + + freeBuffers(camera); + return ret; + } + + ret = params_->streamOn(); + if (ret) { + LOG(MaliC55, Error) << "Failed to start params stream"; + + stats_->streamOff(); + if (data->ipa_) + data->ipa_->stop(); + + for (MaliC55Pipe &pipe : pipes_) { + if (pipe.stream) + pipe.cap->streamOff(); + } + + freeBuffers(camera); + return ret; + } + + ret = isp_->setFrameStartEnabled(true); + if (ret) + LOG(MaliC55, Error) << "Failed to enable frame start events"; + return 0; } -void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera) +void PipelineHandlerMaliC55::stopDevice(Camera *camera) { + MaliC55CameraData *data = cameraData(camera); + + isp_->setFrameStartEnabled(false); + for (MaliC55Pipe &pipe : pipes_) { if (!pipe.stream) continue; @@ -861,38 +1262,285 @@ void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera) pipe.cap->streamOff(); pipe.cap->releaseBuffers(); } + + stats_->streamOff(); + params_->streamOff(); + if (data->ipa_) + data->ipa_->stop(); + freeBuffers(camera); +} + +void PipelineHandlerMaliC55::applyScalerCrop(Camera *camera, + const ControlList &controls) +{ + MaliC55CameraData *data = cameraData(camera); + + const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop); + if (!scalerCrop) + return; + + if (!data->sensor_) { + LOG(MaliC55, Error) << "ScalerCrop not supported for TPG"; + return; + } + + Rectangle nativeCrop = *scalerCrop; + + IPACameraSensorInfo sensorInfo; + int ret = data->sensor_->sensorInfo(&sensorInfo); + if (ret) { + LOG(MaliC55, Error) << "Failed to retrieve sensor info"; + return; + } + + /* + * The ScalerCrop rectangle re-scaling in the ISP crop rectangle + * comes straight from the RPi pipeline handler. + * + * Create a version of the crop rectangle aligned to the analogue crop + * rectangle top-left coordinates and scaled in the [analogue crop to + * output frame] ratio to take into account binning/skipping on the + * sensor. + */ + Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo.analogCrop + .topLeft()); + ispCrop.scaleBy(sensorInfo.outputSize, sensorInfo.analogCrop.size()); + + /* + * The crop rectangle should be: + * 1. At least as big as ispMinCropSize_, once that's been + * enlarged to the same aspect ratio. + * 2. With the same mid-point, if possible. + * 3. But it can't go outside the sensor area. + */ + Rectangle ispMinCrop{ 0, 0, 640, 480 }; + Size minSize = ispMinCrop.size().expandedToAspectRatio(nativeCrop.size()); + Size size = ispCrop.size().expandedTo(minSize); + ispCrop = size.centeredTo(ispCrop.center()) + .enclosedIn(Rectangle(sensorInfo.outputSize)); + + /* + * As the resizer can't upscale, the crop rectangle has to be larger + * than the larger stream output size. 
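A numeric sketch of the re-scaling described above, for a hypothetical sensor doing 2x2 binning (analogue crop 3840x2160, output size 1920x1080):

	#include <iostream>

	struct Rect {
		int x;
		int y;
		unsigned int width;
		unsigned int height;
	};

	int main()
	{
		Rect analogCrop = { 0, 0, 3840, 2160 };
		Rect outputSize = { 0, 0, 1920, 1080 };
		Rect nativeCrop = { 960, 540, 1920, 1080 }; /* requested ScalerCrop */

		/* Translate into analogue-crop-relative coordinates... */
		Rect ispCrop = { nativeCrop.x - analogCrop.x,
				 nativeCrop.y - analogCrop.y,
				 nativeCrop.width, nativeCrop.height };

		/* ...then scale by the output/analogue ratio (1/2 in each axis). */
		ispCrop.x = ispCrop.x * outputSize.width / analogCrop.width;
		ispCrop.y = ispCrop.y * outputSize.height / analogCrop.height;
		ispCrop.width = ispCrop.width * outputSize.width / analogCrop.width;
		ispCrop.height = ispCrop.height * outputSize.height / analogCrop.height;

		/* Prints 960x540+480+270, expressed in binned sensor pixels. */
		std::cout << ispCrop.width << "x" << ispCrop.height
			  << "+" << ispCrop.x << "+" << ispCrop.y << std::endl;
		return 0;
	}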
+ */ + Size maxYuvSize; + for (MaliC55Pipe &pipe : pipes_) { + if (!pipe.stream) + continue; + + const StreamConfiguration &config = pipe.stream->configuration(); + if (isFormatRaw(config.pixelFormat)) { + LOG(MaliC55, Debug) << "Cannot crop with a RAW stream"; + return; + } + + Size streamSize = config.size; + if (streamSize.width > maxYuvSize.width) + maxYuvSize.width = streamSize.width; + if (streamSize.height > maxYuvSize.height) + maxYuvSize.height = streamSize.height; + } + + ispCrop.width = std::max(ispCrop.width, maxYuvSize.width); + ispCrop.height = std::max(ispCrop.height, maxYuvSize.height); + + /* + * Now apply the scaler crop to each enabled output. This overrides the + * crop configuration performed at configure() time and can cause + * non-square pixels if the crop rectangle and scaler output FOV ratio are + * different. + */ + for (MaliC55Pipe &pipe : pipes_) { + if (!pipe.stream) + continue; + + /* Create a copy to avoid setSelection() modifying ispCrop. */ + Rectangle pipeCrop = ispCrop; + ret = pipe.resizer->setSelection(0, V4L2_SEL_TGT_CROP, &pipeCrop); + if (ret) { + LOG(MaliC55, Error) + << "Failed to apply crop to " + << (pipe.stream == &data->frStream_ ? + "FR" : "DS") << " pipe"; + return; + } + } } int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request) { - int ret; + MaliC55CameraData *data = cameraData(camera); - for (auto &[stream, buffer] : request->buffers()) { - MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream); + /* Do not run the IPA if the TPG is in use. */ + if (!data->ipa_) { + MaliC55FrameInfo frameInfo; + frameInfo.request = request; + frameInfo.statBuffer = nullptr; + frameInfo.paramBuffer = nullptr; + frameInfo.paramsDone = true; + frameInfo.statsDone = true; - ret = pipe->cap->queueBuffer(buffer); - if (ret) - return ret; + frameInfoMap_[request->sequence()] = frameInfo; + + for (auto &[stream, buffer] : request->buffers()) { + MaliC55Pipe *pipe = pipeFromStream(data, stream); + + pipe->cap->queueBuffer(buffer); + } + + return 0; + } + + if (availableStatsBuffers_.empty()) { + LOG(MaliC55, Error) << "Stats buffer underrun"; + return -ENOENT; + } + + if (availableParamsBuffers_.empty()) { + LOG(MaliC55, Error) << "Params buffer underrun"; + return -ENOENT; } + MaliC55FrameInfo frameInfo; + frameInfo.request = request; + + frameInfo.statBuffer = availableStatsBuffers_.front(); + availableStatsBuffers_.pop(); + frameInfo.paramBuffer = availableParamsBuffers_.front(); + availableParamsBuffers_.pop(); + + frameInfo.paramsDone = false; + frameInfo.statsDone = false; + + frameInfoMap_[request->sequence()] = frameInfo; + + data->ipa_->queueRequest(request->sequence(), request->controls()); + data->ipa_->fillParams(request->sequence(), + frameInfo.paramBuffer->cookie()); + return 0; } -void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer) +MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(Request *request) { - Request *request = buffer->request(); + for (auto &[sequence, info] : frameInfoMap_) { + if (info.request == request) + return &info; + } + + return nullptr; +} - completeBuffer(request, buffer); +MaliC55FrameInfo *PipelineHandlerMaliC55::findFrameInfo(FrameBuffer *buffer) +{ + for (auto &[sequence, info] : frameInfoMap_) { + if (info.paramBuffer == buffer || + info.statBuffer == buffer) + return &info; + } + return nullptr; +} + +void PipelineHandlerMaliC55::tryComplete(MaliC55FrameInfo *info) +{ + if (!info->paramsDone) + return; + if (!info->statsDone) + return; + + Request *request = info->request; if (request->hasPendingBuffers()) return; + if (info->statBuffer) + 
availableStatsBuffers_.push(info->statBuffer); + if (info->paramBuffer) + availableParamsBuffers_.push(info->paramBuffer); + + frameInfoMap_.erase(request->sequence()); + completeRequest(request); } -void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data, +void PipelineHandlerMaliC55::imageBufferReady(FrameBuffer *buffer) +{ + Request *request = buffer->request(); + MaliC55FrameInfo *info = findFrameInfo(request); + ASSERT(info); + + if (completeBuffer(request, buffer)) + tryComplete(info); +} + +void PipelineHandlerMaliC55::paramsBufferReady(FrameBuffer *buffer) +{ + MaliC55FrameInfo *info = findFrameInfo(buffer); + ASSERT(info); + + info->paramsDone = true; + + tryComplete(info); +} + +void PipelineHandlerMaliC55::statsBufferReady(FrameBuffer *buffer) +{ + MaliC55FrameInfo *info = findFrameInfo(buffer); + ASSERT(info); + + Request *request = info->request; + MaliC55CameraData *data = cameraData(request->_d()->camera()); + + ControlList sensorControls = data->delayedCtrls_->get(buffer->metadata().sequence); + + data->ipa_->processStats(request->sequence(), buffer->cookie(), + sensorControls); +} + +void PipelineHandlerMaliC55::paramsComputed(unsigned int requestId) +{ + MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId]; + Request *request = frameInfo.request; + MaliC55CameraData *data = cameraData(request->_d()->camera()); + + /* + * Queue buffers for stats and params, then queue buffers to the capture + * video devices. + */ + + frameInfo.paramBuffer->_d()->metadata().planes()[0].bytesused = + sizeof(struct mali_c55_params_buffer); + params_->queueBuffer(frameInfo.paramBuffer); + stats_->queueBuffer(frameInfo.statBuffer); + + for (auto &[stream, buffer] : request->buffers()) { + MaliC55Pipe *pipe = pipeFromStream(data, stream); + + pipe->cap->queueBuffer(buffer); + } +} + +void PipelineHandlerMaliC55::statsProcessed(unsigned int requestId, + const ControlList &metadata) +{ + MaliC55FrameInfo &frameInfo = frameInfoMap_[requestId]; + + frameInfo.statsDone = true; + frameInfo.request->metadata().merge(metadata); + + tryComplete(&frameInfo); +} + +bool PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data, const std::string &name) { + if (data->loadIPA()) + return false; + + if (data->ipa_) { + data->ipa_->statsProcessed.connect(this, &PipelineHandlerMaliC55::statsProcessed); + data->ipa_->paramsComputed.connect(this, &PipelineHandlerMaliC55::paramsComputed); + } + std::set<Stream *> streams{ &data->frStream_ }; if (dsFitted_) streams.insert(&data->dsStream_); @@ -900,6 +1548,8 @@ void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraDat std::shared_ptr<Camera> camera = Camera::create(std::move(data), name, streams); registerCamera(std::move(camera)); + + return true; } /* @@ -925,9 +1575,7 @@ bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link) if (data->init()) return false; - registerMaliCamera(std::move(data), name); - - return true; + return registerMaliCamera(std::move(data), name); } /* @@ -953,9 +1601,24 @@ bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink) if (data->init()) return false; - /* \todo: Init properties and controls. 
*/ + data->properties_ = data->sensor_->properties(); - registerMaliCamera(std::move(data), sensor->name()); + const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays(); + std::unordered_map<uint32_t, DelayedControls::ControlParams> params = { + { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } }, + { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } }, + }; + + data->delayedCtrls_ = + std::make_unique<DelayedControls>(data->sensor_->device(), + params); + isp_->frameStart.connect(data->delayedCtrls_.get(), + &DelayedControls::applyControls); + + /* \todo: Init properties. */ + + if (!registerMaliCamera(std::move(data), sensor->name())) + return false; } return true; @@ -966,7 +1629,7 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator) const MediaPad *ispSink; /* - * We search for just the ISP subdevice and the full resolution pipe. + * We search for just the always-available elements of the media graph. * The TPG and the downscale pipe are both optional blocks and may not * be fitted. */ @@ -974,6 +1637,8 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator) dm.add("mali-c55 isp"); dm.add("mali-c55 resizer fr"); dm.add("mali-c55 fr"); + dm.add("mali-c55 3a stats"); + dm.add("mali-c55 3a params"); media_ = acquireMediaDevice(enumerator, dm); if (!media_) @@ -983,6 +1648,14 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator) if (isp_->open() < 0) return false; + stats_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a stats"); + if (stats_->open() < 0) + return false; + + params_ = V4L2VideoDevice::fromEntityName(media_, "mali-c55 3a params"); + if (params_->open() < 0) + return false; + MaliC55Pipe *frPipe = &pipes_[MaliC55FR]; frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr"); if (frPipe->resizer->open() < 0) @@ -992,7 +1665,13 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator) if (frPipe->cap->open() < 0) return false; - frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady); + frPipe->link = media_->link("mali-c55 resizer fr", 1, "mali-c55 fr", 0); + if (!frPipe->link) { + LOG(MaliC55, Error) << "No link between fr resizer and video node"; + return false; + } + + frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady); dsFitted_ = !!media_->getEntityByName("mali-c55 ds"); if (dsFitted_) { @@ -1008,9 +1687,19 @@ bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator) if (dsPipe->cap->open() < 0) return false; - dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady); + dsPipe->link = media_->link("mali-c55 resizer ds", 1, + "mali-c55 ds", 0); + if (!dsPipe->link) { + LOG(MaliC55, Error) << "No link between ds resizer and video node"; + return false; + } + + dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::imageBufferReady); } + stats_->bufferReady.connect(this, &PipelineHandlerMaliC55::statsBufferReady); + params_->bufferReady.connect(this, &PipelineHandlerMaliC55::paramsBufferReady); + ispSink = isp_->entity()->getPadByIndex(0); if (!ispSink || ispSink->links().empty()) { LOG(MaliC55, Error) << "ISP sink pad error"; diff --git a/src/libcamera/pipeline/mali-c55/meson.build b/src/libcamera/pipeline/mali-c55/meson.build index 30fd29b9..eba8e5a3 100644 --- a/src/libcamera/pipeline/mali-c55/meson.build +++ b/src/libcamera/pipeline/mali-c55/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 
'mali-c55.cpp' ]) diff --git a/src/libcamera/pipeline/rkisp1/meson.build b/src/libcamera/pipeline/rkisp1/meson.build index cad66535..d21a6ef9 100644 --- a/src/libcamera/pipeline/rkisp1/meson.build +++ b/src/libcamera/pipeline/rkisp1/meson.build @@ -1,6 +1,6 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'rkisp1.cpp', 'rkisp1_path.cpp', ]) diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp index 4cbf105d..1ac8d8ae 100644 --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp @@ -6,11 +6,12 @@ */ #include <algorithm> -#include <array> -#include <iomanip> +#include <map> #include <memory> #include <numeric> +#include <optional> #include <queue> +#include <vector> #include <linux/media-bus-format.h> #include <linux/rkisp1-config.h> @@ -23,6 +24,7 @@ #include <libcamera/control_ids.h> #include <libcamera/formats.h> #include <libcamera/framebuffer.h> +#include <libcamera/property_ids.h> #include <libcamera/request.h> #include <libcamera/stream.h> #include <libcamera/transform.h> @@ -33,6 +35,8 @@ #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" +#include "libcamera/internal/converter/converter_v4l2_m2m.h" #include "libcamera/internal/delayed_controls.h" #include "libcamera/internal/device_enumerator.h" #include "libcamera/internal/framebuffer.h" @@ -94,6 +98,7 @@ public: } PipelineHandlerRkISP1 *pipe(); + const PipelineHandlerRkISP1 *pipe() const; int loadIPA(unsigned int hwRevision); Stream mainPathStream_; @@ -109,8 +114,10 @@ public: std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_; + ControlInfoMap ipaControls_; + private: - void paramFilled(unsigned int frame); + void paramsComputed(unsigned int frame, unsigned int bytesused); void setSensorControls(unsigned int frame, const ControlList &sensorControls); @@ -170,20 +177,24 @@ private: } friend RkISP1CameraData; + friend RkISP1CameraConfiguration; friend RkISP1Frames; int initLinks(Camera *camera, const CameraSensor *sensor, const RkISP1CameraConfiguration &config); int createCamera(MediaEntity *sensor); void tryCompleteRequest(RkISP1FrameInfo *info); - void bufferReady(FrameBuffer *buffer); - void paramReady(FrameBuffer *buffer); - void statReady(FrameBuffer *buffer); + void imageBufferReady(FrameBuffer *buffer); + void paramBufferReady(FrameBuffer *buffer); + void statBufferReady(FrameBuffer *buffer); + void dewarpBufferReady(FrameBuffer *buffer); void frameStart(uint32_t sequence); int allocateBuffers(Camera *camera); int freeBuffers(Camera *camera); + int updateControls(RkISP1CameraData *data); + MediaDevice *media_; std::unique_ptr<V4L2Subdevice> isp_; std::unique_ptr<V4L2VideoDevice> param_; @@ -196,6 +207,16 @@ private: RkISP1MainPath mainPath_; RkISP1SelfPath selfPath_; + std::unique_ptr<V4L2M2MConverter> dewarper_; + Rectangle scalerMaxCrop_; + bool useDewarper_; + + std::optional<Rectangle> activeCrop_; + + /* Internal buffers used when dewarper is being used */ + std::vector<std::unique_ptr<FrameBuffer>> mainPathBuffers_; + std::queue<FrameBuffer *> availableMainPathBuffers_; + std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_; std::vector<std::unique_ptr<FrameBuffer>> statBuffers_; std::queue<FrameBuffer *> availableParamBuffers_; @@ -218,6 +239,8 @@ RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *req FrameBuffer *paramBuffer = nullptr; FrameBuffer *statBuffer 
= nullptr; + FrameBuffer *mainPathBuffer = nullptr; + FrameBuffer *selfPathBuffer = nullptr; if (!isRaw) { if (pipe_->availableParamBuffers_.empty()) { @@ -235,10 +258,16 @@ RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *req statBuffer = pipe_->availableStatBuffers_.front(); pipe_->availableStatBuffers_.pop(); + + if (pipe_->useDewarper_) { + mainPathBuffer = pipe_->availableMainPathBuffers_.front(); + pipe_->availableMainPathBuffers_.pop(); + } } - FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_); - FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_); + if (!mainPathBuffer) + mainPathBuffer = request->findBuffer(&data->mainPathStream_); + selfPathBuffer = request->findBuffer(&data->selfPathStream_); RkISP1FrameInfo *info = new RkISP1FrameInfo; @@ -264,6 +293,7 @@ int RkISP1Frames::destroy(unsigned int frame) pipe_->availableParamBuffers_.push(info->paramBuffer); pipe_->availableStatBuffers_.push(info->statBuffer); + pipe_->availableMainPathBuffers_.push(info->mainPathBuffer); frameInfo_.erase(info->frame); @@ -279,6 +309,7 @@ void RkISP1Frames::clear() pipe_->availableParamBuffers_.push(info->paramBuffer); pipe_->availableStatBuffers_.push(info->statBuffer); + pipe_->availableMainPathBuffers_.push(info->mainPathBuffer); delete info; } @@ -334,6 +365,11 @@ PipelineHandlerRkISP1 *RkISP1CameraData::pipe() return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe()); } +const PipelineHandlerRkISP1 *RkISP1CameraData::pipe() const +{ + return static_cast<const PipelineHandlerRkISP1 *>(Camera::Private::pipe()); +} + int RkISP1CameraData::loadIPA(unsigned int hwRevision) { ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1); @@ -341,26 +377,12 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision) return -ENOENT; ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls); - ipa_->paramsBufferReady.connect(this, &RkISP1CameraData::paramFilled); + ipa_->paramsComputed.connect(this, &RkISP1CameraData::paramsComputed); ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady); - /* - * The API tuning file is made from the sensor name unless the - * environment variable overrides it. - */ - std::string ipaTuningFile; - char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE"); - if (!configFromEnv || *configFromEnv == '\0') { - ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml"); - /* - * If the tuning file isn't found, fall back to the - * 'uncalibrated' configuration file. - */ - if (ipaTuningFile.empty()) - ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml"); - } else { - ipaTuningFile = std::string(configFromEnv); - } + /* The IPA tuning file is made from the sensor name. 
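The two-step lookup removed here is now folded into configurationFile() taking a fallback name. A standalone sketch of the behaviour (the real helper searches libcamera's IPA configuration directories; the directory and file names below are hypothetical):

	#include <fstream>
	#include <iostream>
	#include <string>

	/* Return the primary file if it exists, else fall back. */
	std::string resolveTuningFile(const std::string &dir,
				      const std::string &primary,
				      const std::string &fallback)
	{
		std::string path = dir + "/" + primary;
		if (std::ifstream(path).good())
			return path;

		return dir + "/" + fallback;
	}

	int main()
	{
		/* Picks uncalibrated.yaml when no sensor-specific tuning exists. */
		std::cout << resolveTuningFile("/usr/share/libcamera/ipa/rkisp1",
					       "imx219.yaml", "uncalibrated.yaml")
			  << std::endl;
		return 0;
	}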
*/ + std::string ipaTuningFile = + ipa_->configurationFile(sensor_->model() + ".yaml", "uncalibrated.yaml"); IPACameraSensorInfo sensorInfo{}; int ret = sensor_->sensorInfo(&sensorInfo); @@ -370,7 +392,7 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision) } ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision, - sensorInfo, sensor_->controls(), &controlInfo_); + sensorInfo, sensor_->controls(), &ipaControls_); if (ret < 0) { LOG(RkISP1, Error) << "IPA initialization failure"; return ret; @@ -379,15 +401,14 @@ int RkISP1CameraData::loadIPA(unsigned int hwRevision) return 0; } -void RkISP1CameraData::paramFilled(unsigned int frame) +void RkISP1CameraData::paramsComputed(unsigned int frame, unsigned int bytesused) { PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe(); RkISP1FrameInfo *info = frameInfo_.find(frame); if (!info) return; - info->paramBuffer->_d()->metadata().planes()[0].bytesused = - sizeof(struct rkisp1_params_cfg); + info->paramBuffer->_d()->metadata().planes()[0].bytesused = bytesused; pipe->param_->queueBuffer(info->paramBuffer); pipe->stat_->queueBuffer(info->statBuffer); @@ -454,11 +475,12 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg) StreamConfiguration config; config = cfg; - if (data_->mainPath_->validate(sensor, &config) != Valid) + if (data_->mainPath_->validate(sensor, sensorConfig, &config) != Valid) return false; config = cfg; - if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid) + if (data_->selfPath_ && + data_->selfPath_->validate(sensor, sensorConfig, &config) != Valid) return false; return true; @@ -466,6 +488,7 @@ bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg) CameraConfiguration::Status RkISP1CameraConfiguration::validate() { + const PipelineHandlerRkISP1 *pipe = data_->pipe(); const CameraSensor *sensor = data_->sensor_.get(); unsigned int pathCount = data_->selfPath_ ? 2 : 1; Status status; @@ -475,6 +498,27 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate() status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace); + /* + * Make sure that if a sensor configuration has been requested, it + * is valid. + */ + if (sensorConfig) { + if (!sensorConfig->isValid()) { + LOG(RkISP1, Error) + << "Invalid sensor configuration request"; + + return Invalid; + } + + unsigned int bitDepth = sensorConfig->bitDepth; + if (bitDepth != 8 && bitDepth != 10 && bitDepth != 12) { + LOG(RkISP1, Error) + << "Invalid sensor configuration bit depth"; + + return Invalid; + } + } + /* Cap the number of entries to the available streams. */ if (config_.size() > pathCount) { config_.resize(pathCount); @@ -501,6 +545,18 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate() } } + bool useDewarper = false; + if (pipe->dewarper_) { + /* + * Platforms with dewarper support, such as i.MX8MP, support + * only a single stream. It is thus enough to inspect config_[0] + * here. + */ + bool isRaw = PixelFormatInfo::info(config_[0].pixelFormat).colourEncoding == + PixelFormatInfo::ColourEncodingRAW; + if (!isRaw) + useDewarper = true; + } + /* * If there is more than one stream in the configuration, figure out the * order in which to evaluate the streams.
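
As an aside on the sensorConfig checks in the hunk above: the bit depth and output size come from libcamera's public SensorConfiguration, which an application fills in before validating its camera configuration. A minimal caller-side sketch, with invented values (not part of this patch):

#include <libcamera/camera.h>

/* Request a specific sensor readout; validate() rejects it if unsatisfiable. */
void requestRawMode(libcamera::CameraConfiguration &config)
{
        libcamera::SensorConfiguration sensorConfig;

        sensorConfig.bitDepth = 10;               /* rkisp1 accepts 8, 10 or 12 */
        sensorConfig.outputSize = { 1920, 1080 }; /* example sensor output size */

        config.sensorConfig = sensorConfig;
}
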
The first stream has the highest @@ -513,50 +569,72 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate() if (config_.size() == 2 && fitsAllPaths(config_[0])) std::reverse(order.begin(), order.end()); + /* + * Validate the configuration against the desired path and, if the + * platform supports it, the dewarper. + */ + auto validateConfig = [&](StreamConfiguration &cfg, RkISP1Path *path, + Stream *stream, Status expectedStatus) { + StreamConfiguration tryCfg = cfg; + + Status ret = path->validate(sensor, sensorConfig, &tryCfg); + if (ret == Invalid) + return false; + + if (!useDewarper && + (expectedStatus == Valid && ret == Adjusted)) + return false; + + if (useDewarper) { + bool adjusted; + + pipe->dewarper_->validateOutput(&tryCfg, &adjusted, + Converter::Alignment::Down); + if (expectedStatus == Valid && adjusted) + return false; + } + + cfg = tryCfg; + cfg.setStream(stream); + return true; + }; + bool mainPathAvailable = true; bool selfPathAvailable = data_->selfPath_; + RkISP1Path *mainPath = data_->mainPath_; + RkISP1Path *selfPath = data_->selfPath_; + Stream *mainPathStream = const_cast<Stream *>(&data_->mainPathStream_); + Stream *selfPathStream = const_cast<Stream *>(&data_->selfPathStream_); for (unsigned int index : order) { StreamConfiguration &cfg = config_[index]; /* Try to match stream without adjusting configuration. */ if (mainPathAvailable) { - StreamConfiguration tryCfg = cfg; - if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) { + if (validateConfig(cfg, mainPath, mainPathStream, Valid)) { mainPathAvailable = false; - cfg = tryCfg; - cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_)); continue; } } if (selfPathAvailable) { - StreamConfiguration tryCfg = cfg; - if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) { + if (validateConfig(cfg, selfPath, selfPathStream, Valid)) { selfPathAvailable = false; - cfg = tryCfg; - cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_)); continue; } } /* Try to match stream allowing adjusting configuration. 
*/ if (mainPathAvailable) { - StreamConfiguration tryCfg = cfg; - if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) { + if (validateConfig(cfg, mainPath, mainPathStream, Adjusted)) { mainPathAvailable = false; - cfg = tryCfg; - cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_)); status = Adjusted; continue; } } if (selfPathAvailable) { - StreamConfiguration tryCfg = cfg; - if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) { + if (validateConfig(cfg, selfPath, selfPathStream, Adjusted)) { selfPathAvailable = false; - cfg = tryCfg; - cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_)); status = Adjusted; continue; } @@ -590,7 +668,8 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate() [](const auto &value) { return value.second; }); } - sensorFormat_ = sensor->getFormat(mbusCodes, maxSize); + sensorFormat_ = sensor->getFormat(mbusCodes, maxSize, + mainPath->maxResolution()); if (sensorFormat_.size.isNull()) sensorFormat_.size = sensor->resolution(); @@ -603,7 +682,7 @@ CameraConfiguration::Status RkISP1CameraConfiguration::validate() */ PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager) - : PipelineHandler(manager), hasSelfPath_(true) + : PipelineHandler(manager), hasSelfPath_(true), useDewarper_(false) { } @@ -730,7 +809,13 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c) V4L2SubdeviceFormat format = config->sensorFormat(); LOG(RkISP1, Debug) << "Configuring sensor with " << format; - ret = sensor->setFormat(&format, config->combinedTransform()); + if (config->sensorConfig) + ret = sensor->applyConfiguration(*config->sensorConfig, + config->combinedTransform(), + &format); + else + ret = sensor->setFormat(&format, config->combinedTransform()); + if (ret < 0) return ret; @@ -746,28 +831,48 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c) if (ret < 0) return ret; - Rectangle rect(0, 0, format.size); - ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &rect); + Rectangle inputCrop(0, 0, format.size); + ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &inputCrop); if (ret < 0) return ret; LOG(RkISP1, Debug) << "ISP input pad configured with " << format - << " crop " << rect; + << " crop " << inputCrop; + Rectangle outputCrop = inputCrop; const PixelFormat &streamFormat = config->at(0).pixelFormat; const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat); isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW; + useDewarper_ = dewarper_ && !isRaw_; /* YUYV8_2X8 is required on the ISP source path pad for YUV output. */ if (!isRaw_) format.code = MEDIA_BUS_FMT_YUYV8_2X8; + /* + * On devices without DUAL_CROP (like the imx8mp) cropping needs to be + * done on the ISP/IS output. + */ + if (media_->hwRevision() == RKISP1_V_IMX8MP) { + /* imx8mp has only a single path. 
*/ + const auto &cfg = config->at(0); + Size ispCrop = format.size.boundedToAspectRatio(cfg.size); + if (useDewarper_) + ispCrop = dewarper_->adjustInputSize(cfg.pixelFormat, + ispCrop); + else + ispCrop.alignUpTo(2, 2); + + outputCrop = ispCrop.centeredTo(Rectangle(format.size).center()); + format.size = ispCrop; + } + LOG(RkISP1, Debug) << "Configuring ISP output pad with " << format - << " crop " << rect; + << " crop " << outputCrop; - ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &rect); + ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &outputCrop); if (ret < 0) return ret; @@ -778,15 +883,37 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c) LOG(RkISP1, Debug) << "ISP output pad configured with " << format - << " crop " << rect; + << " crop " << outputCrop; + + IPACameraSensorInfo sensorInfo; + ret = data->sensor_->sensorInfo(&sensorInfo); + if (ret) + return ret; std::map<unsigned int, IPAStream> streamConfig; + std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs; for (const StreamConfiguration &cfg : *config) { if (cfg.stream() == &data->mainPathStream_) { ret = mainPath_.configure(cfg, format); streamConfig[0] = IPAStream(cfg.pixelFormat, cfg.size); + /* Configure dewarp */ + if (dewarper_ && !isRaw_) { + outputCfgs.push_back(const_cast<StreamConfiguration &>(cfg)); + ret = dewarper_->configure(cfg, outputCfgs); + if (ret) + return ret; + + /* + * Calculate the crop rectangle of the data + * flowing into the dewarper in sensor + * coordinates. + */ + scalerMaxCrop_ = + outputCrop.transformedBetween(inputCrop, + sensorInfo.analogCrop); + } } else if (hasSelfPath_) { ret = selfPath_.configure(cfg, format); streamConfig[1] = IPAStream(cfg.pixelFormat, @@ -800,7 +927,7 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c) } V4L2DeviceFormat paramFormat; - paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS); + paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_EXT_PARAMS); ret = param_->setFormat(¶mFormat); if (ret) return ret; @@ -812,20 +939,17 @@ int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c) return ret; /* Inform IPA of stream configuration and sensor controls. */ - ipa::rkisp1::IPAConfigInfo ipaConfig{}; - - ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo); - if (ret) - return ret; + ipa::rkisp1::IPAConfigInfo ipaConfig{ sensorInfo, + data->sensor_->controls(), + paramFormat.fourcc }; - ipaConfig.sensorControls = data->sensor_->controls(); - - ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_); + ret = data->ipa_->configure(ipaConfig, streamConfig, &data->ipaControls_); if (ret) { LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")"; return ret; } - return 0; + + return updateControls(data); } int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream, @@ -834,10 +958,19 @@ int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, S RkISP1CameraData *data = cameraData(camera); unsigned int count = stream->configuration().bufferCount; - if (stream == &data->mainPathStream_) - return mainPath_.exportBuffers(count, buffers); - else if (hasSelfPath_ && stream == &data->selfPathStream_) + if (stream == &data->mainPathStream_) { + /* + * Currently, i.MX8MP is the only platform with DW100 dewarper. + * It has mainpath and no self path. Hence, export buffers from + * dewarper just for the main path stream, for now. 
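
The i.MX8MP branch above sizes the ISP output crop with libcamera's geometry helpers. A standalone sketch of that arithmetic, using made-up sizes:

#include <libcamera/geometry.h>

using namespace libcamera;

/* Bound the ISP output to the stream's aspect ratio and centre the crop. */
Rectangle exampleIspCrop()
{
        Size ispOutput(1920, 1440);  /* example ISP output pad size */
        Size streamSize(1920, 1080); /* example requested stream size */

        Size crop = ispOutput.boundedToAspectRatio(streamSize);
        crop.alignUpTo(2, 2); /* hardware alignment, as in the non-dewarper path */

        return crop.centeredTo(Rectangle(ispOutput).center());
        /* -> (0, 180)/1920x1080 inside the 1920x1440 output */
}
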
+ */ + if (useDewarper_) + return dewarper_->exportBuffers(&data->mainPathStream_, count, buffers); + else + return mainPath_.exportBuffers(count, buffers); + } else if (hasSelfPath_ && stream == &data->selfPathStream_) { return selfPath_.exportBuffers(count, buffers); + } return -EINVAL; } @@ -863,6 +996,16 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera) goto error; } + /* If the dewarper is being used, allocate internal buffers for ISP. */ + if (useDewarper_) { + ret = mainPath_.exportBuffers(maxCount, &mainPathBuffers_); + if (ret < 0) + goto error; + + for (std::unique_ptr<FrameBuffer> &buffer : mainPathBuffers_) + availableMainPathBuffers_.push(buffer.get()); + } + for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) { buffer->setCookie(ipaBufferId++); data->ipaBuffers_.emplace_back(buffer->cookie(), @@ -884,6 +1027,7 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera) error: paramBuffers_.clear(); statBuffers_.clear(); + mainPathBuffers_.clear(); return ret; } @@ -898,8 +1042,12 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera) while (!availableParamBuffers_.empty()) availableParamBuffers_.pop(); + while (!availableMainPathBuffers_.empty()) + availableMainPathBuffers_.pop(); + paramBuffers_.clear(); statBuffers_.clear(); + mainPathBuffers_.clear(); std::vector<unsigned int> ids; for (IPABuffer &ipabuf : data->ipaBuffers_) @@ -920,71 +1068,71 @@ int PipelineHandlerRkISP1::freeBuffers(Camera *camera) int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls) { RkISP1CameraData *data = cameraData(camera); + utils::ScopeExitActions actions; int ret; /* Allocate buffers for internal pipeline usage. */ ret = allocateBuffers(camera); if (ret) return ret; + actions += [&]() { freeBuffers(camera); }; ret = data->ipa_->start(); if (ret) { - freeBuffers(camera); LOG(RkISP1, Error) << "Failed to start IPA " << camera->id(); return ret; } + actions += [&]() { data->ipa_->stop(); }; data->frame_ = 0; if (!isRaw_) { ret = param_->streamOn(); if (ret) { - data->ipa_->stop(); - freeBuffers(camera); LOG(RkISP1, Error) << "Failed to start parameters " << camera->id(); return ret; } + actions += [&]() { param_->streamOff(); }; ret = stat_->streamOn(); if (ret) { - param_->streamOff(); - data->ipa_->stop(); - freeBuffers(camera); LOG(RkISP1, Error) << "Failed to start statistics " << camera->id(); return ret; } + actions += [&]() { stat_->streamOff(); }; + + if (useDewarper_) { + ret = dewarper_->start(); + if (ret) { + LOG(RkISP1, Error) << "Failed to start dewarper"; + return ret; + } + actions += [&]() { dewarper_->stop(); }; + } } if (data->mainPath_->isEnabled()) { ret = mainPath_.start(); - if (ret) { - param_->streamOff(); - stat_->streamOff(); - data->ipa_->stop(); - freeBuffers(camera); + if (ret) return ret; - } + actions += [&]() { mainPath_.stop(); }; } if (hasSelfPath_ && data->selfPath_->isEnabled()) { ret = selfPath_.start(); - if (ret) { - mainPath_.stop(); - param_->streamOff(); - stat_->streamOff(); - data->ipa_->stop(); - freeBuffers(camera); + if (ret) return ret; - } } isp_->setFrameStartEnabled(true); activeCamera_ = camera; - return ret; + + actions.release(); + return 0; } void PipelineHandlerRkISP1::stopDevice(Camera *camera) @@ -1010,6 +1158,9 @@ void PipelineHandlerRkISP1::stopDevice(Camera *camera) if (ret) LOG(RkISP1, Warning) << "Failed to stop parameters for " << camera->id(); + + if (useDewarper_) + dewarper_->stop(); } ASSERT(data->queuedRequests_.empty()); @@ -1036,8 +1187,8 @@ int 
PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request) if (data->selfPath_ && info->selfPathBuffer) data->selfPath_->queueBuffer(info->selfPathBuffer); } else { - data->ipa_->fillParamsBuffer(data->frame_, - info->paramBuffer->cookie()); + data->ipa_->computeParams(data->frame_, + info->paramBuffer->cookie()); } data->frame_++; @@ -1101,6 +1252,58 @@ int PipelineHandlerRkISP1::initLinks(Camera *camera, return 0; } +/** + * \brief Update the camera controls + * \param[in] data The camera data + * + * Compute the camera controls by calculating controls which the pipeline + * is responsible for and merging them with the controls computed by the IPA. + * + * This function needs data->ipaControls_ to be refreshed when a new + * configuration is applied to the camera by the IPA configure() function. + * + * Always call this function after IPA configure() to make sure to have a + * properly refreshed IPA controls list. + * + * \return 0 on success or a negative error code otherwise + */ +int PipelineHandlerRkISP1::updateControls(RkISP1CameraData *data) +{ + ControlInfoMap::Map controls; + + if (dewarper_) { + std::pair<Rectangle, Rectangle> cropLimits; + if (dewarper_->isConfigured(&data->mainPathStream_)) + cropLimits = dewarper_->inputCropBounds(&data->mainPathStream_); + else + cropLimits = dewarper_->inputCropBounds(); + + /* + * ScalerCrop is specified to be in Sensor coordinates. + * So we need to transform the limits to sensor coordinates. + * We can safely assume that the maximum crop limit contains the + * full fov of the dewarper. + */ + Rectangle min = cropLimits.first.transformedBetween(cropLimits.second, + scalerMaxCrop_); + + controls[&controls::ScalerCrop] = ControlInfo(min, + scalerMaxCrop_, + scalerMaxCrop_); + data->properties_.set(properties::ScalerCropMaximum, scalerMaxCrop_); + activeCrop_ = scalerMaxCrop_; + } + + /* Add the IPA registered controls to the list of camera controls. */ + for (const auto &ipaControl : data->ipaControls_) + controls[ipaControl.first] = ipaControl.second; + + data->controlInfo_ = ControlInfoMap(std::move(controls), + controls::controls); + + return 0; +} + int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor) { int ret; @@ -1109,22 +1312,19 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor) std::make_unique<RkISP1CameraData>(this, &mainPath_, hasSelfPath_ ? &selfPath_ : nullptr); - data->sensor_ = std::make_unique<CameraSensor>(sensor); - ret = data->sensor_->init(); - if (ret) - return ret; + data->sensor_ = CameraSensorFactoryBase::create(sensor); + if (!data->sensor_) + return -ENODEV; /* Initialize the camera properties. */ data->properties_ = data->sensor_->properties(); - /* - * \todo Read dealy values from the sensor itself or from a - * a sensor database. For now use generic values taken from - * the Raspberry Pi and listed as generic values.
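
updateControls() above relies on Rectangle::transformedBetween() to re-express the dewarper crop bounds in sensor coordinates. A self-contained sketch of what that mapping does, with example rectangles:

#include <libcamera/geometry.h>

using namespace libcamera;

Rectangle exampleToSensorCoordinates()
{
        Rectangle dewarperFrame(0, 0, 1920, 1080); /* frame the dewarper sees */
        Rectangle sensorFrame(8, 8, 3840, 2160);   /* sensor analog crop */

        /* A crop expressed relative to the dewarper frame... */
        Rectangle crop(480, 270, 960, 540);

        /* ...scaled and translated into the sensor frame of reference. */
        return crop.transformedBetween(dewarperFrame, sensorFrame);
        /* -> (968, 548)/1920x1080 */
}
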
- */ + scalerMaxCrop_ = Rectangle(data->sensor_->resolution()); + + const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays(); std::unordered_map<uint32_t, DelayedControls::ControlParams> params = { - { V4L2_CID_ANALOGUE_GAIN, { 1, false } }, - { V4L2_CID_EXPOSURE, { 2, false } }, + { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } }, + { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } }, }; data->delayedCtrls_ = @@ -1137,6 +1337,8 @@ int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor) if (ret) return ret; + updateControls(data.get()); + std::set<Stream *> streams{ &data->mainPathStream_, &data->selfPathStream_, @@ -1209,11 +1411,34 @@ bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator) if (hasSelfPath_ && !selfPath_.init(media_)) return false; - mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady); + mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady); if (hasSelfPath_) - selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady); - stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady); - param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady); + selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::imageBufferReady); + stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statBufferReady); + param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramBufferReady); + + /* If dewarper is present, create its instance. */ + DeviceMatch dwp("dw100"); + dwp.add("dw100-source"); + dwp.add("dw100-sink"); + + std::shared_ptr<MediaDevice> dwpMediaDevice = enumerator->search(dwp); + if (dwpMediaDevice) { + dewarper_ = std::make_unique<V4L2M2MConverter>(dwpMediaDevice.get()); + if (dewarper_->isValid()) { + dewarper_->outputBufferReady.connect( + this, &PipelineHandlerRkISP1::dewarpBufferReady); + + LOG(RkISP1, Info) + << "Using DW100 dewarper " << dewarper_->deviceNode(); + } else { + LOG(RkISP1, Warning) + << "Found DW100 dewarper " << dewarper_->deviceNode() + << " but invalid"; + + dewarper_.reset(); + } + } /* * Enumerate all sensors connected to the ISP and create one @@ -1251,7 +1476,7 @@ void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info) completeRequest(request); } -void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer) +void PipelineHandlerRkISP1::imageBufferReady(FrameBuffer *buffer) { ASSERT(activeCamera_); RkISP1CameraData *data = cameraData(activeCamera_); @@ -1261,7 +1486,7 @@ void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer) return; const FrameMetadata &metadata = buffer->metadata(); - Request *request = buffer->request(); + Request *request = info->request; if (metadata.status != FrameMetadata::FrameCancelled) { /* @@ -1276,18 +1501,102 @@ void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer) if (isRaw_) { const ControlList &ctrls = data->delayedCtrls_->get(metadata.sequence); - data->ipa_->processStatsBuffer(info->frame, 0, ctrls); + data->ipa_->processStats(info->frame, 0, ctrls); } } else { if (isRaw_) info->metadataProcessed = true; } + if (!useDewarper_) { + completeBuffer(request, buffer); + tryCompleteRequest(info); + + return; + } + + /* Do not queue cancelled frames to dewarper. */ + if (metadata.status == FrameMetadata::FrameCancelled) { + /* + * i.MX8MP is the only known platform with dewarper. It has + * no self path. Hence, only main path buffer completion is + * required. 
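
The createCamera() hunk above drops the hardcoded gain and exposure delays in favour of per-sensor values from CameraSensorProperties. For context, a sketch of how such delays feed DelayedControls (internal API; the delay values below are examples only):

#include <memory>
#include <unordered_map>

#include <linux/v4l2-controls.h>

#include "libcamera/internal/delayed_controls.h"

using namespace libcamera;

/* A control written at frame N takes effect at frame N + delay. */
std::unique_ptr<DelayedControls> makeDelayedControls(V4L2Device *sensorDev)
{
        std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
                { V4L2_CID_ANALOGUE_GAIN, { 1, false } }, /* example delay */
                { V4L2_CID_EXPOSURE, { 2, false } },      /* example delay */
        };

        return std::make_unique<DelayedControls>(sensorDev, params);
}
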
+ * + * Also, we cannot completeBuffer(request, buffer) as the buffer + * here is an internal buffer (between ISP and dewarper) and + * is not associated with any specific request. The request + * buffer associated with the main path stream is the one that + * must be completed (not the internal buffer). + */ + for (auto it : request->buffers()) { + if (it.first == &data->mainPathStream_) + completeBuffer(request, it.second); + } + + tryCompleteRequest(info); + return; + } + + /* Handle scaler crop control. */ + const auto &crop = request->controls().get(controls::ScalerCrop); + if (crop) { + Rectangle rect = crop.value(); + + /* + * ScalerCrop is specified to be in Sensor coordinates. + * So we need to transform it into dewarper coordinates. + * We can safely assume that the maximum crop limit contains the + * full fov of the dewarper. + */ + std::pair<Rectangle, Rectangle> cropLimits = + dewarper_->inputCropBounds(&data->mainPathStream_); + + rect = rect.transformedBetween(scalerMaxCrop_, cropLimits.second); + int ret = dewarper_->setInputCrop(&data->mainPathStream_, + &rect); + rect = rect.transformedBetween(cropLimits.second, scalerMaxCrop_); + if (!ret && rect != crop.value()) { + /* + * If the rectangle is changed by setInputCrop on the + * dewarper, log a debug message and cache the actual + * applied rectangle for metadata reporting. + */ + LOG(RkISP1, Debug) + << "Applied rectangle " << rect.toString() + << " differs from requested " << crop.value().toString(); + } + + activeCrop_ = rect; + } + + /* + * Queue input and output buffers to the dewarper. The output + * buffers for the dewarper are the buffers of the request, supplied + * by the application. + */ + int ret = dewarper_->queueBuffers(buffer, request->buffers()); + if (ret < 0) + LOG(RkISP1, Error) << "Cannot queue buffers to dewarper: " + << strerror(-ret); + + request->metadata().set(controls::ScalerCrop, activeCrop_.value()); +} + +void PipelineHandlerRkISP1::dewarpBufferReady(FrameBuffer *buffer) +{ + ASSERT(activeCamera_); + RkISP1CameraData *data = cameraData(activeCamera_); + Request *request = buffer->request(); + + RkISP1FrameInfo *info = data->frameInfo_.find(buffer->request()); + if (!info) + return; + completeBuffer(request, buffer); tryCompleteRequest(info); } -void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer) +void PipelineHandlerRkISP1::paramBufferReady(FrameBuffer *buffer) { ASSERT(activeCamera_); RkISP1CameraData *data = cameraData(activeCamera_); @@ -1300,7 +1609,7 @@ void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer) tryCompleteRequest(info); } -void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer) +void PipelineHandlerRkISP1::statBufferReady(FrameBuffer *buffer) { ASSERT(activeCamera_); RkISP1CameraData *data = cameraData(activeCamera_); @@ -1318,8 +1627,8 @@ void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer) if (data->frame_ <= buffer->metadata().sequence) data->frame_ = buffer->metadata().sequence + 1; - data->ipa_->processStatsBuffer(info->frame, info->statBuffer->cookie(), - data->delayedCtrls_->get(buffer->metadata().sequence)); + data->ipa_->processStats(info->frame, info->statBuffer->cookie(), + data->delayedCtrls_->get(buffer->metadata().sequence)); } REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1") diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp index c49017d1..eee5b09e 100644 --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp @@ -54,11
+54,8 @@ const std::map<PixelFormat, uint32_t> formatToMediaBus = { } /* namespace */ -RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats, - const Size &minResolution, const Size &maxResolution) - : name_(name), running_(false), formats_(formats), - minResolution_(minResolution), maxResolution_(maxResolution), - link_(nullptr) +RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats) + : name_(name), running_(false), formats_(formats), link_(nullptr) { } @@ -126,12 +123,50 @@ void RkISP1Path::populateFormats() } } +/** + * \brief Filter the sensor resolutions that can be supported + * \param[in] sensor The camera sensor + * + * This function retrieves all the sizes supported by the sensor and + * keeps only the resolutions that the pipeline can support. + * It is possible that the sensor's maximum output resolution is higher + * than the ISP maximum input. In that case, this function filters out all + * the resolutions that cannot be supported and returns the largest + * sensor resolution that the pipeline can handle. + * + * \return The maximum sensor size supported by the pipeline + */ +Size RkISP1Path::filterSensorResolution(const CameraSensor *sensor) +{ + auto iter = sensorSizesMap_.find(sensor); + if (iter != sensorSizesMap_.end()) + return iter->second.back(); + + std::vector<Size> &sizes = sensorSizesMap_[sensor]; + for (unsigned int code : sensor->mbusCodes()) { + for (const Size &size : sensor->sizes(code)) { + if (size.width > maxResolution_.width || + size.height > maxResolution_.height) + continue; + + sizes.push_back(size); + } + } + + /* Sort in increasing order and remove duplicates. */ + std::sort(sizes.begin(), sizes.end()); + auto last = std::unique(sizes.begin(), sizes.end()); + sizes.erase(last, sizes.end()); + + return sizes.back(); +} + StreamConfiguration RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size, StreamRole role) { const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes(); - const Size &resolution = sensor->resolution(); + Size resolution = filterSensorResolution(sensor); /* Min and max resolutions to populate the available stream formats. */ Size maxResolution = maxResolution_.boundedToAspectRatio(resolution) @@ -216,11 +251,13 @@ RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size, return cfg; } -CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor, - StreamConfiguration *cfg) +CameraConfiguration::Status +RkISP1Path::validate(const CameraSensor *sensor, + const std::optional<SensorConfiguration> &sensorConfig, + StreamConfiguration *cfg) { const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes(); - const Size &resolution = sensor->resolution(); + Size resolution = filterSensorResolution(sensor); const StreamConfiguration reqCfg = *cfg; CameraConfiguration::Status status = CameraConfiguration::Valid; @@ -247,9 +284,14 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor, continue; /* - * Store the raw format with the highest bits per pixel - * for later usage. + * If a bits-per-pixel value is supplied in the sensor + * configuration, choose a raw format that complies with + * it. Otherwise, store the raw format with the highest + * bits per pixel for later usage.
*/ + if (sensorConfig && info.bitsPerPixel != sensorConfig->bitDepth) + continue; + if (info.bitsPerPixel > rawBitsPerPixel) { rawBitsPerPixel = info.bitsPerPixel; rawFormat = format; @@ -262,6 +304,9 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor, } } + if (sensorConfig && !rawFormat.isValid()) + return CameraConfiguration::Invalid; + bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding == PixelFormatInfo::ColourEncodingRAW; @@ -281,14 +326,48 @@ CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor, if (isRaw) { /* * Use the sensor output size closest to the requested stream - * size. + * size while ensuring the output size doesn't exceed ISP limits. + * + * As 'resolution' is the largest sensor resolution + * supported by the ISP, CameraSensor::getFormat() will never + * return a V4L2SubdeviceFormat with a larger size. */ uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat); + cfg->size.boundTo(resolution); + + Size rawSize = sensorConfig ? sensorConfig->outputSize + : cfg->size; + V4L2SubdeviceFormat sensorFormat = - sensor->getFormat({ mbusCode }, cfg->size); + sensor->getFormat({ mbusCode }, rawSize); + + if (sensorConfig && + sensorConfig->outputSize != sensorFormat.size) + return CameraConfiguration::Invalid; minResolution = sensorFormat.size; maxResolution = sensorFormat.size; + } else if (sensorConfig) { + /* + * We have already ensured that 'rawFormat' has a bit depth + * matching sensorConfig.bitDepth; hence, only validate the + * sensorConfig's output size here. + */ + Size sensorSize = sensorConfig->outputSize; + + if (sensorSize > resolution) + return CameraConfiguration::Invalid; + + uint32_t mbusCode = formatToMediaBus.at(rawFormat); + V4L2SubdeviceFormat sensorFormat = + sensor->getFormat({ mbusCode }, sensorSize); + + if (sensorFormat.size != sensorSize) + return CameraConfiguration::Invalid; + + minResolution = minResolution_.expandedToAspectRatio(sensorSize); + maxResolution = maxResolution_.boundedTo(sensorSize) + .boundedToAspectRatio(sensorSize); } else { /* * Adjust the size based on the sensor resolution and absolute @@ -338,11 +417,14 @@ int RkISP1Path::configure(const StreamConfiguration &config, /* * Crop on the resizer input to maintain FOV before downscaling. * - * \todo The alignment to a multiple of 2 pixels is required but may - * change the aspect ratio very slightly. A more advanced algorithm to - * compute the resizer input crop rectangle is needed, and it should - * also take into account the need to crop away the edge pixels affected - * by the ISP processing blocks. + * Note that this does not apply to devices without DUAL_CROP support + * (like imx8mp), where the cropping needs to be done on the + * ImageStabilizer block on the ISP source pad and therefore is + * configured before this stage. For simplicity we still set the crop. + * This gets ignored by the kernel driver because the hardware is + * missing the capability. + * + * Alignment to a multiple of 2 pixels is required by the resizer. */ Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size) .alignedUpTo(2, 2); @@ -435,12 +517,10 @@ void RkISP1Path::stop() } /* - * \todo Remove the hardcoded resolutions and formats once all users will have - * migrated to a recent enough kernel. + * \todo Remove the hardcoded formats once all users have migrated to a + * recent enough kernel.
*/ namespace { -constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 }; -constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 }; constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{ formats::YUYV, formats::NV16, @@ -462,8 +542,6 @@ constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{ formats::SRGGB12, }; -constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 }; -constexpr Size RKISP1_RSZ_SP_SRC_MAX{ 1920, 1920 }; constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{ formats::YUYV, formats::NV16, @@ -477,14 +555,12 @@ constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{ } /* namespace */ RkISP1MainPath::RkISP1MainPath() - : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS, - RKISP1_RSZ_MP_SRC_MIN, RKISP1_RSZ_MP_SRC_MAX) + : RkISP1Path("main", RKISP1_RSZ_MP_FORMATS) { } RkISP1SelfPath::RkISP1SelfPath() - : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS, - RKISP1_RSZ_SP_SRC_MIN, RKISP1_RSZ_SP_SRC_MAX) + : RkISP1Path("self", RKISP1_RSZ_SP_FORMATS) { } diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h index 08edefec..2a1ef0ab 100644 --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h @@ -7,6 +7,7 @@ #pragma once +#include <map> #include <memory> #include <set> #include <vector> @@ -25,6 +26,7 @@ namespace libcamera { class CameraSensor; class MediaDevice; +class SensorConfiguration; class V4L2Subdevice; struct StreamConfiguration; struct V4L2SubdeviceFormat; @@ -32,8 +34,7 @@ struct V4L2SubdeviceFormat; class RkISP1Path { public: - RkISP1Path(const char *name, const Span<const PixelFormat> &formats, - const Size &minResolution, const Size &maxResolution); + RkISP1Path(const char *name, const Span<const PixelFormat> &formats); bool init(MediaDevice *media); @@ -44,6 +45,7 @@ public: const Size &resolution, StreamRole role); CameraConfiguration::Status validate(const CameraSensor *sensor, + const std::optional<SensorConfiguration> &sensorConfig, StreamConfiguration *cfg); int configure(const StreamConfiguration &config, @@ -60,9 +62,11 @@ public: int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); } Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; } + const Size &maxResolution() const { return maxResolution_; } private: void populateFormats(); + Size filterSensorResolution(const CameraSensor *sensor); static constexpr unsigned int RKISP1_BUFFER_COUNT = 4; @@ -77,6 +81,12 @@ private: std::unique_ptr<V4L2Subdevice> resizer_; std::unique_ptr<V4L2VideoDevice> video_; MediaLink *link_; + + /* + * Map from camera sensors to the sizes (in increasing order), + * which are guaranteed to be supported by the pipeline. 
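
filterSensorResolution() shown earlier fills this cache by dropping sizes beyond the ISP input limit, then sorting and deduplicating. The core pattern in isolation, with the size list and limit as placeholders:

#include <algorithm>
#include <vector>

#include <libcamera/geometry.h>

using libcamera::Size;

/* Keep only sizes the ISP can ingest and return the largest of them. */
Size maxSupportedSize(std::vector<Size> sizes, const Size &ispMax)
{
        auto tooBig = [&](const Size &s) {
                return s.width > ispMax.width || s.height > ispMax.height;
        };
        sizes.erase(std::remove_if(sizes.begin(), sizes.end(), tooBig),
                    sizes.end());

        /* Sort in increasing order and remove duplicates, as the cache does. */
        std::sort(sizes.begin(), sizes.end());
        sizes.erase(std::unique(sizes.begin(), sizes.end()), sizes.end());

        return sizes.back(); /* caller must guarantee a non-empty list */
}
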
+ */ + std::map<const CameraSensor *, std::vector<Size>> sensorSizesMap_; }; class RkISP1MainPath : public RkISP1Path diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build index 8fb7e823..b2b1a0a6 100644 --- a/src/libcamera/pipeline/rpi/common/meson.build +++ b/src/libcamera/pipeline/rpi/common/meson.build @@ -1,6 +1,6 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'delayed_controls.cpp', 'pipeline_base.cpp', 'rpi_stream.cpp', diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp index 3041fd1e..1f13e523 100644 --- a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp +++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp @@ -105,7 +105,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_ Status status = Valid; yuvColorSpace_.reset(); - for (auto cfg : config_) { + for (auto &cfg : config_) { /* First fix up raw streams to have the "raw" colour space. */ if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) { /* If there was no value here, that doesn't count as "adjusted". */ @@ -130,7 +130,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_ rgbColorSpace_->range = ColorSpace::Range::Full; /* Go through the streams again and force everyone to the same colour space. */ - for (auto cfg : config_) { + for (auto &cfg : config_) { if (cfg.colorSpace == ColorSpace::Raw) continue; @@ -181,12 +181,14 @@ CameraConfiguration::Status RPiCameraConfiguration::validate() rawStreams_.clear(); outStreams_.clear(); + unsigned int rawStreamIndex = 0; + unsigned int outStreamIndex = 0; - for (const auto &[index, cfg] : utils::enumerate(config_)) { + for (auto &cfg : config_) { if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) - rawStreams_.emplace_back(index, &cfg); + rawStreams_.emplace_back(rawStreamIndex++, &cfg); else - outStreams_.emplace_back(index, &cfg); + outStreams_.emplace_back(outStreamIndex++, &cfg); } /* Sort the streams so the highest resolution is first. */ @@ -533,6 +535,7 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config) * Platform specific internal stream configuration. This also assigns * external streams which get configured below. */ + data->cropParams_.clear(); ret = data->platformConfigure(rpiConfig); if (ret) return ret; @@ -545,12 +548,6 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config) } /* - * Set the scaler crop to the value we are using (scaled to native sensor - * coordinates). - */ - data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_); - - /* * Update the ScalerCropMaximum to the correct value for this camera mode. * For us, it's the same as the "analogue crop". * @@ -567,9 +564,28 @@ int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config) for (auto const &c : result.controlInfo) ctrlMap.emplace(c.first, c.second); - /* Add the ScalerCrop control limits based on the current mode. */ - Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_)); - ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_); + const auto cropParamsIt = data->cropParams_.find(0); + if (cropParamsIt != data->cropParams_.end()) { + const CameraData::CropParams &cropParams = cropParamsIt->second; + /* + * Add the ScalerCrop control limits based on the current mode and + * the first configured stream. 
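
With the hunk above, the ScalerCrop limits published to applications now track the configured mode. On the application side, reading them back looks roughly like this (hypothetical helper, public API):

#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/geometry.h>

using namespace libcamera;

/* min()/max() bound the crop; def() is the current mode's default. */
Rectangle defaultScalerCrop(Camera *camera)
{
        const ControlInfo &info = camera->controls().at(&controls::ScalerCrop);

        return info.def().get<Rectangle>();
}
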
+ */ + Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(cropParams.ispMinCropSize)); + ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, + data->scaleIspCrop(cropParams.ispCrop)); + if (data->cropParams_.size() == 2) { + /* + * The control map for rpi::ScalerCrops has the min value + * as the default crop for stream 0, max value as the default + * value for stream 1. + */ + ctrlMap[&controls::rpi::ScalerCrops] = + ControlInfo(data->scaleIspCrop(data->cropParams_.at(0).ispCrop), + data->scaleIspCrop(data->cropParams_.at(1).ispCrop), + ctrlMap[&controls::ScalerCrop].def()); + } + } data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap()); @@ -774,13 +790,10 @@ int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &camera CameraData *data = cameraData.get(); int ret; - data->sensor_ = std::make_unique<CameraSensor>(sensorEntity); + data->sensor_ = CameraSensorFactoryBase::create(sensorEntity); if (!data->sensor_) return -EINVAL; - if (data->sensor_->init()) - return -EINVAL; - /* Populate the map of sensor supported formats and sizes. */ for (auto const mbusCode : data->sensor_->mbusCodes()) data->sensorFormats_.emplace(mbusCode, @@ -803,11 +816,12 @@ int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &camera * Setup our delayed control writer with the sensor default * gain and exposure delays. Mark VBLANK for priority write. */ + const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays(); std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = { - { V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } }, - { V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } }, - { V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } }, - { V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } } + { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } }, + { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } }, + { V4L2_CID_HBLANK, { delays.hblankDelay, false } }, + { V4L2_CID_VBLANK, { delays.vblankDelay, true } } }; data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params); data->sensorMetadata_ = result.sensorConfig.sensorMetadata; @@ -1142,20 +1156,11 @@ int CameraData::loadIPA(ipa::RPi::InitResult *result) if (!ipa_) return -ENOENT; - /* - * The configuration (tuning file) is made from the sensor name unless - * the environment variable overrides it. - */ - std::string configurationFile; - char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE"); - if (!configFromEnv || *configFromEnv == '\0') { - std::string model = sensor_->model(); - if (isMonoSensor(sensor_)) - model += "_mono"; - configurationFile = ipa_->configurationFile(model + ".json"); - } else { - configurationFile = std::string(configFromEnv); - } + /* The configuration (tuning file) is made from the sensor name. 
*/ + std::string model = sensor_->model(); + if (isMonoSensor(sensor_)) + model += "_mono"; + std::string configurationFile = ipa_->configurationFile(model + ".json"); IPASettings settings(configurationFile, sensor_->model()); ipa::RPi::InitParams params; @@ -1295,9 +1300,30 @@ Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const void CameraData::applyScalerCrop(const ControlList &controls) { - const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop); - if (scalerCrop) { - Rectangle nativeCrop = *scalerCrop; + const auto &scalerCropRPi = controls.get<Span<const Rectangle>>(controls::rpi::ScalerCrops); + const auto &scalerCropCore = controls.get<Rectangle>(controls::ScalerCrop); + std::vector<Rectangle> scalerCrops; + + /* + * The first step is to create a vector of crops to apply to each ISP + * output, based on either controls::ScalerCrop or controls::rpi::ScalerCrops + * if present. + * + * If controls::rpi::ScalerCrops is present, apply the given crops to the + * ISP output streams, indexed in the same order in which they were + * configured. This is not the same as the ISP output index. Otherwise, + * if controls::ScalerCrop is present, apply the same crop to all ISP + * output streams. + */ + for (unsigned int i = 0; i < cropParams_.size(); i++) { + if (scalerCropRPi && i < scalerCropRPi->size()) + scalerCrops.push_back(scalerCropRPi->data()[i]); + else if (scalerCropCore) + scalerCrops.push_back(*scalerCropCore); + } + + for (auto const &[i, scalerCrop] : utils::enumerate(scalerCrops)) { + Rectangle nativeCrop = scalerCrop; if (!nativeCrop.width || !nativeCrop.height) nativeCrop = { 0, 0, 1, 1 }; @@ -1313,20 +1339,13 @@ void CameraData::applyScalerCrop(const ControlList &controls) * 2. With the same mid-point, if possible. * 3. But it can't go outside the sensor area. */ - Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size()); + Size minSize = cropParams_.at(i).ispMinCropSize.expandedToAspectRatio(nativeCrop.size()); Size size = ispCrop.size().expandedTo(minSize); ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize)); - if (ispCrop != ispCrop_) { - ispCrop_ = ispCrop; - platformSetIspCrop(); - - /* - * Also update the ScalerCrop in the metadata with what we actually - * used. But we must first rescale that from ISP (camera mode) pixels - * back into sensor native pixels.
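
applyScalerCrop() above accepts either the core ScalerCrop control or the Raspberry Pi vendor ScalerCrops array, one rectangle per configured output stream. A caller-side sketch; the rectangles are invented sensor-coordinate values:

#include <libcamera/control_ids.h>
#include <libcamera/geometry.h>
#include <libcamera/request.h>

using namespace libcamera;

void setPerStreamCrops(Request *request)
{
        const Rectangle crops[] = {
                { 0, 0, 1920, 1080 },   /* crop for the first configured stream */
                { 480, 270, 960, 540 }, /* crop for the second */
        };

        request->controls().set(controls::rpi::ScalerCrops,
                                Span<const Rectangle>(crops));
}
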
- */ - scalerCrop_ = scaleIspCrop(ispCrop_); + if (ispCrop != cropParams_.at(i).ispCrop) { + cropParams_.at(i).ispCrop = ispCrop; + platformSetIspCrop(cropParams_.at(i).ispIndex, ispCrop); } } } @@ -1483,7 +1502,18 @@ void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request request->metadata().set(controls::SensorTimestamp, bufferControls.get(controls::SensorTimestamp).value_or(0)); - request->metadata().set(controls::ScalerCrop, scalerCrop_); + if (cropParams_.size()) { + std::vector<Rectangle> crops; + + for (auto const &[k, v] : cropParams_) + crops.push_back(scaleIspCrop(v.ispCrop)); + + request->metadata().set(controls::ScalerCrop, crops[0]); + if (crops.size() > 1) { + request->metadata().set(controls::rpi::ScalerCrops, + Span<const Rectangle>(crops.data(), crops.size())); + } + } } } /* namespace libcamera */ diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h index f9cecf70..aae0c2f3 100644 --- a/src/libcamera/pipeline/rpi/common/pipeline_base.h +++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h @@ -83,7 +83,7 @@ public: Rectangle scaleIspCrop(const Rectangle &ispCrop) const; void applyScalerCrop(const ControlList &controls); - virtual void platformSetIspCrop() = 0; + virtual void platformSetIspCrop(unsigned int index, const Rectangle &ispCrop) = 0; void cameraTimeout(); void frameStarted(uint32_t sequence); @@ -133,9 +133,23 @@ public: /* For handling digital zoom. */ IPACameraSensorInfo sensorInfo_; - Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */ - Rectangle scalerCrop_; /* crop in sensor native pixels */ - Size ispMinCropSize_; + + struct CropParams { + CropParams(Rectangle ispCrop_, Size ispMinCropSize_, unsigned int ispIndex_) + : ispCrop(ispCrop_), ispMinCropSize(ispMinCropSize_), ispIndex(ispIndex_) + { + } + + /* Crop in ISP (camera mode) pixels */ + Rectangle ispCrop; + /* Minimum crop size in ISP output pixels */ + Size ispMinCropSize; + /* Index of the ISP output channel for this crop */ + unsigned int ispIndex; + }; + + /* Mapping of CropParams keyed by the output stream order in CameraConfiguration */ + std::map<unsigned int, CropParams> cropParams_; unsigned int dropFrameCount_; diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build index 386e2296..9b37c2f0 100644 --- a/src/libcamera/pipeline/rpi/vc4/meson.build +++ b/src/libcamera/pipeline/rpi/vc4/meson.build @@ -1,6 +1,6 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'vc4.cpp', ]) diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp index 4a89e35f..fd8d84b1 100644 --- a/src/libcamera/pipeline/rpi/vc4/vc4.cpp +++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp @@ -109,9 +109,10 @@ public: Config config_; private: - void platformSetIspCrop() override + void platformSetIspCrop([[maybe_unused]] unsigned int index, const Rectangle &ispCrop) override { - isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_); + Rectangle crop = ispCrop; + isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop); } int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override; @@ -701,13 +702,19 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi /* Figure out the smallest selection the ISP will allow. 
*/ Rectangle testCrop(0, 0, 1, 1); isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop); - ispMinCropSize_ = testCrop.size(); /* Adjust aspect ratio by providing crops on the input image. */ Size size = unicamFormat.size.boundedToAspectRatio(maxSize); - ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center()); + Rectangle ispCrop = size.centeredTo(Rectangle(unicamFormat.size).center()); - platformSetIspCrop(); + platformSetIspCrop(0, ispCrop); + /* + * Set the scaler crop to the value we are using (scaled to native sensor + * coordinates). + */ + cropParams_.emplace(std::piecewise_construct, + std::forward_as_tuple(0), + std::forward_as_tuple(ispCrop, testCrop.size(), 0)); return 0; } @@ -802,7 +809,7 @@ void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer) void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer) { RPi::Stream *stream = nullptr; - unsigned int index; + unsigned int index = 0; if (!isRunning()) return; diff --git a/src/libcamera/pipeline/simple/meson.build b/src/libcamera/pipeline/simple/meson.build index 42b0896d..dda3de97 100644 --- a/src/libcamera/pipeline/simple/meson.build +++ b/src/libcamera/pipeline/simple/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'simple.cpp', ]) diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp index eb36578e..6e039bf3 100644 --- a/src/libcamera/pipeline/simple/simple.cpp +++ b/src/libcamera/pipeline/simple/simple.cpp @@ -13,6 +13,7 @@ #include <memory> #include <queue> #include <set> +#include <stdint.h> #include <string.h> #include <string> #include <unordered_map> @@ -30,7 +31,9 @@ #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" #include "libcamera/internal/converter.h" +#include "libcamera/internal/delayed_controls.h" #include "libcamera/internal/device_enumerator.h" #include "libcamera/internal/media_device.h" #include "libcamera/internal/pipeline_handler.h" @@ -163,7 +166,7 @@ LOG_DEFINE_CATEGORY(SimplePipeline) * handler has no a priori knowledge of. The pipeline handler thus implements a * heuristic to handle sharing of hardware resources in a generic fashion. * - * Two cameras are considered to be mutually exclusive if their share common + * Two cameras are considered to be mutually exclusive if they share common * pads along the pipeline from the camera sensor to the video node. An entity * can thus be used concurrently by multiple cameras, as long as pads are * distinct.
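
A small aside on the cropParams_.emplace() call above: std::piecewise_construct forwards the argument tuples so that CropParams is constructed in place inside the map, with no temporary. A generic illustration in plain C++:

#include <map>
#include <tuple>

struct Params {
        Params(int crop, int minSize, int index)
                : crop(crop), minSize(minSize), index(index) {}
        int crop, minSize, index;
};

void emplaceInPlace(std::map<unsigned int, Params> &map)
{
        /* Key built from (0), value built from (64, 16, 0); no copies made. */
        map.emplace(std::piecewise_construct,
                    std::forward_as_tuple(0),
                    std::forward_as_tuple(64, 16, 0));
}
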
@@ -198,7 +201,8 @@ namespace { static const SimplePipelineInfo supportedDevices[] = { { "dcmipp", {}, false }, { "imx7-csi", { { "pxp", 1 } }, false }, - { "j721e-csi2rx", {}, false }, + { "intel-ipu6", {}, true }, + { "j721e-csi2rx", {}, true }, { "mtk-seninf", { { "mtk-mdp", 3 } }, false }, { "mxc-isi", {}, false }, { "qcom-camss", {}, true }, @@ -222,7 +226,8 @@ public: int setupFormats(V4L2SubdeviceFormat *format, V4L2Subdevice::Whence whence, Transform transform = Transform::Identity); - void bufferReady(FrameBuffer *buffer); + void imageBufferReady(FrameBuffer *buffer); + void clearIncompleteRequests(); unsigned int streamIndex(const Stream *stream) const { @@ -276,8 +281,14 @@ public: std::vector<Configuration> configs_; std::map<PixelFormat, std::vector<const Configuration *>> formats_; + std::unique_ptr<DelayedControls> delayedCtrls_; + std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_; - std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_; + struct RequestOutputs { + Request *request; + std::map<const Stream *, FrameBuffer *> outputs; + }; + std::queue<RequestOutputs> conversionQueue_; bool useConversion_; std::unique_ptr<Converter> converter_; @@ -290,7 +301,7 @@ private: void conversionInputDone(FrameBuffer *buffer); void conversionOutputDone(FrameBuffer *buffer); - void ispStatsReady(); + void ispStatsReady(uint32_t frame, uint32_t bufferId); void setSensorControls(const ControlList &sensorControls); }; @@ -362,13 +373,12 @@ private: return static_cast<SimpleCameraData *>(camera->_d()); } - std::vector<MediaEntity *> locateSensors(); + std::vector<MediaEntity *> locateSensors(MediaDevice *media); static int resetRoutingTable(V4L2Subdevice *subdev); const MediaPad *acquirePipeline(SimpleCameraData *data); void releasePipeline(SimpleCameraData *data); - MediaDevice *media_; std::map<const MediaEntity *, EntityData> entities_; MediaDevice *converter_; @@ -384,8 +394,6 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe, MediaEntity *sensor) : Camera::Private(pipe), streams_(numStreams) { - int ret; - /* * Find the shortest path from the camera sensor to a video capture * device using the breadth-first search algorithm. This heuristic will @@ -476,12 +484,9 @@ SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe, } /* Finally also remember the sensor. */ - sensor_ = std::make_unique<CameraSensor>(sensor); - ret = sensor_->init(); - if (ret) { - sensor_.reset(); + sensor_ = CameraSensorFactoryBase::create(sensor); + if (!sensor_) return; - } LOG(SimplePipeline, Debug) << "Found pipeline: " @@ -526,27 +531,13 @@ int SimpleCameraData::init() * Instantiate Soft ISP if this is enabled for the given driver and no converter is used. */ if (!converter_ && pipe->swIspEnabled()) { - swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get()); + swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get(), &controlInfo_); if (!swIsp_->isValid()) { LOG(SimplePipeline, Warning) << "Failed to create software ISP, disabling software debayering"; swIsp_.reset(); } else { - /* - * The inputBufferReady signal is emitted from the soft ISP thread, - * and needs to be handled in the pipeline handler thread. Signals - * implement queued delivery, but this works transparently only if - * the receiver is bound to the target thread. As the - * SimpleCameraData class doesn't inherit from the Object class, it - * is not bound to any thread, and the signal would be delivered - * synchronously. 
Instead, connect the signal to a lambda function - * bound explicitly to the pipe, which is bound to the pipeline - * handler thread. The function then simply forwards the call to - * conversionInputDone(). - */ - swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) { - this->conversionInputDone(buffer); - }); + swIsp_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone); swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone); swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady); swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls); @@ -773,17 +764,14 @@ int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format, } LOG(SimplePipeline, Debug) - << "Link '" << source->entity()->name() - << "':" << source->index() - << " -> '" << sink->entity()->name() - << "':" << sink->index() - << " configured with format " << *format; + << "Link " << *link << ": configured with format " + << *format; } return 0; } -void SimpleCameraData::bufferReady(FrameBuffer *buffer) +void SimpleCameraData::imageBufferReady(FrameBuffer *buffer) { SimplePipelineHandler *pipe = SimpleCameraData::pipe(); @@ -812,16 +800,12 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer) if (conversionQueue_.empty()) return; - Request *request = nullptr; - for (auto &item : conversionQueue_.front()) { - FrameBuffer *outputBuffer = item.second; - request = outputBuffer->request(); - pipe->completeBuffer(request, outputBuffer); - } + const RequestOutputs &outputs = conversionQueue_.front(); + for (auto &[stream, buf] : outputs.outputs) + pipe->completeBuffer(outputs.request, buf); + pipe->completeRequest(outputs.request); conversionQueue_.pop(); - if (request) - pipe->completeRequest(request); return; } @@ -836,8 +820,8 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer) Request *request = buffer->request(); if (useConversion_ && !conversionQueue_.empty()) { - const std::map<unsigned int, FrameBuffer *> &outputs = - conversionQueue_.front(); + const std::map<const Stream *, FrameBuffer *> &outputs = + conversionQueue_.front().outputs; if (!outputs.empty()) { FrameBuffer *outputBuffer = outputs.begin()->second; if (outputBuffer) @@ -861,9 +845,15 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer) } if (converter_) - converter_->queueBuffers(buffer, conversionQueue_.front()); + converter_->queueBuffers(buffer, conversionQueue_.front().outputs); else - swIsp_->queueBuffers(buffer, conversionQueue_.front()); + /* + * request->sequence() cannot be retrieved from `buffer' inside + * queueBuffers() because the unique_ptr bookkeeping has already + * made buffer->request() invalid at this point. + */ + swIsp_->queueBuffers(request->sequence(), buffer, + conversionQueue_.front().outputs); conversionQueue_.pop(); return; @@ -874,6 +864,14 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer) pipe->completeRequest(request); } +void SimpleCameraData::clearIncompleteRequests() +{ + while (!conversionQueue_.empty()) { + pipe()->cancelRequest(conversionQueue_.front().request); + conversionQueue_.pop(); + } +} + void SimpleCameraData::conversionInputDone(FrameBuffer *buffer) { /* Queue the input buffer back for capture.
*/ @@ -890,15 +888,15 @@ void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer) pipe->completeRequest(request); } -void SimpleCameraData::ispStatsReady() +void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId) { - /* \todo Use the DelayedControls class */ - swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN, - V4L2_CID_EXPOSURE })); + swIsp_->processStats(frame, bufferId, + delayedCtrls_->get(frame)); } void SimpleCameraData::setSensorControls(const ControlList &sensorControls) { + delayedCtrls_->push(sensorControls); ControlList ctrls(sensorControls); sensor_->setControls(&ctrls); } @@ -1138,7 +1136,7 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate() cfg.frameSize = format.planes[0].size; } - cfg.bufferCount = 3; + cfg.bufferCount = 4; } return status; @@ -1279,16 +1277,30 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c) if (outputCfgs.empty()) return 0; + const CameraSensorProperties::SensorDelays &delays = data->sensor_->sensorDelays(); + std::unordered_map<uint32_t, DelayedControls::ControlParams> params = { + { V4L2_CID_ANALOGUE_GAIN, { delays.gainDelay, false } }, + { V4L2_CID_EXPOSURE, { delays.exposureDelay, false } }, + }; + data->delayedCtrls_ = + std::make_unique<DelayedControls>(data->sensor_->device(), + params); + data->video_->frameStart.connect(data->delayedCtrls_.get(), + &DelayedControls::applyControls); + StreamConfiguration inputCfg; inputCfg.pixelFormat = pipeConfig->captureFormat; inputCfg.size = pipeConfig->captureSize; inputCfg.stride = captureFormat.planes[0].bpl; inputCfg.bufferCount = kNumInternalBuffers; - return data->converter_ - ? data->converter_->configure(inputCfg, outputCfgs) - : data->swIsp_->configure(inputCfg, outputCfgs, - data->sensor_->controls()); + if (data->converter_) { + return data->converter_->configure(inputCfg, outputCfgs); + } else { + ipa::soft::IPAConfigInfo configInfo; + configInfo.sensorControls = data->sensor_->controls(); + return data->swIsp_->configure(inputCfg, outputCfgs, configInfo); + } } int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream, @@ -1303,10 +1315,8 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream, */ if (data->useConversion_) return data->converter_ - ? data->converter_->exportBuffers(data->streamIndex(stream), - count, buffers) - : data->swIsp_->exportBuffers(data->streamIndex(stream), - count, buffers); + ? 
data->converter_->exportBuffers(stream, count, buffers) + : data->swIsp_->exportBuffers(stream, count, buffers); else return data->video_->exportBuffers(count, buffers); } @@ -1342,7 +1352,7 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL return ret; } - video->bufferReady.connect(data, &SimpleCameraData::bufferReady); + video->bufferReady.connect(data, &SimpleCameraData::imageBufferReady); ret = video->streamOn(); if (ret < 0) { @@ -1386,8 +1396,9 @@ void SimplePipelineHandler::stopDevice(Camera *camera) video->streamOff(); video->releaseBuffers(); - video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady); + video->bufferReady.disconnect(data, &SimpleCameraData::imageBufferReady); + data->clearIncompleteRequests(); data->conversionBuffers_.clear(); releasePipeline(data); @@ -1398,7 +1409,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request) SimpleCameraData *data = cameraData(camera); int ret; - std::map<unsigned int, FrameBuffer *> buffers; + std::map<const Stream *, FrameBuffer *> buffers; for (auto &[stream, buffer] : request->buffers()) { /* @@ -1407,7 +1418,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request) * completion handler. */ if (data->useConversion_) { - buffers.emplace(data->streamIndex(stream), buffer); + buffers.emplace(stream, buffer); } else { ret = data->video_->queueBuffer(buffer); if (ret < 0) @@ -1415,8 +1426,11 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request) } } - if (data->useConversion_) - data->conversionQueue_.push(std::move(buffers)); + if (data->useConversion_) { + data->conversionQueue_.push({ request, std::move(buffers) }); + if (data->swIsp_) + data->swIsp_->queueRequest(request->sequence(), request->controls()); + } return 0; } @@ -1425,7 +1439,8 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request) * Match and Setup */ -std::vector<MediaEntity *> SimplePipelineHandler::locateSensors() +std::vector<MediaEntity *> +SimplePipelineHandler::locateSensors(MediaDevice *media) { std::vector<MediaEntity *> entities; @@ -1433,7 +1448,7 @@ std::vector<MediaEntity *> SimplePipelineHandler::locateSensors() * Gather all the camera sensor entities based on the function they * expose. */ - for (MediaEntity *entity : media_->entities()) { + for (MediaEntity *entity : media->entities()) { if (entity->function() == MEDIA_ENT_F_CAM_SENSOR) entities.push_back(entity); } @@ -1521,17 +1536,18 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator) { const SimplePipelineInfo *info = nullptr; unsigned int numStreams = 1; + MediaDevice *media; for (const SimplePipelineInfo &inf : supportedDevices) { DeviceMatch dm(inf.driver); - media_ = acquireMediaDevice(enumerator, dm); - if (media_) { + media = acquireMediaDevice(enumerator, dm); + if (media) { info = &inf; break; } } - if (!media_) + if (!media) return false; for (const auto &[name, streams] : info->converters) { @@ -1546,12 +1562,14 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator) swIspEnabled_ = info->swIspEnabled; /* Locate the sensors. 
*/ - std::vector<MediaEntity *> sensors = locateSensors(); + std::vector<MediaEntity *> sensors = locateSensors(media); if (sensors.empty()) { - LOG(SimplePipeline, Error) << "No sensor found"; + LOG(SimplePipeline, Info) << "No sensor found for " << media->deviceNode(); return false; } + LOG(SimplePipeline, Debug) << "Sensor found for " << media->deviceNode(); + /* * Create one camera data instance for each sensor and gather all * entities in all pipelines. @@ -1615,8 +1633,8 @@ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator) if (subdev->caps().hasStreams()) { /* * Reset the routing table to its default state - * to make sure entities are enumerate according - * to the defaul routing configuration. + * to make sure entities are enumerated according + * to the default routing configuration. */ ret = resetRoutingTable(subdev.get()); if (ret) { diff --git a/src/libcamera/pipeline/uvcvideo/meson.build b/src/libcamera/pipeline/uvcvideo/meson.build index a3c2efd4..a3a91074 100644 --- a/src/libcamera/pipeline/uvcvideo/meson.build +++ b/src/libcamera/pipeline/uvcvideo/meson.build @@ -1,5 +1,5 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'uvcvideo.cpp', ]) diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp index 8a7409fc..7470b562 100644 --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp @@ -6,13 +6,16 @@ */ #include <algorithm> +#include <cmath> #include <fstream> -#include <iomanip> -#include <math.h> +#include <map> #include <memory> -#include <tuple> +#include <set> +#include <string> +#include <vector> #include <libcamera/base/log.h> +#include <libcamera/base/mutex.h> #include <libcamera/base/utils.h> #include <libcamera/camera.h> @@ -44,10 +47,11 @@ public: int init(MediaDevice *media); void addControl(uint32_t cid, const ControlInfo &v4l2info, ControlInfoMap::Map *ctrls); - void bufferReady(FrameBuffer *buffer); + void imageBufferReady(FrameBuffer *buffer); const std::string &id() const { return id_; } + Mutex openLock_; std::unique_ptr<V4L2VideoDevice> video_; Stream stream_; std::map<PixelFormat, std::vector<SizeRange>> formats_; @@ -93,6 +97,9 @@ private: const ControlValue &value); int processControls(UVCCameraData *data, Request *request); + bool acquireDevice(Camera *camera) override; + void releaseDevice(Camera *camera) override; + UVCCameraData *cameraData(Camera *camera) { return static_cast<UVCCameraData *>(camera->_d()); @@ -158,9 +165,29 @@ CameraConfiguration::Status UVCCameraConfiguration::validate() format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat); format.size = cfg.size; - int ret = data_->video_->tryFormat(&format); - if (ret) - return Invalid; + /* + * For power-consumption reasons video_ is closed when the camera is not + * acquired. Open it here if necessary. 
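+ * The device is opened only transiently: it is closed again right
+ * after tryFormat() below, so that it can return to runtime-suspend.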
+ */ + { + bool opened = false; + + MutexLocker locker(data_->openLock_); + + if (!data_->video_->isOpen()) { + int ret = data_->video_->open(); + if (ret) + return Invalid; + + opened = true; + } + + int ret = data_->video_->tryFormat(&format); + if (opened) + data_->video_->close(); + if (ret) + return Invalid; + } cfg.stride = format.planes[0].bpl; cfg.frameSize = format.planes[0].size; @@ -271,7 +298,7 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id, cid = V4L2_CID_CONTRAST; else if (id == controls::Saturation) cid = V4L2_CID_SATURATION; - else if (id == controls::AeEnable) + else if (id == controls::ExposureTimeMode) cid = V4L2_CID_EXPOSURE_AUTO; else if (id == controls::ExposureTime) cid = V4L2_CID_EXPOSURE_ABSOLUTE; @@ -293,14 +320,14 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id, case V4L2_CID_BRIGHTNESS: { float scale = std::max(max - def, def - min); float fvalue = value.get<float>() * scale + def; - controls->set(cid, static_cast<int32_t>(lroundf(fvalue))); + controls->set(cid, static_cast<int32_t>(std::lround(fvalue))); break; } case V4L2_CID_SATURATION: { float scale = def - min; float fvalue = value.get<float>() * scale + min; - controls->set(cid, static_cast<int32_t>(lroundf(fvalue))); + controls->set(cid, static_cast<int32_t>(std::lround(fvalue))); break; } @@ -327,7 +354,7 @@ int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id, } float fvalue = (value.get<float>() - p) / m; - controls->set(cid, static_cast<int32_t>(lroundf(fvalue))); + controls->set(cid, static_cast<int32_t>(std::lround(fvalue))); break; } @@ -411,6 +438,23 @@ bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator) return true; } +bool PipelineHandlerUVC::acquireDevice(Camera *camera) +{ + UVCCameraData *data = cameraData(camera); + + MutexLocker locker(data->openLock_); + + return data->video_->open() == 0; +} + +void PipelineHandlerUVC::releaseDevice(Camera *camera) +{ + UVCCameraData *data = cameraData(camera); + + MutexLocker locker(data->openLock_); + data->video_->close(); +} + int UVCCameraData::init(MediaDevice *media) { int ret; @@ -432,7 +476,7 @@ int UVCCameraData::init(MediaDevice *media) if (ret) return ret; - video_->bufferReady.connect(this, &UVCCameraData::bufferReady); + video_->bufferReady.connect(this, &UVCCameraData::imageBufferReady); /* Generate the camera ID. */ if (!generateId()) { @@ -512,6 +556,12 @@ int UVCCameraData::init(MediaDevice *media) controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls); + /* + * Close to allow camera to go into runtime-suspend, video_ will be + * re-opened from acquireDevice() and validate(). + */ + video_->close(); + return 0; } @@ -597,7 +647,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info, id = &controls::Saturation; break; case V4L2_CID_EXPOSURE_AUTO: - id = &controls::AeEnable; + id = &controls::ExposureTimeMode; break; case V4L2_CID_EXPOSURE_ABSOLUTE: id = &controls::ExposureTime; @@ -610,6 +660,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info, } /* Map the control info. 
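+ * Each supported V4L2 control's range (minimum, maximum and default)
+ * is translated below into the matching libcamera ControlInfo.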
*/
+	const std::vector<ControlValue> &v4l2Values = v4l2Info.values();
 	int32_t min = v4l2Info.min().get<int32_t>();
 	int32_t max = v4l2Info.max().get<int32_t>();
 	int32_t def = v4l2Info.def().get<int32_t>();
@@ -647,10 +698,52 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
 		};
 		break;
- 	case V4L2_CID_EXPOSURE_AUTO:
- 		info = ControlInfo{ false, true, true };
+ 	case V4L2_CID_EXPOSURE_AUTO: {
+ 		/*
+ 		 * From the V4L2_CID_EXPOSURE_AUTO documentation:
+ 		 *
+ 		 * ------------------------------------------------------------
+ 		 * V4L2_EXPOSURE_AUTO:
+ 		 * Automatic exposure time, automatic iris aperture.
+ 		 *
+ 		 * V4L2_EXPOSURE_MANUAL:
+ 		 * Manual exposure time, manual iris.
+ 		 *
+ 		 * V4L2_EXPOSURE_SHUTTER_PRIORITY:
+ 		 * Manual exposure time, auto iris.
+ 		 *
+ 		 * V4L2_EXPOSURE_APERTURE_PRIORITY:
+ 		 * Auto exposure time, manual iris.
+ 		 *-------------------------------------------------------------
+ 		 *
+ 		 * ExposureTimeModeAuto = { V4L2_EXPOSURE_AUTO,
+ 		 *                          V4L2_EXPOSURE_APERTURE_PRIORITY }
+ 		 *
+ 		 * ExposureTimeModeManual = { V4L2_EXPOSURE_MANUAL,
+ 		 *                            V4L2_EXPOSURE_SHUTTER_PRIORITY }
+ 		 */
+ 		std::array<int32_t, 2> values{};
+
+ 		auto it = std::find_if(v4l2Values.begin(), v4l2Values.end(),
+ 			[&](const ControlValue &val) {
+ 				return val.get<int32_t>() == V4L2_EXPOSURE_APERTURE_PRIORITY ||
+ 				       val.get<int32_t>() == V4L2_EXPOSURE_AUTO;
+ 			});
+ 		if (it != v4l2Values.end())
+ 			values.front() = static_cast<int32_t>(controls::ExposureTimeModeAuto);
+
+ 		it = std::find_if(v4l2Values.begin(), v4l2Values.end(),
+ 			[&](const ControlValue &val) {
+ 				return val.get<int32_t>() == V4L2_EXPOSURE_SHUTTER_PRIORITY ||
+ 				       val.get<int32_t>() == V4L2_EXPOSURE_MANUAL;
+ 			});
+ 		if (it != v4l2Values.end())
+ 			values.back() = static_cast<int32_t>(controls::ExposureTimeModeManual);
+
+ 		info = ControlInfo{ Span<int32_t>{ values }, values[0] };
 		break;
-
+ 	}
 	case V4L2_CID_EXPOSURE_ABSOLUTE:
 		/*
 		 * ExposureTime is in units of 1 µs, and UVC expects
@@ -697,7 +790,7 @@ void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
 	ctrls->emplace(id, info);
 }
-void UVCCameraData::bufferReady(FrameBuffer *buffer)
+void UVCCameraData::imageBufferReady(FrameBuffer *buffer)
 {
 	Request *request = buffer->request();
diff --git a/src/libcamera/pipeline/vimc/meson.build b/src/libcamera/pipeline/vimc/meson.build index 290eefb5..868e2546 100644 --- a/src/libcamera/pipeline/vimc/meson.build +++ b/src/libcamera/pipeline/vimc/meson.build @@ -1,5 +1,5 @@
 # SPDX-License-Identifier: CC0-1.0
-libcamera_sources += files([
+libcamera_internal_sources += files([
     'vimc.cpp',
 ])
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp index 0ec9928e..07273bd2 100644 --- a/src/libcamera/pipeline/vimc/vimc.cpp +++ b/src/libcamera/pipeline/vimc/vimc.cpp @@ -6,14 +6,15 @@
 */
 #include <algorithm>
+#include <cmath>
 #include <iomanip>
 #include <map>
-#include <math.h>
 #include <tuple>
 #include <linux/media-bus-format.h>
 #include <linux/version.h>
+#include <libcamera/base/flags.h>
 #include <libcamera/base/log.h>
 #include <libcamera/base/utils.h>
@@ -21,6 +22,8 @@
 #include <libcamera/control_ids.h>
 #include <libcamera/controls.h>
 #include <libcamera/formats.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
 #include <libcamera/request.h>
 #include <libcamera/stream.h>
@@ -53,8 +56,8 @@ public:
 	int init();
 	int allocateMockIPABuffers();
- 	void bufferReady(FrameBuffer *buffer);
- 	void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
+ 	void 
imageBufferReady(FrameBuffer *buffer);
+ 	void paramsComputed(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
 	MediaDevice *media_;
 	std::unique_ptr<CameraSensor> sensor_;
@@ -417,7 +420,7 @@ int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
 			continue;
 		}
- 		int32_t value = lroundf(it.second.get<float>() * 128 + offset);
+ 		int32_t value = std::lround(it.second.get<float>() * 128 + offset);
 		controls.set(cid, std::clamp(value, 0, 255));
 	}
@@ -489,7 +492,7 @@ bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
 		return false;
 	}
- 	data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
+ 	data->ipa_->paramsComputed.connect(data.get(), &VimcCameraData::paramsComputed);
 	std::string conf = data->ipa_->configurationFile("vimc.conf");
 	Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
@@ -529,10 +532,9 @@ int VimcCameraData::init()
 		return ret;
 	/* Create and open the camera sensor, debayer, scaler and video device. */
- 	sensor_ = std::make_unique<CameraSensor>(media_->getEntityByName("Sensor B"));
- 	ret = sensor_->init();
- 	if (ret)
- 		return ret;
+ 	sensor_ = CameraSensorFactoryBase::create(media_->getEntityByName("Sensor B"));
+ 	if (!sensor_)
+ 		return -ENODEV;
 	debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
 	if (debayer_->open())
@@ -546,7 +548,7 @@ int VimcCameraData::init()
 	if (video_->open())
 		return -ENODEV;
- 	video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
+ 	video_->bufferReady.connect(this, &VimcCameraData::imageBufferReady);
 	raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
 	if (raw_->open())
@@ -594,7 +596,7 @@ int VimcCameraData::init()
 	return 0;
 }
-void VimcCameraData::bufferReady(FrameBuffer *buffer)
+void VimcCameraData::imageBufferReady(FrameBuffer *buffer)
 {
 	PipelineHandlerVimc *pipe = static_cast<PipelineHandlerVimc *>(this->pipe());
@@ -619,7 +621,7 @@ void VimcCameraData::bufferReady(FrameBuffer *buffer)
 	pipe->completeBuffer(request, buffer);
 	pipe->completeRequest(request);
- 	ipa_->fillParamsBuffer(request->sequence(), mockIPABufs_[0]->cookie());
+ 	ipa_->computeParams(request->sequence(), mockIPABufs_[0]->cookie());
 }
 int VimcCameraData::allocateMockIPABuffers()
@@ -637,8 +639,8 @@ int VimcCameraData::allocateMockIPABuffers()
 	return video_->exportBuffers(kBufCount, &mockIPABufs_);
 }
-void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
- 				       [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
+void VimcCameraData::paramsComputed([[maybe_unused]] unsigned int id,
+ 				    [[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
 {
 }
diff --git a/src/libcamera/pipeline/virtual/README.md b/src/libcamera/pipeline/virtual/README.md new file mode 100644 index 00000000..a9f39c15 --- /dev/null +++ b/src/libcamera/pipeline/virtual/README.md @@ -0,0 +1,65 @@
+# Virtual Pipeline Handler
+
+The virtual pipeline handler emulates fake external camera(s) for testing.
+
+## Parse config file and register cameras
+
+- A sample config file is located at `src/libcamera/pipeline/virtual/data/virtual.yaml`.
+- If libcamera is installed, the config file should be installed at
+  `share/libcamera/pipeline/virtual/virtual.yaml`.
+
+### Config File Format
+The config file describes the properties of the cameras to register. It is a
+YAML file whose top level is a dictionary mapping camera ids to their
+properties. Default values are applied for properties that are left empty.
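+
+A minimal single-camera example, using the keys described below (the camera
+id and all values are illustrative):
+
+```yaml
+"Virtual0":
+  supported_formats:
+    - width: 1280
+      height: 720
+      frame_rates:
+        - 30
+        - 60
+  test_pattern: "bars"
+  location: "CameraLocationFront"
+  model: "Example Camera"
+```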
+
+Each camera block is a dictionary, containing the following keys:
+- `supported_formats` (list of `VirtualCameraData::Resolution`, optional):
+  List of supported resolutions and frame rates of the emulated camera.
+  - `width` (`unsigned int`, default=1920): Width of the window resolution.
+    This needs to be even.
+  - `height` (`unsigned int`, default=1080): Height of the window resolution.
+  - `frame_rates` (list of `int`, default=`[30,60]`): Range of the frame
+    rate (frames per second). If the list contains one value, it is used as
+    both the lower and the upper bound. If the list contains two values, the
+    first is the lower bound and the second is the upper bound. No other
+    number of values is allowed.
+- `test_pattern` (`string`): Which test pattern to use as frames. The options
+  are "bars" (color bars) and "lines" (diagonal lines). Cannot be set
+  together with `frames`.
+- `frames` (dictionary):
+  - `path` (`string`): Path to an image, or to a directory containing a
+    series of images. Cannot be set together with `test_pattern`.
+    - The path to an image has a ".jpg" extension.
+    - The path to a directory ends with "/". The images in the directory are
+      named "{n}.jpg", where {n} is the image sequence number, starting at 0.
+- `location` (`string`, default="CameraLocationFront"): The location of the
+  camera. Supported values are "CameraLocationFront", "CameraLocationBack",
+  and "CameraLocationExternal".
+- `model` (`string`, default="Unknown"): The model name of the camera.
+
+See `data/virtual.yaml` for a complete sample config file.
+
+### Implementation
+
+The `ConfigParser` class provides methods to parse the config file and
+register cameras in the virtual pipeline handler. `parseConfigFile()` is the
+entry point exposed to the pipeline handler.
+
+The parsing procedure is as follows:
+1. `parseConfigFile()` parses the config file to `YamlObject` using `YamlParser::parse()`.
+   - The top level of the config file maps camera ids to their properties;
+     each camera's properties are then inspected.
+2. For each camera, `parseCameraConfigData()` returns the configuration data
+   for one camera.
+   - The helper methods in the next step fill in this data.
+   - If the configuration is invalid, this method returns nullptr and the
+     camera is skipped.
+3. Parse each property and register the data.
+   - `parseSupportedFormats()`: Parses `supported_formats` in the config, which
+     contains resolutions and frame rates.
+   - `parseFrameGenerator()`: Parses `test_pattern` or `frames` in the config.
+   - `parseLocation()`: Parses `location` in the config.
+   - `parseModel()`: Parses `model` in the config.
+4. Control returns to `parseConfigFile()`, which appends the camera
+   configuration to the list.
+5. `parseConfigFile()` returns the list of camera configurations.
diff --git a/src/libcamera/pipeline/virtual/config_parser.cpp b/src/libcamera/pipeline/virtual/config_parser.cpp new file mode 100644 index 00000000..0cbfe39b --- /dev/null +++ b/src/libcamera/pipeline/virtual/config_parser.cpp @@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Virtual cameras helper to parse config file
+ */
+
+#include "config_parser.h"
+
+#include <string.h>
+#include <utility>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include "virtual.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::vector<std::unique_ptr<VirtualCameraData>>
+ConfigParser::parseConfigFile(File &file, PipelineHandler *pipe)
+{
+	std::vector<std::unique_ptr<VirtualCameraData>> configurations;
+
+	std::unique_ptr<YamlObject> cameras = YamlParser::parse(file);
+	if (!cameras) {
+		LOG(Virtual, Error) << "Failed to parse config file.";
+		return configurations;
+	}
+
+	if (!cameras->isDictionary()) {
+		LOG(Virtual, Error) << "Config file is not a dictionary at the top level.";
+		return configurations;
+	}
+
+	/* Look into the configuration of each camera */
+	for (const auto &[cameraId, cameraConfigData] : cameras->asDict()) {
+		std::unique_ptr<VirtualCameraData> data =
+			parseCameraConfigData(cameraConfigData, pipe);
+		/* Parse cameraConfigData into data */
+		if (!data) {
+			/* Skip the camera if it has an invalid config */
+			LOG(Virtual, Error) << "Failed to parse config of the camera: "
+					    << cameraId;
+			continue;
+		}
+
+		data->config_.id = cameraId;
+		ControlInfoMap::Map controls;
+		/* \todo Check which resolution's frame rate should be reported */
+		controls[&controls::FrameDurationLimits] =
+			ControlInfo(1000000 / data->config_.resolutions[0].frameRates[1],
+				    1000000 / data->config_.resolutions[0].frameRates[0]);
+
+		std::vector<ControlValue> supportedFaceDetectModes{
+			static_cast<int32_t>(controls::draft::FaceDetectModeOff),
+		};
+		controls[&controls::draft::FaceDetectMode] = ControlInfo(supportedFaceDetectModes);
+
+		data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
+		configurations.push_back(std::move(data));
+	}
+
+	return configurations;
+}
+
+std::unique_ptr<VirtualCameraData>
+ConfigParser::parseCameraConfigData(const YamlObject &cameraConfigData,
+				    PipelineHandler *pipe)
+{
+	std::vector<VirtualCameraData::Resolution> resolutions;
+	if (parseSupportedFormats(cameraConfigData, &resolutions))
+		return nullptr;
+
+	std::unique_ptr<VirtualCameraData> data =
+		std::make_unique<VirtualCameraData>(pipe, resolutions);
+
+	if (parseFrameGenerator(cameraConfigData, data.get()))
+		return nullptr;
+
+	if (parseLocation(cameraConfigData, data.get()))
+		return nullptr;
+
+	if (parseModel(cameraConfigData, data.get()))
+		return nullptr;
+
+	return data;
+}
+
+int ConfigParser::parseSupportedFormats(const YamlObject &cameraConfigData,
+					std::vector<VirtualCameraData::Resolution> *resolutions)
+{
+	if (cameraConfigData.contains("supported_formats")) {
+		const YamlObject &supportedResolutions = cameraConfigData["supported_formats"];
+
+		for (const YamlObject &supportedResolution : supportedResolutions.asList()) {
+			unsigned int width = supportedResolution["width"].get<unsigned int>(1920);
+			unsigned int height = supportedResolution["height"].get<unsigned int>(1080);
+			if (width == 0 || height == 0) {
+				LOG(Virtual, Error) << "Invalid width and/or height";
+				return -EINVAL;
+			}
+			if (width % 2 != 0) {
+				LOG(Virtual, Error) << "Invalid width: width needs to be even";
+				return -EINVAL;
+			}
+
+			std::vector<int64_t> frameRates;
+			if (supportedResolution.contains("frame_rates")) {
+				auto frameRatesList =
+					supportedResolution["frame_rates"].getList<int>();
+				if (!frameRatesList || 
(frameRatesList->size() != 1 &&
+				     frameRatesList->size() != 2)) {
+					LOG(Virtual, Error) << "Invalid frame_rates: either one or two values";
+					return -EINVAL;
+				}
+
+				if (frameRatesList->size() == 2 &&
+				    frameRatesList.value()[0] > frameRatesList.value()[1]) {
+					LOG(Virtual, Error) << "frame_rates' first value (lower bound)"
+							    << " is higher than the second value (upper bound)";
+					return -EINVAL;
+				}
+				/*
+				 * Push the min and max framerates. A
+				 * single rate is duplicated.
+				 */
+				frameRates.push_back(frameRatesList.value().front());
+				frameRates.push_back(frameRatesList.value().back());
+			} else {
+				frameRates.push_back(30);
+				frameRates.push_back(60);
+			}
+
+			resolutions->emplace_back(
+				VirtualCameraData::Resolution{ Size{ width, height },
+							       frameRates });
+		}
+	} else {
+		resolutions->emplace_back(
+			VirtualCameraData::Resolution{ Size{ 1920, 1080 },
+						       { 30, 60 } });
+	}
+
+	return 0;
+}
+
+int ConfigParser::parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+	const std::string testPatternKey = "test_pattern";
+	const std::string framesKey = "frames";
+	if (cameraConfigData.contains(testPatternKey)) {
+		if (cameraConfigData.contains(framesKey)) {
+			LOG(Virtual, Error) << "A camera should use either "
+					    << testPatternKey << " or " << framesKey;
+			return -EINVAL;
+		}
+
+		auto testPattern = cameraConfigData[testPatternKey].get<std::string>("");
+
+		if (testPattern == "bars") {
+			data->config_.frame = TestPattern::ColorBars;
+		} else if (testPattern == "lines") {
+			data->config_.frame = TestPattern::DiagonalLines;
+		} else {
+			LOG(Virtual, Debug) << "Test pattern: " << testPattern
+					    << " is not supported";
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	const YamlObject &frames = cameraConfigData[framesKey];
+
+	/* When no frames are provided in the config file, use the color bar test pattern */
+	if (!frames) {
+		data->config_.frame = TestPattern::ColorBars;
+		return 0;
+	}
+
+	if (!frames.isDictionary()) {
+		LOG(Virtual, Error) << "'frames' is not a dictionary.";
+		return -EINVAL;
+	}
+
+	auto path = frames["path"].get<std::string>();
+
+	if (!path) {
+		LOG(Virtual, Error) << "Either a test pattern or a path must be specified.";
+		return -EINVAL;
+	}
+
+	std::vector<std::filesystem::path> files;
+
+	switch (std::filesystem::symlink_status(*path).type()) {
+	case std::filesystem::file_type::regular:
+		files.push_back(*path);
+		break;
+
+	case std::filesystem::file_type::directory:
+		for (const auto &dentry : std::filesystem::directory_iterator{ *path }) {
+			if (dentry.is_regular_file())
+				files.push_back(dentry.path());
+		}
+
+		std::sort(files.begin(), files.end(), [](const auto &a, const auto &b) {
+			return ::strverscmp(a.c_str(), b.c_str()) < 0;
+		});
+
+		if (files.empty()) {
+			LOG(Virtual, Error) << "Directory has no files: " << *path;
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		LOG(Virtual, Error) << "Frame: " << *path << " is not supported";
+		return -EINVAL;
+	}
+
+	data->config_.frame = ImageFrames{ std::move(files) };
+
+	return 0;
+}
+
+int ConfigParser::parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data)
+{
+	std::string location = cameraConfigData["location"].get<std::string>("CameraLocationFront");
+
+	/* Default value is properties::CameraLocationFront */
+	auto it = properties::LocationNameValueMap.find(location);
+	if (it == properties::LocationNameValueMap.end()) {
+		LOG(Virtual, Error)
+			<< "location: " << location << " is not supported";
+		return -EINVAL;
+	}
+
+	data->properties_.set(properties::Location, it->second);
+
+	
return 0; +} + +int ConfigParser::parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data) +{ + std::string model = cameraConfigData["model"].get<std::string>("Unknown"); + + data->properties_.set(properties::Model, model); + + return 0; +} + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/config_parser.h b/src/libcamera/pipeline/virtual/config_parser.h new file mode 100644 index 00000000..d2000de9 --- /dev/null +++ b/src/libcamera/pipeline/virtual/config_parser.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. + * + * Virtual cameras helper to parse config file + */ + +#pragma once + +#include <memory> +#include <vector> + +#include <libcamera/base/file.h> + +#include "libcamera/internal/pipeline_handler.h" +#include "libcamera/internal/yaml_parser.h" + +#include "virtual.h" + +namespace libcamera { + +class ConfigParser +{ +public: + std::vector<std::unique_ptr<VirtualCameraData>> + parseConfigFile(File &file, PipelineHandler *pipe); + +private: + std::unique_ptr<VirtualCameraData> + parseCameraConfigData(const YamlObject &cameraConfigData, PipelineHandler *pipe); + + int parseSupportedFormats(const YamlObject &cameraConfigData, + std::vector<VirtualCameraData::Resolution> *resolutions); + int parseFrameGenerator(const YamlObject &cameraConfigData, VirtualCameraData *data); + int parseLocation(const YamlObject &cameraConfigData, VirtualCameraData *data); + int parseModel(const YamlObject &cameraConfigData, VirtualCameraData *data); +}; + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/data/meson.build b/src/libcamera/pipeline/virtual/data/meson.build new file mode 100644 index 00000000..ce63f9a2 --- /dev/null +++ b/src/libcamera/pipeline/virtual/data/meson.build @@ -0,0 +1,4 @@ +install_data('virtual.yaml', + install_dir : pipeline_data_dir / 'virtual', + install_tag : 'runtime', + rename: 'virtual.yaml.example') diff --git a/src/libcamera/pipeline/virtual/data/virtual.yaml b/src/libcamera/pipeline/virtual/data/virtual.yaml new file mode 100644 index 00000000..20471bb9 --- /dev/null +++ b/src/libcamera/pipeline/virtual/data/virtual.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: CC0-1.0 +%YAML 1.1 +--- +"Virtual0": + supported_formats: + - width: 1920 + height: 1080 + frame_rates: + - 30 + - 60 + - width: 1680 + height: 1050 + frame_rates: + - 70 + - 80 + test_pattern: "lines" + location: "CameraLocationFront" + model: "Virtual Video Device" +"Virtual1": + supported_formats: + - width: 800 + height: 600 + frame_rates: + - 60 + test_pattern: "bars" + location: "CameraLocationBack" + model: "Virtual Video Device1" +"Virtual2": + supported_formats: + - width: 400 + height: 300 + test_pattern: "lines" + location: "CameraLocationFront" + model: "Virtual Video Device2" +"Virtual3": + test_pattern: "bars" diff --git a/src/libcamera/pipeline/virtual/frame_generator.h b/src/libcamera/pipeline/virtual/frame_generator.h new file mode 100644 index 00000000..a0658c45 --- /dev/null +++ b/src/libcamera/pipeline/virtual/frame_generator.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. 
+ *
+ * Virtual cameras helper to generate frames
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
+class FrameGenerator
+{
+public:
+	virtual ~FrameGenerator() = default;
+
+	virtual void configure(const Size &size) = 0;
+
+	virtual int generateFrame(const Size &size,
+				  const FrameBuffer *buffer) = 0;
+
+protected:
+	FrameGenerator() {}
+};
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp new file mode 100644 index 00000000..d1545b5d --- /dev/null +++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp @@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * Derived class of FrameGenerator for generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+/*
+ * Factory function to create an ImageFrameGenerator object.
+ * Read the images and convert them to buffers in NV12 format.
+ * Store the pointers to the buffers in a list (imageFrameDatas_).
+ */
+std::unique_ptr<ImageFrameGenerator>
+ImageFrameGenerator::create(ImageFrames &imageFrames)
+{
+	std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+		std::make_unique<ImageFrameGenerator>();
+	imageFrameGenerator->imageFrames_ = &imageFrames;
+
+	/*
+	 * For each file in the directory, load the image,
+	 * convert it to NV12, and store the pointer.
+	 */
+	for (const auto &path : imageFrames.files) {
+		File file(path);
+		if (!file.open(File::OpenModeFlag::ReadOnly)) {
+			LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+					    << ": " << strerror(file.error());
+			return nullptr;
+		}
+
+		/* Read the image file into memory */
+		auto fileSize = file.size();
+		auto buffer = std::make_unique<uint8_t[]>(fileSize);
+		if (file.read({ buffer.get(), static_cast<size_t>(fileSize) }) != fileSize) {
+			LOG(Virtual, Error) << "Failed to read file " << file.fileName()
+					    << ": " << strerror(file.error());
+			return nullptr;
+		}
+
+		/* Get the width and height of the image */
+		int width, height;
+		if (libyuv::MJPGSize(buffer.get(), fileSize, &width, &height)) {
+			LOG(Virtual, Error) << "Failed to get the size of the image file: "
					    << file.fileName();
+			return nullptr;
+		}
+
+		std::unique_ptr<uint8_t[]> dstY =
+			std::make_unique<uint8_t[]>(width * height);
+		std::unique_ptr<uint8_t[]> dstUV =
+			std::make_unique<uint8_t[]>(width * height / 2);
+		int ret = libyuv::MJPGToNV12(buffer.get(), fileSize,
+					     dstY.get(), width, dstUV.get(),
+					     width, width, height, width, height);
+		if (ret != 0)
+			LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+
+		imageFrameGenerator->imageFrameDatas_.emplace_back(
+			ImageFrameData{ std::move(dstY), std::move(dstUV),
+					Size(width, height) });
+	}
+
+	ASSERT(!imageFrameGenerator->imageFrameDatas_.empty());
+
+	return imageFrameGenerator;
+}
+
+/*
+ * \var ImageFrameGenerator::frameRepeat
+ * \brief Number of frames to repeat before proceeding to the next frame
+ */
+
+/* Scale the buffers for image frames.
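+ * NV12 stores a full-resolution Y plane followed by an interleaved,
+ * 2x2-subsampled UV plane, hence the (size + 1) / 2 rounding for the
+ * UV plane dimensions below.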
*/ +void ImageFrameGenerator::configure(const Size &size) +{ + /* Reset the source images to prevent multiple configuration calls */ + scaledFrameDatas_.clear(); + frameIndex_ = 0; + parameter_ = 0; + + for (unsigned int i = 0; i < imageFrameDatas_.size(); i++) { + /* Scale the imageFrameDatas_ to scaledY and scaledUV */ + unsigned int halfSizeWidth = (size.width + 1) / 2; + unsigned int halfSizeHeight = (size.height + 1) / 2; + std::unique_ptr<uint8_t[]> scaledY = + std::make_unique<uint8_t[]>(size.width * size.height); + std::unique_ptr<uint8_t[]> scaledUV = + std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2); + auto &src = imageFrameDatas_[i]; + + /* + * \todo Some platforms might enforce stride due to GPU. + * The width needs to be a multiple of the stride to work + * properly for now. + */ + libyuv::NV12Scale(src.Y.get(), src.size.width, + src.UV.get(), src.size.width, + src.size.width, src.size.height, + scaledY.get(), size.width, scaledUV.get(), size.width, + size.width, size.height, libyuv::FilterMode::kFilterBilinear); + + scaledFrameDatas_.emplace_back( + ImageFrameData{ std::move(scaledY), std::move(scaledUV), size }); + } +} + +int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buffer) +{ + ASSERT(!scaledFrameDatas_.empty()); + + MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write); + + const auto &planes = mappedFrameBuffer.planes(); + + /* Loop only around the number of images available */ + frameIndex_ %= imageFrameDatas_.size(); + + /* Write the scaledY and scaledUV to the mapped frame buffer */ + libyuv::NV12Copy(scaledFrameDatas_[frameIndex_].Y.get(), size.width, + scaledFrameDatas_[frameIndex_].UV.get(), size.width, planes[0].begin(), + size.width, planes[1].begin(), size.width, + size.width, size.height); + + /* Proceed to the next image every 4 frames */ + /* \todo Consider setting the frameRepeat in the config file */ + parameter_++; + if (parameter_ % frameRepeat == 0) + frameIndex_++; + + return 0; +} + +/* + * \var ImageFrameGenerator::imageFrameDatas_ + * \brief List of pointers to the not scaled image buffers + */ + +/* + * \var ImageFrameGenerator::scaledFrameDatas_ + * \brief List of pointers to the scaled image buffers + */ + +/* + * \var ImageFrameGenerator::imageFrames_ + * \brief Pointer to the imageFrames_ in VirtualCameraData + */ + +/* + * \var ImageFrameGenerator::parameter_ + * \brief Speed parameter. Change to the next image every parameter_ frames + */ + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.h b/src/libcamera/pipeline/virtual/image_frame_generator.h new file mode 100644 index 00000000..42a077ba --- /dev/null +++ b/src/libcamera/pipeline/virtual/image_frame_generator.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. 
+ * + * Derived class of FrameGenerator for generating frames from images + */ + +#pragma once + +#include <filesystem> +#include <memory> +#include <stdint.h> +#include <sys/types.h> +#include <vector> + +#include "frame_generator.h" + +namespace libcamera { + +/* Frame configuration provided by the config file */ +struct ImageFrames { + std::vector<std::filesystem::path> files; +}; + +class ImageFrameGenerator : public FrameGenerator +{ +public: + static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames); + +private: + static constexpr unsigned int frameRepeat = 4; + + struct ImageFrameData { + std::unique_ptr<uint8_t[]> Y; + std::unique_ptr<uint8_t[]> UV; + Size size; + }; + + void configure(const Size &size) override; + int generateFrame(const Size &size, const FrameBuffer *buffer) override; + + std::vector<ImageFrameData> imageFrameDatas_; + std::vector<ImageFrameData> scaledFrameDatas_; + ImageFrames *imageFrames_; + unsigned int frameIndex_; + unsigned int parameter_; +}; + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/meson.build b/src/libcamera/pipeline/virtual/meson.build new file mode 100644 index 00000000..c8434593 --- /dev/null +++ b/src/libcamera/pipeline/virtual/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: CC0-1.0 + +libcamera_internal_sources += files([ + 'config_parser.cpp', + 'image_frame_generator.cpp', + 'test_pattern_generator.cpp', + 'virtual.cpp', +]) + +libjpeg = dependency('libjpeg', required : true) + +libcamera_deps += [libyuv_dep] +libcamera_deps += [libjpeg] + +subdir('data') diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp new file mode 100644 index 00000000..745be83b --- /dev/null +++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. 
+ *
+ * Derived class of FrameGenerator for generating test patterns
+ */
+
+#include "test_pattern_generator.h"
+
+#include <string.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <libyuv/convert_from_argb.h>
+
+namespace {
+
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+	if (size.width < 2)
+		return;
+
+	const size_t stride = size.width * SampleSize;
+	uint8_t first[SampleSize];
+
+	for (size_t i = 0; i < size.height; i++, image += stride) {
+		memcpy(first, &image[0], SampleSize);
+		memmove(&image[0], &image[SampleSize], stride - SampleSize);
+		memcpy(&image[stride - SampleSize], first, SampleSize);
+	}
+}
+
+} /* namespace */
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+static const unsigned int kARGBSize = 4;
+
+int TestPatternGenerator::generateFrame(const Size &size,
+					const FrameBuffer *buffer)
+{
+	MappedFrameBuffer mappedFrameBuffer(buffer,
+					    MappedFrameBuffer::MapFlag::Write);
+
+	const auto &planes = mappedFrameBuffer.planes();
+
+	rotateLeft1Column<kARGBSize>(size, template_.get());
+
+	/* Convert the template_ to the frame buffer */
+	int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
+				     planes[0].begin(), size.width,
+				     planes[1].begin(), size.width,
+				     size.width, size.height);
+	if (ret != 0)
+		LOG(Virtual, Error) << "ARGBToNV12() failed with " << ret;
+
+	return ret;
+}
+
+void ColorBarsGenerator::configure(const Size &size)
+{
+	constexpr uint8_t kColorBar[8][3] = {
+		/*  R,    G,    B */
+		{ 0xff, 0xff, 0xff }, /* White */
+		{ 0xff, 0xff, 0x00 }, /* Yellow */
+		{ 0x00, 0xff, 0xff }, /* Cyan */
+		{ 0x00, 0xff, 0x00 }, /* Green */
+		{ 0xff, 0x00, 0xff }, /* Magenta */
+		{ 0xff, 0x00, 0x00 }, /* Red */
+		{ 0x00, 0x00, 0xff }, /* Blue */
+		{ 0x00, 0x00, 0x00 }, /* Black */
+	};
+
+	template_ = std::make_unique<uint8_t[]>(
+		size.width * size.height * kARGBSize);
+
+	unsigned int colorBarWidth = size.width / std::size(kColorBar);
+
+	uint8_t *buf = template_.get();
+	for (size_t h = 0; h < size.height; h++) {
+		for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+			unsigned int index = (w / colorBarWidth) % std::size(kColorBar);
+
+			*buf++ = kColorBar[index][2]; /* B */
+			*buf++ = kColorBar[index][1]; /* G */
+			*buf++ = kColorBar[index][0]; /* R */
+			*buf++ = 0x00; /* A */
+		}
+	}
+}
+
+void DiagonalLinesGenerator::configure(const Size &size)
+{
+	constexpr uint8_t kColorBar[2][3] = {
+		/*  R,    G,    B */
+		{ 0xff, 0xff, 0xff }, /* White */
+		{ 0x00, 0x00, 0x00 }, /* Black */
+	};
+
+	template_ = std::make_unique<uint8_t[]>(
+		size.width * size.height * kARGBSize);
+
+	unsigned int lineWidth = size.width / 10;
+
+	uint8_t *buf = template_.get();
+	for (size_t h = 0; h < size.height; h++) {
+		for (size_t w = 0; w < size.width; w++) {
+			/* Repeat the pattern when the width is exceeded */
+			int index = ((w + h) / lineWidth) % 2;
+
+			*buf++ = kColorBar[index][2]; /* B */
+			*buf++ = kColorBar[index][1]; /* G */
+			*buf++ = kColorBar[index][0]; /* R */
+			*buf++ = 0x00; /* A */
+		}
+	}
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h new file mode 100644 index 00000000..2a51bd31 --- /dev/null +++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h @@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ * + * Derived class of FrameGenerator for generating test patterns + */ + +#pragma once + +#include <memory> + +#include <libcamera/framebuffer.h> +#include <libcamera/geometry.h> + +#include "frame_generator.h" + +namespace libcamera { + +enum class TestPattern : char { + ColorBars = 0, + DiagonalLines = 1, +}; + +class TestPatternGenerator : public FrameGenerator +{ +public: + int generateFrame(const Size &size, const FrameBuffer *buffer) override; + +protected: + /* Buffer of test pattern template */ + std::unique_ptr<uint8_t[]> template_; +}; + +class ColorBarsGenerator : public TestPatternGenerator +{ +public: + /* Generate a template buffer of the color bar test pattern. */ + void configure(const Size &size) override; +}; + +class DiagonalLinesGenerator : public TestPatternGenerator +{ +public: + /* Generate a template buffer of the diagonal lines test pattern. */ + void configure(const Size &size) override; +}; + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp new file mode 100644 index 00000000..049ebcba --- /dev/null +++ b/src/libcamera/pipeline/virtual/virtual.cpp @@ -0,0 +1,419 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. + * + * Pipeline handler for virtual cameras + */ + +#include "virtual.h" + +#include <algorithm> +#include <array> +#include <chrono> +#include <errno.h> +#include <map> +#include <memory> +#include <ostream> +#include <set> +#include <stdint.h> +#include <string> +#include <time.h> +#include <utility> +#include <vector> + +#include <libcamera/base/flags.h> +#include <libcamera/base/log.h> + +#include <libcamera/control_ids.h> +#include <libcamera/controls.h> +#include <libcamera/formats.h> +#include <libcamera/pixel_format.h> +#include <libcamera/property_ids.h> +#include <libcamera/request.h> + +#include "libcamera/internal/camera.h" +#include "libcamera/internal/dma_buf_allocator.h" +#include "libcamera/internal/formats.h" +#include "libcamera/internal/framebuffer.h" +#include "libcamera/internal/pipeline_handler.h" +#include "libcamera/internal/yaml_parser.h" + +#include "pipeline/virtual/config_parser.h" + +namespace libcamera { + +LOG_DEFINE_CATEGORY(Virtual) + +namespace { + +uint64_t currentTimestamp() +{ + const auto now = std::chrono::steady_clock::now(); + auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>( + now.time_since_epoch()); + + return nsecs.count(); +} + +} /* namespace */ + +template<class... Ts> +struct overloaded : Ts... { + using Ts::operator()...; +}; +template<class... Ts> +overloaded(Ts...) 
-> overloaded<Ts...>; + +class VirtualCameraConfiguration : public CameraConfiguration +{ +public: + static constexpr unsigned int kBufferCount = 4; + + VirtualCameraConfiguration(VirtualCameraData *data); + + Status validate() override; + +private: + const VirtualCameraData *data_; +}; + +class PipelineHandlerVirtual : public PipelineHandler +{ +public: + PipelineHandlerVirtual(CameraManager *manager); + ~PipelineHandlerVirtual(); + + std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera, + Span<const StreamRole> roles) override; + int configure(Camera *camera, CameraConfiguration *config) override; + + int exportFrameBuffers(Camera *camera, Stream *stream, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) override; + + int start(Camera *camera, const ControlList *controls) override; + void stopDevice(Camera *camera) override; + + int queueRequestDevice(Camera *camera, Request *request) override; + + bool match(DeviceEnumerator *enumerator) override; + +private: + static bool created_; + + VirtualCameraData *cameraData(Camera *camera) + { + return static_cast<VirtualCameraData *>(camera->_d()); + } + + bool initFrameGenerator(Camera *camera); + + DmaBufAllocator dmaBufAllocator_; + + bool resetCreated_ = false; +}; + +VirtualCameraData::VirtualCameraData(PipelineHandler *pipe, + const std::vector<Resolution> &supportedResolutions) + : Camera::Private(pipe) +{ + config_.resolutions = supportedResolutions; + for (const auto &resolution : config_.resolutions) { + if (config_.minResolutionSize.isNull() || config_.minResolutionSize > resolution.size) + config_.minResolutionSize = resolution.size; + + config_.maxResolutionSize = std::max(config_.maxResolutionSize, resolution.size); + } + + properties_.set(properties::PixelArrayActiveAreas, + { Rectangle(config_.maxResolutionSize) }); + + /* \todo Support multiple streams and pass multi_stream_test */ + streamConfigs_.resize(kMaxStream); +} + +VirtualCameraConfiguration::VirtualCameraConfiguration(VirtualCameraData *data) + : CameraConfiguration(), data_(data) +{ +} + +CameraConfiguration::Status VirtualCameraConfiguration::validate() +{ + Status status = Valid; + + if (config_.empty()) { + LOG(Virtual, Error) << "Empty config"; + return Invalid; + } + + /* Only one stream is supported */ + if (config_.size() > VirtualCameraData::kMaxStream) { + config_.resize(VirtualCameraData::kMaxStream); + status = Adjusted; + } + + for (StreamConfiguration &cfg : config_) { + bool adjusted = false; + bool found = false; + for (const auto &resolution : data_->config_.resolutions) { + if (resolution.size.width == cfg.size.width && + resolution.size.height == cfg.size.height) { + found = true; + break; + } + } + + if (!found) { + /* + * \todo It's a pipeline's decision to choose a + * resolution when the exact one is not supported. + * Defining the default logic in PipelineHandler to + * find the closest resolution would be nice. 
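+ * Until such logic exists, the configuration is adjusted below to
+ * the maximum supported resolution.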
+ */ + cfg.size = data_->config_.maxResolutionSize; + status = Adjusted; + adjusted = true; + } + + if (cfg.pixelFormat != formats::NV12) { + cfg.pixelFormat = formats::NV12; + status = Adjusted; + adjusted = true; + } + + if (adjusted) + LOG(Virtual, Info) + << "Stream configuration adjusted to " << cfg.toString(); + + const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat); + cfg.stride = info.stride(cfg.size.width, 0, 1); + cfg.frameSize = info.frameSize(cfg.size, 1); + + cfg.bufferCount = VirtualCameraConfiguration::kBufferCount; + } + + return status; +} + +/* static */ +bool PipelineHandlerVirtual::created_ = false; + +PipelineHandlerVirtual::PipelineHandlerVirtual(CameraManager *manager) + : PipelineHandler(manager), + dmaBufAllocator_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap | + DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap | + DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf) +{ +} + +PipelineHandlerVirtual::~PipelineHandlerVirtual() +{ + if (resetCreated_) + created_ = false; +} + +std::unique_ptr<CameraConfiguration> +PipelineHandlerVirtual::generateConfiguration(Camera *camera, + Span<const StreamRole> roles) +{ + VirtualCameraData *data = cameraData(camera); + auto config = std::make_unique<VirtualCameraConfiguration>(data); + + if (roles.empty()) + return config; + + for (const StreamRole role : roles) { + switch (role) { + case StreamRole::StillCapture: + case StreamRole::VideoRecording: + case StreamRole::Viewfinder: + break; + + case StreamRole::Raw: + default: + LOG(Virtual, Error) + << "Requested stream role not supported: " << role; + return {}; + } + + std::map<PixelFormat, std::vector<SizeRange>> streamFormats; + PixelFormat pixelFormat = formats::NV12; + streamFormats[pixelFormat] = { { data->config_.minResolutionSize, + data->config_.maxResolutionSize } }; + StreamFormats formats(streamFormats); + StreamConfiguration cfg(formats); + cfg.pixelFormat = pixelFormat; + cfg.size = data->config_.maxResolutionSize; + cfg.bufferCount = VirtualCameraConfiguration::kBufferCount; + + config->addConfiguration(cfg); + } + + ASSERT(config->validate() != CameraConfiguration::Invalid); + + return config; +} + +int PipelineHandlerVirtual::configure(Camera *camera, + CameraConfiguration *config) +{ + VirtualCameraData *data = cameraData(camera); + for (auto [i, c] : utils::enumerate(*config)) { + c.setStream(&data->streamConfigs_[i].stream); + /* Start reading the images/generating test patterns */ + data->streamConfigs_[i].frameGenerator->configure(c.size); + } + + return 0; +} + +int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera, + Stream *stream, + std::vector<std::unique_ptr<FrameBuffer>> *buffers) +{ + if (!dmaBufAllocator_.isValid()) + return -ENOBUFS; + + const StreamConfiguration &config = stream->configuration(); + const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat); + + std::vector<unsigned int> planeSizes; + for (size_t i = 0; i < info.numPlanes(); ++i) + planeSizes.push_back(info.planeSize(config.size, i)); + + return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers); +} + +int PipelineHandlerVirtual::start([[maybe_unused]] Camera *camera, + [[maybe_unused]] const ControlList *controls) +{ + VirtualCameraData *data = cameraData(camera); + + for (auto &s : data->streamConfigs_) + s.seq = 0; + + return 0; +} + +void PipelineHandlerVirtual::stopDevice([[maybe_unused]] Camera *camera) +{ +} + +int PipelineHandlerVirtual::queueRequestDevice([[maybe_unused]] Camera *camera, + Request 
*request) +{ + VirtualCameraData *data = cameraData(camera); + const auto timestamp = currentTimestamp(); + + for (auto const &[stream, buffer] : request->buffers()) { + bool found = false; + /* map buffer and fill test patterns */ + for (auto &streamConfig : data->streamConfigs_) { + if (stream == &streamConfig.stream) { + FrameMetadata &fmd = buffer->_d()->metadata(); + + fmd.status = FrameMetadata::Status::FrameSuccess; + fmd.sequence = streamConfig.seq++; + fmd.timestamp = timestamp; + + for (const auto [i, p] : utils::enumerate(buffer->planes())) + fmd.planes()[i].bytesused = p.length; + + found = true; + + if (streamConfig.frameGenerator->generateFrame( + stream->configuration().size, buffer)) + fmd.status = FrameMetadata::Status::FrameError; + + completeBuffer(request, buffer); + break; + } + } + ASSERT(found); + } + + request->metadata().set(controls::SensorTimestamp, timestamp); + completeRequest(request); + + return 0; +} + +bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator) +{ + if (created_) + return false; + + created_ = true; + + std::string configFile = configurationFile("virtual", "virtual.yaml", true); + if (configFile.empty()) { + LOG(Virtual, Debug) + << "Configuration file not found, skipping virtual cameras"; + return false; + } + + File file(configFile); + if (!file.open(File::OpenModeFlag::ReadOnly)) { + LOG(Virtual, Error) + << "Failed to open config file `" << file.fileName() << "`"; + return false; + } + + ConfigParser parser; + auto configData = parser.parseConfigFile(file, this); + if (configData.size() == 0) { + LOG(Virtual, Error) << "Failed to parse any cameras from the config file: " + << file.fileName(); + return false; + } + + /* Configure and register cameras with configData */ + for (auto &data : configData) { + std::set<Stream *> streams; + for (auto &streamConfig : data->streamConfigs_) + streams.insert(&streamConfig.stream); + std::string id = data->config_.id; + std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams); + + if (!initFrameGenerator(camera.get())) { + LOG(Virtual, Error) << "Failed to initialize frame " + << "generator for camera: " << id; + continue; + } + + registerCamera(std::move(camera)); + } + + resetCreated_ = true; + + return true; +} + +bool PipelineHandlerVirtual::initFrameGenerator(Camera *camera) +{ + auto data = cameraData(camera); + auto &frame = data->config_.frame; + std::visit(overloaded{ + [&](TestPattern &testPattern) { + for (auto &streamConfig : data->streamConfigs_) { + if (testPattern == TestPattern::DiagonalLines) + streamConfig.frameGenerator = std::make_unique<DiagonalLinesGenerator>(); + else + streamConfig.frameGenerator = std::make_unique<ColorBarsGenerator>(); + } + }, + [&](ImageFrames &imageFrames) { + for (auto &streamConfig : data->streamConfigs_) + streamConfig.frameGenerator = ImageFrameGenerator::create(imageFrames); + } }, + frame); + + for (auto &streamConfig : data->streamConfigs_) + if (!streamConfig.frameGenerator) + return false; + + return true; +} + +REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual") + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline/virtual/virtual.h b/src/libcamera/pipeline/virtual/virtual.h new file mode 100644 index 00000000..683cb82b --- /dev/null +++ b/src/libcamera/pipeline/virtual/virtual.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Google Inc. 
+ * + * Pipeline handler for virtual cameras + */ + +#pragma once + +#include <string> +#include <variant> +#include <vector> + +#include <libcamera/geometry.h> +#include <libcamera/stream.h> + +#include "libcamera/internal/camera.h" +#include "libcamera/internal/pipeline_handler.h" + +#include "frame_generator.h" +#include "image_frame_generator.h" +#include "test_pattern_generator.h" + +namespace libcamera { + +using VirtualFrame = std::variant<TestPattern, ImageFrames>; + +class VirtualCameraData : public Camera::Private +{ +public: + const static unsigned int kMaxStream = 3; + + struct Resolution { + Size size; + std::vector<int64_t> frameRates; + }; + struct StreamConfig { + Stream stream; + std::unique_ptr<FrameGenerator> frameGenerator; + unsigned int seq = 0; + }; + /* The config file is parsed to the Configuration struct */ + struct Configuration { + std::string id; + std::vector<Resolution> resolutions; + VirtualFrame frame; + + Size maxResolutionSize; + Size minResolutionSize; + }; + + VirtualCameraData(PipelineHandler *pipe, + const std::vector<Resolution> &supportedResolutions); + + ~VirtualCameraData() = default; + + Configuration config_; + + std::vector<StreamConfig> streamConfigs_; +}; + +} /* namespace libcamera */ diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp index 5ea2ca78..d84dff3c 100644 --- a/src/libcamera/pipeline_handler.cpp +++ b/src/libcamera/pipeline_handler.cpp @@ -22,7 +22,6 @@ #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_manager.h" #include "libcamera/internal/device_enumerator.h" -#include "libcamera/internal/framebuffer.h" #include "libcamera/internal/media_device.h" #include "libcamera/internal/request.h" #include "libcamera/internal/tracepoints.h" @@ -75,7 +74,7 @@ PipelineHandler::PipelineHandler(CameraManager *manager) PipelineHandler::~PipelineHandler() { - for (std::shared_ptr<MediaDevice> media : mediaDevices_) + for (std::shared_ptr<MediaDevice> &media : mediaDevices_) media->release(); } @@ -157,26 +156,28 @@ MediaDevice *PipelineHandler::acquireMediaDevice(DeviceEnumerator *enumerator, * Pipeline handlers shall not call this function directly as the Camera class * handles access internally. * - * \context This function is \threadsafe. + * \context This function is called from the CameraManager thread. * * \return True if the pipeline handler was acquired, false if another process * has already acquired it * \sa release() */ -bool PipelineHandler::acquire() +bool PipelineHandler::acquire(Camera *camera) { - MutexLocker locker(lock_); - - if (useCount_) { - ++useCount_; - return true; + if (useCount_ == 0) { + for (std::shared_ptr<MediaDevice> &media : mediaDevices_) { + if (!media->lock()) { + unlockMediaDevices(); + return false; + } + } } - for (std::shared_ptr<MediaDevice> &media : mediaDevices_) { - if (!media->lock()) { + if (!acquireDevice(camera)) { + if (useCount_ == 0) unlockMediaDevices(); - return false; - } + + return false; } ++useCount_; @@ -195,29 +196,60 @@ bool PipelineHandler::acquire() * Pipeline handlers shall not call this function directly as the Camera class * handles access internally. * - * \context This function is \threadsafe. + * \context This function is called from the CameraManager thread. 
* * \sa acquire() */ void PipelineHandler::release(Camera *camera) { - MutexLocker locker(lock_); - ASSERT(useCount_); - unlockMediaDevices(); - releaseDevice(camera); + if (useCount_ == 1) + unlockMediaDevices(); + --useCount_; } /** + * \brief Acquire resources associated with this camera + * \param[in] camera The camera for which to acquire resources + * + * Pipeline handlers may override this function to acquire resources, such as + * opening devices and allocating buffers, when a camera is acquired. + * + * This is used by the uvcvideo pipeline handler to delay opening /dev/video# + * until the camera is acquired to avoid excess power consumption. The delayed + * opening of /dev/video# is a special case because the kernel uvcvideo driver + * powers on the USB device as soon as /dev/video# is opened. This behavior + * should *not* be copied by other pipeline handlers. + * + * \context This function is called from the CameraManager thread. + * + * \return True on success, false on failure + * \sa releaseDevice() + */ +bool PipelineHandler::acquireDevice([[maybe_unused]] Camera *camera) +{ + return true; +} + +/** * \brief Release resources associated with this camera * \param[in] camera The camera for which to release resources * * Pipeline handlers may override this in order to perform cleanup operations * when a camera is released, such as freeing memory. + * + * This is called once for every camera that is released. If there are resources + * shared by multiple cameras then the pipeline handler must take care not to + * release them until releaseDevice() has been called for all previously + * acquired cameras. + * + * \context This function is called from the CameraManager thread. + * + * \sa acquireDevice() */ void PipelineHandler::releaseDevice([[maybe_unused]] Camera *camera) { @@ -335,9 +367,7 @@ void PipelineHandler::stop(Camera *camera) while (!waitingRequests_.empty()) { Request *request = waitingRequests_.front(); waitingRequests_.pop(); - - request->_d()->cancel(); - completeRequest(request); + cancelRequest(request); } /* Make sure no requests are pending. */ @@ -438,10 +468,8 @@ void PipelineHandler::doQueueRequest(Request *request) } int ret = queueRequestDevice(camera, request); - if (ret) { - request->_d()->cancel(); - completeRequest(request); - } + if (ret) + cancelRequest(request); } /** @@ -537,9 +565,23 @@ void PipelineHandler::completeRequest(Request *request) } /** + * \brief Cancel a request and signal its completion + * \param[in] request The request to cancel + * + * This function cancels and completes the request. The same rules as for + * completeRequest() apply. + */ +void PipelineHandler::cancelRequest(Request *request) +{ + request->_d()->cancel(); + completeRequest(request); +} + +/** * \brief Retrieve the absolute path to a platform configuration file * \param[in] subdir The pipeline handler specific subdirectory name * \param[in] name The configuration file name + * \param[in] silent Disable error messages * * This function locates a named platform configuration file and returns * its absolute path to the pipeline handler.
It searches the following @@ -555,7 +597,8 @@ void PipelineHandler::completeRequest(Request *request) * string if no configuration file can be found */ std::string PipelineHandler::configurationFile(const std::string &subdir, - const std::string &name) const + const std::string &name, + bool silent) const { std::string confPath; struct stat statbuf; @@ -585,9 +628,11 @@ std::string PipelineHandler::configurationFile(const std::string &subdir, if (ret == 0 && (statbuf.st_mode & S_IFMT) == S_IFREG) return confPath; - LOG(Pipeline, Error) - << "Configuration file '" << confPath - << "' not found for pipeline handler '" << PipelineHandler::name() << "'"; + if (!silent) + LOG(Pipeline, Error) + << "Configuration file '" << confPath + << "' not found for pipeline handler '" + << PipelineHandler::name() << "'"; return std::string(); } @@ -605,9 +650,14 @@ void PipelineHandler::registerCamera(std::shared_ptr<Camera> camera) { cameras_.push_back(camera); - if (mediaDevices_.empty()) - LOG(Pipeline, Fatal) - << "Registering camera with no media devices!"; + if (mediaDevices_.empty()) { + /* + * For virtual devices with no MediaDevice, there are no system + * devices to register. + */ + manager_->_d()->addCamera(std::move(camera)); + return; + } /* * Walk the entity list and map the devnums of all capture video nodes @@ -720,6 +770,13 @@ void PipelineHandler::disconnect() */ /** + * \fn PipelineHandler::cameraManager() const + * \brief Retrieve the CameraManager that this pipeline handler belongs to + * \context This function is \threadsafe. + * \return The CameraManager for this pipeline handler + */ + +/** * \class PipelineHandlerFactoryBase * \brief Base class for pipeline handler factories * diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp index 86d27b2d..bc9833f4 100644 --- a/src/libcamera/process.cpp +++ b/src/libcamera/process.cpp @@ -10,7 +10,6 @@ #include <algorithm> #include <dirent.h> #include <fcntl.h> -#include <iostream> #include <list> #include <signal.h> #include <string.h> @@ -189,7 +188,6 @@ const struct sigaction &ProcessManager::oldsa() const return oldsa_; } - /** * \class Process * \brief Process object @@ -271,8 +269,8 @@ int Process::start(const std::string &path, unsigned int len = args.size(); argv[0] = path.c_str(); for (unsigned int i = 0; i < len; i++) - argv[i+1] = args[i].c_str(); - argv[len+1] = nullptr; + argv[i + 1] = args[i].c_str(); + argv[len + 1] = nullptr; execv(path.c_str(), (char **)argv); diff --git a/src/libcamera/property_ids.cpp.in b/src/libcamera/property_ids.cpp.in deleted file mode 100644 index 8b274c38..00000000 --- a/src/libcamera/property_ids.cpp.in +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2019, Google Inc. - * - * property_ids.cpp : Property ID list - * - * This file is auto-generated. Do not edit. - */ - -#include <libcamera/property_ids.h> - -/** - * \file property_ids.h - * \brief Camera property identifiers - */ - -namespace libcamera { - -/** - * \brief Namespace for libcamera properties - */ -namespace properties { - -${controls_doc} - -${vendor_controls_doc} - -#ifndef __DOXYGEN__ -/* - * Keep the properties definitions hidden from doxygen as it incorrectly parses - * them as functions. 
- */ -${controls_def} - -${vendor_controls_def} - -#endif - -/** - * \brief List of all supported libcamera properties - */ -extern const ControlIdMap properties { -${controls_map} -}; - -} /* namespace properties */ - -} /* namespace libcamera */ diff --git a/src/libcamera/proxy/meson.build b/src/libcamera/proxy/meson.build index 00ae5a8f..8bd1b135 100644 --- a/src/libcamera/proxy/meson.build +++ b/src/libcamera/proxy/meson.build @@ -13,7 +13,8 @@ foreach mojom : ipa_mojoms '--libcamera_generate_proxy_cpp', '--libcamera_output_path=@OUTPUT@', './' + '@INPUT@' - ]) + ], + env : py_build_env) - libcamera_sources += proxy + libcamera_internal_sources += proxy endforeach diff --git a/src/libcamera/proxy/worker/meson.build b/src/libcamera/proxy/worker/meson.build index aa4d9cd7..8c54a2e2 100644 --- a/src/libcamera/proxy/worker/meson.build +++ b/src/libcamera/proxy/worker/meson.build @@ -15,10 +15,10 @@ foreach mojom : ipa_mojoms '--libcamera_generate_proxy_worker', '--libcamera_output_path=@OUTPUT@', './' + '@INPUT@' - ]) + ], + env : py_build_env) - proxy = executable(mojom['name'] + '_ipa_proxy', - [worker, libcamera_generated_ipa_headers], + proxy = executable(mojom['name'] + '_ipa_proxy', worker, install : true, install_dir : proxy_install_dir, dependencies : libcamera_private) diff --git a/src/libcamera/request.cpp b/src/libcamera/request.cpp index cfb451e9..b206ac13 100644 --- a/src/libcamera/request.cpp +++ b/src/libcamera/request.cpp @@ -28,10 +28,17 @@ * \brief Describes a frame capture request to be processed by a camera */ +/** + * \internal + * \file libcamera/internal/request.h + * \brief Internal support for request handling + */ + namespace libcamera { LOG_DEFINE_CATEGORY(Request) +#ifndef __DOXYGEN_PUBLIC__ /** * \class Request::Private * \brief Request private data @@ -300,6 +307,7 @@ void Request::Private::timeout() emitPrepareCompleted(); } +#endif /* __DOXYGEN_PUBLIC__ */ /** * \enum Request::Status @@ -467,6 +475,15 @@ int Request::addBuffer(const Stream *stream, FrameBuffer *buffer, return -EINVAL; } + /* + * Make sure the fence has been extracted from the buffer + * to avoid waiting on a stale fence. + */ + if (buffer->_d()->fence()) { + LOG(Request, Error) << "Can't add buffer that still references a fence"; + return -EEXIST; + } + auto it = bufferMap_.find(stream); if (it != bufferMap_.end()) { LOG(Request, Error) << "FrameBuffer already set for stream"; @@ -477,15 +494,6 @@ int Request::addBuffer(const Stream *stream, FrameBuffer *buffer, _d()->pending_.insert(buffer); bufferMap_[stream] = buffer; - /* - * Make sure the fence has been extracted from the buffer - * to avoid waiting on a stale fence. 
- */ - if (buffer->_d()->fence()) { - LOG(Request, Error) << "Can't add buffer that still references a fence"; - return -EEXIST; - } - if (fence && fence->isValid()) buffer->_d()->setFence(std::move(fence)); diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp index c6d7f801..d19b5e2e 100644 --- a/src/libcamera/sensor/camera_sensor.cpp +++ b/src/libcamera/sensor/camera_sensor.cpp @@ -6,26 +6,13 @@ */ #include "libcamera/internal/camera_sensor.h" -#include "libcamera/internal/media_device.h" -#include <algorithm> -#include <float.h> -#include <iomanip> -#include <limits.h> -#include <math.h> -#include <string.h> +#include <memory> +#include <vector> -#include <libcamera/camera.h> -#include <libcamera/orientation.h> -#include <libcamera/property_ids.h> +#include <libcamera/base/log.h> -#include <libcamera/base/utils.h> - -#include "libcamera/internal/bayer_format.h" -#include "libcamera/internal/camera_lens.h" -#include "libcamera/internal/camera_sensor_properties.h" -#include "libcamera/internal/formats.h" -#include "libcamera/internal/sysfs.h" +#include "libcamera/internal/media_object.h" /** * \file camera_sensor.h @@ -38,537 +25,16 @@ LOG_DEFINE_CATEGORY(CameraSensor) /** * \class CameraSensor - * \brief A camera sensor based on V4L2 subdevices + * \brief An abstract camera sensor * * The CameraSensor class eases handling of sensors for pipeline handlers by - * hiding the details of the V4L2 subdevice kernel API and caching sensor - * information. - * - * The implementation is currently limited to sensors that expose a single V4L2 - * subdevice with a single pad. It will be extended to support more complex - * devices as the needs arise. + * hiding the details of the kernel API and caching sensor information. */ /** - * \brief Construct a CameraSensor - * \param[in] entity The media entity backing the camera sensor - * - * Once constructed the instance must be initialized with init(). - */ -CameraSensor::CameraSensor(const MediaEntity *entity) - : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr), - bayerFormat_(nullptr), supportFlips_(false), - flipsAlterBayerOrder_(false), properties_(properties::properties) -{ -} - -/** * \brief Destroy a CameraSensor */ -CameraSensor::~CameraSensor() -{ -} - -/** - * \brief Initialize the camera sensor instance - * - * This function performs the initialisation steps of the CameraSensor that may - * fail. It shall be called once and only once after constructing the instance. - * - * \return 0 on success or a negative error code otherwise - */ -int CameraSensor::init() -{ - for (const MediaPad *pad : entity_->pads()) { - if (pad->flags() & MEDIA_PAD_FL_SOURCE) { - pad_ = pad->index(); - break; - } - } - - if (pad_ == UINT_MAX) { - LOG(CameraSensor, Error) - << "Sensors with more than one pad are not supported"; - return -EINVAL; - } - - switch (entity_->function()) { - case MEDIA_ENT_F_CAM_SENSOR: - case MEDIA_ENT_F_PROC_VIDEO_ISP: - break; - - default: - LOG(CameraSensor, Error) - << "Invalid sensor function " - << utils::hex(entity_->function()); - return -EINVAL; - } - - /* Create and open the subdev. */ - subdev_ = std::make_unique<V4L2Subdevice>(entity_); - int ret = subdev_->open(); - if (ret < 0) - return ret; - - /* - * Clear any flips to be sure we get the "native" Bayer order. This is - * harmless for sensors where the flips don't affect the Bayer order.
- */ - ControlList ctrls(subdev_->controls()); - if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end()) - ctrls.set(V4L2_CID_HFLIP, 0); - if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end()) - ctrls.set(V4L2_CID_VFLIP, 0); - subdev_->setControls(&ctrls); - - /* Enumerate, sort and cache media bus codes and sizes. */ - formats_ = subdev_->formats(pad_); - if (formats_.empty()) { - LOG(CameraSensor, Error) << "No image format found"; - return -EINVAL; - } - - mbusCodes_ = utils::map_keys(formats_); - std::sort(mbusCodes_.begin(), mbusCodes_.end()); - - for (const auto &format : formats_) { - const std::vector<SizeRange> &ranges = format.second; - std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_), - [](const SizeRange &range) { return range.max; }); - } - - std::sort(sizes_.begin(), sizes_.end()); - - /* Remove duplicates. */ - auto last = std::unique(sizes_.begin(), sizes_.end()); - sizes_.erase(last, sizes_.end()); - - /* - * VIMC is a bit special, as it does not yet support all the mandatory - * requirements regular sensors have to respect. - * - * Do not validate the driver if it's VIMC and initialize the sensor - * properties with static information. - * - * \todo Remove the special case once the VIMC driver has been - * updated in all test platforms. - */ - if (entity_->device()->driver() == "vimc") { - initVimcDefaultProperties(); - - ret = initProperties(); - if (ret) - return ret; - - return discoverAncillaryDevices(); - } - - /* Get the color filter array pattern (only for RAW sensors). */ - for (unsigned int mbusCode : mbusCodes_) { - const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode); - if (bayerFormat.isValid()) { - bayerFormat_ = &bayerFormat; - break; - } - } - - ret = validateSensorDriver(); - if (ret) - return ret; - - ret = initProperties(); - if (ret) - return ret; - - ret = discoverAncillaryDevices(); - if (ret) - return ret; - - /* - * Set HBLANK to the minimum to start with a well-defined line length, - * allowing IPA modules that do not modify HBLANK to use the sensor - * minimum line length in their calculations. - */ - const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK); - if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { - ControlList ctrl(subdev_->controls()); - - ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum)); - ret = subdev_->setControls(&ctrl); - if (ret) - return ret; - } - - return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff); -} - -int CameraSensor::generateId() -{ - const std::string devPath = subdev_->devicePath(); - - /* Try to get ID from firmware description. */ - id_ = sysfs::firmwareNodePath(devPath); - if (!id_.empty()) - return 0; - - /* - * Virtual sensors not described in firmware - * - * Verify it's a platform device and construct ID from the device path - * and model of sensor. - */ - if (devPath.find("/sys/devices/platform/", 0) == 0) { - id_ = devPath.substr(strlen("/sys/devices/")) + " " + model(); - return 0; - } - - LOG(CameraSensor, Error) << "Can't generate sensor ID"; - return -EINVAL; -} - -int CameraSensor::validateSensorDriver() -{ - int err = 0; - - /* - * Optional controls are used to register optional sensor properties. If - * not present, some values will be defaulted. 
- */ - static constexpr uint32_t optionalControls[] = { - V4L2_CID_CAMERA_SENSOR_ROTATION, - }; - - const ControlIdMap &controls = subdev_->controls().idmap(); - for (uint32_t ctrl : optionalControls) { - if (!controls.count(ctrl)) - LOG(CameraSensor, Debug) - << "Optional V4L2 control " << utils::hex(ctrl) - << " not supported"; - } - - /* - * Recommended controls are similar to optional controls, but will - * become mandatory in the near future. Be loud if they're missing. - */ - static constexpr uint32_t recommendedControls[] = { - V4L2_CID_CAMERA_ORIENTATION, - }; - - for (uint32_t ctrl : recommendedControls) { - if (!controls.count(ctrl)) { - LOG(CameraSensor, Warning) - << "Recommended V4L2 control " << utils::hex(ctrl) - << " not supported"; - err = -EINVAL; - } - } - - /* - * Verify if sensor supports horizontal/vertical flips - * - * \todo Handle horizontal and vertical flips independently. - */ - const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP); - const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP); - if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) && - vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { - supportFlips_ = true; - - if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT || - vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT) - flipsAlterBayerOrder_ = true; - } - - if (!supportFlips_) - LOG(CameraSensor, Debug) - << "Camera sensor does not support horizontal/vertical flip"; - - /* - * Make sure the required selection targets are supported. - * - * Failures in reading any of the targets are not deemed to be fatal, - * but some properties and features, like constructing a - * IPACameraSensorInfo for the IPA module, won't be supported. - * - * \todo Make support for selection targets mandatory as soon as all - * test platforms have been updated. - */ - Rectangle rect; - int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect); - if (ret) { - /* - * Default the pixel array size to the largest size supported - * by the sensor. The sizes_ vector is sorted in ascending - * order, the largest size is thus the last element. - */ - pixelArraySize_ = sizes_.back(); - - LOG(CameraSensor, Warning) - << "The PixelArraySize property has been defaulted to " - << pixelArraySize_; - err = -EINVAL; - } else { - pixelArraySize_ = rect.size(); - } - - ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_); - if (ret) { - activeArea_ = Rectangle(pixelArraySize_); - LOG(CameraSensor, Warning) - << "The PixelArrayActiveAreas property has been defaulted to " - << activeArea_; - err = -EINVAL; - } - - ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect); - if (ret) { - LOG(CameraSensor, Warning) - << "Failed to retrieve the sensor crop rectangle"; - err = -EINVAL; - } - - if (err) { - LOG(CameraSensor, Warning) - << "The sensor kernel driver needs to be fixed"; - LOG(CameraSensor, Warning) - << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information"; - } - - if (!bayerFormat_) - return 0; - - /* - * For raw sensors, make sure the sensor driver supports the controls - * required by the CameraSensor class. 
- */ - static constexpr uint32_t mandatoryControls[] = { - V4L2_CID_ANALOGUE_GAIN, - V4L2_CID_EXPOSURE, - V4L2_CID_HBLANK, - V4L2_CID_PIXEL_RATE, - V4L2_CID_VBLANK, - }; - - err = 0; - for (uint32_t ctrl : mandatoryControls) { - if (!controls.count(ctrl)) { - LOG(CameraSensor, Error) - << "Mandatory V4L2 control " << utils::hex(ctrl) - << " not available"; - err = -EINVAL; - } - } - - if (err) { - LOG(CameraSensor, Error) - << "The sensor kernel driver needs to be fixed"; - LOG(CameraSensor, Error) - << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information"; - return err; - } - - return 0; -} - -/* - * \brief Initialize properties that cannot be intialized by the - * regular initProperties() function for VIMC - */ -void CameraSensor::initVimcDefaultProperties() -{ - /* Use the largest supported size. */ - pixelArraySize_ = sizes_.back(); - activeArea_ = Rectangle(pixelArraySize_); -} - -void CameraSensor::initStaticProperties() -{ - staticProps_ = CameraSensorProperties::get(model_); - if (!staticProps_) - return; - - /* Register the properties retrieved from the sensor database. */ - properties_.set(properties::UnitCellSize, staticProps_->unitCellSize); - - initTestPatternModes(); -} - -void CameraSensor::initTestPatternModes() -{ - const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN); - if (v4l2TestPattern == controls().end()) { - LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported"; - return; - } - - const auto &testPatternModes = staticProps_->testPatternModes; - if (testPatternModes.empty()) { - /* - * The camera sensor supports test patterns but we don't know - * how to map them so this should be fixed. - */ - LOG(CameraSensor, Debug) << "No static test pattern map for \'" - << model() << "\'"; - return; - } - - /* - * Create a map that associates the V4L2 control index to the test - * pattern mode by reversing the testPatternModes map provided by the - * camera sensor properties. This makes it easier to verify if the - * control index is supported in the below for loop that creates the - * list of supported test patterns. - */ - std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode; - for (const auto &it : testPatternModes) - indexToTestPatternMode[it.second] = it.first; - - for (const ControlValue &value : v4l2TestPattern->second.values()) { - const int32_t index = value.get<int32_t>(); - - const auto it = indexToTestPatternMode.find(index); - if (it == indexToTestPatternMode.end()) { - LOG(CameraSensor, Debug) - << "Test pattern mode " << index << " ignored"; - continue; - } - - testPatternModes_.push_back(it->second); - } -} - -int CameraSensor::initProperties() -{ - model_ = subdev_->model(); - properties_.set(properties::Model, utils::toAscii(model_)); - - /* Generate a unique ID for the sensor. */ - int ret = generateId(); - if (ret) - return ret; - - /* Initialize the static properties from the sensor database. */ - initStaticProperties(); - - /* Retrieve and register properties from the kernel interface. 
*/ - const ControlInfoMap &controls = subdev_->controls(); - - const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION); - if (orientation != controls.end()) { - int32_t v4l2Orientation = orientation->second.def().get<int32_t>(); - int32_t propertyValue; - - switch (v4l2Orientation) { - default: - LOG(CameraSensor, Warning) - << "Unsupported camera location " - << v4l2Orientation << ", setting to External"; - [[fallthrough]]; - case V4L2_CAMERA_ORIENTATION_EXTERNAL: - propertyValue = properties::CameraLocationExternal; - break; - case V4L2_CAMERA_ORIENTATION_FRONT: - propertyValue = properties::CameraLocationFront; - break; - case V4L2_CAMERA_ORIENTATION_BACK: - propertyValue = properties::CameraLocationBack; - break; - } - properties_.set(properties::Location, propertyValue); - } else { - LOG(CameraSensor, Warning) << "Failed to retrieve the camera location"; - } - - const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION); - if (rotationControl != controls.end()) { - int32_t propertyValue = rotationControl->second.def().get<int32_t>(); - - /* - * Cache the Transform associated with the camera mounting - * rotation for later use in computeTransform(). - */ - bool success; - mountingOrientation_ = orientationFromRotation(propertyValue, &success); - if (!success) { - LOG(CameraSensor, Warning) - << "Invalid rotation of " << propertyValue - << " degrees - ignoring"; - mountingOrientation_ = Orientation::Rotate0; - } - - properties_.set(properties::Rotation, propertyValue); - } else { - LOG(CameraSensor, Warning) - << "Rotation control not available, default to 0 degrees"; - properties_.set(properties::Rotation, 0); - mountingOrientation_ = Orientation::Rotate0; - } - - properties_.set(properties::PixelArraySize, pixelArraySize_); - properties_.set(properties::PixelArrayActiveAreas, { activeArea_ }); - - /* Color filter array pattern, register only for RAW sensors. */ - if (bayerFormat_) { - int32_t cfa; - switch (bayerFormat_->order) { - case BayerFormat::BGGR: - cfa = properties::draft::BGGR; - break; - case BayerFormat::GBRG: - cfa = properties::draft::GBRG; - break; - case BayerFormat::GRBG: - cfa = properties::draft::GRBG; - break; - case BayerFormat::RGGB: - cfa = properties::draft::RGGB; - break; - case BayerFormat::MONO: - cfa = properties::draft::MONO; - break; - } - - properties_.set(properties::draft::ColorFilterArrangement, cfa); - } - - return 0; -} - -/** - * \brief Check for and initialise any ancillary devices - * - * Sensors sometimes have ancillary devices such as a Lens or Flash that could - * be linked to their MediaEntity by the kernel. Search for and handle any - * such device. - * - * \todo Handle MEDIA_ENT_F_FLASH too. 
- */ -int CameraSensor::discoverAncillaryDevices() -{ - int ret; - - for (MediaEntity *ancillary : entity_->ancillaryEntities()) { - switch (ancillary->function()) { - case MEDIA_ENT_F_LENS: - focusLens_ = std::make_unique<CameraLens>(ancillary); - ret = focusLens_->init(); - if (ret) { - LOG(CameraSensor, Error) - << "Lens initialisation failed, lens disabled"; - focusLens_.reset(); - } - break; - - default: - LOG(CameraSensor, Warning) - << "Unsupported ancillary entity function " - << ancillary->function(); - break; - } - } - - return 0; -} +CameraSensor::~CameraSensor() = default; /** * \fn CameraSensor::model() @@ -623,29 +89,15 @@ int CameraSensor::discoverAncillaryDevices() */ /** + * \fn CameraSensor::sizes() * \brief Retrieve the supported frame sizes for a media bus code * \param[in] mbusCode The media bus code for which sizes are requested * * \return The supported frame sizes for \a mbusCode sorted in increasing order */ -std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const -{ - std::vector<Size> sizes; - - const auto &format = formats_.find(mbusCode); - if (format == formats_.end()) - return sizes; - - const std::vector<SizeRange> &ranges = format->second; - std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes), - [](const SizeRange &range) { return range.max; }); - - std::sort(sizes.begin(), sizes.end()); - - return sizes; -} /** + * \fn CameraSensor::resolution() * \brief Retrieve the camera sensor resolution * * The camera sensor resolution is the active pixel area size, clamped to the @@ -658,15 +110,13 @@ std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const * * \return The camera sensor resolution in pixels */ -Size CameraSensor::resolution() const -{ - return std::min(sizes_.back(), activeArea_.size()); -} /** + * \fn CameraSensor::getFormat() * \brief Retrieve the best sensor format for a desired output * \param[in] mbusCodes The list of acceptable media bus codes * \param[in] size The desired size + * \param[in] maxSize The maximum size * * Media bus codes are selected from \a mbusCodes, which lists all acceptable * codes in decreasing order of preference. Media bus codes supported by the @@ -685,6 +135,8 @@ Size CameraSensor::resolution() const * bandwidth. * - The desired \a size shall be supported by one of the media bus code listed * in \a mbusCodes. + * - The desired \a size shall fit into the maximum size \a maxSize if it is not + * null. * * When multiple media bus codes can produce the same size, the code at the * lowest position in \a mbusCodes is selected. @@ -699,59 +151,9 @@ Size CameraSensor::resolution() const * \return The best sensor output format matching the desired media bus codes * and size on success, or an empty format otherwise. 
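 *
 * As an editorial sketch (hypothetical caller code, not part of this patch),
 * a pipeline handler would typically select and then apply a format as
 * follows:
 *
 * \code{.cpp}
 * V4L2SubdeviceFormat format = sensor->getFormat({ MEDIA_BUS_FMT_SBGGR10_1X10 },
 *                                                Size(1920, 1080), Size());
 * if (format.code)
 *         sensor->setFormat(&format);
 * \endcode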
*/ -V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes, - const Size &size) const -{ - unsigned int desiredArea = size.width * size.height; - unsigned int bestArea = UINT_MAX; - float desiredRatio = static_cast<float>(size.width) / size.height; - float bestRatio = FLT_MAX; - const Size *bestSize = nullptr; - uint32_t bestCode = 0; - - for (unsigned int code : mbusCodes) { - const auto formats = formats_.find(code); - if (formats == formats_.end()) - continue; - - for (const SizeRange &range : formats->second) { - const Size &sz = range.max; - - if (sz.width < size.width || sz.height < size.height) - continue; - - float ratio = static_cast<float>(sz.width) / sz.height; - float ratioDiff = fabsf(ratio - desiredRatio); - unsigned int area = sz.width * sz.height; - unsigned int areaDiff = area - desiredArea; - - if (ratioDiff > bestRatio) - continue; - - if (ratioDiff < bestRatio || areaDiff < bestArea) { - bestRatio = ratioDiff; - bestArea = areaDiff; - bestSize = &sz; - bestCode = code; - } - } - } - - if (!bestSize) { - LOG(CameraSensor, Debug) << "No supported format or size found"; - return {}; - } - - V4L2SubdeviceFormat format{ - .code = bestCode, - .size = *bestSize, - .colorSpace = ColorSpace::Raw, - }; - - return format; -} /** + * \fn CameraSensor::setFormat() * \brief Set the sensor output format * \param[in] format The desired sensor output format * \param[in] transform The transform to be applied on the sensor. @@ -766,32 +168,9 @@ V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbu * * \return 0 on success or a negative error code otherwise */ -int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform) -{ - /* Configure flips if the sensor supports that. */ - if (supportFlips_) { - ControlList flipCtrls(subdev_->controls()); - - flipCtrls.set(V4L2_CID_HFLIP, - static_cast<int32_t>(!!(transform & Transform::HFlip))); - flipCtrls.set(V4L2_CID_VFLIP, - static_cast<int32_t>(!!(transform & Transform::VFlip))); - - int ret = subdev_->setControls(&flipCtrls); - if (ret) - return ret; - } - - /* Apply format on the subdev. */ - int ret = subdev_->setFormat(pad_, format); - if (ret) - return ret; - - subdev_->updateControlInfo(); - return 0; -} /** + * \fn CameraSensor::tryFormat() * \brief Try the sensor output format * \param[in] format The desired sensor output format * @@ -802,13 +181,9 @@ int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform) * * \return 0 on success or a negative error code otherwise */ -int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const -{ - return subdev_->setFormat(pad_, format, - V4L2Subdevice::Whence::TryFormat); -} /** + * \fn CameraSensor::applyConfiguration() * \brief Apply a sensor configuration to the camera sensor * \param[in] config The sensor configuration * \param[in] transform The transform to be applied on the sensor. 
@@ -823,73 +198,72 @@ int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const * \return 0 if \a config is applied correctly to the camera sensor, a negative * error code otherwise */ -int CameraSensor::applyConfiguration(const SensorConfiguration &config, - Transform transform, - V4L2SubdeviceFormat *sensorFormat) -{ - if (!config.isValid()) { - LOG(CameraSensor, Error) << "Invalid sensor configuration"; - return -EINVAL; - } - std::vector<unsigned int> filteredCodes; - std::copy_if(mbusCodes_.begin(), mbusCodes_.end(), - std::back_inserter(filteredCodes), - [&config](unsigned int mbusCode) { - BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode); - if (bayer.bitDepth == config.bitDepth) - return true; - return false; - }); - if (filteredCodes.empty()) { - LOG(CameraSensor, Error) - << "Cannot find any format with bit depth " - << config.bitDepth; - return -EINVAL; - } - - /* - * Compute the sensor's data frame size by applying the cropping - * rectangle, subsampling and output crop to the sensor's pixel array - * size. - * - * \todo The actual size computation is for now ignored and only the - * output size is considered. This implies that resolutions obtained - * with two different cropping/subsampling will look identical and - * only the first found one will be considered. - */ - V4L2SubdeviceFormat subdevFormat = {}; - for (unsigned int code : filteredCodes) { - for (const Size &size : sizes(code)) { - if (size.width != config.outputSize.width || - size.height != config.outputSize.height) - continue; - - subdevFormat.code = code; - subdevFormat.size = size; - break; - } - } - if (!subdevFormat.code) { - LOG(CameraSensor, Error) << "Invalid output size in sensor configuration"; - return -EINVAL; - } - - int ret = setFormat(&subdevFormat, transform); - if (ret) - return ret; +/** + * \brief Retrieve the image source stream + * + * Sensors that produce multiple streams do not guarantee that the image stream + * is always assigned number 0. This function allows callers to retrieve the + * image stream on the sensor's source pad, in order to configure the receiving + * side accordingly. + * + * \return The image source stream + */ +V4L2Subdevice::Stream CameraSensor::imageStream() const +{ + return { 0, 0 }; +} - /* - * Return to the caller the format actually applied to the sensor. - * This is relevant if transform has changed the bayer pattern order. - */ - if (sensorFormat) - *sensorFormat = subdevFormat; +/** + * \brief Retrieve the embedded data source stream + * + * Some sensors produce embedded data in a stream separate from the image + * stream. This function indicates if the sensor supports this feature by + * returning the embedded data stream on the sensor's source pad if available, + * or an std::optional<> without a value otherwise. + * + * \return The embedded data source stream + */ +std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const +{ + return {}; +} - /* \todo Handle AnalogCrop. Most sensors do not support set_selection */ - /* \todo Handle scaling in the digital domain. */ +/** + * \brief Retrieve the format on the embedded data stream + * + * When an embedded data stream is available, this function returns the + * corresponding format on the sensor's source pad. The format may vary with + * the image stream format, and should therefore be retrieved after configuring + * the image stream. + * + * If the sensor doesn't support embedded data, this function returns a + * default-constructed format.
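+ *
+ * A caller-side sketch (the configureReceiverPad() helper is hypothetical,
+ * for illustration only):
+ *
+ * \code{.cpp}
+ * std::optional<V4L2Subdevice::Stream> edStream = sensor->embeddedDataStream();
+ * if (edStream) {
+ *         V4L2SubdeviceFormat edFormat = sensor->embeddedDataFormat();
+ *         configureReceiverPad(*edStream, edFormat);
+ * }
+ * \endcode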
+ * + * \return The format on the embedded data stream + */ +V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const +{ + return {}; +} - return 0; +/** + * \brief Enable or disable the embedded data stream + * \param[in] enable True to enable the embedded data stream, false to disable it + * + * For sensors that support embedded data, this function enables or disables + * generation of embedded data. Some such sensors always produce embedded + * data, in which case this function returns -EISCONN if the caller attempts to + * disable embedded data. + * + * If the sensor doesn't support embedded data, this function returns 0 when \a + * enable is false, and -ENOSTR otherwise. + * + * \return 0 on success, or a negative error code otherwise + */ +int CameraSensor::setEmbeddedDataEnabled(bool enable) +{ + return enable ? -ENOSTR : 0; } /** @@ -899,6 +273,7 @@ int CameraSensor::applyConfiguration(const SensorConfiguration &config, */ /** + * \fn CameraSensor::sensorInfo() * \brief Assemble and return the camera sensor info * \param[out] info The camera sensor info * @@ -912,82 +287,9 @@ int CameraSensor::applyConfiguration(const SensorConfiguration &config, * * \return 0 on success, a negative error code otherwise */ -int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const -{ - if (!bayerFormat_) - return -EINVAL; - - info->model = model(); - - /* - * The active area size is a static property, while the crop - * rectangle needs to be re-read as it depends on the sensor - * configuration. - */ - info->activeAreaSize = { activeArea_.width, activeArea_.height }; - - /* - * \todo Support for retreiving the crop rectangle is scheduled to - * become mandatory. For the time being use the default value if it has - * been initialized at sensor driver validation time. - */ - int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop); - if (ret) { - info->analogCrop = activeArea_; - LOG(CameraSensor, Warning) - << "The analogue crop rectangle has been defaulted to the active area size"; - } - - /* - * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y - * are defined relatively to the active pixel area, while V4L2's - * TGT_CROP target is defined in respect to the full pixel array. - * - * Compensate it by subtracting the active area offset. - */ - info->analogCrop.x -= activeArea_.x; - info->analogCrop.y -= activeArea_.y; - - /* The bit depth and image size depend on the currently applied format. */ - V4L2SubdeviceFormat format{}; - ret = subdev_->getFormat(pad_, &format); - if (ret) - return ret; - - info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel; - info->outputSize = format.size; - - std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement); - info->cfaPattern = cfa ? *cfa : properties::draft::RGB; - - /* - * Retrieve the pixel rate, line length and minimum/maximum frame - * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE, - * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
- */ - ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE, - V4L2_CID_HBLANK, - V4L2_CID_VBLANK }); - if (ctrls.empty()) { - LOG(CameraSensor, Error) - << "Failed to retrieve camera info controls"; - return -EINVAL; - } - - info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>(); - - const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK); - info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>(); - info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>(); - - const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK); - info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>(); - info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>(); - - return 0; -} /** + * \fn CameraSensor::computeTransform() * \brief Compute the Transform that gives the requested \a orientation * \param[inout] orientation The desired image orientation * @@ -1013,40 +315,9 @@ int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const * \return A Transform instance that applied to the CameraSensor produces images * with \a orientation */ -Transform CameraSensor::computeTransform(Orientation *orientation) const -{ - /* - * If we cannot do any flips we cannot change the native camera mounting - * orientation. - */ - if (!supportFlips_) { - *orientation = mountingOrientation_; - return Transform::Identity; - } - - /* - * Now compute the required transform to obtain 'orientation' starting - * from the mounting rotation. - * - * As a note: - * orientation / mountingOrientation_ = transform - * mountingOrientation_ * transform = orientation - */ - Transform transform = *orientation / mountingOrientation_; - - /* - * If transform contains any Transpose we cannot do it, so adjust - * 'orientation' to report the image native orientation and return Identity. - */ - if (!!(transform & Transform::Transpose)) { - *orientation = mountingOrientation_; - return Transform::Identity; - } - - return transform; -} /** + * \fn CameraSensor::bayerOrder() * \brief Compute the Bayer order that results from the given Transform * \param[in] t The Transform to apply to the sensor * @@ -1058,23 +329,9 @@ Transform CameraSensor::computeTransform(Orientation *orientation) const * * \return The Bayer order produced by the sensor when the Transform is applied */ -BayerFormat::Order CameraSensor::bayerOrder(Transform t) const -{ - /* Return a defined by meaningless value for non-Bayer sensors. */ - if (!bayerFormat_) - return BayerFormat::Order::BGGR; - - if (!flipsAlterBayerOrder_) - return bayerFormat_->order; - - /* - * Apply the transform to the native (i.e. untransformed) Bayer order, - * using the rest of the Bayer format supplied by the caller. 
- */ - return bayerFormat_->transform(t).order; -} /** + * \fn CameraSensor::controls() * \brief Retrieve the supported V4L2 controls and their information * * Control information is updated automatically to reflect the current sensor @@ -1083,12 +340,9 @@ BayerFormat::Order CameraSensor::bayerOrder(Transform t) const * * \return A map of the V4L2 controls supported by the sensor */ -const ControlInfoMap &CameraSensor::controls() const -{ - return subdev_->controls(); -} /** + * \fn CameraSensor::getControls() * \brief Read V4L2 controls from the sensor * \param[in] ids The list of controls to read, specified by their ID * @@ -1106,12 +360,9 @@ const ControlInfoMap &CameraSensor::controls() const * \return The control values in a ControlList on success, or an empty list on * error */ -ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids) -{ - return subdev_->getControls(ids); -} /** + * \fn CameraSensor::setControls() * \brief Write V4L2 controls to the sensor * \param[in] ctrls The list of controls to write * @@ -1136,10 +387,6 @@ ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids) * \retval -EINVAL One of the control is not supported or not accessible * \retval i The index of the control that failed */ -int CameraSensor::setControls(ControlList *ctrls) -{ - return subdev_->setControls(ctrls); -} /** * \fn CameraSensor::testPatternModes() @@ -1150,6 +397,7 @@ int CameraSensor::setControls(ControlList *ctrls) */ /** + * \fn CameraSensor::setTestPatternMode() * \brief Set the test pattern mode for the camera sensor * \param[in] mode The test pattern mode * * The test pattern mode is applied to the camera sensor only if it differs * from the previously applied test pattern mode. Otherwise, this function is a no-op. Setting the same test * pattern mode for every frame thus incurs no performance penalty. */ -int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode) -{ - if (testPatternMode_ == mode) - return 0; - if (testPatternModes_.empty()) { - LOG(CameraSensor, Error) - << "Camera sensor does not support test pattern modes."; - return -EINVAL; - } +/** + * \fn CameraSensor::sensorDelays() + * \brief Fetch the sensor delay values + * + * This function retrieves the delays that the sensor applies to controls. If + * the static properties database doesn't specify control delay values for the + * sensor, default delays that may be suitable are returned and a warning is + * logged. + * + * \return The sensor delay values + */ + +/** + * \class CameraSensorFactoryBase + * \brief Base class for camera sensor factories + * + * The CameraSensorFactoryBase class is the base of all specializations of + * the CameraSensorFactory class template. It implements the factory + * registration, maintains a registry of factories, and provides access to the + * registered factories. + */ - return applyTestPatternMode(mode); +/** + * \brief Construct a camera sensor factory base + * \param[in] name The camera sensor factory name + * \param[in] priority Priority order for factory selection + * + * Creating an instance of the factory base registers it with the global list of + * factories, accessible through the factories() function.
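+ *
+ * As an editorial sketch, a sensor implementation registers itself roughly as
+ * follows (hypothetical MySensor class, for illustration only):
+ *
+ * \code{.cpp}
+ * class MySensor : public CameraSensor
+ * {
+ * public:
+ *         static std::variant<std::unique_ptr<CameraSensor>, int>
+ *         match(MediaEntity *entity);
+ * };
+ *
+ * REGISTER_CAMERA_SENSOR(MySensor, 0)
+ * \endcode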
+ */ +CameraSensorFactoryBase::CameraSensorFactoryBase(const char *name, int priority) + : name_(name), priority_(priority) +{ + registerFactory(this); } -int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode) +/** + * \brief Create an instance of the CameraSensor corresponding to a media entity + * \param[in] entity The media entity on the source end of the sensor + * + * When multiple factories match the same \a entity, this function selects the + * matching factory with the highest priority as specified to the + * REGISTER_CAMERA_SENSOR() macro at factory registration time. If multiple + * matching factories have the same highest priority value, which factory gets + * selected is undefined and may vary between runs. + * + * \return A unique pointer to a new instance of the CameraSensor subclass + * matching the entity, or a null pointer if no such factory exists + */ +std::unique_ptr<CameraSensor> CameraSensorFactoryBase::create(MediaEntity *entity) { - if (testPatternModes_.empty()) - return 0; + const std::vector<CameraSensorFactoryBase *> &factories = + CameraSensorFactoryBase::factories(); - auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(), - mode); - if (it == testPatternModes_.end()) { - LOG(CameraSensor, Error) << "Unsupported test pattern mode " - << mode; - return -EINVAL; - } + for (const CameraSensorFactoryBase *factory : factories) { + std::variant<std::unique_ptr<CameraSensor>, int> result = + factory->match(entity); - LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode; + if (std::holds_alternative<std::unique_ptr<CameraSensor>>(result)) { + LOG(CameraSensor, Debug) + << "Entity '" << entity->name() << "' matched by " + << factory->name(); + return std::get<std::unique_ptr<CameraSensor>>(std::move(result)); + } - int32_t index = staticProps_->testPatternModes.at(mode); - ControlList ctrls{ controls() }; - ctrls.set(V4L2_CID_TEST_PATTERN, index); + if (std::get<int>(result)) { + LOG(CameraSensor, Error) + << "Failed to create sensor for '" + << entity->name() << "': " << std::get<int>(result); + return nullptr; + } + } - int ret = setControls(&ctrls); - if (ret) - return ret; + return nullptr; +} - testPatternMode_ = mode; +/** + * \fn CameraSensorFactoryBase::name() + * \brief Retrieve the camera sensor factory name + * \return The name of the factory + */ - return 0; +/** + * \fn CameraSensorFactoryBase::priority() + * \brief Retrieve the priority value for the factory + * \return The priority value for the factory + */ + +/** + * \brief Retrieve the list of all camera sensor factories + * + * The factories are sorted in decreasing priority order. + * + * \return The list of camera sensor factories + */ +std::vector<CameraSensorFactoryBase *> &CameraSensorFactoryBase::factories() +{ + /* + * The static factories map is defined inside the function to ensure + * it gets initialized on first use, without any dependency on link + * order.
+ */ + static std::vector<CameraSensorFactoryBase *> factories; + return factories; } -std::string CameraSensor::logPrefix() const +/** + * \brief Add a camera sensor class to the registry + * \param[in] factory Factory to use to construct the camera sensor + */ +void CameraSensorFactoryBase::registerFactory(CameraSensorFactoryBase *factory) { - return "'" + entity_->name() + "'"; + std::vector<CameraSensorFactoryBase *> &factories = + CameraSensorFactoryBase::factories(); + + auto pos = std::upper_bound(factories.begin(), factories.end(), factory, + [](const CameraSensorFactoryBase *value, + const CameraSensorFactoryBase *elem) { + return value->priority() > elem->priority(); + }); + factories.insert(pos, factory); } +/** + * \class CameraSensorFactory + * \brief Registration of CameraSensorFactory classes and creation of instances + * \tparam _CameraSensor The camera sensor class type for this factory + * + * To facilitate discovery and instantiation of CameraSensor classes, the + * CameraSensorFactory class implements auto-registration of camera sensors. + * Each CameraSensor subclass shall register itself using the + * REGISTER_CAMERA_SENSOR() macro, which will create a corresponding instance + * of a CameraSensorFactory subclass and register it with the static list of + * factories. + */ + +/** + * \fn CameraSensorFactory::CameraSensorFactory() + * \brief Construct a camera sensor factory + * + * Creating an instance of the factory registers it with the global list of + * factories, accessible through the CameraSensorFactoryBase::factories() + * function. + */ + +/** + * \def REGISTER_CAMERA_SENSOR(sensor, priority) + * \brief Register a camera sensor type to the sensor factory + * \param[in] sensor Class name of the CameraSensor derived class to register + * \param[in] priority Priority order for factory selection + * + * Register a CameraSensor subclass with the factory and make it available to + * try and match sensors. The subclass needs to implement a static match + * function: + * + * \code{.cpp} + * static std::variant<std::unique_ptr<CameraSensor>, int> match(MediaEntity *entity); + * \endcode + * + * The function tests if the sensor class supports the camera sensor identified + * by a MediaEntity. If so, it creates a new instance of the sensor class. The + * return value is a variant that contains + * + * - A new instance of the camera sensor class if the entity matched and + * creation succeeded; + * - A non-zero error code if the entity matched and the creation failed; or + * - A zero error code if the entity didn't match. + * + * When multiple factories can support the same MediaEntity (as in the match() + * functions of multiple factories succeeding for the same entity), the \a + * priority argument selects which factory will be used. See + * CameraSensorFactoryBase::create() for more information. + */ + +} /* namespace libcamera */ diff --git a/src/libcamera/sensor/camera_sensor_legacy.cpp b/src/libcamera/sensor/camera_sensor_legacy.cpp new file mode 100644 index 00000000..32989c19 --- /dev/null +++ b/src/libcamera/sensor/camera_sensor_legacy.cpp @@ -0,0 +1,1045 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2019, Google Inc.
+ * + * camera_sensor_legacy.cpp - A V4L2-backed camera sensor + */ + +#include <algorithm> +#include <cmath> +#include <float.h> +#include <iomanip> +#include <limits.h> +#include <map> +#include <memory> +#include <string.h> +#include <string> +#include <vector> + +#include <libcamera/base/class.h> +#include <libcamera/base/log.h> +#include <libcamera/base/utils.h> + +#include <libcamera/camera.h> +#include <libcamera/control_ids.h> +#include <libcamera/controls.h> +#include <libcamera/geometry.h> +#include <libcamera/orientation.h> +#include <libcamera/property_ids.h> +#include <libcamera/transform.h> + +#include <libcamera/ipa/core_ipa_interface.h> + +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/camera_lens.h" +#include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" +#include "libcamera/internal/formats.h" +#include "libcamera/internal/media_device.h" +#include "libcamera/internal/sysfs.h" +#include "libcamera/internal/v4l2_subdevice.h" + +namespace libcamera { + +class BayerFormat; +class CameraLens; +class MediaEntity; +class SensorConfiguration; + +struct CameraSensorProperties; + +enum class Orientation; + +LOG_DECLARE_CATEGORY(CameraSensor) + +class CameraSensorLegacy : public CameraSensor, protected Loggable +{ +public: + CameraSensorLegacy(const MediaEntity *entity); + ~CameraSensorLegacy(); + + static std::variant<std::unique_ptr<CameraSensor>, int> + match(MediaEntity *entity); + + const std::string &model() const override { return model_; } + const std::string &id() const override { return id_; } + + const MediaEntity *entity() const override { return entity_; } + V4L2Subdevice *device() override { return subdev_.get(); } + + CameraLens *focusLens() override { return focusLens_.get(); } + + const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; } + std::vector<Size> sizes(unsigned int mbusCode) const override; + Size resolution() const override; + + V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes, + const Size &size, + const Size maxSize) const override; + int setFormat(V4L2SubdeviceFormat *format, + Transform transform = Transform::Identity) override; + int tryFormat(V4L2SubdeviceFormat *format) const override; + + int applyConfiguration(const SensorConfiguration &config, + Transform transform = Transform::Identity, + V4L2SubdeviceFormat *sensorFormat = nullptr) override; + + const ControlList &properties() const override { return properties_; } + int sensorInfo(IPACameraSensorInfo *info) const override; + Transform computeTransform(Orientation *orientation) const override; + BayerFormat::Order bayerOrder(Transform t) const override; + + const ControlInfoMap &controls() const override; + ControlList getControls(const std::vector<uint32_t> &ids) override; + int setControls(ControlList *ctrls) override; + + const std::vector<controls::draft::TestPatternModeEnum> & + testPatternModes() const override { return testPatternModes_; } + int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override; + const CameraSensorProperties::SensorDelays &sensorDelays() override; + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY(CameraSensorLegacy) + + int init(); + int generateId(); + int validateSensorDriver(); + void initVimcDefaultProperties(); + void initStaticProperties(); + void initTestPatternModes(); + int initProperties(); + int applyTestPatternMode(controls::draft::TestPatternModeEnum mode); + int 
discoverAncillaryDevices(); + + const MediaEntity *entity_; + std::unique_ptr<V4L2Subdevice> subdev_; + unsigned int pad_; + + const CameraSensorProperties *staticProps_; + + std::string model_; + std::string id_; + + V4L2Subdevice::Formats formats_; + std::vector<unsigned int> mbusCodes_; + std::vector<Size> sizes_; + std::vector<controls::draft::TestPatternModeEnum> testPatternModes_; + controls::draft::TestPatternModeEnum testPatternMode_; + + Size pixelArraySize_; + Rectangle activeArea_; + const BayerFormat *bayerFormat_; + bool supportFlips_; + bool flipsAlterBayerOrder_; + Orientation mountingOrientation_; + + ControlList properties_; + + std::unique_ptr<CameraLens> focusLens_; +}; + +/** + * \class CameraSensorLegacy + * \brief A camera sensor based on V4L2 subdevices + * + * The implementation is currently limited to sensors that expose a single V4L2 + * subdevice with a single pad. It will be extended to support more complex + * devices as the needs arise. + */ + +CameraSensorLegacy::CameraSensorLegacy(const MediaEntity *entity) + : entity_(entity), pad_(UINT_MAX), staticProps_(nullptr), + bayerFormat_(nullptr), supportFlips_(false), + flipsAlterBayerOrder_(false), properties_(properties::properties) +{ +} + +CameraSensorLegacy::~CameraSensorLegacy() = default; + +std::variant<std::unique_ptr<CameraSensor>, int> +CameraSensorLegacy::match(MediaEntity *entity) +{ + std::unique_ptr<CameraSensorLegacy> sensor = + std::make_unique<CameraSensorLegacy>(entity); + + int ret = sensor->init(); + if (ret) + return { ret }; + + return { std::move(sensor) }; +} + +int CameraSensorLegacy::init() +{ + for (const MediaPad *pad : entity_->pads()) { + if (pad->flags() & MEDIA_PAD_FL_SOURCE) { + pad_ = pad->index(); + break; + } + } + + if (pad_ == UINT_MAX) { + LOG(CameraSensor, Error) + << "Sensors with more than one pad are not supported"; + return -EINVAL; + } + + switch (entity_->function()) { + case MEDIA_ENT_F_CAM_SENSOR: + case MEDIA_ENT_F_PROC_VIDEO_ISP: + break; + + default: + LOG(CameraSensor, Error) + << "Invalid sensor function " + << utils::hex(entity_->function()); + return -EINVAL; + } + + /* Create and open the subdev. */ + subdev_ = std::make_unique<V4L2Subdevice>(entity_); + int ret = subdev_->open(); + if (ret < 0) + return ret; + + /* + * Clear any flips to be sure we get the "native" Bayer order. This is + * harmless for sensors where the flips don't affect the Bayer order. + */ + ControlList ctrls(subdev_->controls()); + if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end()) + ctrls.set(V4L2_CID_HFLIP, 0); + if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end()) + ctrls.set(V4L2_CID_VFLIP, 0); + subdev_->setControls(&ctrls); + + /* Enumerate, sort and cache media bus codes and sizes. */ + formats_ = subdev_->formats(pad_); + if (formats_.empty()) { + LOG(CameraSensor, Error) << "No image format found"; + return -EINVAL; + } + + mbusCodes_ = utils::map_keys(formats_); + std::sort(mbusCodes_.begin(), mbusCodes_.end()); + + for (const auto &format : formats_) { + const std::vector<SizeRange> &ranges = format.second; + std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_), + [](const SizeRange &range) { return range.max; }); + } + + std::sort(sizes_.begin(), sizes_.end()); + + /* Remove duplicates. */ + auto last = std::unique(sizes_.begin(), sizes_.end()); + sizes_.erase(last, sizes_.end()); + + /* + * VIMC is a bit special, as it does not yet support all the mandatory + * requirements regular sensors have to respect. 
+ * + * Do not validate the driver if it's VIMC and initialize the sensor + * properties with static information. + * + * \todo Remove the special case once the VIMC driver has been + * updated in all test platforms. + */ + if (entity_->device()->driver() == "vimc") { + initVimcDefaultProperties(); + + ret = initProperties(); + if (ret) + return ret; + + return discoverAncillaryDevices(); + } + + /* Get the color filter array pattern (only for RAW sensors). */ + for (unsigned int mbusCode : mbusCodes_) { + const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode); + if (bayerFormat.isValid()) { + bayerFormat_ = &bayerFormat; + break; + } + } + + ret = validateSensorDriver(); + if (ret) + return ret; + + ret = initProperties(); + if (ret) + return ret; + + ret = discoverAncillaryDevices(); + if (ret) + return ret; + + /* + * Set HBLANK to the minimum to start with a well-defined line length, + * allowing IPA modules that do not modify HBLANK to use the sensor + * minimum line length in their calculations. + */ + const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK); + if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { + ControlList ctrl(subdev_->controls()); + + ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum)); + ret = subdev_->setControls(&ctrl); + if (ret) + return ret; + } + + return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff); +} + +int CameraSensorLegacy::generateId() +{ + const std::string devPath = subdev_->devicePath(); + + /* Try to get ID from firmware description. */ + id_ = sysfs::firmwareNodePath(devPath); + if (!id_.empty()) + return 0; + + /* + * Virtual sensors not described in firmware + * + * Verify it's a platform device and construct ID from the device path + * and model of sensor. + */ + if (devPath.find("/sys/devices/platform/", 0) == 0) { + id_ = devPath.substr(strlen("/sys/devices/")) + " " + model(); + return 0; + } + + LOG(CameraSensor, Error) << "Can't generate sensor ID"; + return -EINVAL; +} + +int CameraSensorLegacy::validateSensorDriver() +{ + int err = 0; + + /* + * Optional controls are used to register optional sensor properties. If + * not present, some values will be defaulted. + */ + static constexpr uint32_t optionalControls[] = { + V4L2_CID_CAMERA_SENSOR_ROTATION, + }; + + const ControlIdMap &controls = subdev_->controls().idmap(); + for (uint32_t ctrl : optionalControls) { + if (!controls.count(ctrl)) + LOG(CameraSensor, Debug) + << "Optional V4L2 control " << utils::hex(ctrl) + << " not supported"; + } + + /* + * Recommended controls are similar to optional controls, but will + * become mandatory in the near future. Be loud if they're missing. + */ + static constexpr uint32_t recommendedControls[] = { + V4L2_CID_CAMERA_ORIENTATION, + }; + + for (uint32_t ctrl : recommendedControls) { + if (!controls.count(ctrl)) { + LOG(CameraSensor, Warning) + << "Recommended V4L2 control " << utils::hex(ctrl) + << " not supported"; + err = -EINVAL; + } + } + + /* + * Verify if sensor supports horizontal/vertical flips + * + * \todo Handle horizontal and vertical flips independently. 
+ */ + const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP); + const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP); + if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) && + vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { + supportFlips_ = true; + + if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT || + vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT) + flipsAlterBayerOrder_ = true; + } + + if (!supportFlips_) + LOG(CameraSensor, Debug) + << "Camera sensor does not support horizontal/vertical flip"; + + /* + * Make sure the required selection targets are supported. + * + * Failures in reading any of the targets are not deemed to be fatal, + * but some properties and features, like constructing a + * IPACameraSensorInfo for the IPA module, won't be supported. + * + * \todo Make support for selection targets mandatory as soon as all + * test platforms have been updated. + */ + Rectangle rect; + int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect); + if (ret) { + /* + * Default the pixel array size to the largest size supported + * by the sensor. The sizes_ vector is sorted in ascending + * order, the largest size is thus the last element. + */ + pixelArraySize_ = sizes_.back(); + + LOG(CameraSensor, Warning) + << "The PixelArraySize property has been defaulted to " + << pixelArraySize_; + err = -EINVAL; + } else { + pixelArraySize_ = rect.size(); + } + + ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_); + if (ret) { + activeArea_ = Rectangle(pixelArraySize_); + LOG(CameraSensor, Warning) + << "The PixelArrayActiveAreas property has been defaulted to " + << activeArea_; + err = -EINVAL; + } + + ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect); + if (ret) { + LOG(CameraSensor, Warning) + << "Failed to retrieve the sensor crop rectangle"; + err = -EINVAL; + } + + if (err) { + LOG(CameraSensor, Warning) + << "The sensor kernel driver needs to be fixed"; + LOG(CameraSensor, Warning) + << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information"; + } + + if (!bayerFormat_) + return 0; + + /* + * For raw sensors, make sure the sensor driver supports the controls + * required by the CameraSensor class. + */ + static constexpr uint32_t mandatoryControls[] = { + V4L2_CID_ANALOGUE_GAIN, + V4L2_CID_EXPOSURE, + V4L2_CID_HBLANK, + V4L2_CID_PIXEL_RATE, + V4L2_CID_VBLANK, + }; + + err = 0; + for (uint32_t ctrl : mandatoryControls) { + if (!controls.count(ctrl)) { + LOG(CameraSensor, Error) + << "Mandatory V4L2 control " << utils::hex(ctrl) + << " not available"; + err = -EINVAL; + } + } + + if (err) { + LOG(CameraSensor, Error) + << "The sensor kernel driver needs to be fixed"; + LOG(CameraSensor, Error) + << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information"; + return err; + } + + return 0; +} + +void CameraSensorLegacy::initVimcDefaultProperties() +{ + /* Use the largest supported size. */ + pixelArraySize_ = sizes_.back(); + activeArea_ = Rectangle(pixelArraySize_); +} + +void CameraSensorLegacy::initStaticProperties() +{ + staticProps_ = CameraSensorProperties::get(model_); + if (!staticProps_) + return; + + /* Register the properties retrieved from the sensor database. 
*/ + properties_.set(properties::UnitCellSize, staticProps_->unitCellSize); + + initTestPatternModes(); +} + +const CameraSensorProperties::SensorDelays &CameraSensorLegacy::sensorDelays() +{ + static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = { + .exposureDelay = 2, + .gainDelay = 1, + .vblankDelay = 2, + .hblankDelay = 2, + }; + + if (!staticProps_ || + (!staticProps_->sensorDelays.exposureDelay && + !staticProps_->sensorDelays.gainDelay && + !staticProps_->sensorDelays.vblankDelay && + !staticProps_->sensorDelays.hblankDelay)) { + LOG(CameraSensor, Warning) + << "No sensor delays found in static properties. " + "Assuming unverified defaults."; + + return defaultSensorDelays; + } + + return staticProps_->sensorDelays; +} + +void CameraSensorLegacy::initTestPatternModes() +{ + const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN); + if (v4l2TestPattern == controls().end()) { + LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported"; + return; + } + + const auto &testPatternModes = staticProps_->testPatternModes; + if (testPatternModes.empty()) { + /* + * The camera sensor supports test patterns but we don't know + * how to map them so this should be fixed. + */ + LOG(CameraSensor, Debug) << "No static test pattern map for \'" + << model() << "\'"; + return; + } + + /* + * Create a map that associates the V4L2 control index to the test + * pattern mode by reversing the testPatternModes map provided by the + * camera sensor properties. This makes it easier to verify if the + * control index is supported in the below for loop that creates the + * list of supported test patterns. + */ + std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode; + for (const auto &it : testPatternModes) + indexToTestPatternMode[it.second] = it.first; + + for (const ControlValue &value : v4l2TestPattern->second.values()) { + const int32_t index = value.get<int32_t>(); + + const auto it = indexToTestPatternMode.find(index); + if (it == indexToTestPatternMode.end()) { + LOG(CameraSensor, Debug) + << "Test pattern mode " << index << " ignored"; + continue; + } + + testPatternModes_.push_back(it->second); + } +} + +int CameraSensorLegacy::initProperties() +{ + model_ = subdev_->model(); + properties_.set(properties::Model, utils::toAscii(model_)); + + /* Generate a unique ID for the sensor. */ + int ret = generateId(); + if (ret) + return ret; + + /* Initialize the static properties from the sensor database. */ + initStaticProperties(); + + /* Retrieve and register properties from the kernel interface. 
*/ + const ControlInfoMap &controls = subdev_->controls(); + + const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION); + if (orientation != controls.end()) { + int32_t v4l2Orientation = orientation->second.def().get<int32_t>(); + int32_t propertyValue; + + switch (v4l2Orientation) { + default: + LOG(CameraSensor, Warning) + << "Unsupported camera location " + << v4l2Orientation << ", setting to External"; + [[fallthrough]]; + case V4L2_CAMERA_ORIENTATION_EXTERNAL: + propertyValue = properties::CameraLocationExternal; + break; + case V4L2_CAMERA_ORIENTATION_FRONT: + propertyValue = properties::CameraLocationFront; + break; + case V4L2_CAMERA_ORIENTATION_BACK: + propertyValue = properties::CameraLocationBack; + break; + } + properties_.set(properties::Location, propertyValue); + } else { + LOG(CameraSensor, Warning) << "Failed to retrieve the camera location"; + } + + const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION); + if (rotationControl != controls.end()) { + int32_t propertyValue = rotationControl->second.def().get<int32_t>(); + + /* + * Cache the Transform associated with the camera mounting + * rotation for later use in computeTransform(). + */ + bool success; + mountingOrientation_ = orientationFromRotation(propertyValue, &success); + if (!success) { + LOG(CameraSensor, Warning) + << "Invalid rotation of " << propertyValue + << " degrees - ignoring"; + mountingOrientation_ = Orientation::Rotate0; + } + + properties_.set(properties::Rotation, propertyValue); + } else { + LOG(CameraSensor, Warning) + << "Rotation control not available, default to 0 degrees"; + properties_.set(properties::Rotation, 0); + mountingOrientation_ = Orientation::Rotate0; + } + + properties_.set(properties::PixelArraySize, pixelArraySize_); + properties_.set(properties::PixelArrayActiveAreas, { activeArea_ }); + + /* Color filter array pattern, register only for RAW sensors. 
*/ + if (bayerFormat_) { + int32_t cfa; + switch (bayerFormat_->order) { + case BayerFormat::BGGR: + cfa = properties::draft::BGGR; + break; + case BayerFormat::GBRG: + cfa = properties::draft::GBRG; + break; + case BayerFormat::GRBG: + cfa = properties::draft::GRBG; + break; + case BayerFormat::RGGB: + cfa = properties::draft::RGGB; + break; + case BayerFormat::MONO: + cfa = properties::draft::MONO; + break; + } + + properties_.set(properties::draft::ColorFilterArrangement, cfa); + } + + return 0; +} + +int CameraSensorLegacy::discoverAncillaryDevices() +{ + int ret; + + for (MediaEntity *ancillary : entity_->ancillaryEntities()) { + switch (ancillary->function()) { + case MEDIA_ENT_F_LENS: + focusLens_ = std::make_unique<CameraLens>(ancillary); + ret = focusLens_->init(); + if (ret) { + LOG(CameraSensor, Error) + << "Lens initialisation failed, lens disabled"; + focusLens_.reset(); + } + break; + + default: + LOG(CameraSensor, Warning) + << "Unsupported ancillary entity function " + << ancillary->function(); + break; + } + } + + return 0; +} + +std::vector<Size> CameraSensorLegacy::sizes(unsigned int mbusCode) const +{ + std::vector<Size> sizes; + + const auto &format = formats_.find(mbusCode); + if (format == formats_.end()) + return sizes; + + const std::vector<SizeRange> &ranges = format->second; + std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes), + [](const SizeRange &range) { return range.max; }); + + std::sort(sizes.begin(), sizes.end()); + + return sizes; +} + +Size CameraSensorLegacy::resolution() const +{ + return std::min(sizes_.back(), activeArea_.size()); +} + +V4L2SubdeviceFormat +CameraSensorLegacy::getFormat(const std::vector<unsigned int> &mbusCodes, + const Size &size, Size maxSize) const +{ + unsigned int desiredArea = size.width * size.height; + unsigned int bestArea = UINT_MAX; + float desiredRatio = static_cast<float>(size.width) / size.height; + float bestRatio = FLT_MAX; + const Size *bestSize = nullptr; + uint32_t bestCode = 0; + + for (unsigned int code : mbusCodes) { + const auto formats = formats_.find(code); + if (formats == formats_.end()) + continue; + + for (const SizeRange &range : formats->second) { + const Size &sz = range.max; + + if (!maxSize.isNull() && + (sz.width > maxSize.width || sz.height > maxSize.height)) + continue; + + if (sz.width < size.width || sz.height < size.height) + continue; + + float ratio = static_cast<float>(sz.width) / sz.height; + float ratioDiff = std::abs(ratio - desiredRatio); + unsigned int area = sz.width * sz.height; + unsigned int areaDiff = area - desiredArea; + + if (ratioDiff > bestRatio) + continue; + + if (ratioDiff < bestRatio || areaDiff < bestArea) { + bestRatio = ratioDiff; + bestArea = areaDiff; + bestSize = &sz; + bestCode = code; + } + } + } + + if (!bestSize) { + LOG(CameraSensor, Debug) << "No supported format or size found"; + return {}; + } + + V4L2SubdeviceFormat format{ + .code = bestCode, + .size = *bestSize, + .colorSpace = ColorSpace::Raw, + }; + + return format; +} + +int CameraSensorLegacy::setFormat(V4L2SubdeviceFormat *format, Transform transform) +{ + /* Configure flips if the sensor supports that. */ + if (supportFlips_) { + ControlList flipCtrls(subdev_->controls()); + + flipCtrls.set(V4L2_CID_HFLIP, + static_cast<int32_t>(!!(transform & Transform::HFlip))); + flipCtrls.set(V4L2_CID_VFLIP, + static_cast<int32_t>(!!(transform & Transform::VFlip))); + + int ret = subdev_->setControls(&flipCtrls); + if (ret) + return ret; + } + + /* Apply format on the subdev. 
+	 */
+	int ret = subdev_->setFormat(pad_, format);
+	if (ret)
+		return ret;
+
+	subdev_->updateControlInfo();
+	return 0;
+}
+
+int CameraSensorLegacy::tryFormat(V4L2SubdeviceFormat *format) const
+{
+	return subdev_->setFormat(pad_, format,
+				  V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorLegacy::applyConfiguration(const SensorConfiguration &config,
+					   Transform transform,
+					   V4L2SubdeviceFormat *sensorFormat)
+{
+	if (!config.isValid()) {
+		LOG(CameraSensor, Error) << "Invalid sensor configuration";
+		return -EINVAL;
+	}
+
+	std::vector<unsigned int> filteredCodes;
+	std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+		     std::back_inserter(filteredCodes),
+		     [&config](unsigned int mbusCode) {
+			     BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+			     if (bayer.bitDepth == config.bitDepth)
+				     return true;
+			     return false;
+		     });
+	if (filteredCodes.empty()) {
+		LOG(CameraSensor, Error)
+			<< "Cannot find any format with bit depth "
+			<< config.bitDepth;
+		return -EINVAL;
+	}
+
+	/*
+	 * Compute the sensor's data frame size by applying the cropping
+	 * rectangle, subsampling and output crop to the sensor's pixel array
+	 * size.
+	 *
+	 * \todo The actual size computation is for now ignored and only the
+	 * output size is considered. This implies that resolutions obtained
+	 * with two different cropping/subsampling will look identical and
+	 * only the first found one will be considered.
+	 */
+	V4L2SubdeviceFormat subdevFormat = {};
+	for (unsigned int code : filteredCodes) {
+		for (const Size &size : sizes(code)) {
+			if (size.width != config.outputSize.width ||
+			    size.height != config.outputSize.height)
+				continue;
+
+			subdevFormat.code = code;
+			subdevFormat.size = size;
+			break;
+		}
+	}
+	if (!subdevFormat.code) {
+		LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+		return -EINVAL;
+	}
+
+	int ret = setFormat(&subdevFormat, transform);
+	if (ret)
+		return ret;
+
+	/*
+	 * Return to the caller the format actually applied to the sensor.
+	 * This is relevant if transform has changed the bayer pattern order.
+	 */
+	if (sensorFormat)
+		*sensorFormat = subdevFormat;
+
+	/* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+	/* \todo Handle scaling in the digital domain. */
+
+	return 0;
+}
+
+int CameraSensorLegacy::sensorInfo(IPACameraSensorInfo *info) const
+{
+	if (!bayerFormat_)
+		return -EINVAL;
+
+	info->model = model();
+
+	/*
+	 * The active area size is a static property, while the crop
+	 * rectangle needs to be re-read as it depends on the sensor
+	 * configuration.
+	 */
+	info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+	/*
+	 * \todo Support for retrieving the crop rectangle is scheduled to
+	 * become mandatory. For the time being use the default value if it has
+	 * been initialized at sensor driver validation time.
+	 */
+	int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
+	if (ret) {
+		info->analogCrop = activeArea_;
+		LOG(CameraSensor, Warning)
+			<< "The analogue crop rectangle has been defaulted to the active area size";
+	}
+
+	/*
+	 * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+	 * are defined relative to the active pixel area, while V4L2's
+	 * TGT_CROP target is defined with respect to the full pixel array.
+	 *
+	 * Compensate it by subtracting the active area offset.
+	 */
+	info->analogCrop.x -= activeArea_.x;
+	info->analogCrop.y -= activeArea_.y;
+
+	/* The bit depth and image size depend on the currently applied format. */
+	V4L2SubdeviceFormat format{};
+	ret = subdev_->getFormat(pad_, &format);
+	if (ret)
+		return ret;
+	info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+	info->outputSize = format.size;
+
+	std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+	info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+	/*
+	 * Retrieve the pixel rate, line length and minimum/maximum frame
+	 * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+	 * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+	 */
+	ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+						   V4L2_CID_HBLANK,
+						   V4L2_CID_VBLANK });
+	if (ctrls.empty()) {
+		LOG(CameraSensor, Error)
+			<< "Failed to retrieve camera info controls";
+		return -EINVAL;
+	}
+
+	info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+	const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+	info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+	info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+	const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+	info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+	info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+	return 0;
+}
+
+Transform CameraSensorLegacy::computeTransform(Orientation *orientation) const
+{
+	/*
+	 * If we cannot do any flips we cannot change the native camera mounting
+	 * orientation.
+	 */
+	if (!supportFlips_) {
+		*orientation = mountingOrientation_;
+		return Transform::Identity;
+	}
+
+	/*
+	 * Now compute the required transform to obtain 'orientation' starting
+	 * from the mounting rotation.
+	 *
+	 * As a note:
+	 * orientation / mountingOrientation_ = transform
+	 * mountingOrientation_ * transform = orientation
+	 */
+	Transform transform = *orientation / mountingOrientation_;
+
+	/*
+	 * If transform contains any Transpose we cannot do it, so adjust
+	 * 'orientation' to report the image native orientation and return Identity.
+	 */
+	if (!!(transform & Transform::Transpose)) {
+		*orientation = mountingOrientation_;
+		return Transform::Identity;
+	}
+
+	return transform;
+}
+
+BayerFormat::Order CameraSensorLegacy::bayerOrder(Transform t) const
+{
+	/* Return a defined but meaningless value for non-Bayer sensors. */
+	if (!bayerFormat_)
+		return BayerFormat::Order::BGGR;
+
+	if (!flipsAlterBayerOrder_)
+		return bayerFormat_->order;
+
+	/*
+	 * Apply the transform to the native (i.e. untransformed) Bayer order,
+	 * using the rest of the Bayer format supplied by the caller.
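+	 *
+	 * As a worked example (assuming the usual 2x2 Bayer geometry): a
+	 * native RGGB pattern becomes GRBG under HFlip, GBRG under VFlip,
+	 * and BGGR when both flips are applied.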
+ */ + return bayerFormat_->transform(t).order; +} + +const ControlInfoMap &CameraSensorLegacy::controls() const +{ + return subdev_->controls(); +} + +ControlList CameraSensorLegacy::getControls(const std::vector<uint32_t> &ids) +{ + return subdev_->getControls(ids); +} + +int CameraSensorLegacy::setControls(ControlList *ctrls) +{ + return subdev_->setControls(ctrls); +} + +int CameraSensorLegacy::setTestPatternMode(controls::draft::TestPatternModeEnum mode) +{ + if (testPatternMode_ == mode) + return 0; + + if (testPatternModes_.empty()) { + LOG(CameraSensor, Error) + << "Camera sensor does not support test pattern modes."; + return -EINVAL; + } + + return applyTestPatternMode(mode); +} + +int CameraSensorLegacy::applyTestPatternMode(controls::draft::TestPatternModeEnum mode) +{ + if (testPatternModes_.empty()) + return 0; + + auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(), + mode); + if (it == testPatternModes_.end()) { + LOG(CameraSensor, Error) << "Unsupported test pattern mode " + << mode; + return -EINVAL; + } + + LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode; + + int32_t index = staticProps_->testPatternModes.at(mode); + ControlList ctrls{ controls() }; + ctrls.set(V4L2_CID_TEST_PATTERN, index); + + int ret = setControls(&ctrls); + if (ret) + return ret; + + testPatternMode_ = mode; + + return 0; +} + +std::string CameraSensorLegacy::logPrefix() const +{ + return "'" + entity_->name() + "'"; +} + +REGISTER_CAMERA_SENSOR(CameraSensorLegacy, -100) + +} /* namespace libcamera */ diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp index b18524d8..e2f518f9 100644 --- a/src/libcamera/sensor/camera_sensor_properties.cpp +++ b/src/libcamera/sensor/camera_sensor_properties.cpp @@ -41,6 +41,35 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties) * \brief Map that associates the TestPattern control value with the indexes of * the corresponding sensor test pattern modes as returned by * V4L2_CID_TEST_PATTERN. + * + * \var CameraSensorProperties::sensorDelays + * \brief Sensor control application delays + * + * This structure may be defined as empty if the actual sensor delays are not + * available or have not been measured. + */ + +/** + * \struct CameraSensorProperties::SensorDelays + * \brief Sensor control application delay values + * + * This structure holds delay values, expressed in number of frames, between the + * time a control value is applied to the sensor and the time that value is + * reflected in the output. For example "2 frames delay" means that parameters + * set during frame N will take effect for frame N+2 (and by extension a delay + * of 0 would mean the parameter is applied immediately to the current frame). 
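+ *
+ * As a hypothetical sketch (the names below are invented for
+ * illustration), a pipeline handler that wants a control to take effect
+ * in frame N must therefore queue it exposureDelay frames earlier:
+ *
+ * \code
+ * // Queue an exposure change so it takes effect in frame targetFrame
+ * // (bounds handling omitted for brevity).
+ * void queueExposure(unsigned int targetFrame, int32_t exposure)
+ * {
+ *         unsigned int applyFrame = targetFrame - delays_.exposureDelay;
+ *         pendingCtrls_[applyFrame].set(V4L2_CID_EXPOSURE, exposure);
+ * }
+ * \endcode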
+ * + * \var CameraSensorProperties::SensorDelays::exposureDelay + * \brief Number of frames between application of exposure control and effect + * + * \var CameraSensorProperties::SensorDelays::gainDelay + * \brief Number of frames between application of analogue gain control and effect + * + * \var CameraSensorProperties::SensorDelays::vblankDelay + * \brief Number of frames between application of vblank control and effect + * + * \var CameraSensorProperties::SensorDelays::hblankDelay + * \brief Number of frames between application of hblank control and effect */ /** @@ -52,6 +81,21 @@ LOG_DEFINE_CATEGORY(CameraSensorProperties) const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor) { static const std::map<std::string, const CameraSensorProperties> sensorProps = { + { "ar0144", { + .unitCellSize = { 3000, 3000 }, + .testPatternModes = { + { controls::draft::TestPatternModeOff, 0 }, + { controls::draft::TestPatternModeSolidColor, 1 }, + { controls::draft::TestPatternModeColorBars, 2 }, + { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, + }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, + } }, { "ar0521", { .unitCellSize = { 2200, 2200 }, .testPatternModes = { @@ -60,6 +104,33 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeColorBars, 2 }, { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, }, + .sensorDelays = { }, + } }, + { "gc05a2", { + .unitCellSize = { 1120, 1120 }, + .testPatternModes = { + { controls::draft::TestPatternModeOff, 0 }, + { controls::draft::TestPatternModeColorBars, 1 }, + }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, + } }, + { "gc08a3", { + .unitCellSize = { 1120, 1120 }, + .testPatternModes = { + { controls::draft::TestPatternModeOff, 0 }, + { controls::draft::TestPatternModeColorBars, 2 }, + }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "hi846", { .unitCellSize = { 1120, 1120 }, @@ -78,6 +149,18 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * 9: "Resolution Pattern" */ }, + .sensorDelays = { }, + } }, + { "imx214", { + .unitCellSize = { 1120, 1120 }, + .testPatternModes = { + { controls::draft::TestPatternModeOff, 0 }, + { controls::draft::TestPatternModeColorBars, 1 }, + { controls::draft::TestPatternModeSolidColor, 2 }, + { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, + { controls::draft::TestPatternModePn9, 4 }, + }, + .sensorDelays = { }, } }, { "imx219", { .unitCellSize = { 1120, 1120 }, @@ -88,6 +171,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, { controls::draft::TestPatternModePn9, 4 }, }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 1, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "imx258", { .unitCellSize = { 1120, 1120 }, @@ -98,34 +187,72 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, { controls::draft::TestPatternModePn9, 4 }, }, + .sensorDelays = { }, } }, { "imx283", { .unitCellSize = { 2400, 2400 }, .testPatternModes = {}, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "imx290", { .unitCellSize = { 2900, 2900 }, 
.testPatternModes = {}, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "imx296", { .unitCellSize = { 3450, 3450 }, .testPatternModes = {}, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "imx327", { .unitCellSize = { 2900, 2900 }, .testPatternModes = {}, + .sensorDelays = { }, } }, { "imx335", { .unitCellSize = { 2000, 2000 }, .testPatternModes = {}, + .sensorDelays = { }, } }, { "imx415", { .unitCellSize = { 1450, 1450 }, .testPatternModes = {}, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, + } }, + { "imx462", { + .unitCellSize = { 2900, 2900 }, + .testPatternModes = {}, + .sensorDelays = { }, } }, { "imx477", { .unitCellSize = { 1550, 1550 }, .testPatternModes = {}, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 3, + .hblankDelay = 3 + }, } }, { "imx519", { .unitCellSize = { 1220, 1220 }, @@ -138,6 +265,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * these two patterns do not comply with MIPI CCS v1.1 (Section 10.1). */ }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 3, + .hblankDelay = 3 + }, } }, { "imx708", { .unitCellSize = { 1400, 1400 }, @@ -148,6 +281,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeColorBarsFadeToGray, 3 }, { controls::draft::TestPatternModePn9, 4 }, }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 3, + .hblankDelay = 3 + }, } }, { "ov2685", { .unitCellSize = { 1750, 1750 }, @@ -162,6 +301,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * 5: "Color Square" */ }, + .sensorDelays = { }, } }, { "ov2740", { .unitCellSize = { 1400, 1400 }, @@ -169,6 +309,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeOff, 0 }, { controls::draft::TestPatternModeColorBars, 1}, }, + .sensorDelays = { }, } }, { "ov4689", { .unitCellSize = { 2000, 2000 }, @@ -182,6 +323,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * colorBarType2 and colorBarType3. */ }, + .sensorDelays = { }, } }, { "ov5640", { .unitCellSize = { 1400, 1400 }, @@ -189,10 +331,25 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeOff, 0 }, { controls::draft::TestPatternModeColorBars, 1 }, }, + .sensorDelays = { }, } }, { "ov5647", { .unitCellSize = { 1400, 1400 }, .testPatternModes = {}, + /* + * We run this sensor in a mode where the gain delay is + * bumped up to 2. It seems to be the only way to make + * the delays "predictable". + * + * \todo Verify these delays properly, as the upstream + * driver appears to configure _no_ delay. 
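+ *
+ * (With exposureDelay and gainDelay both set to 2 below, exposure and
+ * gain updates land on the same frame; this observation is inferred
+ * from the values, not from measurements.)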
+ */ + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "ov5670", { .unitCellSize = { 1120, 1120 }, @@ -200,6 +357,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeOff, 0 }, { controls::draft::TestPatternModeColorBars, 1 }, }, + .sensorDelays = { }, } }, { "ov5675", { .unitCellSize = { 1120, 1120 }, @@ -207,6 +365,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeOff, 0 }, { controls::draft::TestPatternModeColorBars, 1 }, }, + .sensorDelays = { }, } }, { "ov5693", { .unitCellSize = { 1400, 1400 }, @@ -219,6 +378,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * Rolling Bar". */ }, + .sensorDelays = { }, } }, { "ov64a40", { .unitCellSize = { 1008, 1008 }, @@ -232,6 +392,22 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * 4: "Vertical Color Bar Type 4" */ }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, + } }, + { "ov7251", { + .unitCellSize = { 3000, 3000 }, + .testPatternModes = { }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "ov8858", { .unitCellSize = { 1120, 1120 }, @@ -245,6 +421,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * 4: "Vertical Color Bar Type 4" */ }, + .sensorDelays = { }, } }, { "ov8865", { .unitCellSize = { 1400, 1400 }, @@ -259,6 +436,17 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen * 5: "Color squares with rolling bar" */ }, + .sensorDelays = { }, + } }, + { "ov9281", { + .unitCellSize = { 3000, 3000 }, + .testPatternModes = { }, + .sensorDelays = { + .exposureDelay = 2, + .gainDelay = 2, + .vblankDelay = 2, + .hblankDelay = 2 + }, } }, { "ov13858", { .unitCellSize = { 1120, 1120 }, @@ -266,6 +454,7 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen { controls::draft::TestPatternModeOff, 0 }, { controls::draft::TestPatternModeColorBars, 1 }, }, + .sensorDelays = { }, } }, }; diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp new file mode 100644 index 00000000..ab75b1f8 --- /dev/null +++ b/src/libcamera/sensor/camera_sensor_raw.cpp @@ -0,0 +1,1157 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Ideas on Board Oy. 
+ * + * camera_sensor_raw.cpp - A raw camera sensor using the V4L2 streams API + */ + +#include <algorithm> +#include <cmath> +#include <float.h> +#include <iomanip> +#include <limits.h> +#include <map> +#include <memory> +#include <optional> +#include <string.h> +#include <string> +#include <vector> + +#include <libcamera/base/class.h> +#include <libcamera/base/log.h> +#include <libcamera/base/utils.h> + +#include <libcamera/camera.h> +#include <libcamera/control_ids.h> +#include <libcamera/controls.h> +#include <libcamera/geometry.h> +#include <libcamera/orientation.h> +#include <libcamera/property_ids.h> +#include <libcamera/transform.h> + +#include <libcamera/ipa/core_ipa_interface.h> + +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/camera_lens.h" +#include "libcamera/internal/camera_sensor.h" +#include "libcamera/internal/camera_sensor_properties.h" +#include "libcamera/internal/formats.h" +#include "libcamera/internal/media_device.h" +#include "libcamera/internal/sysfs.h" +#include "libcamera/internal/v4l2_subdevice.h" + +namespace libcamera { + +class BayerFormat; +class CameraLens; +class MediaEntity; +class SensorConfiguration; + +struct CameraSensorProperties; + +enum class Orientation; + +LOG_DECLARE_CATEGORY(CameraSensor) + +class CameraSensorRaw : public CameraSensor, protected Loggable +{ +public: + CameraSensorRaw(const MediaEntity *entity); + ~CameraSensorRaw(); + + static std::variant<std::unique_ptr<CameraSensor>, int> + match(MediaEntity *entity); + + const std::string &model() const override { return model_; } + const std::string &id() const override { return id_; } + + const MediaEntity *entity() const override { return entity_; } + V4L2Subdevice *device() override { return subdev_.get(); } + + CameraLens *focusLens() override { return focusLens_.get(); } + + const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; } + std::vector<Size> sizes(unsigned int mbusCode) const override; + Size resolution() const override; + + V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes, + const Size &size, + const Size maxSize) const override; + int setFormat(V4L2SubdeviceFormat *format, + Transform transform = Transform::Identity) override; + int tryFormat(V4L2SubdeviceFormat *format) const override; + + int applyConfiguration(const SensorConfiguration &config, + Transform transform = Transform::Identity, + V4L2SubdeviceFormat *sensorFormat = nullptr) override; + + V4L2Subdevice::Stream imageStream() const override; + std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override; + V4L2SubdeviceFormat embeddedDataFormat() const override; + int setEmbeddedDataEnabled(bool enable) override; + + const ControlList &properties() const override { return properties_; } + int sensorInfo(IPACameraSensorInfo *info) const override; + Transform computeTransform(Orientation *orientation) const override; + BayerFormat::Order bayerOrder(Transform t) const override; + + const ControlInfoMap &controls() const override; + ControlList getControls(const std::vector<uint32_t> &ids) override; + int setControls(ControlList *ctrls) override; + + const std::vector<controls::draft::TestPatternModeEnum> & + testPatternModes() const override { return testPatternModes_; } + int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override; + const CameraSensorProperties::SensorDelays &sensorDelays() override; + +protected: + std::string logPrefix() const override; + +private: + LIBCAMERA_DISABLE_COPY(CameraSensorRaw) + 
+ std::optional<int> init(); + int initProperties(); + void initStaticProperties(); + void initTestPatternModes(); + int applyTestPatternMode(controls::draft::TestPatternModeEnum mode); + + const MediaEntity *entity_; + std::unique_ptr<V4L2Subdevice> subdev_; + + struct Streams { + V4L2Subdevice::Stream sink; + V4L2Subdevice::Stream source; + }; + + struct { + Streams image; + std::optional<Streams> edata; + } streams_; + + const CameraSensorProperties *staticProps_; + + std::string model_; + std::string id_; + + V4L2Subdevice::Formats formats_; + std::vector<unsigned int> mbusCodes_; + std::vector<Size> sizes_; + std::vector<controls::draft::TestPatternModeEnum> testPatternModes_; + controls::draft::TestPatternModeEnum testPatternMode_; + + Size pixelArraySize_; + Rectangle activeArea_; + BayerFormat::Order cfaPattern_; + bool supportFlips_; + bool flipsAlterBayerOrder_; + Orientation mountingOrientation_; + + ControlList properties_; + + std::unique_ptr<CameraLens> focusLens_; +}; + +/** + * \class CameraSensorRaw + * \brief A camera sensor based on V4L2 subdevices + * + * This class supports single-subdev sensors with a single source pad and one + * or two internal sink pads (for the image and embedded data streams). + */ + +CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity) + : entity_(entity), staticProps_(nullptr), supportFlips_(false), + flipsAlterBayerOrder_(false), properties_(properties::properties) +{ +} + +CameraSensorRaw::~CameraSensorRaw() = default; + +std::variant<std::unique_ptr<CameraSensor>, int> +CameraSensorRaw::match(MediaEntity *entity) +{ + /* Check the entity type. */ + if (entity->type() != MediaEntity::Type::V4L2Subdevice || + entity->function() != MEDIA_ENT_F_CAM_SENSOR) { + libcamera::LOG(CameraSensor, Debug) + << entity->name() << ": unsupported entity type (" + << utils::to_underlying(entity->type()) + << ") or function (" << utils::hex(entity->function()) << ")"; + return { 0 }; + } + + /* Count and check the number of pads. */ + static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_SOURCE + | MEDIA_PAD_FL_INTERNAL; + unsigned int numSinks = 0; + unsigned int numSources = 0; + + for (const MediaPad *pad : entity->pads()) { + switch (pad->flags() & kPadFlagsMask) { + case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL: + numSinks++; + break; + + case MEDIA_PAD_FL_SOURCE: + numSources++; + break; + + default: + libcamera::LOG(CameraSensor, Debug) + << entity->name() << ": unsupported pad " << pad->index() + << " type " << utils::hex(pad->flags()); + return { 0 }; + } + } + + if (numSinks < 1 || numSinks > 2 || numSources != 1) { + libcamera::LOG(CameraSensor, Debug) + << entity->name() << ": unsupported number of sinks (" + << numSinks << ") or sources (" << numSources << ")"; + return { 0 }; + } + + /* + * The entity matches. Create the camera sensor and initialize it. The + * init() function will perform further match checks. + */ + std::unique_ptr<CameraSensorRaw> sensor = + std::make_unique<CameraSensorRaw>(entity); + + std::optional<int> err = sensor->init(); + if (err) + return { *err }; + + return { std::move(sensor) }; +} + +std::optional<int> CameraSensorRaw::init() +{ + /* Create and open the subdev. */ + subdev_ = std::make_unique<V4L2Subdevice>(entity_); + int ret = subdev_->open(); + if (ret) + return { ret }; + + /* + * 1. Identify the pads. + */ + + /* + * First locate the source pad. The match() function guarantees there + * is one and only one source pad. 
+ */ + unsigned int sourcePad = UINT_MAX; + + for (const MediaPad *pad : entity_->pads()) { + if (pad->flags() & MEDIA_PAD_FL_SOURCE) { + sourcePad = pad->index(); + break; + } + } + + /* + * Iterate over the routes to identify the streams on the source pad, + * and the internal sink pads. + */ + V4L2Subdevice::Routing routing = {}; + ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat); + if (ret) + return { ret }; + + bool imageStreamFound = false; + + for (const V4L2Subdevice::Route &route : routing) { + if (route.source.pad != sourcePad) { + LOG(CameraSensor, Error) << "Invalid route " << route; + return { -EINVAL }; + } + + /* Identify the stream type based on the supported formats. */ + V4L2Subdevice::Formats formats = subdev_->formats(route.source); + + std::optional<MediaBusFormatInfo::Type> type; + + for (const auto &[code, sizes] : formats) { + const MediaBusFormatInfo &info = + MediaBusFormatInfo::info(code); + if (info.isValid()) { + type = info.type; + break; + } + } + + if (!type) { + LOG(CameraSensor, Warning) + << "No known format on pad " << route.source; + continue; + } + + switch (*type) { + case MediaBusFormatInfo::Type::Image: + if (imageStreamFound) { + LOG(CameraSensor, Error) + << "Multiple internal image streams (" + << streams_.image.sink << " and " + << route.sink << ")"; + return { -EINVAL }; + } + + imageStreamFound = true; + streams_.image.sink = route.sink; + streams_.image.source = route.source; + break; + + case MediaBusFormatInfo::Type::Metadata: + /* + * Skip metadata streams that are not sensor embedded + * data. The source stream reports a generic metadata + * format, check the sink stream for the exact format. + */ + formats = subdev_->formats(route.sink); + if (formats.size() != 1) + continue; + + if (MediaBusFormatInfo::info(formats.cbegin()->first).type != + MediaBusFormatInfo::Type::EmbeddedData) + continue; + + if (streams_.edata) { + LOG(CameraSensor, Error) + << "Multiple internal embedded data streams (" + << streams_.edata->sink << " and " + << route.sink << ")"; + return { -EINVAL }; + } + + streams_.edata = { route.sink, route.source }; + break; + + default: + break; + } + } + + if (!imageStreamFound) { + LOG(CameraSensor, Error) << "No image stream found"; + return { -EINVAL }; + } + + LOG(CameraSensor, Debug) + << "Found image stream " << streams_.image.sink + << " -> " << streams_.image.source; + + if (streams_.edata) + LOG(CameraSensor, Debug) + << "Found embedded data stream " << streams_.edata->sink + << " -> " << streams_.edata->source; + + /* + * 2. Enumerate and cache the media bus codes, sizes and colour filter + * array order for the image stream. + */ + + /* + * Get the native sensor CFA pattern. It is simpler to retrieve it from + * the internal image sink pad as it is guaranteed to expose a single + * format, and is not affected by flips. + */ + V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink); + if (formats.size() != 1) { + LOG(CameraSensor, Error) + << "Image pad has " << formats.size() + << " formats, expected 1"; + return { -EINVAL }; + } + + uint32_t nativeFormat = formats.cbegin()->first; + const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat); + if (!bayerFormat.isValid()) { + LOG(CameraSensor, Error) + << "Invalid native format " << nativeFormat; + return { 0 }; + } + + cfaPattern_ = bayerFormat.order; + + /* + * Retrieve and cache the media bus codes and sizes on the source image + * stream. 
+ */ + formats_ = subdev_->formats(streams_.image.source); + if (formats_.empty()) { + LOG(CameraSensor, Error) << "No image format found"; + return { -EINVAL }; + } + + /* Populate and sort the media bus codes and the sizes. */ + for (const auto &[code, ranges] : formats_) { + /* Drop non-raw formats (in case we have a hybrid sensor). */ + const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code); + if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) + continue; + + mbusCodes_.push_back(code); + std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_), + [](const SizeRange &range) { return range.max; }); + } + + if (mbusCodes_.empty()) { + LOG(CameraSensor, Debug) << "No raw image formats found"; + return { 0 }; + } + + std::sort(mbusCodes_.begin(), mbusCodes_.end()); + std::sort(sizes_.begin(), sizes_.end()); + + /* + * Remove duplicate sizes. There are no duplicate media bus codes as + * they are the keys in the formats map. + */ + auto last = std::unique(sizes_.begin(), sizes_.end()); + sizes_.erase(last, sizes_.end()); + + /* + * 3. Query selection rectangles. Retrieve properties, and verify that + * all the expected selection rectangles are supported. + */ + + Rectangle rect; + ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS, + &rect); + if (ret) { + LOG(CameraSensor, Error) << "No pixel array crop bounds"; + return { ret }; + } + + pixelArraySize_ = rect.size(); + + ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT, + &activeArea_); + if (ret) { + LOG(CameraSensor, Error) << "No pixel array crop default"; + return { ret }; + } + + ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP, + &rect); + if (ret) { + LOG(CameraSensor, Error) << "No pixel array crop rectangle"; + return { ret }; + } + + /* + * 4. Verify that all required controls are present. + */ + + const ControlIdMap &controls = subdev_->controls().idmap(); + + static constexpr uint32_t mandatoryControls[] = { + V4L2_CID_ANALOGUE_GAIN, + V4L2_CID_CAMERA_ORIENTATION, + V4L2_CID_EXPOSURE, + V4L2_CID_HBLANK, + V4L2_CID_PIXEL_RATE, + V4L2_CID_VBLANK, + }; + + ret = 0; + + for (uint32_t ctrl : mandatoryControls) { + if (!controls.count(ctrl)) { + LOG(CameraSensor, Error) + << "Mandatory V4L2 control " << utils::hex(ctrl) + << " not available"; + ret = -EINVAL; + } + } + + if (ret) { + LOG(CameraSensor, Error) + << "The sensor kernel driver needs to be fixed"; + LOG(CameraSensor, Error) + << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information"; + return { ret }; + } + + /* + * Verify if sensor supports horizontal/vertical flips + * + * \todo Handle horizontal and vertical flips independently. + */ + const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP); + const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP); + if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) && + vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { + supportFlips_ = true; + + if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT || + vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT) + flipsAlterBayerOrder_ = true; + } + + if (!supportFlips_) + LOG(CameraSensor, Debug) + << "Camera sensor does not support horizontal/vertical flip"; + + /* + * 5. Discover ancillary devices. + * + * \todo This code may be shared by different V4L2 sensor classes. 
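+	 *
+	 * A shared helper could plausibly take the entity and return the
+	 * lens, e.g. (hypothetical sketch, not an existing interface):
+	 *
+	 * \code
+	 * std::unique_ptr<CameraLens> discoverLens(const MediaEntity *entity)
+	 * {
+	 *         for (MediaEntity *ancillary : entity->ancillaryEntities()) {
+	 *                 if (ancillary->function() != MEDIA_ENT_F_LENS)
+	 *                         continue;
+	 *
+	 *                 auto lens = std::make_unique<CameraLens>(ancillary);
+	 *                 if (lens->init() == 0)
+	 *                         return lens;
+	 *         }
+	 *
+	 *         return nullptr;
+	 * }
+	 * \endcode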
+ */ + for (MediaEntity *ancillary : entity_->ancillaryEntities()) { + switch (ancillary->function()) { + case MEDIA_ENT_F_LENS: + focusLens_ = std::make_unique<CameraLens>(ancillary); + ret = focusLens_->init(); + if (ret) { + LOG(CameraSensor, Error) + << "Lens initialisation failed, lens disabled"; + focusLens_.reset(); + } + break; + + default: + LOG(CameraSensor, Warning) + << "Unsupported ancillary entity function " + << ancillary->function(); + break; + } + } + + /* + * 6. Initialize properties. + */ + + ret = initProperties(); + if (ret) + return { ret }; + + /* + * 7. Initialize controls. + */ + + /* + * Set HBLANK to the minimum to start with a well-defined line length, + * allowing IPA modules that do not modify HBLANK to use the sensor + * minimum line length in their calculations. + */ + const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK); + if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) { + ControlList ctrl(subdev_->controls()); + + ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum)); + ret = subdev_->setControls(&ctrl); + if (ret) + return ret; + } + + ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff); + if (ret) + return { ret }; + + return {}; +} + +int CameraSensorRaw::initProperties() +{ + model_ = subdev_->model(); + properties_.set(properties::Model, utils::toAscii(model_)); + + /* Generate a unique ID for the sensor. */ + id_ = sysfs::firmwareNodePath(subdev_->devicePath()); + if (id_.empty()) { + LOG(CameraSensor, Error) << "Can't generate sensor ID"; + return -EINVAL; + } + + /* Initialize the static properties from the sensor database. */ + initStaticProperties(); + + /* Retrieve and register properties from the kernel interface. */ + const ControlInfoMap &controls = subdev_->controls(); + + const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION); + if (orientation != controls.end()) { + int32_t v4l2Orientation = orientation->second.def().get<int32_t>(); + int32_t propertyValue; + + switch (v4l2Orientation) { + default: + LOG(CameraSensor, Warning) + << "Unsupported camera location " + << v4l2Orientation << ", setting to External"; + [[fallthrough]]; + case V4L2_CAMERA_ORIENTATION_EXTERNAL: + propertyValue = properties::CameraLocationExternal; + break; + case V4L2_CAMERA_ORIENTATION_FRONT: + propertyValue = properties::CameraLocationFront; + break; + case V4L2_CAMERA_ORIENTATION_BACK: + propertyValue = properties::CameraLocationBack; + break; + } + properties_.set(properties::Location, propertyValue); + } else { + LOG(CameraSensor, Warning) << "Failed to retrieve the camera location"; + } + + const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION); + if (rotationControl != controls.end()) { + int32_t propertyValue = rotationControl->second.def().get<int32_t>(); + + /* + * Cache the Transform associated with the camera mounting + * rotation for later use in computeTransform(). 
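+	 *
+	 * For example, a V4L2 rotation of 180 degrees maps to
+	 * Orientation::Rotate180, which a flip-capable sensor can later
+	 * cancel with an HFlip * VFlip transform.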
+ */ + bool success; + mountingOrientation_ = orientationFromRotation(propertyValue, &success); + if (!success) { + LOG(CameraSensor, Warning) + << "Invalid rotation of " << propertyValue + << " degrees - ignoring"; + mountingOrientation_ = Orientation::Rotate0; + } + + properties_.set(properties::Rotation, propertyValue); + } else { + LOG(CameraSensor, Warning) + << "Rotation control not available, default to 0 degrees"; + properties_.set(properties::Rotation, 0); + mountingOrientation_ = Orientation::Rotate0; + } + + properties_.set(properties::PixelArraySize, pixelArraySize_); + properties_.set(properties::PixelArrayActiveAreas, { activeArea_ }); + + /* Color filter array pattern. */ + uint32_t cfa; + + switch (cfaPattern_) { + case BayerFormat::BGGR: + cfa = properties::draft::BGGR; + break; + case BayerFormat::GBRG: + cfa = properties::draft::GBRG; + break; + case BayerFormat::GRBG: + cfa = properties::draft::GRBG; + break; + case BayerFormat::RGGB: + cfa = properties::draft::RGGB; + break; + case BayerFormat::MONO: + default: + cfa = properties::draft::MONO; + break; + } + + properties_.set(properties::draft::ColorFilterArrangement, cfa); + + return 0; +} + +void CameraSensorRaw::initStaticProperties() +{ + staticProps_ = CameraSensorProperties::get(model_); + if (!staticProps_) + return; + + /* Register the properties retrieved from the sensor database. */ + properties_.set(properties::UnitCellSize, staticProps_->unitCellSize); + + initTestPatternModes(); +} + +const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays() +{ + static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = { + .exposureDelay = 2, + .gainDelay = 1, + .vblankDelay = 2, + .hblankDelay = 2, + }; + + if (!staticProps_ || + (!staticProps_->sensorDelays.exposureDelay && + !staticProps_->sensorDelays.gainDelay && + !staticProps_->sensorDelays.vblankDelay && + !staticProps_->sensorDelays.hblankDelay)) { + LOG(CameraSensor, Warning) + << "No sensor delays found in static properties. " + "Assuming unverified defaults."; + + return defaultSensorDelays; + } + + return staticProps_->sensorDelays; +} + +void CameraSensorRaw::initTestPatternModes() +{ + const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN); + if (v4l2TestPattern == controls().end()) { + LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported"; + return; + } + + const auto &testPatternModes = staticProps_->testPatternModes; + if (testPatternModes.empty()) { + /* + * The camera sensor supports test patterns but we don't know + * how to map them so this should be fixed. + */ + LOG(CameraSensor, Debug) << "No static test pattern map for \'" + << model() << "\'"; + return; + } + + /* + * Create a map that associates the V4L2 control index to the test + * pattern mode by reversing the testPatternModes map provided by the + * camera sensor properties. This makes it easier to verify if the + * control index is supported in the below for loop that creates the + * list of supported test patterns. 
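+	 *
+	 * For example, the ov5640 entry in the sensor database maps
+	 * { TestPatternModeOff: 0, TestPatternModeColorBars: 1 }; reversed,
+	 * each index reported by V4L2_CID_TEST_PATTERN can be looked up
+	 * directly as { 0: Off, 1: ColorBars }.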
+ */ + std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode; + for (const auto &it : testPatternModes) + indexToTestPatternMode[it.second] = it.first; + + for (const ControlValue &value : v4l2TestPattern->second.values()) { + const int32_t index = value.get<int32_t>(); + + const auto it = indexToTestPatternMode.find(index); + if (it == indexToTestPatternMode.end()) { + LOG(CameraSensor, Debug) + << "Test pattern mode " << index << " ignored"; + continue; + } + + testPatternModes_.push_back(it->second); + } +} + +std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const +{ + std::vector<Size> sizes; + + const auto &format = formats_.find(mbusCode); + if (format == formats_.end()) + return sizes; + + const std::vector<SizeRange> &ranges = format->second; + std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes), + [](const SizeRange &range) { return range.max; }); + + std::sort(sizes.begin(), sizes.end()); + + return sizes; +} + +Size CameraSensorRaw::resolution() const +{ + return std::min(sizes_.back(), activeArea_.size()); +} + +V4L2SubdeviceFormat +CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes, + const Size &size, Size maxSize) const +{ + unsigned int desiredArea = size.width * size.height; + unsigned int bestArea = UINT_MAX; + float desiredRatio = static_cast<float>(size.width) / size.height; + float bestRatio = FLT_MAX; + const Size *bestSize = nullptr; + uint32_t bestCode = 0; + + for (unsigned int code : mbusCodes) { + const auto formats = formats_.find(code); + if (formats == formats_.end()) + continue; + + for (const SizeRange &range : formats->second) { + const Size &sz = range.max; + + if (!maxSize.isNull() && + (sz.width > maxSize.width || sz.height > maxSize.height)) + continue; + + if (sz.width < size.width || sz.height < size.height) + continue; + + float ratio = static_cast<float>(sz.width) / sz.height; + float ratioDiff = std::abs(ratio - desiredRatio); + unsigned int area = sz.width * sz.height; + unsigned int areaDiff = area - desiredArea; + + if (ratioDiff > bestRatio) + continue; + + if (ratioDiff < bestRatio || areaDiff < bestArea) { + bestRatio = ratioDiff; + bestArea = areaDiff; + bestSize = &sz; + bestCode = code; + } + } + } + + if (!bestSize) { + LOG(CameraSensor, Debug) << "No supported format or size found"; + return {}; + } + + V4L2SubdeviceFormat format{ + .code = bestCode, + .size = *bestSize, + .colorSpace = ColorSpace::Raw, + }; + + return format; +} + +int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform) +{ + /* Configure flips if the sensor supports that. */ + if (supportFlips_) { + ControlList flipCtrls(subdev_->controls()); + + flipCtrls.set(V4L2_CID_HFLIP, + static_cast<int32_t>(!!(transform & Transform::HFlip))); + flipCtrls.set(V4L2_CID_VFLIP, + static_cast<int32_t>(!!(transform & Transform::VFlip))); + + int ret = subdev_->setControls(&flipCtrls); + if (ret) + return ret; + } + + /* Apply format on the subdev. 
*/ + int ret = subdev_->setFormat(streams_.image.source, format); + if (ret) + return ret; + + subdev_->updateControlInfo(); + return 0; +} + +int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const +{ + return subdev_->setFormat(streams_.image.source, format, + V4L2Subdevice::Whence::TryFormat); +} + +int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config, + Transform transform, + V4L2SubdeviceFormat *sensorFormat) +{ + if (!config.isValid()) { + LOG(CameraSensor, Error) << "Invalid sensor configuration"; + return -EINVAL; + } + + std::vector<unsigned int> filteredCodes; + std::copy_if(mbusCodes_.begin(), mbusCodes_.end(), + std::back_inserter(filteredCodes), + [&config](unsigned int mbusCode) { + BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode); + if (bayer.bitDepth == config.bitDepth) + return true; + return false; + }); + if (filteredCodes.empty()) { + LOG(CameraSensor, Error) + << "Cannot find any format with bit depth " + << config.bitDepth; + return -EINVAL; + } + + /* + * Compute the sensor's data frame size by applying the cropping + * rectangle, subsampling and output crop to the sensor's pixel array + * size. + * + * \todo The actual size computation is for now ignored and only the + * output size is considered. This implies that resolutions obtained + * with two different cropping/subsampling will look identical and + * only the first found one will be considered. + */ + V4L2SubdeviceFormat subdevFormat = {}; + for (unsigned int code : filteredCodes) { + for (const Size &size : sizes(code)) { + if (size.width != config.outputSize.width || + size.height != config.outputSize.height) + continue; + + subdevFormat.code = code; + subdevFormat.size = size; + break; + } + } + if (!subdevFormat.code) { + LOG(CameraSensor, Error) << "Invalid output size in sensor configuration"; + return -EINVAL; + } + + int ret = setFormat(&subdevFormat, transform); + if (ret) + return ret; + + /* + * Return to the caller the format actually applied to the sensor. + * This is relevant if transform has changed the bayer pattern order. + */ + if (sensorFormat) + *sensorFormat = subdevFormat; + + /* \todo Handle AnalogCrop. Most sensors do not support set_selection */ + /* \todo Handle scaling in the digital domain. */ + + return 0; +} + +V4L2Subdevice::Stream CameraSensorRaw::imageStream() const +{ + return streams_.image.source; +} + +std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const +{ + if (!streams_.edata) + return {}; + + return { streams_.edata->source }; +} + +V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const +{ + if (!streams_.edata) + return {}; + + V4L2SubdeviceFormat format; + int ret = subdev_->getFormat(streams_.edata->source, &format); + if (ret) + return {}; + + return format; +} + +int CameraSensorRaw::setEmbeddedDataEnabled(bool enable) +{ + if (!streams_.edata) + return enable ? -ENOSTR : 0; + + V4L2Subdevice::Routing routing{ 2 }; + + routing[0].sink = streams_.image.sink; + routing[0].source = streams_.image.source; + routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE; + + routing[1].sink = streams_.edata->sink; + routing[1].source = streams_.edata->source; + routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0; + + int ret = subdev_->setRouting(&routing); + if (ret) + return ret; + + /* + * Check if the embedded data stream has been enabled or disabled + * correctly. Assume at least one route will match the embedded data + * source stream, as there would be something seriously wrong + * otherwise. 
+ */ + bool enabled = false; + + for (const V4L2Subdevice::Route &route : routing) { + if (route.source != streams_.edata->source) + continue; + + enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE; + break; + } + + if (enabled != enable) + return enabled ? -EISCONN : -ENOSTR; + + return 0; +} + +int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const +{ + info->model = model(); + + /* + * The active area size is a static property, while the crop + * rectangle needs to be re-read as it depends on the sensor + * configuration. + */ + info->activeAreaSize = { activeArea_.width, activeArea_.height }; + + int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP, + &info->analogCrop); + if (ret) + return ret; + + /* + * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y + * are defined relatively to the active pixel area, while V4L2's + * TGT_CROP target is defined in respect to the full pixel array. + * + * Compensate it by subtracting the active area offset. + */ + info->analogCrop.x -= activeArea_.x; + info->analogCrop.y -= activeArea_.y; + + /* The bit depth and image size depend on the currently applied format. */ + V4L2SubdeviceFormat format{}; + ret = subdev_->getFormat(streams_.image.source, &format); + if (ret) + return ret; + info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel; + info->outputSize = format.size; + + std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement); + info->cfaPattern = cfa ? *cfa : properties::draft::RGB; + + /* + * Retrieve the pixel rate, line length and minimum/maximum frame + * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE, + * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory. + */ + ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE, + V4L2_CID_HBLANK, + V4L2_CID_VBLANK }); + if (ctrls.empty()) { + LOG(CameraSensor, Error) + << "Failed to retrieve camera info controls"; + return -EINVAL; + } + + info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>(); + + const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK); + info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>(); + info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>(); + + const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK); + info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>(); + info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>(); + + return 0; +} + +Transform CameraSensorRaw::computeTransform(Orientation *orientation) const +{ + /* + * If we cannot do any flips we cannot change the native camera mounting + * orientation. + */ + if (!supportFlips_) { + *orientation = mountingOrientation_; + return Transform::Identity; + } + + /* + * Now compute the required transform to obtain 'orientation' starting + * from the mounting rotation. + * + * As a note: + * orientation / mountingOrientation_ = transform + * mountingOrientation_ * transform = orientation + */ + Transform transform = *orientation / mountingOrientation_; + + /* + * If transform contains any Transpose we cannot do it, so adjust + * 'orientation' to report the image native orientation and return Identity. 
+ */ + if (!!(transform & Transform::Transpose)) { + *orientation = mountingOrientation_; + return Transform::Identity; + } + + return transform; +} + +BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const +{ + if (!flipsAlterBayerOrder_) + return cfaPattern_; + + /* + * Apply the transform to the native (i.e. untransformed) Bayer order, + * using the rest of the Bayer format supplied by the caller. + */ + BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None }; + return format.transform(t).order; +} + +const ControlInfoMap &CameraSensorRaw::controls() const +{ + return subdev_->controls(); +} + +ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids) +{ + return subdev_->getControls(ids); +} + +int CameraSensorRaw::setControls(ControlList *ctrls) +{ + return subdev_->setControls(ctrls); +} + +int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode) +{ + if (testPatternMode_ == mode) + return 0; + + if (testPatternModes_.empty()) { + LOG(CameraSensor, Error) + << "Camera sensor does not support test pattern modes."; + return -EINVAL; + } + + return applyTestPatternMode(mode); +} + +int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode) +{ + if (testPatternModes_.empty()) + return 0; + + auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(), + mode); + if (it == testPatternModes_.end()) { + LOG(CameraSensor, Error) << "Unsupported test pattern mode " + << mode; + return -EINVAL; + } + + LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode; + + int32_t index = staticProps_->testPatternModes.at(mode); + ControlList ctrls{ controls() }; + ctrls.set(V4L2_CID_TEST_PATTERN, index); + + int ret = setControls(&ctrls); + if (ret) + return ret; + + testPatternMode_ = mode; + + return 0; +} + +std::string CameraSensorRaw::logPrefix() const +{ + return "'" + entity_->name() + "'"; +} + +REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0) + +} /* namespace libcamera */ diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build index bf4b131a..dce74ed6 100644 --- a/src/libcamera/sensor/meson.build +++ b/src/libcamera/sensor/meson.build @@ -1,6 +1,8 @@ # SPDX-License-Identifier: CC0-1.0 -libcamera_sources += files([ +libcamera_internal_sources += files([ 'camera_sensor.cpp', + 'camera_sensor_legacy.cpp', 'camera_sensor_properties.cpp', + 'camera_sensor_raw.cpp', ]) diff --git a/src/libcamera/shared_mem_object.cpp b/src/libcamera/shared_mem_object.cpp index 809fbdaf..d9b61d37 100644 --- a/src/libcamera/shared_mem_object.cpp +++ b/src/libcamera/shared_mem_object.cpp @@ -10,12 +10,11 @@ #include "libcamera/internal/shared_mem_object.h" -#include <stddef.h> #include <stdint.h> #include <sys/mman.h> -#include <sys/syscall.h> #include <sys/types.h> -#include <unistd.h> + +#include <libcamera/base/memfd.h> /** * \file shared_mem_object.cpp @@ -58,22 +57,12 @@ SharedMem::SharedMem() = default; */ SharedMem::SharedMem(const std::string &name, std::size_t size) { -#if HAVE_MEMFD_CREATE - int fd = memfd_create(name.c_str(), MFD_CLOEXEC); -#else - int fd = syscall(SYS_memfd_create, name.c_str(), MFD_CLOEXEC); -#endif - if (fd < 0) - return; - - fd_ = SharedFD(std::move(fd)); - if (!fd_.isValid()) + UniqueFD memfd = MemFd::create(name.c_str(), size, + MemFd::Seal::Shrink | MemFd::Seal::Grow); + if (!memfd.isValid()) return; - if (ftruncate(fd_.get(), size) < 0) { - fd_ = SharedFD(); - return; - } + fd_ = SharedFD(std::move(memfd)); void *mem = mmap(nullptr, size, PROT_READ | 
PROT_WRITE, MAP_SHARED, fd_.get(), 0); diff --git a/src/libcamera/software_isp/TODO b/src/libcamera/software_isp/TODO index 6bdc5905..a50db668 100644 --- a/src/libcamera/software_isp/TODO +++ b/src/libcamera/software_isp/TODO @@ -1,22 +1,3 @@ -1. Setting F_SEAL_SHRINK and F_SEAL_GROW after ftruncate() - ->> SharedMem::SharedMem(const std::string &name, std::size_t size) ->> : name_(name), size_(size), mem_(nullptr) ->> ->> ... ->> ->> if (ftruncate(fd_.get(), size_) < 0) ->> return; -> -> Should we set the GROW and SHRINK seals (in a separate patch) ? - -Yes, this can be done. -Setting F_SEAL_SHRINK and F_SEAL_GROW after the ftruncate() call above could catch -some potential errors related to improper access to the shared memory allocated by -the SharedMemObject. - ---- - 2. Reconsider stats sharing >>> +void SwStatsCpu::finishFrame(void) @@ -218,45 +199,6 @@ Yes, because, well... all the other IPAs were doing that... --- -10. Switch to libipa/algorithm.h API in processStats - ->> void IPASoftSimple::processStats(const ControlList &sensorControls) ->> -> Do you envision switching to the libipa/algorithm.h API at some point ? - -At some point, yes. - ---- - -11. Improve handling the sensor controls which take effect with a delay - -> void IPASoftSimple::processStats(const ControlList &sensorControls) -> { -> ... -> /* -> * AE / AGC, use 2 frames delay to make sure that the exposure and -> * the gain set have applied to the camera sensor. -> */ -> if (ignore_updates_ > 0) { -> --ignore_updates_; -> return; -> } - -This could be handled better with DelayedControls. - ---- - -12. Use DelayedControls class in ispStatsReady() - -> void SimpleCameraData::ispStatsReady() -> { -> swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN, -> V4L2_CID_EXPOSURE })); - -You should use the DelayedControls class. - ---- - 13. Improve black level and colour gains application I think the black level should eventually be moved before debayering, and diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp index f4a299d5..f0b83261 100644 --- a/src/libcamera/software_isp/debayer.cpp +++ b/src/libcamera/software_isp/debayer.cpp @@ -58,47 +58,48 @@ Debayer::~Debayer() /** * \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) - * \brief Configure the debayer object according to the passed in parameters. - * \param[in] inputCfg The input configuration. - * \param[in] outputCfgs The output configurations. + * \brief Configure the debayer object according to the passed in parameters + * \param[in] inputCfg The input configuration + * \param[in] outputCfgs The output configurations * - * \return 0 on success, a negative errno on failure. + * \return 0 on success, a negative errno on failure */ /** * \fn Size Debayer::patternSize(PixelFormat inputFormat) - * \brief Get the width and height at which the bayer pattern repeats. - * \param[in] inputFormat The input format. + * \brief Get the width and height at which the bayer pattern repeats + * \param[in] inputFormat The input format * * Valid sizes are: 2x2, 4x2 or 4x4. * - * \return Pattern size or an empty size for unsupported inputFormats. + * \return Pattern size or an empty size for unsupported inputFormats */ /** * \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat) - * \brief Get the supported output formats. - * \param[in] inputFormat The input format. 
+ * \brief Get the supported output formats + * \param[in] inputFormat The input format * - * \return All supported output formats or an empty vector if there are none. + * \return All supported output formats or an empty vector if there are none */ /** * \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) - * \brief Get the stride and the frame size. - * \param[in] outputFormat The output format. - * \param[in] size The output size. + * \brief Get the stride and the frame size + * \param[in] outputFormat The output format + * \param[in] size The output size * * \return A tuple of the stride and the frame size, or a tuple with 0,0 if - * there is no valid output config. + * there is no valid output config */ /** - * \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) - * \brief Process the bayer data into the requested format. - * \param[in] input The input buffer. - * \param[in] output The output buffer. - * \param[in] params The parameters to be used in debayering. + * \fn void Debayer::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) + * \brief Process the bayer data into the requested format + * \param[in] frame The frame number + * \param[in] input The input buffer + * \param[in] output The output buffer + * \param[in] params The parameters to be used in debayering * * \note DebayerParams is passed by value deliberately so that a copy is passed * when this is run in another thread by invokeMethod(). @@ -106,21 +107,21 @@ Debayer::~Debayer() /** * \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize) - * \brief Get the supported output sizes for the given input format and size. - * \param[in] inputFormat The input format. - * \param[in] inputSize The input size. + * \brief Get the supported output sizes for the given input format and size + * \param[in] inputFormat The input format + * \param[in] inputSize The input size * - * \return The valid size ranges or an empty range if there are none. + * \return The valid size ranges or an empty range if there are none */ /** * \var Signal<FrameBuffer *> Debayer::inputBufferReady - * \brief Signals when the input buffer is ready. + * \brief Signals when the input buffer is ready */ /** * \var Signal<FrameBuffer *> Debayer::outputBufferReady - * \brief Signals when the output buffer is ready. 
+ * \brief Signals when the output buffer is ready */ } /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h index c151fe5d..d7ca060d 100644 --- a/src/libcamera/software_isp/debayer.h +++ b/src/libcamera/software_isp/debayer.h @@ -40,7 +40,7 @@ public: virtual std::tuple<unsigned int, unsigned int> strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0; - virtual void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0; + virtual void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0; virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0; diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp index f8d2677d..31ab96ab 100644 --- a/src/libcamera/software_isp/debayer_cpu.cpp +++ b/src/libcamera/software_isp/debayer_cpu.cpp @@ -12,11 +12,15 @@ #include "debayer_cpu.h" #include <stdlib.h> +#include <sys/ioctl.h> #include <time.h> +#include <linux/dma-buf.h> + #include <libcamera/formats.h> #include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/dma_buf_allocator.h" #include "libcamera/internal/framebuffer.h" #include "libcamera/internal/mapped_framebuffer.h" @@ -49,16 +53,9 @@ DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats) /* Initialize color lookup tables */ for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) red_[i] = green_[i] = blue_[i] = i; - - for (unsigned int i = 0; i < kMaxLineBuffers; i++) - lineBuffers_[i] = nullptr; } -DebayerCpu::~DebayerCpu() -{ - for (unsigned int i = 0; i < kMaxLineBuffers; i++) - free(lineBuffers_[i]); -} +DebayerCpu::~DebayerCpu() = default; #define DECLARE_SRC_POINTERS(pixel_t) \ const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \ @@ -526,13 +523,10 @@ int DebayerCpu::configure(const StreamConfiguration &inputCfg, lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8; lineBufferLength_ = window_.width * inputConfig_.bpp / 8 + 2 * lineBufferPadding_; - for (unsigned int i = 0; - i < (inputConfig_.patternSize.height + 1) && enableInputMemcpy_; - i++) { - free(lineBuffers_[i]); - lineBuffers_[i] = (uint8_t *)malloc(lineBufferLength_); - if (!lineBuffers_[i]) - return -ENOMEM; + + if (enableInputMemcpy_) { + for (unsigned int i = 0; i <= inputConfig_.patternSize.height; i++) + lineBuffers_[i].resize(lineBufferLength_); } measuredFrames_ = 0; @@ -587,9 +581,10 @@ void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[]) return; for (unsigned int i = 0; i < patternHeight; i++) { - memcpy(lineBuffers_[i], linePointers[i + 1] - lineBufferPadding_, + memcpy(lineBuffers_[i].data(), + linePointers[i + 1] - lineBufferPadding_, lineBufferLength_); - linePointers[i + 1] = lineBuffers_[i] + lineBufferPadding_; + linePointers[i + 1] = lineBuffers_[i].data() + lineBufferPadding_; } /* Point lineBufferIndex_ to first unused lineBuffer */ @@ -614,9 +609,10 @@ void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[]) if (!enableInputMemcpy_) return; - memcpy(lineBuffers_[lineBufferIndex_], linePointers[patternHeight] - lineBufferPadding_, + memcpy(lineBuffers_[lineBufferIndex_].data(), + linePointers[patternHeight] - lineBufferPadding_, lineBufferLength_); - linePointers[patternHeight] = lineBuffers_[lineBufferIndex_] + lineBufferPadding_; + linePointers[patternHeight] = lineBuffers_[lineBufferIndex_].data() + lineBufferPadding_; lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight 
+ 1); } @@ -725,13 +721,17 @@ void DebayerCpu::process4(const uint8_t *src, uint8_t *dst) } } -static inline int64_t timeDiff(timespec &after, timespec &before) +namespace { + +inline int64_t timeDiff(timespec &after, timespec &before) { return (after.tv_sec - before.tv_sec) * 1000000000LL + (int64_t)after.tv_nsec - (int64_t)before.tv_nsec; } -void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) +} /* namespace */ + +void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) { timespec frameStartTime; @@ -740,6 +740,13 @@ void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime); } + std::vector<DmaSyncer> dmaSyncers; + for (const FrameBuffer::Plane &plane : input->planes()) + dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read); + + for (const FrameBuffer::Plane &plane : output->planes()) + dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write); + green_ = params.green; red_ = swapRedBlueGains_ ? params.blue : params.red; blue_ = swapRedBlueGains_ ? params.red : params.blue; @@ -767,6 +774,8 @@ void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams metadata.planes()[0].bytesused = out.planes()[0].size(); + dmaSyncers.clear(); + /* Measure before emitting signals */ if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure && ++measuredFrames_ > DebayerCpu::kFramesToSkip) { @@ -784,7 +793,12 @@ void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams } } - stats_->finishFrame(); + /* + * Buffer ids are currently not used, so pass zero as that parameter. + * + * \todo Pass real bufferId once stats buffer passing is changed. + */ + stats_->finishFrame(frame, 0); outputBufferReady.emit(output); inputBufferReady.emit(input); } diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h index 1dac6435..2c47e7c6 100644 --- a/src/libcamera/software_isp/debayer_cpu.h +++ b/src/libcamera/software_isp/debayer_cpu.h @@ -36,7 +36,7 @@ public: std::vector<PixelFormat> formats(PixelFormat input); std::tuple<unsigned int, unsigned int> strideAndFrameSize(const PixelFormat &outputFormat, const Size &size); - void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params); + void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params); SizeRange sizes(PixelFormat inputFormat, const Size &inputSize); /** @@ -146,7 +146,7 @@ private: DebayerInputConfig inputConfig_; DebayerOutputConfig outputConfig_; std::unique_ptr<SwStatsCpu> stats_; - uint8_t *lineBuffers_[kMaxLineBuffers]; + std::vector<uint8_t> lineBuffers_[kMaxLineBuffers]; unsigned int lineBufferLength_; unsigned int lineBufferPadding_; unsigned int lineBufferIndex_; diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build index f7c66e28..aac7eda7 100644 --- a/src/libcamera/software_isp/meson.build +++ b/src/libcamera/software_isp/meson.build @@ -7,7 +7,7 @@ if not softisp_enabled subdir_done() endif -libcamera_sources += files([ +libcamera_internal_sources += files([ 'debayer.cpp', 'debayer_cpu.cpp', 'software_isp.cpp', diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp index 20fb6f48..44baf200 100644 --- a/src/libcamera/software_isp/software_isp.cpp +++ b/src/libcamera/software_isp/software_isp.cpp @@ -13,13 +13,11 @@ #include <sys/types.h> #include <unistd.h> +#include 
<libcamera/controls.h> #include <libcamera/formats.h> #include <libcamera/stream.h> -#include "libcamera/internal/bayer_format.h" -#include "libcamera/internal/framebuffer.h" #include "libcamera/internal/ipa_manager.h" -#include "libcamera/internal/mapped_framebuffer.h" #include "libcamera/internal/software_isp/debayer_params.h" #include "debayer_cpu.h" @@ -63,9 +61,11 @@ LOG_DEFINE_CATEGORY(SoftwareIsp) * \brief Constructs SoftwareIsp object * \param[in] pipe The pipeline handler in use * \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline * handler + * \param[out] ipaControls The IPA controls to update */ -SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor) +SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor, + ControlInfoMap *ipaControls) : dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap | DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap | DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf) @@ -121,14 +121,14 @@ SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor) * The API tuning file is made from the sensor name. If the tuning file * isn't found, fall back to the 'uncalibrated' file. */ - std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml"); - if (ipaTuningFile.empty()) - ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml"); + std::string ipaTuningFile = + ipa_->configurationFile(sensor->model() + ".yaml", "uncalibrated.yaml"); int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() }, debayer_->getStatsFD(), sharedParams_.fd(), - sensor->controls()); + sensor->controls(), + ipaControls); if (ret) { LOG(SoftwareIsp, Error) << "IPA init failed"; debayer_.reset(); @@ -159,15 +159,18 @@ SoftwareIsp::~SoftwareIsp() /** * \brief Process the statistics gathered + * \param[in] frame The frame number + * \param[in] bufferId ID of the statistics buffer * \param[in] sensorControls The sensor controls * * Requests the IPA to calculate new parameters for ISP and new control * values for the sensor.
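The new frame and bufferId arguments are meant to be forwarded by the pipeline handler's stats slot; a sketch modelled on the SimpleCameraData::ispStatsReady() call quoted in the TODO items removed above (the handler names are illustrative):

```cpp
/* Hypothetical slot connected to SoftwareIsp::ispStatsReady. */
void SimpleCameraData::ispStatsReady(uint32_t frame, uint32_t bufferId)
{
	swIsp_->processStats(frame, bufferId,
			     sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
						    V4L2_CID_EXPOSURE }));
}
```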
*/ -void SoftwareIsp::processStats(const ControlList &sensorControls) +void SoftwareIsp::processStats(const uint32_t frame, const uint32_t bufferId, + const ControlList &sensorControls) { ASSERT(ipa_); - ipa_->processStats(sensorControls); + ipa_->processStats(frame, bufferId, sensorControls); } /** @@ -223,16 +226,17 @@ SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &siz * \brief Configure the SoftwareIsp object according to the passed in parameters * \param[in] inputCfg The input configuration * \param[in] outputCfgs The output configurations - * \param[in] sensorControls ControlInfoMap of the controls supported by the sensor + * \param[in] configInfo The IPA configuration data, received from the pipeline + * handler * \return 0 on success, a negative errno on failure */ int SoftwareIsp::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs, - const ControlInfoMap &sensorControls) + const ipa::soft::IPAConfigInfo &configInfo) { ASSERT(ipa_ && debayer_); - int ret = ipa_->configure(sensorControls); + int ret = ipa_->configure(configInfo); if (ret < 0) return ret; @@ -241,74 +245,63 @@ int SoftwareIsp::configure(const StreamConfiguration &inputCfg, /** * \brief Export the buffers from the Software ISP - * \param[in] output Output stream index exporting the buffers + * \param[in] stream Output stream exporting the buffers * \param[in] count Number of buffers to allocate * \param[out] buffers Vector to store the allocated buffers * \return The number of allocated buffers on success or a negative error code * otherwise */ -int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count, +int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { ASSERT(debayer_ != nullptr); /* single output for now */ - if (output >= 1) + if (stream == nullptr) return -EINVAL; - for (unsigned int i = 0; i < count; i++) { - const std::string name = "frame-" + std::to_string(i); - const size_t frameSize = debayer_->frameSize(); - - FrameBuffer::Plane outPlane; - outPlane.fd = SharedFD(dmaHeap_.alloc(name.c_str(), frameSize)); - if (!outPlane.fd.isValid()) { - LOG(SoftwareIsp, Error) - << "failed to allocate a dma_buf"; - return -ENOMEM; - } - outPlane.offset = 0; - outPlane.length = frameSize; - - std::vector<FrameBuffer::Plane> planes{ outPlane }; - buffers->emplace_back(std::make_unique<FrameBuffer>(std::move(planes))); - } + return dmaHeap_.exportBuffers(count, { debayer_->frameSize() }, buffers); +} - return count; +/** + * \brief Queue a request and process the control list from the application + * \param[in] frame The number of the frame which will be processed next + * \param[in] controls The controls for the \a frame + */ +void SoftwareIsp::queueRequest(const uint32_t frame, const ControlList &controls) +{ + ipa_->queueRequest(frame, controls); } /** * \brief Queue buffers to Software ISP + * \param[in] frame The frame number * \param[in] input The input framebuffer - * \param[in] outputs The container holding the output stream indexes and + * \param[in] outputs The container holding the output stream pointers and * their respective frame buffer outputs * \return 0 on success, a negative errno on failure */ -int SoftwareIsp::queueBuffers(FrameBuffer *input, - const std::map<unsigned int, FrameBuffer *> &outputs) +int SoftwareIsp::queueBuffers(uint32_t frame, FrameBuffer *input, + const std::map<const Stream *, FrameBuffer *> &outputs) 
{ - unsigned int mask = 0; - /* * Validate the outputs as a sanity check: at least one output is - * required, all outputs must reference a valid stream and no two - * outputs can reference the same stream. + * required, all outputs must reference a valid stream. */ if (outputs.empty()) return -EINVAL; - for (auto [index, buffer] : outputs) { + /* We only support a single stream for now. */ + if (outputs.size() != 1) + return -EINVAL; + + for (auto [stream, buffer] : outputs) { if (!buffer) return -EINVAL; - if (index >= 1) /* only single stream atm */ - return -EINVAL; - if (mask & (1 << index)) - return -EINVAL; - - mask |= 1 << index; } - process(input, outputs.at(0)); + for (auto iter = outputs.begin(); iter != outputs.end(); iter++) + process(frame, input, iter->second); return 0; } @@ -340,13 +333,15 @@ void SoftwareIsp::stop() /** * \brief Passes the input framebuffer to the ISP worker to process + * \param[in] frame The frame number * \param[in] input The input framebuffer * \param[out] output The framebuffer to write the processed frame to */ -void SoftwareIsp::process(FrameBuffer *input, FrameBuffer *output) +void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output) { + ipa_->computeParams(frame); debayer_->invokeMethod(&DebayerCpu::process, - ConnectionTypeQueued, input, output, debayerParams_); + ConnectionTypeQueued, frame, input, output, debayerParams_); } void SoftwareIsp::saveIspParams() @@ -359,9 +354,9 @@ void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls) setSensorControls.emit(sensorControls); } -void SoftwareIsp::statsReady() +void SoftwareIsp::statsReady(uint32_t frame, uint32_t bufferId) { - ispStatsReady.emit(); + ispStatsReady.emit(frame, bufferId); } void SoftwareIsp::inputReady(FrameBuffer *input) diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp index 815c4d4f..c520c806 100644 --- a/src/libcamera/software_isp/swstats_cpu.cpp +++ b/src/libcamera/software_isp/swstats_cpu.cpp @@ -311,13 +311,15 @@ void SwStatsCpu::startFrame(void) /** * \brief Finish statistics calculation for the current frame + * \param[in] frame The frame number + * \param[in] bufferId ID of the statistics buffer * * This may only be called after a successful setWindow() call. 
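With outputs keyed by Stream pointers after the queueBuffers() rework above, a minimal caller looks as follows (a sketch, with illustrative variable names):

```cpp
/* One input buffer, one output stream: the only layout supported for now. */
std::map<const Stream *, FrameBuffer *> outputs;
outputs[&data->outputStream_] = outputBuffer;

int ret = swIsp_->queueBuffers(frame, inputBuffer, outputs);
if (ret < 0)
	LOG(SoftwareIsp, Error) << "Queueing buffers failed: " << ret;
```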
*/ -void SwStatsCpu::finishFrame(void) +void SwStatsCpu::finishFrame(uint32_t frame, uint32_t bufferId) { *sharedStats_ = stats_; - statsReady.emit(); + statsReady.emit(frame, bufferId); } /** diff --git a/src/libcamera/software_isp/swstats_cpu.h b/src/libcamera/software_isp/swstats_cpu.h index 363e326f..26a2f462 100644 --- a/src/libcamera/software_isp/swstats_cpu.h +++ b/src/libcamera/software_isp/swstats_cpu.h @@ -41,7 +41,7 @@ public: int configure(const StreamConfiguration &inputCfg); void setWindow(const Rectangle &window); void startFrame(); - void finishFrame(); + void finishFrame(uint32_t frame, uint32_t bufferId); void processLine0(unsigned int y, const uint8_t *src[]) { @@ -61,7 +61,7 @@ public: (this->*stats2_)(src); } - Signal<> statsReady; + Signal<uint32_t, uint32_t> statsReady; private: using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]); diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp index 053cc4b8..978d7275 100644 --- a/src/libcamera/stream.cpp +++ b/src/libcamera/stream.cpp @@ -9,15 +9,16 @@ #include <algorithm> #include <array> -#include <iomanip> #include <limits.h> -#include <sstream> - -#include <libcamera/request.h> +#include <ostream> +#include <sstream> +#include <string> +#include <vector> #include <libcamera/base/log.h> #include <libcamera/base/utils.h> +#include <libcamera/request.h> /** * \file stream.h @@ -392,7 +393,23 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats) */ std::string StreamConfiguration::toString() const { - return size.toString() + "-" + pixelFormat.toString(); + std::stringstream ss; + ss << *this; + + return ss.str(); +} + +/** + * \brief Insert a text representation of a StreamConfiguration into an output + * stream + * \param[in] out The output stream + * \param[in] cfg The StreamConfiguration + * \return The output stream \a out + */ +std::ostream &operator<<(std::ostream &out, const StreamConfiguration &cfg) +{ + out << cfg.size << "-" << cfg.pixelFormat; + return out; } /** diff --git a/src/libcamera/v4l2_device.cpp b/src/libcamera/v4l2_device.cpp index 4a2048cf..2f65a43a 100644 --- a/src/libcamera/v4l2_device.cpp +++ b/src/libcamera/v4l2_device.cpp @@ -8,9 +8,8 @@ #include "libcamera/internal/v4l2_device.h" #include <fcntl.h> -#include <iomanip> -#include <limits.h> #include <map> +#include <stdint.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> @@ -206,10 +205,29 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids) if (info.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) { ControlType type; + ControlValue &value = ctrl.second; + Span<uint8_t> data; switch (info.type) { case V4L2_CTRL_TYPE_U8: type = ControlTypeByte; + value.reserve(type, true, info.elems); + data = value.data(); + v4l2Ctrl.p_u8 = data.data(); + break; + + case V4L2_CTRL_TYPE_U16: + type = ControlTypeUnsigned16; + value.reserve(type, true, info.elems); + data = value.data(); + v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data()); + break; + + case V4L2_CTRL_TYPE_U32: + type = ControlTypeUnsigned32; + value.reserve(type, true, info.elems); + data = value.data(); + v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data()); break; default: @@ -219,11 +237,6 @@ ControlList V4L2Device::getControls(const std::vector<uint32_t> &ids) return {}; } - ControlValue &value = ctrl.second; - value.reserve(type, true, info.elems); - Span<uint8_t> data = value.data(); - - v4l2Ctrl.p_u8 = data.data(); v4l2Ctrl.size = data.size(); } } @@ -301,6 +314,30 @@ int V4L2Device::setControls(ControlList *ctrls) /* Set the 
v4l2_ext_control value for the write operation. */ ControlValue &value = ctrl->second; switch (iter->first->type()) { + case ControlTypeUnsigned16: { + if (value.isArray()) { + Span<uint8_t> data = value.data(); + v4l2Ctrl.p_u16 = reinterpret_cast<uint16_t *>(data.data()); + v4l2Ctrl.size = data.size(); + } else { + v4l2Ctrl.value = value.get<uint16_t>(); + } + + break; + } + + case ControlTypeUnsigned32: { + if (value.isArray()) { + Span<uint8_t> data = value.data(); + v4l2Ctrl.p_u32 = reinterpret_cast<uint32_t *>(data.data()); + v4l2Ctrl.size = data.size(); + } else { + v4l2Ctrl.value = value.get<uint32_t>(); + } + + break; + } + case ControlTypeInteger32: { if (value.isArray()) { Span<uint8_t> data = value.data(); @@ -490,6 +527,12 @@ ControlType V4L2Device::v4l2CtrlType(uint32_t ctrlType) case V4L2_CTRL_TYPE_BOOLEAN: return ControlTypeBool; + case V4L2_CTRL_TYPE_U16: + return ControlTypeUnsigned16; + + case V4L2_CTRL_TYPE_U32: + return ControlTypeUnsigned32; + case V4L2_CTRL_TYPE_INTEGER: return ControlTypeInteger32; @@ -522,7 +565,15 @@ std::unique_ptr<ControlId> V4L2Device::v4l2ControlId(const v4l2_query_ext_ctrl & const std::string name(static_cast<const char *>(ctrl.name), len); const ControlType type = v4l2CtrlType(ctrl.type); - return std::make_unique<ControlId>(ctrl.id, name, type); + ControlId::DirectionFlags flags; + if (ctrl.flags & V4L2_CTRL_FLAG_READ_ONLY) + flags = ControlId::Direction::Out; + else if (ctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY) + flags = ControlId::Direction::In; + else + flags = ControlId::Direction::In | ControlId::Direction::Out; + + return std::make_unique<ControlId>(ctrl.id, name, "v4l2", type, flags); } /** @@ -538,6 +589,16 @@ std::optional<ControlInfo> V4L2Device::v4l2ControlInfo(const v4l2_query_ext_ctrl static_cast<uint8_t>(ctrl.maximum), static_cast<uint8_t>(ctrl.default_value)); + case V4L2_CTRL_TYPE_U16: + return ControlInfo(static_cast<uint16_t>(ctrl.minimum), + static_cast<uint16_t>(ctrl.maximum), + static_cast<uint16_t>(ctrl.default_value)); + + case V4L2_CTRL_TYPE_U32: + return ControlInfo(static_cast<uint32_t>(ctrl.minimum), + static_cast<uint32_t>(ctrl.maximum), + static_cast<uint32_t>(ctrl.default_value)); + case V4L2_CTRL_TYPE_BOOLEAN: return ControlInfo(static_cast<bool>(ctrl.minimum), static_cast<bool>(ctrl.maximum), @@ -624,6 +685,8 @@ void V4L2Device::listControls() case V4L2_CTRL_TYPE_BITMASK: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_U8: + case V4L2_CTRL_TYPE_U16: + case V4L2_CTRL_TYPE_U32: break; /* \todo Support other control types. */ default: diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp index 70568335..e8b3eb9c 100644 --- a/src/libcamera/v4l2_pixelformat.cpp +++ b/src/libcamera/v4l2_pixelformat.cpp @@ -139,6 +139,8 @@ const std::map<V4L2PixelFormat, V4L2PixelFormat::Info> vpf2pf{ { formats::R10_CSI2P, "10-bit Greyscale Packed" } }, { V4L2PixelFormat(V4L2_PIX_FMT_Y12), { formats::R12, "12-bit Greyscale" } }, + { V4L2PixelFormat(V4L2_PIX_FMT_Y12P), + { formats::R12_CSI2P, "12-bit Greyscale Packed" } }, { V4L2PixelFormat(V4L2_PIX_FMT_Y16), { formats::R16, "16-bit Greyscale" } }, @@ -371,6 +373,40 @@ V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat) } /** + * \brief Test if a V4L2PixelFormat is one of the line-based generic metadata + * formats + * + * A limited number of metadata formats, the ones that represent generic + * line-based metadata buffers, need to have their width, height and + * bytesperline set by userspace.
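Building on the ControlTypeUnsigned16/Unsigned32 plumbing added to V4L2Device above, writing a 16-bit payload control could look like the sketch below; the control ID is invented, and the array construction assumes the matching ControlValue support from this series:

```cpp
/* Hypothetical 16-bit array control; this CID does not exist upstream. */
constexpr uint32_t V4L2_CID_EXAMPLE_LUT = 0x00981234;

ControlList ctrls(dev->controls());
const std::array<uint16_t, 4> lut{ 0, 1024, 2048, 4095 };
ctrls.set(V4L2_CID_EXAMPLE_LUT, ControlValue(Span<const uint16_t>(lut)));

int ret = dev->setControls(&ctrls);
```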
+ * + * This function tests if the current V4L2PixelFormat is one of those. + * + * Note: It would have been nicer to store this information in a + * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to + * applications, there are no PixelFormat or DRM fourcc codes associated with + * them. + * + * \return True if the V4L2PixelFormat is a generic line-based metadata format, + * false otherwise + */ +bool V4L2PixelFormat::isGenericLineBasedMetadata() const +{ + switch (fourcc_) { + case V4L2_META_FMT_GENERIC_8: + case V4L2_META_FMT_GENERIC_CSI2_10: + case V4L2_META_FMT_GENERIC_CSI2_12: + case V4L2_META_FMT_GENERIC_CSI2_14: + case V4L2_META_FMT_GENERIC_CSI2_16: + case V4L2_META_FMT_GENERIC_CSI2_20: + case V4L2_META_FMT_GENERIC_CSI2_24: + return true; + default: + return false; + } +} + +/** * \brief Insert a text representation of a V4L2PixelFormat into an output * stream * \param[in] out The output stream diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp index 82824433..33279654 100644 --- a/src/libcamera/v4l2_subdevice.cpp +++ b/src/libcamera/v4l2_subdevice.cpp @@ -8,21 +8,26 @@ #include "libcamera/internal/v4l2_subdevice.h" #include <fcntl.h> -#include <iomanip> -#include <regex> #include <sstream> #include <string.h> #include <sys/ioctl.h> #include <unistd.h> +#pragma GCC diagnostic push +#if defined __SANITIZE_ADDRESS__ && defined __OPTIMIZE__ +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif +#include <regex> +#pragma GCC diagnostic pop + #include <linux/media-bus-format.h> #include <linux/v4l2-subdev.h> -#include <libcamera/geometry.h> - #include <libcamera/base/log.h> #include <libcamera/base/utils.h> +#include <libcamera/geometry.h> + #include "libcamera/internal/formats.h" #include "libcamera/internal/media_device.h" #include "libcamera/internal/media_object.h" @@ -189,6 +194,20 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{ .bitsPerPixel = 24, .colourEncoding = PixelFormatInfo::ColourEncodingRGB, } }, + { MEDIA_BUS_FMT_RGB121212_1X36, { + .name = "RGB121212_1X36", + .code = MEDIA_BUS_FMT_RGB121212_1X36, + .type = MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 36, + .colourEncoding = PixelFormatInfo::ColourEncodingRGB, + } }, + { MEDIA_BUS_FMT_RGB202020_1X60, { + .name = "RGB202020_1X60", + .code = MEDIA_BUS_FMT_RGB202020_1X60, + .type = MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 60, + .colourEncoding = PixelFormatInfo::ColourEncodingRGB, + } }, { MEDIA_BUS_FMT_ARGB8888_1X32, { .name = "ARGB8888_1X32", .code = MEDIA_BUS_FMT_ARGB8888_1X32, @@ -679,6 +698,34 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{ .bitsPerPixel = 16, .colourEncoding = PixelFormatInfo::ColourEncodingRAW } }, + { MEDIA_BUS_FMT_SBGGR20_1X20, { + .name = "SBGGR20_1X20", + .code = MEDIA_BUS_FMT_SBGGR20_1X20, + .type = MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 20, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW + } }, + { MEDIA_BUS_FMT_SGBRG20_1X20, { + .name = "SGBRG20_1X20", + .code = MEDIA_BUS_FMT_SGBRG20_1X20, + .type = MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 20, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW + } }, + { MEDIA_BUS_FMT_SGRBG20_1X20, { + .name = "SGRBG20_1X20", + .code = MEDIA_BUS_FMT_SGRBG20_1X20, + .type = MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 20, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW + } }, + { MEDIA_BUS_FMT_SRGGB20_1X20, { + .name = "SRGGB20_1X20", + .code = MEDIA_BUS_FMT_SRGGB20_1X20, + .type = 
MediaBusFormatInfo::Type::Image, + .bitsPerPixel = 20, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW + } }, /* \todo Clarify colour encoding for HSV formats */ { MEDIA_BUS_FMT_AHSV8888_1X32, { .name = "AHSV8888_1X32", @@ -701,6 +748,69 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{ .bitsPerPixel = 0, .colourEncoding = PixelFormatInfo::ColourEncodingRAW, } }, + { MEDIA_BUS_FMT_META_8, { + .name = "META_8", + .code = MEDIA_BUS_FMT_META_8, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 8, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_10, { + .name = "META_10", + .code = MEDIA_BUS_FMT_META_10, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 10, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_12, { + .name = "META_12", + .code = MEDIA_BUS_FMT_META_12, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 12, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_14, { + .name = "META_14", + .code = MEDIA_BUS_FMT_META_14, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 14, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_16, { + .name = "META_16", + .code = MEDIA_BUS_FMT_META_16, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 16, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_20, { + .name = "META_20", + .code = MEDIA_BUS_FMT_META_20, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 20, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_META_24, { + .name = "META_24", + .code = MEDIA_BUS_FMT_META_24, + .type = MediaBusFormatInfo::Type::Metadata, + .bitsPerPixel = 24, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_CCS_EMBEDDED, { + .name = "CCS_EMBEDDED", + .code = MEDIA_BUS_FMT_CCS_EMBEDDED, + .type = MediaBusFormatInfo::Type::EmbeddedData, + .bitsPerPixel = 0, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, + { MEDIA_BUS_FMT_OV2740_EMBEDDED, { + .name = "OV2740_EMBEDDED", + .code = MEDIA_BUS_FMT_OV2740_EMBEDDED, + .type = MediaBusFormatInfo::Type::EmbeddedData, + .bitsPerPixel = 0, + .colourEncoding = PixelFormatInfo::ColourEncodingRAW, + } }, }; } /* namespace */ diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp index 4947aa3d..e241eb47 100644 --- a/src/libcamera/v4l2_videodevice.cpp +++ b/src/libcamera/v4l2_videodevice.cpp @@ -10,7 +10,6 @@ #include <algorithm> #include <array> #include <fcntl.h> -#include <iomanip> #include <sstream> #include <string.h> #include <sys/ioctl.h> @@ -803,12 +802,19 @@ std::string V4L2VideoDevice::logPrefix() const */ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format) { - if (caps_.isMeta()) - return getFormatMeta(format); - else if (caps_.isMultiplanar()) - return getFormatMultiplane(format); - else + switch (bufferType_) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: return getFormatSingleplane(format); + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return getFormatMultiplane(format); + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return getFormatMeta(format); + default: + return -EINVAL; + } } /** @@ -823,12 +829,19 @@ int V4L2VideoDevice::getFormat(V4L2DeviceFormat *format) */ int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format) { - if (caps_.isMeta()) 
- return trySetFormatMeta(format, false); - else if (caps_.isMultiplanar()) - return trySetFormatMultiplane(format, false); - else + switch (bufferType_) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: return trySetFormatSingleplane(format, false); + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + return trySetFormatMultiplane(format, false); + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + return trySetFormatMeta(format, false); + default: + return -EINVAL; + } } /** @@ -842,13 +855,25 @@ int V4L2VideoDevice::tryFormat(V4L2DeviceFormat *format) */ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format) { - int ret = 0; - if (caps_.isMeta()) - ret = trySetFormatMeta(format, true); - else if (caps_.isMultiplanar()) - ret = trySetFormatMultiplane(format, true); - else + int ret; + + switch (bufferType_) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: ret = trySetFormatSingleplane(format, true); + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + ret = trySetFormatMultiplane(format, true); + break; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + ret = trySetFormatMeta(format, true); + break; + default: + ret = -EINVAL; + break; + } /* Cache the set format on success. */ if (ret) @@ -863,7 +888,7 @@ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format) int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format) { struct v4l2_format v4l2Format = {}; - struct v4l2_meta_format *pix = &v4l2Format.fmt.meta; + struct v4l2_meta_format *meta = &v4l2Format.fmt.meta; int ret; v4l2Format.type = bufferType_; @@ -873,25 +898,42 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format) return ret; } - format->size.width = 0; - format->size.height = 0; - format->fourcc = V4L2PixelFormat(pix->dataformat); + format->fourcc = V4L2PixelFormat(meta->dataformat); + format->planes[0].size = meta->buffersize; format->planesCount = 1; - format->planes[0].bpl = pix->buffersize; - format->planes[0].size = pix->buffersize; + + bool genericLineBased = caps_.isMetaCapture() && + format->fourcc.isGenericLineBasedMetadata(); + + if (genericLineBased) { + format->size.width = meta->width; + format->size.height = meta->height; + format->planes[0].bpl = meta->bytesperline; + } else { + format->size.width = 0; + format->size.height = 0; + format->planes[0].bpl = meta->buffersize; + } return 0; } int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set) { + bool genericLineBased = caps_.isMetaCapture() && + format->fourcc.isGenericLineBasedMetadata(); struct v4l2_format v4l2Format = {}; - struct v4l2_meta_format *pix = &v4l2Format.fmt.meta; + struct v4l2_meta_format *meta = &v4l2Format.fmt.meta; int ret; v4l2Format.type = bufferType_; - pix->dataformat = format->fourcc; - pix->buffersize = format->planes[0].size; + meta->dataformat = format->fourcc; + meta->buffersize = format->planes[0].size; + if (genericLineBased) { + meta->width = format->size.width; + meta->height = format->size.height; + meta->bytesperline = format->planes[0].bpl; + } ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format); if (ret) { LOG(V4L2, Error) @@ -904,12 +946,18 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set) * Return to caller the format actually applied on the video device, * which might differ from the requested one. 
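For the generic line-based metadata handling above, configuring such a node might look like this sketch (the geometry values are illustrative):

```cpp
/* Two lines of 10-bit packed CSI-2 embedded data, 4096 pixels wide. */
V4L2DeviceFormat format{};
format.fourcc = V4L2PixelFormat(V4L2_META_FMT_GENERIC_CSI2_10);
format.size = Size(4096, 2);
format.planes[0].bpl = 4096 * 10 / 8;	/* 5120 bytes per line */

int ret = metaDev->setFormat(&format);
/* On success the driver-adjusted width/height/bpl are written back. */
```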
*/ - format->size.width = 0; - format->size.height = 0; - format->fourcc = V4L2PixelFormat(pix->dataformat); + format->fourcc = V4L2PixelFormat(meta->dataformat); format->planesCount = 1; - format->planes[0].bpl = pix->buffersize; - format->planes[0].size = pix->buffersize; + format->planes[0].size = meta->buffersize; + if (genericLineBased) { + format->size.width = meta->width; + format->size.height = meta->height; + format->planes[0].bpl = meta->bytesperline; + } else { + format->size.width = 0; + format->size.height = 0; + format->planes[0].bpl = meta->buffersize; + } return 0; } @@ -1190,6 +1238,38 @@ std::vector<SizeRange> V4L2VideoDevice::enumSizes(V4L2PixelFormat pixelFormat) } /** + * \brief Get the selection rectangle for \a target + * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags + * \param[out] rect The selection rectangle to retrieve + * + * \todo Define a V4L2SelectionTarget enum for the selection target + * + * \return 0 on success or a negative error code otherwise + */ +int V4L2VideoDevice::getSelection(unsigned int target, Rectangle *rect) +{ + struct v4l2_selection sel = {}; + + sel.type = bufferType_; + sel.target = target; + sel.flags = 0; + + int ret = ioctl(VIDIOC_G_SELECTION, &sel); + if (ret < 0) { + LOG(V4L2, Error) << "Unable to get rectangle " << target + << ": " << strerror(-ret); + return ret; + } + + rect->x = sel.r.left; + rect->y = sel.r.top; + rect->width = sel.r.width; + rect->height = sel.r.height; + + return 0; +} + +/** * \brief Set a selection rectangle \a rect for \a target * \param[in] target The selection target defined by the V4L2_SEL_TGT_* flags * \param[inout] rect The selection rectangle to be applied @@ -1815,7 +1895,7 @@ FrameBuffer *V4L2VideoDevice::dequeueBuffer() * Detect kernel drivers which do not reset the sequence number to zero * on stream start. */ - if (!firstFrame_) { + if (!firstFrame_.has_value()) { if (buf.sequence) LOG(V4L2, Info) << "Zero sequence expected for first frame (got " @@ -2067,15 +2147,24 @@ V4L2PixelFormat V4L2VideoDevice::toV4L2PixelFormat(const PixelFormat &pixelForma * \class V4L2M2MDevice * \brief Memory-to-Memory video device * + * Memory to Memory devices in the kernel using the V4L2 M2M API can + * operate with multiple contexts for parallel operations on a single + * device. Each instance of a V4L2M2MDevice represents a single context. + * * The V4L2M2MDevice manages two V4L2VideoDevice instances on the same * deviceNode which operate together using two queues to implement the V4L2 * Memory to Memory API. * - * The two devices should be opened by calling open() on the V4L2M2MDevice, and - * can be closed by calling close on the V4L2M2MDevice. + * Users of this class should create a new instance of the V4L2M2MDevice for + * each desired execution context and then open it by calling open() on the + * V4L2M2MDevice and close it by calling close() on the V4L2M2MDevice. * * Calling V4L2VideoDevice::open() and V4L2VideoDevice::close() on the capture * or output V4L2VideoDevice is not permitted. + * + * Once the M2M device is open, users can operate on the output and capture + * queues represented by the V4L2VideoDevice returned by the output() and + * capture() functions. 
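The per-context model described above implies one V4L2M2MDevice instance per parallel job; for example (a sketch, with an illustrative device node):

```cpp
/* Two independent conversion contexts on the same M2M video node. */
auto ctx0 = std::make_unique<V4L2M2MDevice>("/dev/video20");
auto ctx1 = std::make_unique<V4L2M2MDevice>("/dev/video20");

if (ctx0->open() < 0 || ctx1->open() < 0)
	return;

/* Each context owns an independent output/capture queue pair. */
ctx0->output()->setFormat(&outputFormat);
ctx0->capture()->setFormat(&captureFormat);
```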
*/ /** diff --git a/src/libcamera/vector.cpp b/src/libcamera/vector.cpp new file mode 100644 index 00000000..85ca2208 --- /dev/null +++ b/src/libcamera/vector.cpp @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com> + * + * Vector and related operations + */ + +#include "libcamera/internal/vector.h" + +#include <libcamera/base/log.h> + +/** + * \file vector.h + * \brief Vector class + */ + +namespace libcamera { + +LOG_DEFINE_CATEGORY(Vector) + +/** + * \class Vector + * \brief Vector class + * \tparam T Type of numerical values to be stored in the vector + * \tparam Rows Number of dimensions of the vector (= number of elements) + */ + +/** + * \fn Vector::Vector() + * \brief Construct an uninitialized vector + */ + +/** + * \fn Vector::Vector(T scalar) + * \brief Construct a vector filled with a \a scalar value + * \param[in] scalar The scalar value + */ + +/** + * \fn Vector::Vector(const std::array<T, Rows> &data) + * \brief Construct vector from supplied data + * \param data Data from which to construct a vector + * + * The size of \a data must be equal to the dimension size Rows of the vector. + */ + +/** + * \fn T Vector::operator[](size_t i) const + * \brief Index to an element in the vector + * \param i Index of element to retrieve + * \return Element at index \a i from the vector + */ + +/** + * \fn T &Vector::operator[](size_t i) + * \copydoc Vector::operator[](size_t i) const + */ + +/** + * \fn Vector::operator-() const + * \brief Negate a Vector by negating all of its coordinates + * \return The negated vector + */ + +/** + * \fn Vector::operator+(Vector const &other) const + * \brief Calculate the sum of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise sum of this vector and \a other + */ + +/** + * \fn Vector::operator+(T scalar) const + * \brief Calculate the sum of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise sum of this vector and \a scalar + */ + +/** + * \fn Vector::operator-(Vector const &other) const + * \brief Calculate the difference of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise subtraction of \a other from this vector + */ + +/** + * \fn Vector::operator-(T scalar) const + * \brief Calculate the difference of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise subtraction of \a scalar from this vector + */ + +/** + * \fn Vector::operator*(const Vector &other) const + * \brief Calculate the product of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise product of this vector and \a other + */ + +/** + * \fn Vector::operator*(T scalar) const + * \brief Calculate the product of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise product of this vector and \a scalar + */ + +/** + * \fn Vector::operator/(const Vector &other) const + * \brief Calculate the quotient of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise division of this vector by \a other + */ + +/** + * \fn Vector::operator/(T scalar) const + * \brief Calculate the quotient of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise division of this vector by \a scalar + */ + +/** + * \fn 
Vector::operator+=(Vector const &other) + * \brief Add \a other element-wise to this vector + * \param[in] other The other vector + * \return This vector + */ + +/** + * \fn Vector::operator+=(T scalar) + * \brief Add \a scalar element-wise to this vector + * \param[in] scalar The scalar + * \return This vector + */ + +/** + * \fn Vector::operator-=(Vector const &other) + * \brief Subtract \a other element-wise from this vector + * \param[in] other The other vector + * \return This vector + */ + +/** + * \fn Vector::operator-=(T scalar) + * \brief Subtract \a scalar element-wise from this vector + * \param[in] scalar The scalar + * \return This vector + */ + +/** + * \fn Vector::operator*=(const Vector &other) + * \brief Multiply this vector by \a other element-wise + * \param[in] other The other vector + * \return This vector + */ + +/** + * \fn Vector::operator*=(T scalar) + * \brief Multiply this vector by \a scalar element-wise + * \param[in] scalar The scalar + * \return This vector + */ + +/** + * \fn Vector::operator/=(const Vector &other) + * \brief Divide this vector by \a other element-wise + * \param[in] other The other vector + * \return This vector + */ + +/** + * \fn Vector::operator/=(T scalar) + * \brief Divide this vector by \a scalar element-wise + * \param[in] scalar The scalar + * \return This vector + */ + +/** + * \fn Vector::min(const Vector &other) const + * \brief Calculate the minimum of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise minimum of this vector and \a other + */ + +/** + * \fn Vector::min(T scalar) const + * \brief Calculate the minimum of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise minimum of this vector and \a scalar + */ + +/** + * \fn Vector::max(const Vector &other) const + * \brief Calculate the maximum of this vector and \a other element-wise + * \param[in] other The other vector + * \return The element-wise maximum of this vector and \a other + */ + +/** + * \fn Vector::max(T scalar) const + * \brief Calculate the maximum of this vector and \a scalar element-wise + * \param[in] scalar The scalar + * \return The element-wise maximum of this vector and \a scalar + */ + +/** + * \fn Vector::dot(const Vector<T, Rows> &other) const + * \brief Compute the dot product + * \param[in] other The other vector + * \return The dot product of the two vectors + */ + +/** + * \fn constexpr T &Vector::x() + * \brief Convenience function to access the first element of the vector + * \return The first element of the vector + */ + +/** + * \fn constexpr T &Vector::y() + * \brief Convenience function to access the second element of the vector + * \return The second element of the vector + */ + +/** + * \fn constexpr T &Vector::z() + * \brief Convenience function to access the third element of the vector + * \return The third element of the vector + */ + +/** + * \fn constexpr const T &Vector::x() const + * \copydoc Vector::x() + */ + +/** + * \fn constexpr const T &Vector::y() const + * \copydoc Vector::y() + */ + +/** + * \fn constexpr const T &Vector::z() const + * \copydoc Vector::z() + */ + +/** + * \fn constexpr T &Vector::r() + * \brief Convenience function to access the first element of the vector + * \return The first element of the vector + */ + +/** + * \fn constexpr T &Vector::g() + * \brief Convenience function to access the second element of the vector + * \return The second element of the vector + */ + +/** + * \fn constexpr T 
&Vector::b() + * \brief Convenience function to access the third element of the vector + * \return The third element of the vector + */ + +/** + * \fn constexpr const T &Vector::r() const + * \copydoc Vector::r() + */ + +/** + * \fn constexpr const T &Vector::g() const + * \copydoc Vector::g() + */ + +/** + * \fn constexpr const T &Vector::b() const + * \copydoc Vector::b() + */ + +/** + * \fn Vector::length2() + * \brief Get the squared length of the vector + * \return The squared length of the vector + */ + +/** + * \fn Vector::length() + * \brief Get the length of the vector + * \return The length of the vector + */ + +/** + * \fn Vector::sum() const + * \brief Calculate the sum of all the vector elements + * \tparam R The type of the sum + * + * The type R of the sum defaults to the type T of the elements, but can be set + * explicitly to use a different type in case the type T would risk + * overflowing. + * + * \return The sum of all the vector elements + */ + +/** + * \fn Vector<T, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<T, Cols> &v) + * \brief Multiply a matrix by a vector + * \tparam T Numerical type of the contents of the matrix and vector + * \tparam Rows The number of rows in the matrix + * \tparam Cols The number of columns in the matrix (= rows in the vector) + * \param m The matrix + * \param v The vector + * \return Product of matrix \a m and vector \a v + */ + +/** + * \typedef RGB + * \brief A Vector of 3 elements representing an RGB pixel value + */ + +/** + * \fn bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) + * \brief Compare vectors for equality + * \return True if the two vectors are equal, false otherwise + */ + +/** + * \fn bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs) + * \brief Compare vectors for inequality + * \return True if the two vectors are not equal, false otherwise + */ + +#ifndef __DOXYGEN__ +bool vectorValidateYaml(const YamlObject &obj, unsigned int size) +{ + if (!obj.isList()) + return false; + + if (obj.size() != size) { + LOG(Vector, Error) + << "Wrong number of values in YAML vector: expected " + << size << ", got " << obj.size(); + return false; + } + + return true; +} +#endif /* __DOXYGEN__ */ + +} /* namespace libcamera */ diff --git a/src/libcamera/yaml_parser.cpp b/src/libcamera/yaml_parser.cpp index 025006bc..a5e42461 100644 --- a/src/libcamera/yaml_parser.cpp +++ b/src/libcamera/yaml_parser.cpp @@ -7,10 +7,11 @@ #include "libcamera/internal/yaml_parser.h" -#include <cstdlib> +#include <charconv> #include <errno.h> #include <functional> #include <limits> +#include <stdlib.h> #include <libcamera/base/file.h> #include <libcamera/base/log.h> @@ -18,7 +19,7 @@ #include <yaml.h> /** - * \file libcamera/internal/yaml_parser.h + * \file yaml_parser.h * \brief A YAML parser helper */ @@ -38,12 +39,12 @@ static const YamlObject empty; * \brief A class representing the tree structure of the YAML content * * The YamlObject class represents the tree structure of YAML content. A - * YamlObject can be a dictionary or list of YamlObjects or a value if a tree - * leaf. + * YamlObject can be empty, a dictionary or list of YamlObjects, or a value if a + * tree leaf. 
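Ahead of the accessor documentation below, the practical effect of the new Empty type is that failed lookups chain safely instead of being undefined behaviour (a sketch; the YAML keys and fallback are illustrative):

```cpp
/* operator[] on a missing key now yields an empty object. */
const YamlObject &gains = (*root)["algorithms"]["agc"]["gains"];
if (!gains)
	return applyDefaults();	/* hypothetical fallback path */

double minGain = gains["min"].get<double>().value_or(1.0);
```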
*/ YamlObject::YamlObject() - : type_(Type::Value) + : type_(Type::Empty) { } @@ -71,6 +72,20 @@ YamlObject::~YamlObject() = default; */ /** + * \fn YamlObject::isEmpty() + * \brief Return whether the YamlObject is empty + * + * \return True if the YamlObject is empty, false otherwise + */ + +/** + * \fn YamlObject::operator bool() + * \brief Return whether the YamlObject is non-empty + * + * \return False if the YamlObject is empty, true otherwise + */ + +/** * \fn YamlObject::size() * \brief Retrieve the number of elements in a dictionary or list YamlObject * @@ -132,151 +147,38 @@ YamlObject::Getter<bool>::get(const YamlObject &obj) const return std::nullopt; } -namespace { - -bool parseSignedInteger(const std::string &str, long min, long max, - long *result) -{ - if (str == "") - return false; - - char *end; - - errno = 0; - long value = std::strtol(str.c_str(), &end, 10); - - if ('\0' != *end || errno == ERANGE || value < min || value > max) - return false; - - *result = value; - return true; -} - -bool parseUnsignedInteger(const std::string &str, unsigned long max, - unsigned long *result) +template<typename T> +struct YamlObject::Getter<T, std::enable_if_t< + std::is_same_v<int8_t, T> || + std::is_same_v<uint8_t, T> || + std::is_same_v<int16_t, T> || + std::is_same_v<uint16_t, T> || + std::is_same_v<int32_t, T> || + std::is_same_v<uint32_t, T>>> { - if (str == "") - return false; - - /* - * strtoul() accepts strings representing a negative number, in which - * case it negates the converted value. We don't want to silently accept - * negative values and return a large positive number, so check for a - * minus sign (after optional whitespace) and return an error. - */ - std::size_t found = str.find_first_not_of(" \t"); - if (found != std::string::npos && str[found] == '-') - return false; - - char *end; - - errno = 0; - unsigned long value = std::strtoul(str.c_str(), &end, 10); - - if ('\0' != *end || errno == ERANGE || value > max) - return false; - - *result = value; - return true; -} - -} /* namespace */ - -template<> -std::optional<int8_t> -YamlObject::Getter<int8_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; - - long value; - - if (!parseSignedInteger(obj.value_, std::numeric_limits<int8_t>::min(), - std::numeric_limits<int8_t>::max(), &value)) - return std::nullopt; - - return value; -} - -template<> -std::optional<uint8_t> -YamlObject::Getter<uint8_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; - - unsigned long value; - - if (!parseUnsignedInteger(obj.value_, std::numeric_limits<uint8_t>::max(), - &value)) - return std::nullopt; - - return value; -} - -template<> -std::optional<int16_t> -YamlObject::Getter<int16_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; - - long value; - - if (!parseSignedInteger(obj.value_, std::numeric_limits<int16_t>::min(), - std::numeric_limits<int16_t>::max(), &value)) - return std::nullopt; - - return value; -} - -template<> -std::optional<uint16_t> -YamlObject::Getter<uint16_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; - - unsigned long value; - - if (!parseUnsignedInteger(obj.value_, std::numeric_limits<uint16_t>::max(), - &value)) - return std::nullopt; - - return value; -} - -template<> -std::optional<int32_t> -YamlObject::Getter<int32_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; - - long value; - 
if (!parseSignedInteger(obj.value_, std::numeric_limits<int32_t>::min(), - std::numeric_limits<int32_t>::max(), &value)) - return std::nullopt; - - return value; -} + std::optional<T> get(const YamlObject &obj) const + { + if (obj.type_ != Type::Value) + return std::nullopt; -template<> -std::optional<uint32_t> -YamlObject::Getter<uint32_t>::get(const YamlObject &obj) const -{ - if (obj.type_ != Type::Value) - return std::nullopt; + const std::string &str = obj.value_; + T value; - unsigned long value; + auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(), + value); + if (ptr != str.data() + str.size() || ec != std::errc()) + return std::nullopt; - if (!parseUnsignedInteger(obj.value_, std::numeric_limits<uint32_t>::max(), - &value)) - return std::nullopt; + return value; + } +}; - return value; -} +template struct YamlObject::Getter<int8_t>; +template struct YamlObject::Getter<uint8_t>; +template struct YamlObject::Getter<int16_t>; +template struct YamlObject::Getter<uint16_t>; +template struct YamlObject::Getter<int32_t>; +template struct YamlObject::Getter<uint32_t>; template<> std::optional<float> @@ -292,7 +194,7 @@ YamlObject::Getter<double>::get(const YamlObject &obj) const if (obj.type_ != Type::Value) return std::nullopt; - if (obj.value_ == "") + if (obj.value_.empty()) return std::nullopt; char *end; @@ -443,7 +345,8 @@ template std::optional<std::vector<Size>> YamlObject::getList<Size>() const; * * This function retrieves an element of the YamlObject. Only YamlObject * instances of List type associate elements with index, calling this function - * on other types of instances is invalid and results in undefined behaviour. + * on other types of instances or with an invalid index results in an empty + * object. * * \return The YamlObject as an element of the list */ @@ -466,26 +369,23 @@ const YamlObject &YamlObject::operator[](std::size_t index) const * * \return True if an element exists, false otherwise */ -bool YamlObject::contains(const std::string &key) const +bool YamlObject::contains(std::string_view key) const { - if (dictionary_.find(std::ref(key)) == dictionary_.end()) - return false; - - return true; + return dictionary_.find(key) != dictionary_.end(); } /** - * \fn YamlObject::operator[](const std::string &key) const + * \fn YamlObject::operator[](std::string_view key) const * \brief Retrieve a member by name from the dictionary * * This function retrieve a member of a YamlObject by name. Only YamlObject * instances of Dictionary type associate elements with names, calling this - * function on other types of instances is invalid and results in undefined - * behaviour. + * function on other types of instances or with a nonexistent key results in an + * empty object. 
* * \return The YamlObject corresponding to the \a key member */ -const YamlObject &YamlObject::operator[](const std::string &key) const +const YamlObject &YamlObject::operator[](std::string_view key) const { if (type_ != Type::Dictionary) return empty; @@ -609,8 +509,17 @@ YamlParserContext::EventPtr YamlParserContext::nextEvent() EventPtr event(new yaml_event_t); /* yaml_parser_parse returns 1 when it succeeds */ - if (!yaml_parser_parse(&parser_, event.get())) + if (!yaml_parser_parse(&parser_, event.get())) { + File *file = static_cast<File *>(parser_.read_handler_data); + + LOG(YamlParser, Error) << file->fileName() << ":" + << parser_.problem_mark.line << ":" + << parser_.problem_mark.column << " " + << parser_.problem << " " + << parser_.context; + return nullptr; + } return event; } diff --git a/src/meson.build b/src/meson.build index 165a77bb..76198e95 100644 --- a/src/meson.build +++ b/src/meson.build @@ -27,6 +27,27 @@ else ipa_sign_module = false endif +# libyuv, used by the Android adaptation layer and the virtual pipeline handler. +# Fallback to a subproject if libyuv isn't found, as it's typically not provided +# by distributions. +libyuv_dep = dependency('libyuv', required : false) + +if (pipelines.contains('virtual') or get_option('android').allowed()) and \ + not libyuv_dep.found() + cmake = import('cmake') + + libyuv_vars = cmake.subproject_options() + libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'}) + libyuv_vars.set_override_option('cpp_std', 'c++17') + libyuv_vars.append_compile_args('cpp', + '-Wno-sign-compare', + '-Wno-unused-variable', + '-Wno-unused-parameter') + libyuv_vars.append_link_args('-ljpeg') + libyuv = cmake.subproject('libyuv', options : libyuv_vars) + libyuv_dep = libyuv.dependency('yuv') +endif + # libcamera must be built first as a dependency to the other components. 
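The rework above replaces six hand-rolled strtol()/strtoul() getters with a single std::from_chars() based template. The following standalone sketch (parseInteger() is an illustrative name, not part of libcamera) shows why the pattern needs none of the old errno or minus-sign pre-checks: std::from_chars() is locale-independent and reports range errors, sign mismatches and trailing garbage through its return value alone.

#include <charconv>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

/* Parse the whole string as an integer of type T, rejecting partial matches. */
template<typename T>
std::optional<T> parseInteger(const std::string &str)
{
	T value;

	auto [ptr, ec] = std::from_chars(str.data(), str.data() + str.size(),
					 value);
	/* Reject trailing garbage ("12ab") and out-of-range values alike. */
	if (ptr != str.data() + str.size() || ec != std::errc())
		return std::nullopt;

	return value;
}

int main()
{
	std::cout << parseInteger<int16_t>("-42").value_or(0) << '\n';  /* -42 */
	std::cout << parseInteger<uint8_t>("300").has_value() << '\n';  /* 0: out of range */
	std::cout << parseInteger<uint8_t>("-1").has_value() << '\n';   /* 0: sign rejected */
	std::cout << parseInteger<int32_t>("12ab").has_value() << '\n'; /* 0: trailing garbage */
}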
diff --git a/src/meson.build b/src/meson.build
index 165a77bb..76198e95 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -27,6 +27,27 @@ else
     ipa_sign_module = false
 endif
 
+# libyuv, used by the Android adaptation layer and the virtual pipeline handler.
+# Fall back to a subproject if libyuv isn't found, as it's typically not provided
+# by distributions.
+libyuv_dep = dependency('libyuv', required : false)
+
+if (pipelines.contains('virtual') or get_option('android').allowed()) and \
+   not libyuv_dep.found()
+    cmake = import('cmake')
+
+    libyuv_vars = cmake.subproject_options()
+    libyuv_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'})
+    libyuv_vars.set_override_option('cpp_std', 'c++17')
+    libyuv_vars.append_compile_args('cpp',
+                                    '-Wno-sign-compare',
+                                    '-Wno-unused-variable',
+                                    '-Wno-unused-parameter')
+    libyuv_vars.append_link_args('-ljpeg')
+    libyuv = cmake.subproject('libyuv', options : libyuv_vars)
+    libyuv_dep = libyuv.dependency('yuv')
+endif
+
 # libcamera must be built first as a dependency to the other components.
 subdir('libcamera')
diff --git a/src/py/cam/cam_qt.py b/src/py/cam/cam_qt.py
index c1723b44..22d8c4da 100644
--- a/src/py/cam/cam_qt.py
+++ b/src/py/cam/cam_qt.py
@@ -2,7 +2,7 @@
 # Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
 
 from helpers import mfb_to_rgb
-from PyQt5 import QtCore, QtGui, QtWidgets
+from PyQt6 import QtCore, QtGui, QtWidgets
 import libcamera as libcam
 import libcamera.utils
 import sys
@@ -63,10 +63,10 @@ class QtRenderer:
         self.buf_mmap_map = buf_mmap_map
 
     def run(self):
-        camnotif = QtCore.QSocketNotifier(self.cm.event_fd, QtCore.QSocketNotifier.Read)
+        camnotif = QtCore.QSocketNotifier(self.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
         camnotif.activated.connect(lambda _: self.readcam())
 
-        keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read)
+        keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
         keynotif.activated.connect(lambda _: self.readkey())
 
         print('Capturing...')
diff --git a/src/py/cam/cam_qtgl.py b/src/py/cam/cam_qtgl.py
index 6cfbd347..35b4b06b 100644
--- a/src/py/cam/cam_qtgl.py
+++ b/src/py/cam/cam_qtgl.py
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-or-later
 # Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
 
-from PyQt5 import QtCore, QtWidgets
-from PyQt5.QtCore import Qt
+from PyQt6 import QtCore, QtWidgets
+from PyQt6.QtCore import Qt
 
 import math
 import os
@@ -142,10 +142,10 @@ class QtRenderer:
         self.window = window
 
     def run(self):
-        camnotif = QtCore.QSocketNotifier(self.state.cm.event_fd, QtCore.QSocketNotifier.Read)
+        camnotif = QtCore.QSocketNotifier(self.state.cm.event_fd, QtCore.QSocketNotifier.Type.Read)
         camnotif.activated.connect(lambda _: self.readcam())
 
-        keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read)
+        keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Type.Read)
         keynotif.activated.connect(lambda _: self.readkey())
 
         print('Capturing...')
@@ -175,8 +175,8 @@ class MainWindow(QtWidgets.QWidget):
     def __init__(self, state):
         super().__init__()
 
-        self.setAttribute(Qt.WA_PaintOnScreen)
-        self.setAttribute(Qt.WA_NativeWindow)
+        self.setAttribute(Qt.WidgetAttribute.WA_PaintOnScreen)
+        self.setAttribute(Qt.WidgetAttribute.WA_NativeWindow)
 
         self.state = state
diff --git a/src/py/libcamera/gen-py-controls.py b/src/py/libcamera/gen-py-controls.py
index 8efbf95b..d43a7c1c 100755
--- a/src/py/libcamera/gen-py-controls.py
+++ b/src/py/libcamera/gen-py-controls.py
@@ -4,10 +4,12 @@
 # Generate Python bindings controls from YAML
 
 import argparse
-import string
+import jinja2
 import sys
 import yaml
 
+from controls import Control
+
 
 def find_common_prefix(strings):
     prefix = strings[0]
@@ -21,70 +23,39 @@ def find_common_prefix(strings):
     return prefix
 
 
-def generate_py(controls, mode):
-    out = ''
-
-    vendors_class_def = []
-    vendor_defs = []
-    vendors = []
-    for vendor, ctrl_list in controls.items():
-        for ctrls in ctrl_list:
-            name, ctrl = ctrls.popitem()
-
-            if vendor not in vendors and vendor != 'libcamera':
-                vendor_mode_str = f'{vendor.capitalize()}{mode.capitalize()}'
-                vendors_class_def.append('class Py{}\n{{\n}};\n'.format(vendor_mode_str))
-                vendor_defs.append('\tauto {} = py::class_<Py{}>(controls, \"{}\");'.format(vendor, vendor_mode_str, vendor))
-                vendors.append(vendor)
-
-            if vendor != 'libcamera':
-                ns = 'libcamera::{}::{}::'.format(mode, vendor)
-                container = vendor
-            else:
-                ns = 'libcamera::{}::'.format(mode)
-                container = 'controls'
-
-            out += f'\t{container}.def_readonly_static("{name}", static_cast<const libcamera::ControlId *>(&{ns}{name}));\n\n'
-
-            enum = ctrl.get('enum')
-            if not enum:
-                continue
-
-            cpp_enum = name + 'Enum'
-
-            out += '\tpy::enum_<{}{}>({}, \"{}\")\n'.format(ns, cpp_enum, container, cpp_enum)
-
-            if mode == 'controls':
-                # Adjustments for controls
-                if name == 'LensShadingMapMode':
-                    prefix = 'LensShadingMapMode'
-                else:
-                    prefix = find_common_prefix([e['name'] for e in enum])
-            else:
-                # Adjustments for properties
-                prefix = find_common_prefix([e['name'] for e in enum])
-
-            for entry in enum:
-                cpp_enum = entry['name']
-                py_enum = entry['name'][len(prefix):]
-
-                out += '\t\t.value(\"{}\", {}{})\n'.format(py_enum, ns, cpp_enum)
-
-            out += '\t;\n\n'
-
-    return {'controls': out,
-            'vendors_class_def': '\n'.join(vendors_class_def),
-            'vendors_defs': '\n'.join(vendor_defs)}
+def extend_control(ctrl, mode):
+    if ctrl.vendor != 'libcamera':
+        ctrl.klass = ctrl.vendor
+        ctrl.namespace = f'{ctrl.vendor}::'
+    else:
+        ctrl.klass = mode
+        ctrl.namespace = ''
+
+    if not ctrl.is_enum:
+        return ctrl
+
+    if mode == 'controls':
+        # Adjustments for controls
+        if ctrl.name == 'LensShadingMapMode':
+            prefix = 'LensShadingMapMode'
+        else:
+            prefix = find_common_prefix([e.name for e in ctrl.enum_values])
+    else:
+        # Adjustments for properties
+        prefix = find_common_prefix([e.name for e in ctrl.enum_values])
 
+    for enum in ctrl.enum_values:
+        enum.py_name = enum.name[len(prefix):]
 
-def fill_template(template, data):
-    template = open(template, 'rb').read()
-    template = template.decode('utf-8')
-    template = string.Template(template)
-    return template.substitute(data)
+    return ctrl
 
 
 def main(argv):
+    headers = {
+        'controls': 'control_ids.h',
+        'properties': 'property_ids.h',
+    }
+
     # Parse command line arguments
     parser = argparse.ArgumentParser()
     parser.add_argument('--mode', '-m', type=str, required=True,
@@ -97,26 +68,41 @@ def main(argv):
                         help='Input file name.')
     args = parser.parse_args(argv[1:])
 
-    if args.mode not in ['controls', 'properties']:
+    if not headers.get(args.mode):
        print(f'Invalid mode option "{args.mode}"', file=sys.stderr)
        return -1
 
-    controls = {}
+    controls = []
+    vendors = []
+
     for input in args.input:
-        data = open(input, 'rb').read()
-        vendor = yaml.safe_load(data)['vendor']
-        controls[vendor] = yaml.safe_load(data)['controls']
+        data = yaml.safe_load(open(input, 'rb').read())
+
+        vendor = data['vendor']
+        if vendor != 'libcamera':
+            vendors.append(vendor)
+
+        for ctrl in data['controls']:
+            ctrl = Control(*ctrl.popitem(), vendor, args.mode)
+            controls.append(extend_control(ctrl, args.mode))
 
-    data = generate_py(controls, args.mode)
+    data = {
+        'mode': args.mode,
+        'header': headers[args.mode],
+        'vendors': vendors,
+        'controls': controls,
+    }
 
-    data = fill_template(args.template, data)
+    env = jinja2.Environment()
+    template = env.from_string(open(args.template, 'r', encoding='utf-8').read())
+    string = template.render(data)
 
     if args.output:
-        output = open(args.output, 'wb')
-        output.write(data.encode('utf-8'))
+        output = open(args.output, 'w', encoding='utf-8')
+        output.write(string)
         output.close()
     else:
-        sys.stdout.write(data)
+        sys.stdout.write(string)
 
     return 0
diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build
index 4807ca7d..596a203c 100644
--- a/src/py/libcamera/meson.build
+++ b/src/py/libcamera/meson.build
@@ -26,37 +26,24 @@ pycamera_sources = files([
     'py_transform.cpp',
 ])
 
-# Generate controls
+# Generate controls and properties
 
-gen_py_controls_input_files = []
 gen_py_controls_template = files('py_controls_generated.cpp.in')
-
 gen_py_controls = files('gen-py-controls.py')
 
-foreach file : controls_files
-    gen_py_controls_input_files += files('../../libcamera/' + file)
-endforeach
-
 pycamera_sources += custom_target('py_gen_controls',
-                                  input : gen_py_controls_input_files,
+                                  input : controls_files,
                                   output : ['py_controls_generated.cpp'],
                                   command : [gen_py_controls, '--mode', 'controls', '-o', '@OUTPUT@',
-                                             '-t', gen_py_controls_template, '@INPUT@'])
-
-# Generate properties
-
-gen_py_property_enums_input_files = []
-gen_py_properties_template = files('py_properties_generated.cpp.in')
-
-foreach file : properties_files
-    gen_py_property_enums_input_files += files('../../libcamera/' + file)
-endforeach
+                                             '-t', gen_py_controls_template, '@INPUT@'],
+                                  env : py_build_env)
 
 pycamera_sources += custom_target('py_gen_properties',
-                                  input : gen_py_property_enums_input_files,
+                                  input : properties_files,
                                   output : ['py_properties_generated.cpp'],
                                   command : [gen_py_controls, '--mode', 'properties', '-o', '@OUTPUT@',
-                                             '-t', gen_py_properties_template, '@INPUT@'])
+                                             '-t', gen_py_controls_template, '@INPUT@'],
+                                  env : py_build_env)
 
 # Generate formats
diff --git a/src/py/libcamera/py_color_space.cpp b/src/py/libcamera/py_color_space.cpp
index 5201121a..fd5a5dab 100644
--- a/src/py/libcamera/py_color_space.cpp
+++ b/src/py/libcamera/py_color_space.cpp
@@ -12,6 +12,8 @@
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
 using namespace libcamera;
diff --git a/src/py/libcamera/py_controls_generated.cpp.in b/src/py/libcamera/py_controls_generated.cpp.in
index 8d282ce5..22a132d1 100644
--- a/src/py/libcamera/py_controls_generated.cpp.in
+++ b/src/py/libcamera/py_controls_generated.cpp.in
@@ -2,27 +2,46 @@
 /*
  * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
  *
- * Python bindings - Auto-generated controls
+ * Python bindings - Auto-generated {{mode}}
  *
  * This file is auto-generated. Do not edit.
  */
 
-#include <libcamera/control_ids.h>
+#include <libcamera/{{header}}>
 
 #include <pybind11/pybind11.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
-class PyControls
+class Py{{mode|capitalize}}
 {
 };
 
-${vendors_class_def}
+{% for vendor in vendors -%}
+class Py{{vendor|capitalize}}{{mode|capitalize}}
+{
+};
 
-void init_py_controls_generated(py::module& m)
+{% endfor -%}
+
+void init_py_{{mode}}_generated(py::module& m)
 {
-	auto controls = py::class_<PyControls>(m, "controls");
-${vendors_defs}
+	auto {{mode}} = py::class_<Py{{mode|capitalize}}>(m, "{{mode}}");
+{%- for vendor in vendors %}
+	auto {{vendor}} = py::class_<Py{{vendor|capitalize}}{{mode|capitalize}}>({{mode}}, "{{vendor}}");
+{%- endfor %}
+
+{% for ctrl in controls %}
+	{{ctrl.klass}}.def_readonly_static("{{ctrl.name}}", static_cast<const libcamera::ControlId *>(&libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}));
+{%- if ctrl.is_enum %}
 
-${controls}
+	py::enum_<libcamera::{{mode}}::{{ctrl.namespace}}{{ctrl.name}}Enum>({{ctrl.klass}}, "{{ctrl.name}}Enum")
+{%- for enum in ctrl.enum_values %}
+		.value("{{enum.py_name}}", libcamera::{{mode}}::{{ctrl.namespace}}{{enum.name}})
+{%- endfor %}
+		;
+{%- endif %}
+{% endfor -%}
 }
diff --git a/src/py/libcamera/py_enums.cpp b/src/py/libcamera/py_enums.cpp
index e25689c6..9e75ec1a 100644
--- a/src/py/libcamera/py_enums.cpp
+++ b/src/py/libcamera/py_enums.cpp
@@ -9,6 +9,8 @@
 
 #include <pybind11/pybind11.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
 using namespace libcamera;
@@ -30,7 +32,8 @@ void init_py_enums(py::module &m)
 		.value("Float", ControlType::ControlTypeFloat)
 		.value("String", ControlType::ControlTypeString)
 		.value("Rectangle", ControlType::ControlTypeRectangle)
-		.value("Size", ControlType::ControlTypeSize);
+		.value("Size", ControlType::ControlTypeSize)
+		.value("Point", ControlType::ControlTypePoint);
 
 	py::enum_<Orientation>(m, "Orientation")
 		.value("Rotate0", Orientation::Rotate0)
diff --git a/src/py/libcamera/py_formats_generated.cpp.in b/src/py/libcamera/py_formats_generated.cpp.in
index a3f7f94d..c5fb9063 100644
--- a/src/py/libcamera/py_formats_generated.cpp.in
+++ b/src/py/libcamera/py_formats_generated.cpp.in
@@ -11,6 +11,8 @@
 
 #include <pybind11/pybind11.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
 class PyFormats
diff --git a/src/py/libcamera/py_geometry.cpp b/src/py/libcamera/py_geometry.cpp
index 5c2aeac4..c7e30360 100644
--- a/src/py/libcamera/py_geometry.cpp
+++ b/src/py/libcamera/py_geometry.cpp
@@ -14,6 +14,8 @@
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
 using namespace libcamera;
diff --git a/src/py/libcamera/py_helpers.cpp b/src/py/libcamera/py_helpers.cpp
index 79891ab6..1ad1d4c1 100644
--- a/src/py/libcamera/py_helpers.cpp
+++ b/src/py/libcamera/py_helpers.cpp
@@ -34,6 +34,8 @@ static py::object valueOrTuple(const ControlValue &cv)
 py::object controlValueToPy(const ControlValue &cv)
 {
 	switch (cv.type()) {
+	case ControlTypeNone:
+		return py::none();
 	case ControlTypeBool:
 		return valueOrTuple<bool>(cv);
 	case ControlTypeByte:
@@ -46,14 +48,14 @@ py::object controlValueToPy(const ControlValue &cv)
 		return valueOrTuple<float>(cv);
 	case ControlTypeString:
 		return py::cast(cv.get<std::string>());
-	case ControlTypeRectangle:
-		return valueOrTuple<Rectangle>(cv);
 	case ControlTypeSize: {
 		const Size *v = reinterpret_cast<const Size *>(cv.data().data());
 		return py::cast(v);
 	}
-	case ControlTypeNone:
-		return py::none();
+	case ControlTypeRectangle:
+		return valueOrTuple<Rectangle>(cv);
+	case ControlTypePoint:
+		return valueOrTuple<Point>(cv);
 	default:
 		throw std::runtime_error("Unsupported ControlValue type");
 	}
@@ -73,6 +75,8 @@ static ControlValue controlValueMaybeArray(const py::object &ob)
 ControlValue pyToControlValue(const py::object &ob, ControlType type)
 {
 	switch (type) {
+	case ControlTypeNone:
+		return ControlValue();
 	case ControlTypeBool:
 		return ControlValue(ob.cast<bool>());
 	case ControlTypeByte:
@@ -89,8 +93,8 @@ ControlValue pyToControlValue(const py::object &ob, ControlType type)
 		return controlValueMaybeArray<Rectangle>(ob);
 	case ControlTypeSize:
 		return ControlValue(ob.cast<Size>());
-	case ControlTypeNone:
-		return ControlValue();
+	case ControlTypePoint:
+		return controlValueMaybeArray<Point>(ob);
 	default:
 		throw std::runtime_error("Control type not implemented");
 	}
diff --git a/src/py/libcamera/py_main.cpp b/src/py/libcamera/py_main.cpp
index bce08218..441a70ab 100644
--- a/src/py/libcamera/py_main.cpp
+++ b/src/py/libcamera/py_main.cpp
@@ -7,6 +7,7 @@
 
 #include "py_main.h"
 
+#include <limits>
 #include <memory>
 #include <stdexcept>
 #include <string>
@@ -85,14 +86,6 @@ PYBIND11_DECLARE_HOLDER_TYPE(T, PyCameraSmartPtr<T>)
  */
 static std::weak_ptr<PyCameraManager> gCameraManager;
 
-void init_py_color_space(py::module &m);
-void init_py_controls_generated(py::module &m);
-void init_py_enums(py::module &m);
-void init_py_formats_generated(py::module &m);
-void init_py_geometry(py::module &m);
-void init_py_properties_generated(py::module &m);
-void init_py_transform(py::module &m);
-
 PYBIND11_MODULE(_libcamera, m)
 {
 	init_py_enums(m);
@@ -407,12 +400,26 @@ PYBIND11_MODULE(_libcamera, m)
 	pyControlId
 		.def_property_readonly("id", &ControlId::id)
 		.def_property_readonly("name", &ControlId::name)
+		.def_property_readonly("vendor", &ControlId::vendor)
 		.def_property_readonly("type", &ControlId::type)
+		.def_property_readonly("isArray", &ControlId::isArray)
+		.def_property_readonly("size", &ControlId::size)
 		.def("__str__", [](const ControlId &self) { return self.name(); })
 		.def("__repr__", [](const ControlId &self) {
-			return py::str("libcamera.ControlId({}, {}, {})")
-				.format(self.id(), self.name(), self.type());
-		});
+			std::string sizeStr = "";
+			if (self.isArray()) {
+				sizeStr = "[";
+				size_t size = self.size();
+				if (size == std::numeric_limits<size_t>::max())
+					sizeStr += "n";
+				else
+					sizeStr += std::to_string(size);
+				sizeStr += "]";
+			}
+			return py::str("libcamera.ControlId({}, {}.{}{}, {})")
+				.format(self.id(), self.vendor(), self.name(), sizeStr, self.type());
+		})
+		.def("enumerators", &ControlId::enumerators);
 
 	pyControlInfo
 		.def_property_readonly("min", [](const ControlInfo &self) {
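The new __repr__ above annotates array controls with their element count, using std::numeric_limits<size_t>::max() as the sentinel for variable-length arrays. A standalone sketch of just that formatting rule (sizeSuffix() is a hypothetical helper, not part of the bindings):

#include <cstddef>
#include <iostream>
#include <limits>
#include <string>

/* Format the "[3]" / "[n]" suffix the way the new ControlId __repr__ does. */
static std::string sizeSuffix(bool isArray, std::size_t size)
{
	if (!isArray)
		return "";

	/* numeric_limits<size_t>::max() marks a variable-length array. */
	if (size == std::numeric_limits<std::size_t>::max())
		return "[n]";

	return "[" + std::to_string(size) + "]";
}

int main()
{
	std::cout << "Scalar" << sizeSuffix(false, 0) << '\n';  /* Scalar */
	std::cout << "Fixed" << sizeSuffix(true, 2) << '\n';    /* Fixed[2] */
	std::cout << "Dynamic" << sizeSuffix(true,
			std::numeric_limits<std::size_t>::max()) << '\n'; /* Dynamic[n] */
}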
diff --git a/src/py/libcamera/py_main.h b/src/py/libcamera/py_main.h
index 5bb5f2d1..4d594326 100644
--- a/src/py/libcamera/py_main.h
+++ b/src/py/libcamera/py_main.h
@@ -7,8 +7,18 @@
 
 #include <libcamera/base/log.h>
 
+#include <pybind11/pybind11.h>
+
 namespace libcamera {
 
 LOG_DECLARE_CATEGORY(Python)
 
 }
+
+void init_py_color_space(pybind11::module &m);
+void init_py_controls_generated(pybind11::module &m);
+void init_py_enums(pybind11::module &m);
+void init_py_formats_generated(pybind11::module &m);
+void init_py_geometry(pybind11::module &m);
+void init_py_properties_generated(pybind11::module &m);
+void init_py_transform(pybind11::module &m);
diff --git a/src/py/libcamera/py_properties_generated.cpp.in b/src/py/libcamera/py_properties_generated.cpp.in
deleted file mode 100644
index e3802b81..00000000
--- a/src/py/libcamera/py_properties_generated.cpp.in
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
- *
- * Python bindings - Auto-generated properties
- *
- * This file is auto-generated. Do not edit.
- */
-
-#include <libcamera/property_ids.h>
-
-#include <pybind11/pybind11.h>
-
-namespace py = pybind11;
-
-class PyProperties
-{
-};
-
-${vendors_class_def}
-
-void init_py_properties_generated(py::module& m)
-{
-	auto controls = py::class_<PyProperties>(m, "properties");
-${vendors_defs}
-
-${controls}
-}
diff --git a/src/py/libcamera/py_transform.cpp b/src/py/libcamera/py_transform.cpp
index f3a0bfaf..768260ff 100644
--- a/src/py/libcamera/py_transform.cpp
+++ b/src/py/libcamera/py_transform.cpp
@@ -12,6 +12,8 @@
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 
+#include "py_main.h"
+
 namespace py = pybind11;
 
 using namespace libcamera;
diff --git a/src/v4l2/meson.build b/src/v4l2/meson.build
index 58f53bf3..2c040414 100644
--- a/src/v4l2/meson.build
+++ b/src/v4l2/meson.build
@@ -1,12 +1,11 @@
 # SPDX-License-Identifier: CC0-1.0
 
-if not get_option('v4l2')
-    v4l2_enabled = false
+v4l2_enabled = get_option('v4l2').allowed()
+
+if not v4l2_enabled
     subdir_done()
 endif
 
-v4l2_enabled = true
-
 v4l2_compat_sources = files([
     'v4l2_camera.cpp',
     'v4l2_camera_file.cpp',
diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
index 0f3b862f..94d138cd 100644
--- a/src/v4l2/v4l2_camera.cpp
+++ b/src/v4l2/v4l2_camera.cpp
@@ -12,13 +12,15 @@
 
 #include <libcamera/base/log.h>
 
+#include <libcamera/control_ids.h>
+
 using namespace libcamera;
 
 LOG_DECLARE_CATEGORY(V4L2Compat)
 
 V4L2Camera::V4L2Camera(std::shared_ptr<Camera> camera)
-	: camera_(camera), isRunning_(false), bufferAllocator_(nullptr),
-	  efd_(-1), bufferAvailableCount_(0)
+	: camera_(camera), controls_(controls::controls), isRunning_(false),
+	  bufferAllocator_(nullptr), efd_(-1), bufferAvailableCount_(0)
 {
 	camera_->requestCompleted.connect(this, &V4L2Camera::requestComplete);
 }
@@ -202,10 +204,12 @@ int V4L2Camera::streamOn()
 	if (isRunning_)
 		return 0;
 
-	int ret = camera_->start();
+	int ret = camera_->start(&controls_);
 	if (ret < 0)
 		return ret == -EACCES ? -EBUSY : ret;
 
+	controls_.clear();
+
 	isRunning_ = true;
 
 	for (Request *req : pendingRequests_) {
@@ -265,6 +269,8 @@ int V4L2Camera::qbuf(unsigned int index)
 		return 0;
 	}
 
+	request->controls().merge(std::move(controls_));
+
 	ret = camera_->queueRequest(request);
 	if (ret < 0) {
 		LOG(V4L2Compat, Error) << "Can't queue request";
diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
index 278cc33e..9bd161b9 100644
--- a/src/v4l2/v4l2_camera.h
+++ b/src/v4l2/v4l2_camera.h
@@ -8,13 +8,15 @@
 #pragma once
 
 #include <deque>
-#include <utility>
+#include <memory>
+#include <vector>
 
 #include <libcamera/base/mutex.h>
 #include <libcamera/base/semaphore.h>
 #include <libcamera/base/shared_fd.h>
 
 #include <libcamera/camera.h>
+#include <libcamera/controls.h>
 #include <libcamera/framebuffer.h>
 #include <libcamera/framebuffer_allocator.h>
 
@@ -49,6 +51,9 @@ public:
 		       const libcamera::Size &size,
 		       libcamera::StreamConfiguration *streamConfigOut);
 
+	libcamera::ControlList &controls() { return controls_; }
+	const libcamera::ControlInfoMap &controlInfo() { return camera_->controls(); }
+
 	int allocBuffers(unsigned int count);
 	void freeBuffers();
 	int getBufferFd(unsigned int index);
@@ -70,6 +75,8 @@ private:
 	std::shared_ptr<libcamera::Camera> camera_;
 	std::unique_ptr<libcamera::CameraConfiguration> config_;
 
+	libcamera::ControlList controls_;
+
 	bool isRunning_;
 	libcamera::Mutex bufferLock_;
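The change above accumulates controls set through the V4L2 compatibility layer and hands them either to Camera::start() or to the next queued request. A minimal sketch of the same accumulate-then-merge flow, assuming only libcamera's public ControlList API and an arbitrary control value:

#include <cstdint>
#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

int main()
{
	/* Controls accumulated while no request is in flight... */
	ControlList pending(controls::controls);
	pending.set(controls::FrameDurationLimits,
		    { int64_t(33333), int64_t(33333) });

	/* ...are merged into the next request's control list, then cleared. */
	ControlList request(controls::controls);
	request.merge(pending);
	pending.clear();

	auto limits = request.get(controls::FrameDurationLimits);
	if (limits)
		std::cout << "frame duration: " << (*limits)[0] << " us\n";

	return 0;
}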
diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
index 3f7c00a2..559ffc61 100644
--- a/src/v4l2/v4l2_camera_proxy.cpp
+++ b/src/v4l2/v4l2_camera_proxy.cpp
@@ -8,7 +8,6 @@
 #include "v4l2_camera_proxy.h"
 
 #include <algorithm>
-#include <array>
 #include <errno.h>
 #include <numeric>
 #include <set>
@@ -23,9 +22,11 @@
 #include <libcamera/base/utils.h>
 
 #include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
 #include <libcamera/formats.h>
 
-#include "libcamera/internal/formats.h"
+#include "libcamera/internal/v4l2_pixelformat.h"
 
 #include "v4l2_camera.h"
 #include "v4l2_camera_file.h"
@@ -34,6 +35,7 @@
 #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
 
 using namespace libcamera;
+using namespace std::literals::chrono_literals;
 
 LOG_DECLARE_CATEGORY(V4L2Compat)
 
@@ -193,6 +195,29 @@ void V4L2CameraProxy::setFmtFromConfig(const StreamConfiguration &streamConfig)
 	v4l2PixFormat_.xfer_func = V4L2_XFER_FUNC_DEFAULT;
 
 	sizeimage_ = streamConfig.frameSize;
+
+	const ControlInfoMap &controls = vcam_->controlInfo();
+	const auto &it = controls.find(&controls::FrameDurationLimits);
+
+	if (it != controls.end()) {
+		const int64_t duration = it->second.def().get<int64_t>();
+
+		v4l2TimePerFrame_.numerator = duration;
+		v4l2TimePerFrame_.denominator = 1000000;
+	} else {
+		/*
+		 * Default to 30fps if the camera doesn't expose the
+		 * FrameDurationLimits control.
+		 *
+		 * \todo Remove this once all pipeline handlers implement the
+		 * control
+		 */
+		LOG(V4L2Compat, Warning)
+			<< "Camera does not support FrameDurationLimits";
+
+		v4l2TimePerFrame_.numerator = 33333;
+		v4l2TimePerFrame_.denominator = 1000000;
+	}
 }
 
 void V4L2CameraProxy::querycap(std::shared_ptr<Camera> camera)
@@ -756,6 +781,55 @@ int V4L2CameraProxy::vidioc_streamoff(V4L2CameraFile *file, int *arg)
 	return ret;
 }
 
+int V4L2CameraProxy::vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
+{
+	LOG(V4L2Compat, Debug)
+		<< "[" << file->description() << "] " << __func__ << "()";
+
+	if (!validateBufferType(arg->type))
+		return -EINVAL;
+
+	memset(&arg->parm, 0, sizeof(arg->parm));
+
+	arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+	return 0;
+}
+
+int V4L2CameraProxy::vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg)
+{
+	LOG(V4L2Compat, Debug)
+		<< "[" << file->description() << "] " << __func__ << "()";
+
+	if (!validateBufferType(arg->type))
+		return -EINVAL;
+
+	/*
+	 * Store the frame duration if it is valid, otherwise keep the current
+	 * value.
+	 *
+	 * \todo The provided value should be adjusted based on the camera
+	 * capabilities.
+	 */
+	if (arg->parm.capture.timeperframe.numerator &&
+	    arg->parm.capture.timeperframe.denominator)
+		v4l2TimePerFrame_ = arg->parm.capture.timeperframe;
+
+	memset(&arg->parm, 0, sizeof(arg->parm));
+
+	arg->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	arg->parm.capture.timeperframe = v4l2TimePerFrame_;
+
+	/* Apply the frame duration. */
+	utils::Duration frameDuration = 1.0s * v4l2TimePerFrame_.numerator
+				      / v4l2TimePerFrame_.denominator;
+	int64_t uDuration = frameDuration.get<std::micro>();
+	vcam_->controls().set(controls::FrameDurationLimits, { uDuration, uDuration });
+
+	return 0;
+}
+
 const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
 	VIDIOC_QUERYCAP,
 	VIDIOC_ENUM_FRAMESIZES,
@@ -776,6 +850,8 @@ const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
 	VIDIOC_EXPBUF,
 	VIDIOC_STREAMON,
 	VIDIOC_STREAMOFF,
+	VIDIOC_G_PARM,
+	VIDIOC_S_PARM,
 };
 
 int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void *arg)
@@ -863,6 +939,12 @@ int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long longRequest, void *arg)
 	case VIDIOC_STREAMOFF:
 		ret = vidioc_streamoff(file, static_cast<int *>(arg));
 		break;
+	case VIDIOC_G_PARM:
+		ret = vidioc_g_parm(file, static_cast<struct v4l2_streamparm *>(arg));
+		break;
+	case VIDIOC_S_PARM:
+		ret = vidioc_s_parm(file, static_cast<struct v4l2_streamparm *>(arg));
+		break;
 	default:
 		ret = -ENOTTY;
 		break;
diff --git a/src/v4l2/v4l2_camera_proxy.h b/src/v4l2/v4l2_camera_proxy.h
index 3d8784df..5aa352c3 100644
--- a/src/v4l2/v4l2_camera_proxy.h
+++ b/src/v4l2/v4l2_camera_proxy.h
@@ -67,6 +67,8 @@ private:
 	int vidioc_expbuf(V4L2CameraFile *file, struct v4l2_exportbuffer *arg);
 	int vidioc_streamon(V4L2CameraFile *file, int *arg);
 	int vidioc_streamoff(V4L2CameraFile *file, int *arg);
+	int vidioc_g_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
+	int vidioc_s_parm(V4L2CameraFile *file, struct v4l2_streamparm *arg);
 
 	bool hasOwnership(V4L2CameraFile *file);
 	int acquire(V4L2CameraFile *file);
@@ -84,6 +86,7 @@ private:
 
 	struct v4l2_capability capabilities_;
 	struct v4l2_pix_format v4l2PixFormat_;
+	struct v4l2_fract v4l2TimePerFrame_;
 
 	std::vector<struct v4l2_buffer> buffers_;
 	std::map<void *, unsigned int> mmaps_;
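VIDIOC_G_PARM/S_PARM express frame rate as a time-per-frame fraction (numerator/denominator seconds), while FrameDurationLimits takes microseconds. A standalone sketch of the conversion performed above, with plain integers standing in for struct v4l2_fract and utils::Duration (frameDurationUs() is an illustrative name, not part of the patch):

#include <cstdint>
#include <iostream>

/* Convert a V4L2 time-per-frame fraction (in seconds) to microseconds. */
static int64_t frameDurationUs(uint32_t numerator, uint32_t denominator)
{
	return static_cast<int64_t>(numerator) * 1000000 / denominator;
}

int main()
{
	/* 30 fps can be written as 1/30 s or, equivalently, 33333/1000000 s. */
	std::cout << frameDurationUs(1, 30) << " us\n";          /* 33333 us */
	std::cout << frameDurationUs(33333, 1000000) << " us\n"; /* 33333 us */

	/* A 33333 us frame duration maps back to roughly 30 fps. */
	std::cout << 1000000.0 / frameDurationUs(1, 30) << " fps\n";
}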
diff --git a/src/v4l2/v4l2_compat.cpp b/src/v4l2/v4l2_compat.cpp
index 8a44403e..ff833f57 100644
--- a/src/v4l2/v4l2_compat.cpp
+++ b/src/v4l2/v4l2_compat.cpp
@@ -7,12 +7,14 @@
 
 #include "v4l2_compat_manager.h"
 
-#include <errno.h>
+#include <assert.h>
 #include <fcntl.h>
 #include <stdarg.h>
+#include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include <unistd.h>
 
 #include <libcamera/base/utils.h>
 
@@ -28,71 +30,97 @@ using namespace libcamera;
 		va_end(ap);			\
 	}
 
+namespace {
+
+/*
+ * Determine if the flags require a further mode argument that needs to be
+ * parsed from va_args.
+ */
+bool needs_mode(int flags)
+{
+	return (flags & O_CREAT) || ((flags & O_TMPFILE) == O_TMPFILE);
+}
+
+} /* namespace */
+
 extern "C" {
 
 LIBCAMERA_PUBLIC int open(const char *path, int oflag, ...)
 {
 	mode_t mode = 0;
-	if (oflag & O_CREAT || oflag & O_TMPFILE)
+	if (needs_mode(oflag))
 		extract_va_arg(mode_t, mode, oflag);
 
 	return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
 						     oflag, mode);
 }
 
-/* _FORTIFY_SOURCE redirects open to __open_2 */
-LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
-{
-	return open(path, oflag);
-}
-
 #ifndef open64
 LIBCAMERA_PUBLIC int open64(const char *path, int oflag, ...)
 {
 	mode_t mode = 0;
-	if (oflag & O_CREAT || oflag & O_TMPFILE)
+	if (needs_mode(oflag))
 		extract_va_arg(mode_t, mode, oflag);
 
 	return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
 						     oflag | O_LARGEFILE, mode);
 }
-
-LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
-{
-	return open64(path, oflag);
-}
 #endif
 
 LIBCAMERA_PUBLIC int openat(int dirfd, const char *path, int oflag, ...)
 {
 	mode_t mode = 0;
-	if (oflag & O_CREAT || oflag & O_TMPFILE)
+	if (needs_mode(oflag))
 		extract_va_arg(mode_t, mode, oflag);
 
 	return V4L2CompatManager::instance()->openat(dirfd, path, oflag, mode);
 }
 
-LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
-{
-	return openat(dirfd, path, oflag);
-}
-
 #ifndef openat64
 LIBCAMERA_PUBLIC int openat64(int dirfd, const char *path, int oflag, ...)
 {
 	mode_t mode = 0;
-	if (oflag & O_CREAT || oflag & O_TMPFILE)
+	if (needs_mode(oflag))
 		extract_va_arg(mode_t, mode, oflag);
 
 	return V4L2CompatManager::instance()->openat(dirfd, path,
 						     oflag | O_LARGEFILE, mode);
 }
+#endif
+
+/*
+ * _FORTIFY_SOURCE redirects open* to __open*_2. Disable the
+ * -Wmissing-declarations warnings, as the functions won't be declared if
+ * _FORTIFY_SOURCE is not in use.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-declarations"
+
+LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
+{
+	assert(!needs_mode(oflag));
+	return open(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
+{
+	assert(!needs_mode(oflag));
+	return open64(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
+{
+	assert(!needs_mode(oflag));
+	return openat(dirfd, path, oflag);
+}
 
 LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
 {
+	assert(!needs_mode(oflag));
 	return openat64(dirfd, path, oflag);
 }
-#endif
+
+#pragma GCC diagnostic pop
 
 LIBCAMERA_PUBLIC int dup(int oldfd)
 {
@@ -125,7 +153,11 @@ LIBCAMERA_PUBLIC int munmap(void *addr, size_t length)
 	return V4L2CompatManager::instance()->munmap(addr, length);
 }
 
+#if HAVE_POSIX_IOCTL
+LIBCAMERA_PUBLIC int ioctl(int fd, int request, ...)
+#else
 LIBCAMERA_PUBLIC int ioctl(int fd, unsigned long request, ...)
+#endif
 {
 	void *arg;
 	extract_va_arg(void *, arg, request);
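On Linux, O_TMPFILE is defined as (__O_TMPFILE | O_DIRECTORY), so the old test "oflag & O_TMPFILE" was also true for plain O_DIRECTORY opens and could read a mode argument that was never passed. A standalone sketch of the corrected predicate (needs_mode() reproduced from the patch; glibc on Linux assumed for O_TMPFILE visibility):

#include <fcntl.h>
#include <iostream>

/* From the patch: a mode argument follows only for O_CREAT or full O_TMPFILE. */
static bool needs_mode(int flags)
{
	return (flags & O_CREAT) || ((flags & O_TMPFILE) == O_TMPFILE);
}

int main()
{
	std::cout << needs_mode(O_RDONLY | O_DIRECTORY) << '\n'; /* 0: no mode expected */
	std::cout << needs_mode(O_RDWR | O_TMPFILE) << '\n';     /* 1 */
	std::cout << needs_mode(O_WRONLY | O_CREAT) << '\n';     /* 1 */

	/* The old "flags & O_TMPFILE" test was wrongly non-zero here too: */
	std::cout << (((O_RDONLY | O_DIRECTORY) & O_TMPFILE) ? 1 : 0) << '\n'; /* 1 */
}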
diff --git a/src/v4l2/v4l2_compat_manager.cpp b/src/v4l2/v4l2_compat_manager.cpp
index 6a00afb5..f53fb300 100644
--- a/src/v4l2/v4l2_compat_manager.cpp
+++ b/src/v4l2/v4l2_compat_manager.cpp
@@ -10,7 +10,6 @@
 #include <dlfcn.h>
 #include <fcntl.h>
 #include <map>
-#include <stdarg.h>
 #include <string.h>
 #include <sys/eventfd.h>
 #include <sys/mman.h>