/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2019, Google Inc.
 *
 * v4l2_camera_proxy.cpp - Proxy to V4L2 compatibility camera
 */

#include "v4l2_camera_proxy.h"

/*
 * NOTE(review): the original extraction stripped every <...> token, which
 * emptied the include list and removed all template arguments in this file.
 * The headers and template arguments below are reconstructed from usage;
 * verify against upstream libcamera.
 */
#include <algorithm>
#include <errno.h>
#include <linux/videodev2.h>
#include <memory>
#include <set>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/object.h>

#include "libcamera/internal/formats.h"
#include "libcamera/internal/log.h"
#include "libcamera/internal/utils.h"

#include "v4l2_camera.h"
#include "v4l2_camera_file.h"
#include "v4l2_compat_manager.h"

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

using namespace libcamera;

LOG_DECLARE_CATEGORY(V4L2Compat)

V4L2CameraProxy::V4L2CameraProxy(unsigned int index,
				 std::shared_ptr<Camera> camera)
	: refcount_(0), index_(index), bufferCount_(0), currentBuf_(0),
	  vcam_(std::make_unique<V4L2Camera>(camera)), owner_(nullptr)
{
	querycap(camera);
}

/*
 * Open the proxy on behalf of \a file. The camera is opened once, on the
 * first open, and stays open until the last V4L2CameraFile is closed.
 */
int V4L2CameraProxy::open(V4L2CameraFile *file)
{
	LOG(V4L2Compat, Debug) << "Servicing open fd = " << file->efd();

	MutexLocker locker(proxyMutex_);

	if (refcount_++) {
		files_.insert(file);
		return 0;
	}

	/*
	 * We open the camera here, once, and keep it open until the last
	 * V4L2CameraFile is closed. The proxy is initially not owned by any
	 * file. The first file that calls reqbufs with count > 0 or s_fmt
	 * will become the owner, and no other file will be allowed to call
	 * buffer-related ioctls (except querybuf), set the format, or start or
	 * stop the stream until ownership is released with a call to reqbufs
	 * with count = 0.
	 */

	int ret = vcam_->open(&streamConfig_);
	if (ret < 0) {
		refcount_--;
		return ret;
	}

	setFmtFromConfig(streamConfig_);

	files_.insert(file);

	return 0;
}

/* Close \a file's view of the proxy; the camera closes with the last file. */
void V4L2CameraProxy::close(V4L2CameraFile *file)
{
	LOG(V4L2Compat, Debug) << "Servicing close fd = " << file->efd();

	MutexLocker locker(proxyMutex_);

	files_.erase(file);

	release(file);

	if (--refcount_ > 0)
		return;

	vcam_->close();
}

/* Map buffer \a offset/\a length to userspace through the buffer's dmabuf fd. */
void *V4L2CameraProxy::mmap(void *addr, size_t length, int prot, int flags,
			    off64_t offset)
{
	LOG(V4L2Compat, Debug) << "Servicing mmap";

	MutexLocker locker(proxyMutex_);

	/* \todo Validate prot and flags properly. */
	if (prot != (PROT_READ | PROT_WRITE)) {
		errno = EINVAL;
		return MAP_FAILED;
	}

	/* Buffers are indexed by their byte offset into the (virtual) device. */
	unsigned int index = offset / sizeimage_;
	if (static_cast<off64_t>(index * sizeimage_) != offset ||
	    length != sizeimage_) {
		errno = EINVAL;
		return MAP_FAILED;
	}

	FileDescriptor fd = vcam_->getBufferFd(index);
	if (!fd.isValid()) {
		errno = EINVAL;
		return MAP_FAILED;
	}

	void *map = V4L2CompatManager::instance()->fops().mmap(addr, length, prot,
							       flags, fd.fd(), 0);
	if (map == MAP_FAILED)
		return map;

	buffers_[index].flags |= V4L2_BUF_FLAG_MAPPED;
	mmaps_[map] = index;

	return map;
}

/* Unmap a region previously returned by mmap(). */
int V4L2CameraProxy::munmap(void *addr, size_t length)
{
	LOG(V4L2Compat, Debug) << "Servicing munmap";

	MutexLocker locker(proxyMutex_);

	auto iter = mmaps_.find(addr);
	if (iter == mmaps_.end() || length != sizeimage_) {
		errno = EINVAL;
		return -1;
	}

	if (V4L2CompatManager::instance()->fops().munmap(addr, length))
		LOG(V4L2Compat, Error) << "Failed to unmap " << addr
				       << " with length " << length;

	buffers_[iter->second].flags &= ~V4L2_BUF_FLAG_MAPPED;
	mmaps_.erase(iter);

	return 0;
}

/* Only the video capture buffer type is supported. */
bool V4L2CameraProxy::validateBufferType(uint32_t type)
{
	return type == V4L2_BUF_TYPE_VIDEO_CAPTURE;
}

/* Only MMAP memory is supported. */
bool V4L2CameraProxy::validateMemoryType(uint32_t memory)
{
	return memory == V4L2_MEMORY_MMAP;
}

/* Populate the cached v4l2_pix_format from a libcamera StreamConfiguration. */
void V4L2CameraProxy::setFmtFromConfig(const StreamConfiguration &streamConfig)
{
	const PixelFormatInfo &info = PixelFormatInfo::info(streamConfig.pixelFormat);
	const Size &size = streamConfig.size;

	v4l2PixFormat_.width = size.width;
	v4l2PixFormat_.height = size.height;
	v4l2PixFormat_.pixelformat = info.v4l2Format;
	v4l2PixFormat_.field = V4L2_FIELD_NONE;
	v4l2PixFormat_.bytesperline = streamConfig.stride;
	v4l2PixFormat_.sizeimage = streamConfig.frameSize;
	v4l2PixFormat_.colorspace = V4L2_COLORSPACE_SRGB;
	v4l2PixFormat_.priv = V4L2_PIX_FMT_PRIV_MAGIC;
	v4l2PixFormat_.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	v4l2PixFormat_.quantization = V4L2_QUANTIZATION_DEFAULT;
	v4l2PixFormat_.xfer_func = V4L2_XFER_FUNC_DEFAULT;

	sizeimage_ = streamConfig.frameSize;
}

/* Fill the cached v4l2_capability structure for VIDIOC_QUERYCAP. */
void V4L2CameraProxy::querycap(std::shared_ptr<Camera> camera)
{
	std::string driver = "libcamera";
	std::string bus_info = driver + ":" + std::to_string(index_);

	utils::strlcpy(reinterpret_cast<char *>(capabilities_.driver),
		       driver.c_str(), sizeof(capabilities_.driver));
	utils::strlcpy(reinterpret_cast<char *>(capabilities_.card),
		       camera->id().c_str(), sizeof(capabilities_.card));
	utils::strlcpy(reinterpret_cast<char *>(capabilities_.bus_info),
		       bus_info.c_str(), sizeof(capabilities_.bus_info));
	/* \todo Put this in a header/config somewhere. */
	capabilities_.version = KERNEL_VERSION(5, 2, 0);
	capabilities_.device_caps = V4L2_CAP_VIDEO_CAPTURE
				  | V4L2_CAP_STREAMING
				  | V4L2_CAP_EXT_PIX_FORMAT;
	capabilities_.capabilities = capabilities_.device_caps
				   | V4L2_CAP_DEVICE_CAPS;
	memset(capabilities_.reserved, 0, sizeof(capabilities_.reserved));
}

/* Merge completed-buffer metadata from the camera into the v4l2_buffer cache. */
void V4L2CameraProxy::updateBuffers()
{
	std::vector<V4L2Camera::Buffer> completedBuffers = vcam_->completedBuffers();
	for (const V4L2Camera::Buffer &buffer : completedBuffers) {
		const FrameMetadata &fmd = buffer.data_;
		struct v4l2_buffer &buf = buffers_[buffer.index_];

		switch (fmd.status) {
		case FrameMetadata::FrameSuccess:
			buf.bytesused = fmd.planes[0].bytesused;
			buf.field = V4L2_FIELD_NONE;
			/*
			 * fmd.timestamp is in nanoseconds (tv_sec divides by
			 * 1e9). Fix: the microsecond part is the sub-second
			 * remainder converted to us, (ns / 1000) % 1000000,
			 * not ns % 1000000 as the original computed.
			 */
			buf.timestamp.tv_sec = fmd.timestamp / 1000000000;
			buf.timestamp.tv_usec = (fmd.timestamp / 1000) % 1000000;
			buf.sequence = fmd.sequence;

			buf.flags |= V4L2_BUF_FLAG_DONE;
			break;
		case FrameMetadata::FrameError:
			buf.flags |= V4L2_BUF_FLAG_ERROR;
			break;
		default:
			break;
		}
	}
}

int V4L2CameraProxy::vidioc_querycap(struct v4l2_capability *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_querycap";

	*arg = capabilities_;

	return 0;
}

int V4L2CameraProxy::vidioc_enum_framesizes(V4L2CameraFile *file,
					    struct v4l2_frmsizeenum *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_enum_framesizes fd = "
			       << file->efd();

	V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->pixel_format);
	PixelFormat format = PixelFormatInfo::info(v4l2Format).format;
	/*
	 * \todo This might need to be expanded as few pipeline handlers
	 * report StreamFormats.
	 */
	const std::vector<Size> &frameSizes = streamConfig_.formats().sizes(format);

	if (arg->index >= frameSizes.size())
		return -EINVAL;

	arg->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	arg->discrete.width = frameSizes[arg->index].width;
	arg->discrete.height = frameSizes[arg->index].height;
	memset(arg->reserved, 0, sizeof(arg->reserved));

	return 0;
}

int V4L2CameraProxy::vidioc_enum_fmt(V4L2CameraFile *file,
				     struct v4l2_fmtdesc *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_enum_fmt fd = "
			       << file->efd();

	if (!validateBufferType(arg->type) ||
	    arg->index >= streamConfig_.formats().pixelformats().size())
		return -EINVAL;

	PixelFormat format = streamConfig_.formats().pixelformats()[arg->index];

	/* \todo Set V4L2_FMT_FLAG_COMPRESSED for compressed formats. */
	arg->flags = 0;
	/* \todo Add map from format to description. */
	utils::strlcpy(reinterpret_cast<char *>(arg->description),
		       "Video Format Description", sizeof(arg->description));
	arg->pixelformat = PixelFormatInfo::info(format).v4l2Format;

	memset(arg->reserved, 0, sizeof(arg->reserved));

	return 0;
}

int V4L2CameraProxy::vidioc_g_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_g_fmt fd = " << file->efd();

	if (!validateBufferType(arg->type))
		return -EINVAL;

	memset(&arg->fmt, 0, sizeof(arg->fmt));
	arg->fmt.pix = v4l2PixFormat_;

	return 0;
}

/*
 * Negotiate the format in \a arg against the camera and write back the
 * closest configuration the camera can produce.
 */
int V4L2CameraProxy::tryFormat(struct v4l2_format *arg)
{
	V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
	PixelFormat format = PixelFormatInfo::info(v4l2Format).format;
	Size size(arg->fmt.pix.width, arg->fmt.pix.height);

	StreamConfiguration config;
	int ret = vcam_->validateConfiguration(format, size, &config);
	if (ret < 0) {
		LOG(V4L2Compat, Error)
			<< "Failed to negotiate a valid format: "
			<< format.toString();
		return -EINVAL;
	}

	const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);

	arg->fmt.pix.width = config.size.width;
	arg->fmt.pix.height = config.size.height;
	arg->fmt.pix.pixelformat = info.v4l2Format;
	arg->fmt.pix.field = V4L2_FIELD_NONE;
	arg->fmt.pix.bytesperline = config.stride;
	arg->fmt.pix.sizeimage = config.frameSize;
	arg->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
	arg->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
	arg->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	arg->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
	arg->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return 0;
}

int V4L2CameraProxy::vidioc_s_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_s_fmt fd = " << file->efd();

	if (!validateBufferType(arg->type))
		return -EINVAL;

	if (file->priority() < maxPriority())
		return -EBUSY;

	/* Setting the format claims exclusive ownership of the proxy. */
	int ret = acquire(file);
	if (ret < 0)
		return ret;

	ret = tryFormat(arg);
	if (ret < 0)
		return ret;

	Size size(arg->fmt.pix.width, arg->fmt.pix.height);
	V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
	ret = vcam_->configure(&streamConfig_, size,
			       PixelFormatInfo::info(v4l2Format).format,
			       bufferCount_);
	if (ret < 0)
		return -EINVAL;

	setFmtFromConfig(streamConfig_);

	return 0;
}

int V4L2CameraProxy::vidioc_try_fmt(V4L2CameraFile *file,
				    struct v4l2_format *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_try_fmt fd = " << file->efd();

	if (!validateBufferType(arg->type))
		return -EINVAL;

	int ret = tryFormat(arg);
	if (ret < 0)
		return ret;

	return 0;
}

/* Highest priority among all open files, or V4L2_PRIORITY_UNSET if none. */
enum v4l2_priority V4L2CameraProxy::maxPriority()
{
	auto max = std::max_element(files_.begin(), files_.end(),
				    [](const V4L2CameraFile *a,
				       const V4L2CameraFile *b) {
					    return a->priority() < b->priority();
				    });
	return max != files_.end() ? (*max)->priority() : V4L2_PRIORITY_UNSET;
}

int V4L2CameraProxy::vidioc_g_priority(V4L2CameraFile *file,
				       enum v4l2_priority *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_g_priority fd = "
			       << file->efd();

	*arg = maxPriority();

	return 0;
}

int V4L2CameraProxy::vidioc_s_priority(V4L2CameraFile *file,
				       enum v4l2_priority *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_s_priority fd = "
			       << file->efd();

	if (*arg > V4L2_PRIORITY_RECORD)
		return -EINVAL;

	if (file->priority() < maxPriority())
		return -EBUSY;

	file->setPriority(*arg);

	return 0;
}

int V4L2CameraProxy::vidioc_enuminput(V4L2CameraFile *file,
				      struct v4l2_input *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_enuminput fd = "
			       << file->efd();

	/* A single camera input is exposed. */
	if (arg->index != 0)
		return -EINVAL;

	memset(arg, 0, sizeof(*arg));

	utils::strlcpy(reinterpret_cast<char *>(arg->name),
		       reinterpret_cast<char *>(capabilities_.card),
		       sizeof(arg->name));
	arg->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

int V4L2CameraProxy::vidioc_g_input(V4L2CameraFile *file, int *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_g_input fd = "
			       << file->efd();

	*arg = 0;

	return 0;
}

int V4L2CameraProxy::vidioc_s_input(V4L2CameraFile *file, int *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_s_input fd = "
			       << file->efd();

	if (*arg != 0)
		return -EINVAL;

	return 0;
}

/* Release all libcamera buffers and reset the buffer bookkeeping. */
void V4L2CameraProxy::freeBuffers()
{
	LOG(V4L2Compat, Debug) << "Freeing libcamera bufs";

	vcam_->freeBuffers();
	buffers_.clear();
	bufferCount_ = 0;
}

int V4L2CameraProxy::vidioc_reqbufs(V4L2CameraFile *file,
				    struct v4l2_requestbuffers *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_reqbufs fd = "
			       << file->efd();

	if (!validateBufferType(arg->type) ||
	    !validateMemoryType(arg->memory))
		return -EINVAL;

	LOG(V4L2Compat, Debug) << arg->count << " buffers requested ";

	if (file->priority() < maxPriority())
		return -EBUSY;

	if (!hasOwnership(file) && owner_)
		return -EBUSY;

	arg->capabilities = V4L2_BUF_CAP_SUPPORTS_MMAP;
	memset(arg->reserved, 0, sizeof(arg->reserved));

	/* count == 0 frees the buffers and releases ownership. */
	if (arg->count == 0) {
		/* \todo Add buffer orphaning support */
		if (!mmaps_.empty())
			return -EBUSY;

		if (vcam_->isRunning())
			return -EBUSY;

		freeBuffers();
		release(file);

		return 0;
	}

	if (bufferCount_ > 0)
		freeBuffers();

	Size size(v4l2PixFormat_.width, v4l2PixFormat_.height);
	V4L2PixelFormat v4l2Format = V4L2PixelFormat(v4l2PixFormat_.pixelformat);
	int ret = vcam_->configure(&streamConfig_, size,
				   PixelFormatInfo::info(v4l2Format).format,
				   arg->count);
	if (ret < 0)
		return -EINVAL;

	setFmtFromConfig(streamConfig_);

	/* The camera may adjust the buffer count; report it back. */
	arg->count = streamConfig_.bufferCount;
	bufferCount_ = arg->count;

	ret = vcam_->allocBuffers(arg->count);
	if (ret < 0) {
		arg->count = 0;
		return ret;
	}

	buffers_.resize(arg->count);
	for (unsigned int i = 0; i < arg->count; i++) {
		struct v4l2_buffer buf = {};
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.length = v4l2PixFormat_.sizeimage;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.m.offset = i * v4l2PixFormat_.sizeimage;
		buf.index = i;
		buf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

		buffers_[i] = buf;
	}

	LOG(V4L2Compat, Debug) << "Allocated " << arg->count << " buffers";

	acquire(file);

	return 0;
}

int V4L2CameraProxy::vidioc_querybuf(V4L2CameraFile *file,
				     struct v4l2_buffer *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_querybuf fd = "
			       << file->efd();

	/* Deduplicated: the original checked arg->index twice. */
	if (!validateBufferType(arg->type) || arg->index >= bufferCount_)
		return -EINVAL;

	updateBuffers();

	*arg = buffers_[arg->index];

	return 0;
}

int V4L2CameraProxy::vidioc_qbuf(V4L2CameraFile *file, struct v4l2_buffer *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_qbuf, index = "
			       << arg->index << " fd = " << file->efd();

	if (arg->index >= bufferCount_)
		return -EINVAL;

	/* Refuse to queue the same buffer twice. */
	if (buffers_[arg->index].flags & V4L2_BUF_FLAG_QUEUED)
		return -EINVAL;

	if (!hasOwnership(file))
		return -EBUSY;

	/* Deduplicated: the original re-checked arg->index here. */
	if (!validateBufferType(arg->type) ||
	    !validateMemoryType(arg->memory))
		return -EINVAL;

	int ret = vcam_->qbuf(arg->index);
	if (ret < 0)
		return ret;

	buffers_[arg->index].flags |= V4L2_BUF_FLAG_QUEUED;

	arg->flags = buffers_[arg->index].flags;

	return ret;
}

/*
 * Dequeue the next completed buffer. \a locker is released while blocking so
 * other file operations can proceed.
 */
int V4L2CameraProxy::vidioc_dqbuf(V4L2CameraFile *file,
				  struct v4l2_buffer *arg,
				  MutexLocker *locker)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_dqbuf fd = " << file->efd();

	if (arg->index >= bufferCount_)
		return -EINVAL;

	if (!hasOwnership(file))
		return -EBUSY;

	if (!vcam_->isRunning())
		return -EINVAL;

	if (!validateBufferType(arg->type) ||
	    !validateMemoryType(arg->memory))
		return -EINVAL;

	if (!file->nonBlocking()) {
		locker->unlock();
		vcam_->waitForBufferAvailable();
		locker->lock();
	} else if (!vcam_->isBufferAvailable())
		return -EAGAIN;

	/*
	 * We need to check here again in case stream was turned off while we
	 * were blocked on waitForBufferAvailable().
	 */
	if (!vcam_->isRunning())
		return -EINVAL;

	updateBuffers();

	struct v4l2_buffer &buf = buffers_[currentBuf_];

	buf.flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE);
	buf.length = sizeimage_;
	*arg = buf;

	currentBuf_ = (currentBuf_ + 1) % bufferCount_;

	uint64_t data;
	int ret = ::read(file->efd(), &data, sizeof(data));
	if (ret != sizeof(data))
		LOG(V4L2Compat, Error) << "Failed to clear eventfd POLLIN";

	return 0;
}

int V4L2CameraProxy::vidioc_streamon(V4L2CameraFile *file, int *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_streamon fd = "
			       << file->efd();

	if (bufferCount_ == 0)
		return -EINVAL;

	if (!validateBufferType(*arg))
		return -EINVAL;

	if (file->priority() < maxPriority())
		return -EBUSY;

	if (!hasOwnership(file))
		return -EBUSY;

	if (vcam_->isRunning())
		return 0;

	currentBuf_ = 0;

	return vcam_->streamOn();
}

int V4L2CameraProxy::vidioc_streamoff(V4L2CameraFile *file, int *arg)
{
	LOG(V4L2Compat, Debug) << "Servicing vidioc_streamoff fd = "
			       << file->efd();

	if (!validateBufferType(*arg))
		return -EINVAL;

	if (file->priority() < maxPriority())
		return -EBUSY;

	if (!hasOwnership(file) && owner_)
		return -EBUSY;

	int ret = vcam_->streamOff();

	for (struct v4l2_buffer &buf : buffers_)
		buf.flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE);

	return ret;
}

const std::set<unsigned long> V4L2CameraProxy::supportedIoctls_ = {
	VIDIOC_QUERYCAP,
	VIDIOC_ENUM_FRAMESIZES,
	VIDIOC_ENUM_FMT,
	VIDIOC_G_FMT,
	VIDIOC_S_FMT,
	VIDIOC_TRY_FMT,
	VIDIOC_G_PRIORITY,
	VIDIOC_S_PRIORITY,
	VIDIOC_ENUMINPUT,
	VIDIOC_G_INPUT,
	VIDIOC_S_INPUT,
	VIDIOC_REQBUFS,
	VIDIOC_QUERYBUF,
	VIDIOC_QBUF,
	VIDIOC_DQBUF,
	VIDIOC_STREAMON,
	VIDIOC_STREAMOFF,
};

/* Dispatch a V4L2 ioctl to the matching vidioc_* handler. */
int V4L2CameraProxy::ioctl(V4L2CameraFile *file, unsigned long request, void *arg)
{
	MutexLocker locker(proxyMutex_);

	if (!arg && (_IOC_DIR(request) & _IOC_WRITE)) {
		errno = EFAULT;
		return -1;
	}

	if (supportedIoctls_.find(request) == supportedIoctls_.end()) {
		errno = ENOTTY;
		return -1;
	}

	if (!arg && (_IOC_DIR(request) & _IOC_READ)) {
		errno = EFAULT;
		return -1;
	}

	int ret;
	switch (request) {
	case VIDIOC_QUERYCAP:
		ret = vidioc_querycap(static_cast<struct v4l2_capability *>(arg));
		break;
	case VIDIOC_ENUM_FRAMESIZES:
		ret = vidioc_enum_framesizes(file, static_cast<struct v4l2_frmsizeenum *>(arg));
		break;
	case VIDIOC_ENUM_FMT:
		ret = vidioc_enum_fmt(file, static_cast<struct v4l2_fmtdesc *>(arg));
		break;
	case VIDIOC_G_FMT:
		ret = vidioc_g_fmt(file, static_cast<struct v4l2_format *>(arg));
		break;
	case VIDIOC_S_FMT:
		ret = vidioc_s_fmt(file, static_cast<struct v4l2_format *>(arg));
		break;
	case VIDIOC_TRY_FMT:
		ret = vidioc_try_fmt(file, static_cast<struct v4l2_format *>(arg));
		break;
	case VIDIOC_G_PRIORITY:
		ret = vidioc_g_priority(file, static_cast<enum v4l2_priority *>(arg));
		break;
	case VIDIOC_S_PRIORITY:
		ret = vidioc_s_priority(file, static_cast<enum v4l2_priority *>(arg));
		break;
	case VIDIOC_ENUMINPUT:
		ret = vidioc_enuminput(file, static_cast<struct v4l2_input *>(arg));
		break;
	case VIDIOC_G_INPUT:
		ret = vidioc_g_input(file, static_cast<int *>(arg));
		break;
	case VIDIOC_S_INPUT:
		ret = vidioc_s_input(file, static_cast<int *>(arg));
		break;
	case VIDIOC_REQBUFS:
		ret = vidioc_reqbufs(file, static_cast<struct v4l2_requestbuffers *>(arg));
		break;
	case VIDIOC_QUERYBUF:
		ret = vidioc_querybuf(file, static_cast<struct v4l2_buffer *>(arg));
		break;
	case VIDIOC_QBUF:
		ret = vidioc_qbuf(file, static_cast<struct v4l2_buffer *>(arg));
		break;
	case VIDIOC_DQBUF:
		ret = vidioc_dqbuf(file, static_cast<struct v4l2_buffer *>(arg), &locker);
		break;
	case VIDIOC_STREAMON:
		ret = vidioc_streamon(file, static_cast<int *>(arg));
		break;
	case VIDIOC_STREAMOFF:
		ret = vidioc_streamoff(file, static_cast<int *>(arg));
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return ret;
}

/* True if \a file currently holds exclusive ownership of the proxy. */
bool V4L2CameraProxy::hasOwnership(V4L2CameraFile *file)
{
	return owner_ == file;
}

/**
 * \brief Acquire exclusive ownership of the V4L2Camera
 *
 * \return Zero on success or if already acquired, and negative error on
 * failure.
 *
 * This is sufficient for poll()ing for buffers. Events, however, are signaled
 * on the file level, so all fds must be signaled. poll()ing from a different
 * fd than the one that locks the device is a corner case, and is currently not
 * supported.
 */
int V4L2CameraProxy::acquire(V4L2CameraFile *file)
{
	if (owner_ == file)
		return 0;

	if (owner_)
		return -EBUSY;

	vcam_->bind(file->efd());

	owner_ = file;

	return 0;
}

void V4L2CameraProxy::release(V4L2CameraFile *file)
{
	if (owner_ != file)
		return;

	vcam_->unbind();

	owner_ = nullptr;
}

/*
 * ------------------------------------------------------------------------
 * NOTE(review): everything below this marker belongs to a different source
 * file (the libcamera IPU3 auto-focus algorithm, ipa/ipu3/algorithms/af.cpp)
 * that was spliced into this one by a broken extraction (HTML highlighting
 * residue was present at the splice point). It is preserved here unmodified
 * so no content is lost; it should be moved back to its own file.
 * ------------------------------------------------------------------------
 */

/*
 * Static variables from ChromiumOS Intel Camera HAL and ia_imaging library:
 * - https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/hal/intel/psl/ipu3/statsConverter/ipu3-stats.h
 * - https://chromium.googlesource.com/chromiumos/platform/camera/+/refs/heads/main/hal/intel/ipu3/include/ia_imaging/af_public.h
 */

/** The minimum horizontal grid dimension. */
static constexpr uint8_t kAfMinGridWidth = 16;
/** The minimum vertical grid dimension. */
static constexpr uint8_t kAfMinGridHeight = 16;
/** The maximum horizontal grid dimension. */
static constexpr uint8_t kAfMaxGridWidth = 32;
/** The maximum vertical grid dimension. */
static constexpr uint8_t kAfMaxGridHeight = 24;
/** The minimum value of Log2 of the width of the grid cell. */
static constexpr uint16_t kAfMinGridBlockWidth = 4;
/** The minimum value of Log2 of the height of the grid cell. */
static constexpr uint16_t kAfMinGridBlockHeight = 3;
/** The maximum value of Log2 of the width of the grid cell. */
static constexpr uint16_t kAfMaxGridBlockWidth = 6;
/** The maximum value of Log2 of the height of the grid cell.
*/ static constexpr uint16_t kAfMaxGridBlockHeight = 6; /** The number of blocks in vertical axis per slice. */ static constexpr uint16_t kAfDefaultHeightPerSlice = 2; namespace libcamera { using namespace std::literals::chrono_literals; namespace ipa::ipu3::algorithms { LOG_DEFINE_CATEGORY(IPU3Af) /** * Maximum focus steps of the VCM control * \todo should be obtained from the VCM driver */ static constexpr uint32_t kMaxFocusSteps = 1023; /* Minimum focus step for searching appropriate focus */ static constexpr uint32_t kCoarseSearchStep = 30; static constexpr uint32_t kFineSearchStep = 1; /* Max ratio of variance change, 0.0 < kMaxChange < 1.0 */ static constexpr double kMaxChange = 0.5; /* The numbers of frame to be ignored, before performing focus scan. */ static constexpr uint32_t kIgnoreFrame = 10; /* Fine scan range 0 < kFineRange < 1 */ static constexpr double kFineRange = 0.05; /* Settings for IPU3 AF filter */ static struct ipu3_uapi_af_filter_config afFilterConfigDefault = { .y1_coeff_0 = { 0, 1, 3, 7 }, .y1_coeff_1 = { 11, 13, 1, 2 }, .y1_coeff_2 = { 8, 19, 34, 242 }, .y1_sign_vec = 0x7fdffbfe, .y2_coeff_0 = { 0, 1, 6, 6 }, .y2_coeff_1 = { 13, 25, 3, 0 }, .y2_coeff_2 = { 25, 3, 177, 254 }, .y2_sign_vec = 0x4e53ca72, .y_calc = { 8, 8, 8, 8 }, .nf = { 0, 9, 0, 9, 0 }, }; /** * \class Af * \brief An auto-focus algorithm based on IPU3 statistics * * This algorithm is used to determine the position of the lens to make a * focused image. The IPU3 AF processing block computes the statistics that * are composed by two types of filtered value and stores in a AF buffer. * Typically, for a clear image, it has a relatively higher contrast than a * blurred one. Therefore, if an image with the highest contrast can be * found through the scan, the position of the len indicates to a clearest * image. 
*/ Af::Af() : focus_(0), bestFocus_(0), currentVariance_(0.0), previousVariance_(0.0), coarseCompleted_(false), fineCompleted_(false) { } /** * \brief Configure the Af given a configInfo * \param[in] context The shared IPA context * \param[in] configInfo The IPA configuration data * \return 0 on success, a negative error code otherwise */ int Af::configure(IPAContext &context, const IPAConfigInfo &configInfo) { struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid; grid.width = kAfMinGridWidth; grid.height = kAfMinGridHeight; grid.block_width_log2 = kAfMinGridBlockWidth; grid.block_height_log2 = kAfMinGridBlockHeight; /* * \todo - while this clamping code is effectively a no-op, it satisfies * the compiler that the constant definitions of the hardware limits * are used, and paves the way to support dynamic grid sizing in the * future. While the block_{width,height}_log2 remain assigned to the * minimum, this code should be optimized out by the compiler. */ grid.width = std::clamp(grid.width, kAfMinGridWidth, kAfMaxGridWidth); grid.height = std::clamp(grid.height, kAfMinGridHeight, kAfMaxGridHeight); grid.block_width_log2 = std::clamp(grid.block_width_log2, kAfMinGridBlockWidth, kAfMaxGridBlockWidth); grid.block_height_log2 = std::clamp(grid.block_height_log2, kAfMinGridBlockHeight, kAfMaxGridBlockHeight); grid.height_per_slice = kAfDefaultHeightPerSlice; /* Position the AF grid in the center of the BDS output. 
*/ Rectangle bds(configInfo.bdsOutputSize); Size gridSize(grid.width << grid.block_width_log2, grid.height << grid.block_height_log2); /* * \todo - Support request metadata * - Set the ROI based on any input controls in the request * - Return the AF ROI as metadata in the Request */ Rectangle roi = gridSize.centeredTo(bds.center()); Point start = roi.topLeft(); /* x_start and y_start should be even */ grid.x_start = utils::alignDown(start.x, 2); grid.y_start = utils::alignDown(start.y, 2); grid.y_start |= IPU3_UAPI_GRID_Y_START_EN; /* Initial max focus step */ maxStep_ = kMaxFocusSteps; /* Initial frame ignore counter */ afIgnoreFrameReset(); /* Initial focus value */ context.activeState.af.focus = 0; /* Maximum variance of the AF statistics */ context.activeState.af.maxVariance = 0; /* The stable AF value flag. if it is true, the AF should be in a stable state. */ context.activeState.af.stable = false; return 0; } /** * \copydoc libcamera::ipa::Algorithm::prepare */ void Af::prepare(IPAContext &context, [[maybe_unused]] const uint32_t frame, [[maybe_unused]] IPAFrameContext &frameContext, ipu3_uapi_params *params) { const struct ipu3_uapi_grid_config &grid = context.configuration.af.afGrid; params->acc_param.af.grid_cfg = grid; params->acc_param.af.filter_config = afFilterConfigDefault; /* Enable AF processing block */ params->use.acc_af = 1; } /** * \brief AF coarse scan * \param[in] context The shared IPA context * * Find a near focused image using a coarse step. The step is determined by * kCoarseSearchStep. 
*/ void Af::afCoarseScan(IPAContext &context) { if (coarseCompleted_) return; if (afNeedIgnoreFrame()) return; if (afScan(context, kCoarseSearchStep)) { coarseCompleted_ = true; context.activeState.af.maxVariance = 0; focus_ = context.activeState.af.focus - (context.activeState.af.focus * kFineRange); context.activeState.af.focus = focus_; previousVariance_ = 0; maxStep_ = std::clamp(focus_ + static_cast<uint32_t>((focus_ * kFineRange)), 0U, kMaxFocusSteps); } } /** * \brief AF fine scan * \param[in] context The shared IPA context * * Find an optimum lens position with moving 1 step for each search. */ void Af::afFineScan(IPAContext &context) { if (!coarseCompleted_) return; if (afNeedIgnoreFrame()) return; if (afScan(context, kFineSearchStep)) { context.activeState.af.stable = true; fineCompleted_ = true; } } /** * \brief AF reset * \param[in] context The shared IPA context * * Reset all the parameters to start over the AF process. */ void Af::afReset(IPAContext &context) { if (afNeedIgnoreFrame()) return; context.activeState.af.maxVariance = 0; context.activeState.af.focus = 0; focus_ = 0; context.activeState.af.stable = false; ignoreCounter_ = kIgnoreFrame; previousVariance_ = 0.0; coarseCompleted_ = false; fineCompleted_ = false; maxStep_ = kMaxFocusSteps; } /** * \brief AF variance comparison * \param[in] context The IPA context * \param[in] min_step The VCM movement step * * We always pick the largest variance to replace the previous one. The image * with a larger variance also indicates it is a clearer image than previous * one. If we find a negative derivative, we return immediately. * * \return True, if it finds a AF value. */ bool Af::afScan(IPAContext &context, int min_step) { if (focus_ > maxStep_) { /* If reach the max step, move lens to the position. */ context.activeState.af.focus = bestFocus_; return true; } else { /* * Find the maximum of the variance by estimating its * derivative. 
If the direction changes, it means we have * passed a maximum one step before. */ if ((currentVariance_ - context.activeState.af.maxVariance) >= -(context.activeState.af.maxVariance * 0.1)) { /* * Positive and zero derivative: * The variance is still increasing. The focus could be * increased for the next comparison. Also, the max variance