summaryrefslogtreecommitdiff
path: root/src/android
diff options
context:
space:
mode:
Diffstat (limited to 'src/android')
-rw-r--r--src/android/camera3_hal.cpp33
-rw-r--r--src/android/camera_buffer.h84
-rw-r--r--src/android/camera_capabilities.cpp1644
-rw-r--r--src/android/camera_capabilities.h86
-rw-r--r--src/android/camera_device.cpp2079
-rw-r--r--src/android/camera_device.h109
-rw-r--r--src/android/camera_hal_config.cpp208
-rw-r--r--src/android/camera_hal_config.h38
-rw-r--r--src/android/camera_hal_manager.cpp264
-rw-r--r--src/android/camera_hal_manager.h47
-rw-r--r--src/android/camera_metadata.cpp189
-rw-r--r--src/android/camera_metadata.h98
-rw-r--r--src/android/camera_ops.cpp13
-rw-r--r--src/android/camera_ops.h8
-rw-r--r--src/android/camera_request.cpp194
-rw-r--r--src/android/camera_request.h84
-rw-r--r--src/android/camera_stream.cpp341
-rw-r--r--src/android/camera_stream.h187
-rw-r--r--src/android/cros/camera3_hal.cpp26
-rw-r--r--src/android/cros/meson.build13
-rw-r--r--src/android/cros_mojo_token.h12
-rw-r--r--src/android/data/nautilus/camera_hal.yaml10
-rw-r--r--src/android/data/soraka/camera_hal.yaml10
-rw-r--r--src/android/frame_buffer_allocator.h56
-rw-r--r--src/android/hal_framebuffer.cpp22
-rw-r--r--src/android/hal_framebuffer.h26
-rw-r--r--src/android/jpeg/encoder.h26
-rw-r--r--src/android/jpeg/encoder_jea.cpp56
-rw-r--r--src/android/jpeg/encoder_jea.h31
-rw-r--r--src/android/jpeg/encoder_libjpeg.cpp239
-rw-r--r--src/android/jpeg/encoder_libjpeg.h44
-rw-r--r--src/android/jpeg/exif.cpp522
-rw-r--r--src/android/jpeg/exif.h112
-rw-r--r--src/android/jpeg/meson.build14
-rw-r--r--src/android/jpeg/post_processor_jpeg.cpp223
-rw-r--r--src/android/jpeg/post_processor_jpeg.h38
-rw-r--r--src/android/jpeg/thumbnailer.cpp96
-rw-r--r--src/android/jpeg/thumbnailer.h32
-rw-r--r--src/android/meson.build42
-rw-r--r--src/android/mm/cros_camera_buffer.cpp184
-rw-r--r--src/android/mm/cros_frame_buffer_allocator.cpp88
-rw-r--r--src/android/mm/generic_camera_buffer.cpp199
-rw-r--r--src/android/mm/generic_frame_buffer_allocator.cpp150
-rw-r--r--src/android/mm/libhardware_stub.c17
-rw-r--r--src/android/mm/meson.build19
-rw-r--r--src/android/post_processor.h33
-rw-r--r--src/android/yuv/post_processor_yuv.cpp146
-rw-r--r--src/android/yuv/post_processor_yuv.h35
48 files changed, 7432 insertions, 795 deletions
diff --git a/src/android/camera3_hal.cpp b/src/android/camera3_hal.cpp
index d6fc1ecc..a5ad2374 100644
--- a/src/android/camera3_hal.cpp
+++ b/src/android/camera3_hal.cpp
@@ -1,48 +1,52 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
- * camera3_hal.cpp - Android Camera HALv3 module
+ * Android Camera HALv3 module
*/
#include <hardware/camera_common.h>
+#include <libcamera/base/log.h>
+
#include "camera_device.h"
#include "camera_hal_manager.h"
-#include "log.h"
using namespace libcamera;
LOG_DEFINE_CATEGORY(HAL)
-static CameraHalManager cameraManager;
-
/*------------------------------------------------------------------------------
* Android Camera HAL callbacks
*/
-static int hal_get_number_of_cameras(void)
+static int hal_get_number_of_cameras()
{
- return cameraManager.numCameras();
+ return CameraHalManager::instance()->numCameras();
}
static int hal_get_camera_info(int id, struct camera_info *info)
{
- return cameraManager.getCameraInfo(id, info);
+ return CameraHalManager::instance()->getCameraInfo(id, info);
}
static int hal_set_callbacks(const camera_module_callbacks_t *callbacks)
{
+ CameraHalManager::instance()->setCallbacks(callbacks);
+
return 0;
}
-static int hal_open_legacy(const struct hw_module_t *module, const char *id,
- uint32_t halVersion, struct hw_device_t **device)
+static int hal_open_legacy([[maybe_unused]] const struct hw_module_t *module,
+ [[maybe_unused]] const char *id,
+ [[maybe_unused]] uint32_t halVersion,
+ [[maybe_unused]] struct hw_device_t **device)
{
return -ENOSYS;
}
-static int hal_set_torch_mode(const char *camera_id, bool enabled)
+static int hal_set_torch_mode([[maybe_unused]] const char *camera_id,
+ [[maybe_unused]] bool enabled)
{
return -ENOSYS;
}
@@ -56,7 +60,7 @@ static int hal_init()
{
LOG(HAL, Info) << "Initialising Android camera HAL";
- cameraManager.init();
+ CameraHalManager::instance()->init();
return 0;
}
@@ -71,11 +75,12 @@ static int hal_dev_open(const hw_module_t *module, const char *name,
LOG(HAL, Debug) << "Open camera " << name;
int id = atoi(name);
- CameraDevice *camera = cameraManager.open(id, module);
+
+ auto [camera, ret] = CameraHalManager::instance()->open(id, module);
if (!camera) {
LOG(HAL, Error)
<< "Failed to open camera module '" << id << "'";
- return -ENODEV;
+ return ret == -EBUSY ? -EUSERS : ret;
}
*device = &camera->camera3Device()->common;
diff --git a/src/android/camera_buffer.h b/src/android/camera_buffer.h
new file mode 100644
index 00000000..96669962
--- /dev/null
+++ b/src/android/camera_buffer.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Frame buffer handling interface definition
+ */
+
+#pragma once
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/span.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+class CameraBuffer final : public libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ CameraBuffer(buffer_handle_t camera3Buffer,
+ libcamera::PixelFormat pixelFormat,
+ const libcamera::Size &size, int flags);
+ ~CameraBuffer();
+
+ bool isValid() const;
+
+ unsigned int numPlanes() const;
+
+ libcamera::Span<const uint8_t> plane(unsigned int plane) const;
+ libcamera::Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+};
+
+#define PUBLIC_CAMERA_BUFFER_IMPLEMENTATION \
+CameraBuffer::CameraBuffer(buffer_handle_t camera3Buffer, \
+ libcamera::PixelFormat pixelFormat, \
+ const libcamera::Size &size, int flags) \
+ : Extensible(std::make_unique<Private>(this, camera3Buffer, \
+ pixelFormat, size, \
+ flags)) \
+{ \
+} \
+CameraBuffer::~CameraBuffer() \
+{ \
+} \
+bool CameraBuffer::isValid() const \
+{ \
+ return _d()->isValid(); \
+} \
+unsigned int CameraBuffer::numPlanes() const \
+{ \
+ return _d()->numPlanes(); \
+} \
+Span<const uint8_t> CameraBuffer::plane(unsigned int plane) const \
+{ \
+ return const_cast<Private *>(_d())->plane(plane); \
+} \
+Span<uint8_t> CameraBuffer::plane(unsigned int plane) \
+{ \
+ return _d()->plane(plane); \
+} \
+unsigned int CameraBuffer::stride(unsigned int plane) const \
+{ \
+ return _d()->stride(plane); \
+} \
+unsigned int CameraBuffer::offset(unsigned int plane) const \
+{ \
+ return _d()->offset(plane); \
+} \
+unsigned int CameraBuffer::size(unsigned int plane) const \
+{ \
+ return _d()->size(plane); \
+} \
+size_t CameraBuffer::jpegBufferSize(size_t maxJpegBufferSize) const \
+{ \
+ return _d()->jpegBufferSize(maxJpegBufferSize); \
+}
diff --git a/src/android/camera_capabilities.cpp b/src/android/camera_capabilities.cpp
new file mode 100644
index 00000000..b161bc6b
--- /dev/null
+++ b/src/android/camera_capabilities.cpp
@@ -0,0 +1,1644 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera static properties manager
+ */
+
+#include "camera_capabilities.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <map>
+#include <stdint.h>
+#include <type_traits>
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/formats.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+
+/*
+ * \var camera3Resolutions
+ * \brief The list of image resolutions commonly supported by Android
+ *
+ * The following are defined as mandatory to be supported by the Android
+ * Camera3 specification: (320x240), (640x480), (1280x720), (1920x1080).
+ *
+ * The following 4:3 resolutions are defined as optional, but commonly
+ * supported by Android devices: (1280x960), (1600x1200).
+ */
+const std::vector<Size> camera3Resolutions = {
+ { 320, 240 },
+ { 640, 480 },
+ { 1280, 720 },
+ { 1280, 960 },
+ { 1600, 1200 },
+ { 1920, 1080 }
+};
+
+/*
+ * \struct Camera3Format
+ * \brief Data associated with an Android format identifier
+ * \var libcameraFormats List of libcamera pixel formats compatible with the
+ * Android format
+ * \var name The human-readable representation of the Android format code
+ */
+struct Camera3Format {
+ std::vector<PixelFormat> libcameraFormats;
+ bool mandatory;
+ const char *name;
+};
+
+/*
+ * \var camera3FormatsMap
+ * \brief Associate Android format code with ancillary data
+ */
+const std::map<int, const Camera3Format> camera3FormatsMap = {
+ {
+ HAL_PIXEL_FORMAT_BLOB, {
+ { formats::MJPEG },
+ true,
+ "BLOB"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_YCbCr_420_888, {
+ { formats::NV12, formats::NV21 },
+ true,
+ "YCbCr_420_888"
+ }
+ }, {
+ /*
+ * \todo Translate IMPLEMENTATION_DEFINED inspecting the gralloc
+ * usage flag. For now, copy the YCbCr_420 configuration.
+ */
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, {
+ { formats::NV12, formats::NV21 },
+ true,
+ "IMPLEMENTATION_DEFINED"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW10, {
+ {
+ formats::SBGGR10_CSI2P,
+ formats::SGBRG10_CSI2P,
+ formats::SGRBG10_CSI2P,
+ formats::SRGGB10_CSI2P
+ },
+ false,
+ "RAW10"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW12, {
+ {
+ formats::SBGGR12_CSI2P,
+ formats::SGBRG12_CSI2P,
+ formats::SGRBG12_CSI2P,
+ formats::SRGGB12_CSI2P
+ },
+ false,
+ "RAW12"
+ }
+ }, {
+ HAL_PIXEL_FORMAT_RAW16, {
+ {
+ formats::SBGGR16,
+ formats::SGBRG16,
+ formats::SGRBG16,
+ formats::SRGGB16
+ },
+ false,
+ "RAW16"
+ }
+ },
+};
+
+const std::map<camera_metadata_enum_android_info_supported_hardware_level, std::string>
+hwLevelStrings = {
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED, "LIMITED" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL, "FULL" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY, "LEGACY" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3, "LEVEL_3" },
+ { ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL, "EXTERNAL" },
+};
+
+enum class ControlRange {
+ Min,
+ Def,
+ Max,
+};
+
+/**
+ * \brief Set Android metadata from libcamera ControlInfo or a default value
+ * \tparam T Type of the control in libcamera
+ * \tparam U Type of the metadata in Android
+ * \param[in] metadata Android metadata pack to add the control value to
+ * \param[in] tag Android metadata tag
+ * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info
+ * \param[in] control libcamera ControlId to find from \a controlsInfo
+ * \param[in] controlRange Whether to use the min, def, or max value from the control info
+ * \param[in] defaultValue The value to set in \a metadata if \a control is not found
+ *
+ * Set the Android metadata entry in \a metadata with tag \a tag based on the
+ * control info found for the libcamera control \a control in the libcamera
+ * ControlInfoMap \a controlsInfo. If no libcamera ControlInfo is found, then
+ * the Android metadata entry is set to \a defaultValue.
+ *
+ * This function is for scalar values.
+ */
+template<typename T, typename U>
+U setMetadata(CameraMetadata *metadata, uint32_t tag,
+ const ControlInfoMap &controlsInfo, const Control<T> &control,
+ enum ControlRange controlRange, const U defaultValue)
+{
+ U value = defaultValue;
+
+ const auto &info = controlsInfo.find(&control);
+ if (info != controlsInfo.end()) {
+ switch (controlRange) {
+ case ControlRange::Min:
+ value = static_cast<U>(info->second.min().template get<T>());
+ break;
+ case ControlRange::Def:
+ value = static_cast<U>(info->second.def().template get<T>());
+ break;
+ case ControlRange::Max:
+ value = static_cast<U>(info->second.max().template get<T>());
+ break;
+ }
+ }
+
+ metadata->addEntry(tag, value);
+ return value;
+}
+
+/**
+ * \brief Set Android metadata from libcamera ControlInfo or a default value
+ * \tparam T Type of the control in libcamera
+ * \tparam U Type of the metadata in Android
+ * \param[in] metadata Android metadata pack to add the control value to
+ * \param[in] tag Android metadata tag
+ * \param[in] controlsInfo libcamera ControlInfoMap from which to find the control info
+ * \param[in] control libcamera ControlId to find from \a controlsInfo
+ * \param[in] defaultVector The value to set in \a metadata if \a control is not found
+ *
+ * Set the Android metadata entry in \a metadata with tag \a tag based on the
+ * control info found for the libcamera control \a control in the libcamera
+ * ControlInfoMap \a controlsInfo. If no libcamera ControlInfo is found, then
+ * the Android metadata entry is set to \a defaultVector.
+ *
+ * This function is for vector values.
+ */
+template<typename T, typename U>
+std::vector<U> setMetadata(CameraMetadata *metadata, uint32_t tag,
+ const ControlInfoMap &controlsInfo,
+ const Control<T> &control,
+ const std::vector<U> &defaultVector)
+{
+ const auto &info = controlsInfo.find(&control);
+ if (info == controlsInfo.end()) {
+ metadata->addEntry(tag, defaultVector);
+ return defaultVector;
+ }
+
+ std::vector<U> values(info->second.values().size());
+ for (const auto &value : info->second.values())
+ values.push_back(static_cast<U>(value.template get<T>()));
+ metadata->addEntry(tag, values);
+
+ return values;
+}
+
+} /* namespace */
+
+bool CameraCapabilities::validateManualSensorCapability()
+{
+ const char *noMode = "Manual sensor capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ ANDROID_CONTROL_AE_MODE_OFF)) {
+ LOG(HAL, Info) << noMode << "missing AE mode off";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AE lock";
+ return false;
+ }
+
+ /*
+ * \todo Return true here after we satisfy all the requirements:
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR
+ * Manual frame duration control
+ * android.sensor.frameDuration
+ * android.sensor.info.maxFrameDuration
+ * Manual exposure control
+ * android.sensor.exposureTime
+ * android.sensor.info.exposureTimeRange
+ * Manual sensitivity control
+ * android.sensor.sensitivity
+ * android.sensor.info.sensitivityRange
+ * Manual lens control (if the lens is adjustable)
+ * android.lens.*
+ * Manual flash control (if a flash unit is present)
+ * android.flash.*
+ * Manual black level locking
+ * android.blackLevel.lock
+ * Auto exposure lock
+ * android.control.aeLock
+ */
+ return false;
+}
+
+bool CameraCapabilities::validateManualPostProcessingCapability()
+{
+ const char *noMode = "Manual post processing capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ ANDROID_CONTROL_AWB_MODE_OFF)) {
+ LOG(HAL, Info) << noMode << "missing AWB mode off";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AWB lock";
+ return false;
+ }
+
+ /*
+ * \todo return true here after we satisfy all the requirements:
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING
+ * Manual tonemap control
+ * android.tonemap.curve
+ * android.tonemap.mode
+ * android.tonemap.maxCurvePoints
+ * android.tonemap.gamma
+ * android.tonemap.presetCurve
+ * Manual white balance control
+ * android.colorCorrection.transform
+ * android.colorCorrection.gains
+ * Manual lens shading map control
+ * android.shading.mode
+ * android.statistics.lensShadingMapMode
+ * android.statistics.lensShadingMap
+ * android.lens.info.shadingMapSize
+ * Manual aberration correction control (if aberration correction is supported)
+ * android.colorCorrection.aberrationMode
+ * android.colorCorrection.availableAberrationModes
+ * Auto white balance lock
+ * android.control.awbLock
+ */
+ return false;
+}
+
+bool CameraCapabilities::validateBurstCaptureCapability()
+{
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
+ const char *noMode = "Burst capture capability unavailable: ";
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AE lock";
+ return false;
+ }
+
+ if (!staticMetadata_->entryContains<uint8_t>(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE)) {
+ LOG(HAL, Info) << noMode << "missing AWB lock";
+ return false;
+ }
+
+ found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry);
+ if (!found || *entry.data.i32 < 0 || 4 < *entry.data.i32) {
+ LOG(HAL, Info)
+ << noMode << "max sync latency is "
+ << (found ? std::to_string(*entry.data.i32) : "not present");
+ return false;
+ }
+
+ /*
+ * \todo return true here after we satisfy all the requirements
+ * https://developer.android.com/reference/android/hardware/camera2/CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE
+ */
+ return false;
+}
+
+std::set<camera_metadata_enum_android_request_available_capabilities>
+CameraCapabilities::computeCapabilities()
+{
+ std::set<camera_metadata_enum_android_request_available_capabilities>
+ capabilities;
+
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+
+ if (validateManualSensorCapability()) {
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR);
+ /* The requirements for READ_SENSOR_SETTINGS are a subset of MANUAL_SENSOR */
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
+ }
+
+ if (validateManualPostProcessingCapability())
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING);
+
+ if (validateBurstCaptureCapability())
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
+
+ if (rawStreamAvailable_)
+ capabilities.insert(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW);
+
+ return capabilities;
+}
+
+void CameraCapabilities::computeHwLevel(
+ const std::set<camera_metadata_enum_android_request_available_capabilities> &caps)
+{
+ const char *noFull = "Hardware level FULL unavailable: ";
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
+ camera_metadata_enum_android_info_supported_hardware_level
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL;
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) {
+ LOG(HAL, Info) << noFull << "missing manual sensor";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING)) {
+ LOG(HAL, Info) << noFull << "missing manual post processing";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ if (!caps.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE)) {
+ LOG(HAL, Info) << noFull << "missing burst capture";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ found = staticMetadata_->getEntry(ANDROID_SYNC_MAX_LATENCY, &entry);
+ if (!found || *entry.data.i32 != 0) {
+ LOG(HAL, Info) << noFull << "missing or invalid max sync latency";
+ hwLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+ }
+
+ hwLevel_ = hwLevel;
+}
+
+int CameraCapabilities::initialize(std::shared_ptr<Camera> camera,
+ int orientation, int facing)
+{
+ camera_ = camera;
+ orientation_ = orientation;
+ facing_ = facing;
+ rawStreamAvailable_ = false;
+ maxFrameDuration_ = 0;
+
+ /* Acquire the camera and initialize available stream configurations. */
+ int ret = camera_->acquire();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to temporarily acquire the camera";
+ return ret;
+ }
+
+ ret = initializeStreamConfigurations();
+ if (ret) {
+ camera_->release();
+ return ret;
+ }
+
+ ret = initializeStaticMetadata();
+ camera_->release();
+ return ret;
+}
+
+std::vector<Size>
+CameraCapabilities::initializeYUVResolutions(const PixelFormat &pixelFormat,
+ const std::vector<Size> &resolutions)
+{
+ std::vector<Size> supportedResolutions;
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Viewfinder });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get supported YUV resolutions";
+ return supportedResolutions;
+ }
+
+ StreamConfiguration &cfg = cameraConfig->at(0);
+
+ for (const Size &res : resolutions) {
+ cfg.pixelFormat = pixelFormat;
+ cfg.size = res;
+
+ CameraConfiguration::Status status = cameraConfig->validate();
+ if (status != CameraConfiguration::Valid) {
+ LOG(HAL, Debug) << cfg.toString() << " not supported";
+ continue;
+ }
+
+ LOG(HAL, Debug) << cfg.toString() << " supported";
+
+ supportedResolutions.push_back(res);
+ }
+
+ return supportedResolutions;
+}
+
+std::vector<Size>
+CameraCapabilities::initializeRawResolutions(const PixelFormat &pixelFormat)
+{
+ std::vector<Size> supportedResolutions;
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Raw });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get supported Raw resolutions";
+ return supportedResolutions;
+ }
+
+ StreamConfiguration &cfg = cameraConfig->at(0);
+ const StreamFormats &formats = cfg.formats();
+ supportedResolutions = formats.sizes(pixelFormat);
+
+ return supportedResolutions;
+}
+
+/*
+ * Initialize the format conversion map to translate from Android format
+ * identifier to libcamera pixel formats and fill in the list of supported
+ * stream configurations to be reported to the Android camera framework through
+ * the camera static metadata.
+ */
+int CameraCapabilities::initializeStreamConfigurations()
+{
+ /*
+ * Get the maximum output resolutions
+ * \todo Get this from the camera properties once defined
+ */
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::StillCapture });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to get maximum resolution";
+ return -EINVAL;
+ }
+ StreamConfiguration &cfg = cameraConfig->at(0);
+
+ /*
+ * \todo JPEG - Adjust the maximum available resolution by taking the
+ * JPEG encoder requirements into account (alignment and aspect ratio).
+ */
+ const Size maxRes = cfg.size;
+ LOG(HAL, Debug) << "Maximum supported resolution: " << maxRes;
+
+ /*
+ * Build the list of supported image resolutions.
+ *
+ * The resolutions listed in camera3Resolution are supported, up to the
+ * camera maximum resolution.
+ *
+ * Augment the list by adding resolutions calculated from the camera
+ * maximum one.
+ */
+ std::vector<Size> cameraResolutions;
+ std::copy_if(camera3Resolutions.begin(), camera3Resolutions.end(),
+ std::back_inserter(cameraResolutions),
+ [&](const Size &res) { return res < maxRes; });
+
+ /*
+ * The Camera3 specification suggests adding 1/2 and 1/4 of the maximum
+ * resolution.
+ */
+ for (unsigned int divider = 2;; divider <<= 1) {
+ Size derivedSize{
+ maxRes.width / divider,
+ maxRes.height / divider,
+ };
+
+ if (derivedSize.width < 320 ||
+ derivedSize.height < 240)
+ break;
+
+ cameraResolutions.push_back(derivedSize);
+ }
+ cameraResolutions.push_back(maxRes);
+
+ /* Remove duplicated entries from the list of supported resolutions. */
+ std::sort(cameraResolutions.begin(), cameraResolutions.end());
+ auto last = std::unique(cameraResolutions.begin(), cameraResolutions.end());
+ cameraResolutions.erase(last, cameraResolutions.end());
+
+ /*
+ * Build the list of supported camera formats.
+ *
+ * To each Android format a list of compatible libcamera formats is
+ * associated. The first libcamera format that tests successful is added
+ * to the format translation map used when configuring the streams.
+ * It is then tested against the list of supported camera resolutions to
+ * build the stream configuration map reported through the camera static
+ * metadata.
+ */
+ Size maxJpegSize;
+ for (const auto &format : camera3FormatsMap) {
+ int androidFormat = format.first;
+ const Camera3Format &camera3Format = format.second;
+ const std::vector<PixelFormat> &libcameraFormats =
+ camera3Format.libcameraFormats;
+
+ LOG(HAL, Debug) << "Trying to map Android format "
+ << camera3Format.name;
+
+ /*
+ * JPEG is always supported, either produced directly by the
+ * camera, or encoded in the HAL.
+ */
+ if (androidFormat == HAL_PIXEL_FORMAT_BLOB) {
+ formatsMap_[androidFormat] = formats::MJPEG;
+ LOG(HAL, Debug) << "Mapped Android format "
+ << camera3Format.name << " to "
+ << formats::MJPEG
+ << " (fixed mapping)";
+ continue;
+ }
+
+ /*
+ * Test the libcamera formats that can produce images
+ * compatible with the format defined by Android.
+ */
+ PixelFormat mappedFormat;
+ for (const PixelFormat &pixelFormat : libcameraFormats) {
+
+ LOG(HAL, Debug) << "Testing " << pixelFormat;
+
+ /*
+ * The stream configuration size can be adjusted,
+ * not the pixel format.
+ *
+ * \todo This could be simplified once all pipeline
+ * handlers will report the StreamFormats list of
+ * supported formats.
+ */
+ cfg.pixelFormat = pixelFormat;
+
+ CameraConfiguration::Status status = cameraConfig->validate();
+ if (status != CameraConfiguration::Invalid &&
+ cfg.pixelFormat == pixelFormat) {
+ mappedFormat = pixelFormat;
+ break;
+ }
+ }
+
+ if (!mappedFormat.isValid()) {
+ /* If the format is not mandatory, skip it. */
+ if (!camera3Format.mandatory)
+ continue;
+
+ LOG(HAL, Error)
+ << "Failed to map mandatory Android format "
+ << camera3Format.name << " ("
+ << utils::hex(androidFormat) << "): aborting";
+ return -EINVAL;
+ }
+
+ /*
+ * Record the mapping and then proceed to generate the
+ * stream configurations map, by testing the image resolutions.
+ */
+ formatsMap_[androidFormat] = mappedFormat;
+ LOG(HAL, Debug) << "Mapped Android format "
+ << camera3Format.name << " to "
+ << mappedFormat;
+
+ std::vector<Size> resolutions;
+ const PixelFormatInfo &info = PixelFormatInfo::info(mappedFormat);
+ switch (info.colourEncoding) {
+ case PixelFormatInfo::ColourEncodingRAW:
+ if (info.bitsPerPixel != 16)
+ continue;
+
+ rawStreamAvailable_ = true;
+ resolutions = initializeRawResolutions(mappedFormat);
+ break;
+
+ case PixelFormatInfo::ColourEncodingYUV:
+ case PixelFormatInfo::ColourEncodingRGB:
+ /*
+ * We support enumerating RGB streams here to allow
+ * mapping IMPLEMENTATION_DEFINED format to RGB.
+ */
+ resolutions = initializeYUVResolutions(mappedFormat,
+ cameraResolutions);
+ break;
+ }
+
+ for (const Size &res : resolutions) {
+ /*
+ * Configure the Camera with the collected format and
+ * resolution to get an updated list of controls.
+ *
+ * \todo Avoid the need to configure the camera when
+ * redesigning the configuration API.
+ */
+ cfg.size = res;
+ int ret = camera_->configure(cameraConfig.get());
+ if (ret)
+ return ret;
+
+ const ControlInfoMap &controls = camera_->controls();
+ const auto frameDurations = controls.find(
+ &controls::FrameDurationLimits);
+ if (frameDurations == controls.end()) {
+ LOG(HAL, Error)
+ << "Camera does not report frame durations";
+ return -EINVAL;
+ }
+
+ int64_t minFrameDuration = frameDurations->second.min().get<int64_t>() * 1000;
+ int64_t maxFrameDuration = frameDurations->second.max().get<int64_t>() * 1000;
+
+ /*
+ * Cap min frame duration to 30 FPS with 1% tolerance.
+ *
+ * 30 frames per second has been validated as the most
+ * opportune frame rate for quality tuning, and power
+ * vs performances budget on Intel IPU3-based
+ * Chromebooks.
+ *
+ * \todo This is a platform-specific decision that needs
+ * to be abstracted and delegated to the configuration
+ * file.
+ *
+ * \todo libcamera only allows to control frame duration
+ * through the per-request controls::FrameDuration
+ * control. If we cap the durations here, we should be
+ * capable of configuring the camera to operate at such
+ * duration without requiring to have the FrameDuration
+ * control to be specified for each Request. Defer this
+ * to the in-development configuration API rework.
+ */
+ int64_t minFrameDurationCap = 1e9 / 30.0;
+ if (minFrameDuration < minFrameDurationCap) {
+ float tolerance =
+ (minFrameDurationCap - minFrameDuration) * 100.0 / minFrameDurationCap;
+
+ /*
+ * If the tolerance is less than 1%, do not cap
+ * the frame duration.
+ */
+ if (tolerance > 1.0)
+ minFrameDuration = minFrameDurationCap;
+ }
+
+ /*
+ * Calculate FPS as CTS does and adjust the minimum
+ * frame duration accordingly: see
+ * Camera2SurfaceViewTestCase.java:getSuitableFpsRangeForDuration()
+ */
+ minFrameDuration =
+ 1e9 / static_cast<unsigned int>(floor(1e9 / minFrameDuration + 0.05f));
+
+ streamConfigurations_.push_back({
+ res, androidFormat, minFrameDuration, maxFrameDuration,
+ });
+
+ /*
+ * If the format is HAL_PIXEL_FORMAT_YCbCr_420_888
+ * from which JPEG is produced, add an entry for
+ * the JPEG stream.
+ *
+ * \todo Wire the JPEG encoder to query the supported
+ * sizes provided a list of formats it can encode.
+ *
+ * \todo Support JPEG streams produced by the camera
+ * natively.
+ *
+ * \todo HAL_PIXEL_FORMAT_BLOB is a 'stalling' format,
+ * its duration should take into account the time
+ * required for the YUV to JPEG encoding. For now
+ * use the same frame durations as collected for
+ * the YUV/RGB streams.
+ */
+ if (androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ streamConfigurations_.push_back({
+ res, HAL_PIXEL_FORMAT_BLOB,
+ minFrameDuration, maxFrameDuration,
+ });
+ maxJpegSize = std::max(maxJpegSize, res);
+ }
+
+ maxFrameDuration_ = std::max(maxFrameDuration_,
+ maxFrameDuration);
+ }
+
+ /*
+		 * \todo Calculate the maximum JPEG buffer size by asking the
+		 * encoder for the maximum frame size it may produce.
+ */
+ maxJpegBufferSize_ = maxJpegSize.width * maxJpegSize.height * 1.5;
+ }
+
+ LOG(HAL, Debug) << "Collected stream configuration map: ";
+ for (const auto &entry : streamConfigurations_)
+ LOG(HAL, Debug) << "{ " << entry.resolution << " - "
+ << utils::hex(entry.androidFormat) << " }";
+
+ return 0;
+}
+
+int CameraCapabilities::initializeStaticMetadata()
+{
+ staticMetadata_ = std::make_unique<CameraMetadata>(64, 1024);
+ if (!staticMetadata_->isValid()) {
+ LOG(HAL, Error) << "Failed to allocate static metadata";
+ staticMetadata_.reset();
+ return -EINVAL;
+ }
+
+ /*
+ * Generate and apply a new configuration for the Viewfinder role to
+ * collect control limits and properties from a known state.
+ */
+ std::unique_ptr<CameraConfiguration> cameraConfig =
+ camera_->generateConfiguration({ StreamRole::Viewfinder });
+ if (!cameraConfig) {
+ LOG(HAL, Error) << "Failed to generate camera configuration";
+ staticMetadata_.reset();
+ return -ENODEV;
+ }
+
+ int ret = camera_->configure(cameraConfig.get());
+ if (ret) {
+ LOG(HAL, Error) << "Failed to initialize the camera state";
+ staticMetadata_.reset();
+ return ret;
+ }
+
+ const ControlInfoMap &controlsInfo = camera_->controls();
+ const ControlList &properties = camera_->properties();
+
+ availableCharacteristicsKeys_ = {
+ ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ ANDROID_CONTROL_AVAILABLE_MODES,
+ ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ ANDROID_CONTROL_MAX_REGIONS,
+ ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+ ANDROID_FLASH_INFO_AVAILABLE,
+ ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+ ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ ANDROID_JPEG_MAX_SIZE,
+ ANDROID_LENS_FACING,
+ ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+ ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+ ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+ ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+ ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+ ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+ ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
+ ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ ANDROID_SCALER_CROPPING_TYPE,
+ ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+ ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+ ANDROID_SENSOR_ORIENTATION,
+ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ ANDROID_SYNC_MAX_LATENCY,
+ };
+
+ availableRequestKeys_ = {
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ ANDROID_CONTROL_AE_LOCK,
+ ANDROID_CONTROL_AE_MODE,
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ ANDROID_CONTROL_AF_MODE,
+ ANDROID_CONTROL_AF_TRIGGER,
+ ANDROID_CONTROL_AWB_LOCK,
+ ANDROID_CONTROL_AWB_MODE,
+ ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_EFFECT_MODE,
+ ANDROID_CONTROL_MODE,
+ ANDROID_CONTROL_SCENE_MODE,
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ ANDROID_FLASH_MODE,
+ ANDROID_JPEG_ORIENTATION,
+ ANDROID_JPEG_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_SIZE,
+ ANDROID_LENS_APERTURE,
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_SCALER_CROP_REGION,
+ ANDROID_STATISTICS_FACE_DETECT_MODE
+ };
+
+ availableResultKeys_ = {
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ ANDROID_CONTROL_AE_LOCK,
+ ANDROID_CONTROL_AE_MODE,
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ ANDROID_CONTROL_AE_STATE,
+ ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ ANDROID_CONTROL_AF_MODE,
+ ANDROID_CONTROL_AF_STATE,
+ ANDROID_CONTROL_AF_TRIGGER,
+ ANDROID_CONTROL_AWB_LOCK,
+ ANDROID_CONTROL_AWB_MODE,
+ ANDROID_CONTROL_AWB_STATE,
+ ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_EFFECT_MODE,
+ ANDROID_CONTROL_MODE,
+ ANDROID_CONTROL_SCENE_MODE,
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ ANDROID_FLASH_MODE,
+ ANDROID_FLASH_STATE,
+ ANDROID_JPEG_GPS_COORDINATES,
+ ANDROID_JPEG_GPS_PROCESSING_METHOD,
+ ANDROID_JPEG_GPS_TIMESTAMP,
+ ANDROID_JPEG_ORIENTATION,
+ ANDROID_JPEG_QUALITY,
+ ANDROID_JPEG_SIZE,
+ ANDROID_JPEG_THUMBNAIL_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_SIZE,
+ ANDROID_LENS_APERTURE,
+ ANDROID_LENS_FOCAL_LENGTH,
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ ANDROID_LENS_STATE,
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_REQUEST_PIPELINE_DEPTH,
+ ANDROID_SCALER_CROP_REGION,
+ ANDROID_SENSOR_EXPOSURE_TIME,
+ ANDROID_SENSOR_FRAME_DURATION,
+ ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
+ ANDROID_SENSOR_TEST_PATTERN_MODE,
+ ANDROID_SENSOR_TIMESTAMP,
+ ANDROID_STATISTICS_FACE_DETECT_MODE,
+ ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE,
+ ANDROID_STATISTICS_SCENE_FLICKER,
+ };
+
+ /* Color correction static metadata. */
+ {
+ std::vector<uint8_t> data;
+ data.reserve(3);
+ const auto &infoMap = controlsInfo.find(&controls::draft::ColorCorrectionAberrationMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+ data);
+ }
+
+ /* Control static metadata. */
+ std::vector<uint8_t> aeAvailableAntiBandingModes = {
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+ aeAvailableAntiBandingModes);
+
+ std::vector<uint8_t> aeAvailableModes = {
+ ANDROID_CONTROL_AE_MODE_ON,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+ aeAvailableModes);
+
+ std::vector<int32_t> aeCompensationRange = {
+ 0, 0,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+ aeCompensationRange);
+
+ const camera_metadata_rational_t aeCompensationStep[] = {
+ { 0, 1 }
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+ aeCompensationStep);
+
+ std::vector<uint8_t> availableAfModes = {
+ ANDROID_CONTROL_AF_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+ availableAfModes);
+
+ std::vector<uint8_t> availableEffects = {
+ ANDROID_CONTROL_EFFECT_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+ availableEffects);
+
+ std::vector<uint8_t> availableSceneModes = {
+ ANDROID_CONTROL_SCENE_MODE_DISABLED,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+ availableSceneModes);
+
+ std::vector<uint8_t> availableStabilizationModes = {
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+ availableStabilizationModes);
+
+ /*
+ * \todo Inspect the camera capabilities to report the available
+ * AWB modes. Default to AUTO as CTS tests require it.
+ */
+ std::vector<uint8_t> availableAwbModes = {
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+ availableAwbModes);
+
+ std::vector<int32_t> availableMaxRegions = {
+ 0, 0, 0,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS,
+ availableMaxRegions);
+
+ std::vector<uint8_t> sceneModesOverride = {
+ ANDROID_CONTROL_AE_MODE_ON,
+ ANDROID_CONTROL_AWB_MODE_AUTO,
+ ANDROID_CONTROL_AF_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+ sceneModesOverride);
+
+ uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+ aeLockAvailable);
+
+ uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+ awbLockAvailable);
+
+ char availableControlModes = ANDROID_CONTROL_MODE_AUTO;
+ staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES,
+ availableControlModes);
+
+ /* JPEG static metadata. */
+
+ /*
+ * Create the list of supported thumbnail sizes by inspecting the
+ * available JPEG resolutions collected in streamConfigurations_ and
+ * generate one entry for each aspect ratio.
+ *
+ * The JPEG thumbnailer can freely scale, so pick an arbitrary
+ * (160, 160) size as the bounding rectangle, which is then cropped to
+ * the different supported aspect ratios.
+ */
+ constexpr Size maxJpegThumbnail(160, 160);
+ std::vector<Size> thumbnailSizes;
+ thumbnailSizes.push_back({ 0, 0 });
+ for (const auto &entry : streamConfigurations_) {
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB)
+ continue;
+
+ Size thumbnailSize = maxJpegThumbnail
+ .boundedToAspectRatio({ entry.resolution.width,
+ entry.resolution.height });
+ thumbnailSizes.push_back(thumbnailSize);
+ }
+
+ std::sort(thumbnailSizes.begin(), thumbnailSizes.end());
+ auto last = std::unique(thumbnailSizes.begin(), thumbnailSizes.end());
+ thumbnailSizes.erase(last, thumbnailSizes.end());
+
+	/* Transform the sizes into a list of integers that can be consumed. */
+ std::vector<int32_t> thumbnailEntries;
+ thumbnailEntries.reserve(thumbnailSizes.size() * 2);
+ for (const auto &size : thumbnailSizes) {
+ thumbnailEntries.push_back(size.width);
+ thumbnailEntries.push_back(size.height);
+ }
+ staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ thumbnailEntries);
+
+ staticMetadata_->addEntry(ANDROID_JPEG_MAX_SIZE, maxJpegBufferSize_);
+
+ /* Sensor static metadata. */
+ std::array<int32_t, 2> pixelArraySize;
+ {
+ const Size &size = properties.get(properties::PixelArraySize).value_or(Size{});
+ pixelArraySize[0] = size.width;
+ pixelArraySize[1] = size.height;
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+ pixelArraySize);
+ }
+
+ const auto &cellSize = properties.get<Size>(properties::UnitCellSize);
+ if (cellSize) {
+ std::array<float, 2> physicalSize{
+ cellSize->width * pixelArraySize[0] / 1e6f,
+ cellSize->height * pixelArraySize[1] / 1e6f
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+ physicalSize);
+ }
+
+ {
+ const Span<const Rectangle> rects =
+ properties.get(properties::PixelArrayActiveAreas).value_or(Span<const Rectangle>{});
+ std::vector<int32_t> data{
+ static_cast<int32_t>(rects[0].x),
+ static_cast<int32_t>(rects[0].y),
+ static_cast<int32_t>(rects[0].width),
+ static_cast<int32_t>(rects[0].height),
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+ data);
+ }
+
+ int32_t sensitivityRange[] = {
+ 32, 2400,
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+ sensitivityRange);
+
+ /* Report the color filter arrangement if the camera reports it. */
+ const auto &filterArr = properties.get(properties::draft::ColorFilterArrangement);
+ if (filterArr)
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+ *filterArr);
+
+ const auto &exposureInfo = controlsInfo.find(&controls::ExposureTime);
+ if (exposureInfo != controlsInfo.end()) {
+ int64_t exposureTimeRange[2] = {
+ exposureInfo->second.min().get<int32_t>() * 1000LL,
+ exposureInfo->second.max().get<int32_t>() * 1000LL,
+ };
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+ exposureTimeRange, 2);
+ }
+
+ staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, orientation_);
+
+ std::vector<int32_t> testPatternModes = {
+ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF
+ };
+ const auto &testPatternsInfo =
+ controlsInfo.find(&controls::draft::TestPatternMode);
+ if (testPatternsInfo != controlsInfo.end()) {
+ const auto &values = testPatternsInfo->second.values();
+ ASSERT(!values.empty());
+ for (const auto &value : values) {
+ switch (value.get<int32_t>()) {
+ case controls::draft::TestPatternModeOff:
+ /*
+ * ANDROID_SENSOR_TEST_PATTERN_MODE_OFF is
+ * already in testPatternModes.
+ */
+ break;
+
+ case controls::draft::TestPatternModeSolidColor:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR);
+ break;
+
+ case controls::draft::TestPatternModeColorBars:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS);
+ break;
+
+ case controls::draft::TestPatternModeColorBarsFadeToGray:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY);
+ break;
+
+ case controls::draft::TestPatternModePn9:
+ testPatternModes.push_back(
+ ANDROID_SENSOR_TEST_PATTERN_MODE_PN9);
+ break;
+
+ case controls::draft::TestPatternModeCustom1:
+ /* We don't support this yet. */
+ break;
+
+ default:
+ LOG(HAL, Error) << "Unknown test pattern mode: "
+ << value.get<int32_t>();
+ continue;
+ }
+ }
+ }
+ staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+ testPatternModes);
+
+ uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+ timestampSource);
+
+ staticMetadata_->addEntry(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+ maxFrameDuration_);
+
+ /* Statistics static metadata. */
+ int32_t maxFaceCount = 0;
+ auto iter = camera_->controls().find(controls::draft::FaceDetectMode.id());
+ if (iter != camera_->controls().end()) {
+ const ControlInfo &faceDetectCtrlInfo = iter->second;
+ std::vector<uint8_t> faceDetectModes;
+ bool hasFaceDetection = false;
+
+ for (const auto &value : faceDetectCtrlInfo.values()) {
+ int32_t mode = value.get<int32_t>();
+ uint8_t androidMode = 0;
+
+ switch (mode) {
+ case controls::draft::FaceDetectModeOff:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ break;
+ case controls::draft::FaceDetectModeSimple:
+ androidMode = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
+ hasFaceDetection = true;
+ break;
+ default:
+ LOG(HAL, Fatal) << "Received invalid face detect mode: " << mode;
+ }
+ faceDetectModes.push_back(androidMode);
+ }
+ if (hasFaceDetection) {
+ /*
+ * \todo Create new libcamera controls to query max
+ * possible faces detected.
+ */
+ maxFaceCount = 10;
+ staticMetadata_->addEntry(
+ ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectModes.data(), faceDetectModes.size());
+ }
+ } else {
+ uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+ faceDetectMode);
+ }
+
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+ maxFaceCount);
+
+ {
+ std::vector<uint8_t> data;
+ data.reserve(2);
+ const auto &infoMap = controlsInfo.find(&controls::draft::LensShadingMapMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+ data);
+ }
+
+ /* Sync static metadata. */
+ setMetadata(staticMetadata_.get(), ANDROID_SYNC_MAX_LATENCY,
+ controlsInfo, controls::draft::MaxLatency,
+ ControlRange::Def,
+ ANDROID_SYNC_MAX_LATENCY_UNKNOWN);
+
+ /* Flash static metadata. */
+ char flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
+ staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE,
+ flashAvailable);
+
+ /* Lens static metadata. */
+ std::vector<float> lensApertures = {
+ 2.53 / 100,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+ lensApertures);
+
+ uint8_t lensFacing;
+ switch (facing_) {
+ default:
+ case CAMERA_FACING_FRONT:
+ lensFacing = ANDROID_LENS_FACING_FRONT;
+ break;
+ case CAMERA_FACING_BACK:
+ lensFacing = ANDROID_LENS_FACING_BACK;
+ break;
+ case CAMERA_FACING_EXTERNAL:
+ lensFacing = ANDROID_LENS_FACING_EXTERNAL;
+ break;
+ }
+ staticMetadata_->addEntry(ANDROID_LENS_FACING, lensFacing);
+
+ std::vector<float> lensFocalLengths = {
+ 1,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+ lensFocalLengths);
+
+ std::vector<uint8_t> opticalStabilizations = {
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF,
+ };
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+ opticalStabilizations);
+
+ float hypeFocalDistance = 0;
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+ hypeFocalDistance);
+
+ float minFocusDistance = 0;
+ staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+ minFocusDistance);
+
+ /* Noise reduction modes. */
+ {
+ std::vector<uint8_t> data;
+ data.reserve(5);
+ const auto &infoMap = controlsInfo.find(&controls::draft::NoiseReductionMode);
+ if (infoMap != controlsInfo.end()) {
+ for (const auto &value : infoMap->second.values())
+ data.push_back(value.get<int32_t>());
+ } else {
+ data.push_back(ANDROID_NOISE_REDUCTION_MODE_OFF);
+ }
+ staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+ data);
+ }
+
+ /* Scaler static metadata. */
+
+ /*
+ * \todo The digital zoom factor is a property that depends on the
+ * desired output configuration and the sensor frame size input to the
+ * ISP. This information is not available to the Android HAL, not at
+ * initialization time at least.
+ *
+ * As a workaround rely on pipeline handlers initializing the
+ * ScalerCrop control with the camera default configuration and use the
+ * maximum and minimum crop rectangles to calculate the digital zoom
+ * factor.
+ */
+ float maxZoom = 1.0f;
+ const auto scalerCrop = controlsInfo.find(&controls::ScalerCrop);
+ if (scalerCrop != controlsInfo.end()) {
+ Rectangle min = scalerCrop->second.min().get<Rectangle>();
+ Rectangle max = scalerCrop->second.max().get<Rectangle>();
+ maxZoom = std::min(1.0f * max.width / min.width,
+ 1.0f * max.height / min.height);
+ }
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+ maxZoom);
+
+ std::vector<uint32_t> availableStreamConfigurations;
+ std::vector<int64_t> minFrameDurations;
+ int maxYUVFps = 0;
+ Size maxYUVSize;
+
+ availableStreamConfigurations.reserve(streamConfigurations_.size() * 4);
+ minFrameDurations.reserve(streamConfigurations_.size() * 4);
+
+ for (const auto &entry : streamConfigurations_) {
+ /*
+ * Filter out YUV streams not capable of running at 30 FPS.
+ *
+ * This requirement comes from CTS RecordingTest failures most
+ * probably related to a requirement of the camcoder video
+ * recording profile. Inspecting the Intel IPU3 HAL
+ * implementation confirms this but no reference has been found
+ * in the metadata documentation.
+ */
+ unsigned int fps =
+ static_cast<unsigned int>(floor(1e9 / entry.minFrameDurationNsec));
+
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB && fps < 30)
+ continue;
+
+ /*
+ * Collect the FPS of the maximum YUV output size to populate
+ * AE_AVAILABLE_TARGET_FPS_RANGE
+ */
+ if (entry.androidFormat == HAL_PIXEL_FORMAT_YCbCr_420_888 &&
+ entry.resolution > maxYUVSize) {
+ maxYUVSize = entry.resolution;
+ maxYUVFps = fps;
+ }
+
+ /* Stream configuration map. */
+ availableStreamConfigurations.push_back(entry.androidFormat);
+ availableStreamConfigurations.push_back(entry.resolution.width);
+ availableStreamConfigurations.push_back(entry.resolution.height);
+ availableStreamConfigurations.push_back(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+
+ /* Per-stream durations. */
+ minFrameDurations.push_back(entry.androidFormat);
+ minFrameDurations.push_back(entry.resolution.width);
+ minFrameDurations.push_back(entry.resolution.height);
+ minFrameDurations.push_back(entry.minFrameDurationNsec);
+
+ LOG(HAL, Debug)
+ << "Output Stream: " << utils::hex(entry.androidFormat)
+ << " (" << entry.resolution << ")["
+ << entry.minFrameDurationNsec << "]"
+ << "@" << fps;
+ }
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ availableStreamConfigurations);
+
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ minFrameDurations);
+
+ /*
+ * Register to the camera service {min, max} and {max, max} with
+ * 'max' being the larger YUV stream maximum frame rate and 'min' being
+ * the globally minimum frame rate rounded to the next largest integer
+ * as the camera service expects the camera maximum frame duration to be
+ * smaller than 10^9 / minFps.
+ */
+ int32_t minFps = std::ceil(1e9 / maxFrameDuration_);
+ int32_t availableAeFpsTarget[] = {
+ minFps, maxYUVFps, maxYUVFps, maxYUVFps,
+ };
+ staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ availableAeFpsTarget);
+
+ std::vector<int64_t> availableStallDurations;
+ for (const auto &entry : streamConfigurations_) {
+ if (entry.androidFormat != HAL_PIXEL_FORMAT_BLOB)
+ continue;
+
+ availableStallDurations.push_back(entry.androidFormat);
+ availableStallDurations.push_back(entry.resolution.width);
+ availableStallDurations.push_back(entry.resolution.height);
+ availableStallDurations.push_back(entry.minFrameDurationNsec);
+ }
+ staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ availableStallDurations);
+
+ uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY;
+ staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, croppingType);
+
+ /* Request static metadata. */
+ int32_t partialResultCount = 1;
+ staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+ partialResultCount);
+
+ {
+ /* Default the value to 2 if not reported by the camera. */
+ uint8_t maxPipelineDepth = 2;
+ const auto &infoMap = controlsInfo.find(&controls::draft::PipelineDepth);
+ if (infoMap != controlsInfo.end())
+ maxPipelineDepth = infoMap->second.max().get<int32_t>();
+ staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
+ maxPipelineDepth);
+ }
+
+ /* LIMITED does not support reprocessing. */
+ uint32_t maxNumInputStreams = 0;
+ staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+ maxNumInputStreams);
+
+ /* Number of { RAW, YUV, JPEG } supported output streams */
+ int32_t numOutStreams[] = { rawStreamAvailable_, 2, 1 };
+ staticMetadata_->addEntry(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+ numOutStreams);
+
+ /* Check capabilities */
+ capabilities_ = computeCapabilities();
+ /* This *must* be uint8_t. */
+ std::vector<uint8_t> capsVec(capabilities_.begin(), capabilities_.end());
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capsVec);
+
+ computeHwLevel(capabilities_);
+ staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, hwLevel_);
+
+ LOG(HAL, Info)
+ << "Hardware level: " << hwLevelStrings.find(hwLevel_)->second;
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+ std::vector<int32_t>(availableCharacteristicsKeys_.begin(),
+ availableCharacteristicsKeys_.end()));
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
+ std::vector<int32_t>(availableRequestKeys_.begin(),
+ availableRequestKeys_.end()));
+
+ staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+ std::vector<int32_t>(availableResultKeys_.begin(),
+ availableResultKeys_.end()));
+
+ if (!staticMetadata_->isValid()) {
+ LOG(HAL, Error) << "Failed to construct static metadata";
+ staticMetadata_.reset();
+ return -EINVAL;
+ }
+
+ if (staticMetadata_->resized()) {
+ auto [entryCount, dataCount] = staticMetadata_->usage();
+ LOG(HAL, Info)
+ << "Static metadata resized: " << entryCount
+ << " entries and " << dataCount << " bytes used";
+ }
+
+ return 0;
+}
+
+/* Translate Android format code to libcamera pixel format. */
+PixelFormat CameraCapabilities::toPixelFormat(int format) const
+{
+ auto it = formatsMap_.find(format);
+ if (it == formatsMap_.end()) {
+ LOG(HAL, Error) << "Requested format " << utils::hex(format)
+ << " not supported";
+ return PixelFormat();
+ }
+
+ return it->second;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateManual() const
+{
+ if (!capabilities_.count(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR)) {
+ LOG(HAL, Error) << "Manual template not supported";
+ return nullptr;
+ }
+
+ std::unique_ptr<CameraMetadata> manualTemplate = requestTemplatePreview();
+ if (!manualTemplate)
+ return nullptr;
+
+ return manualTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplatePreview() const
+{
+ /*
+ * Give initial hint of entries and number of bytes to be allocated.
+ * It is deliberate that the hint is slightly larger than required, to
+ * avoid resizing the container.
+ *
+ * CameraMetadata is capable of resizing the container on the fly, if
+ * adding a new entry will exceed its capacity.
+ */
+ auto requestTemplate = std::make_unique<CameraMetadata>(22, 38);
+ if (!requestTemplate->isValid()) {
+ return nullptr;
+ }
+
+ /* Get the FPS range registered in the static metadata. */
+ camera_metadata_ro_entry_t entry;
+ bool found = staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ &entry);
+ if (!found) {
+ LOG(HAL, Error) << "Cannot create capture template without FPS range";
+ return nullptr;
+ }
+
+ /*
+ * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata
+ * has been assembled as {{min, max} {max, max}}.
+ */
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32, 2);
+
+ /*
+ * Get thumbnail sizes from static metadata and add the first non-zero
+ * size to the template.
+ */
+ found = staticMetadata_->getEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ &entry);
+ ASSERT(found && entry.count >= 4);
+ requestTemplate->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE,
+ entry.data.i32 + 2, 2);
+
+ uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE, aeMode);
+
+ int32_t aeExposureCompensation = 0;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ aeExposureCompensation);
+
+ uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ aePrecaptureTrigger);
+
+ uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK, aeLock);
+
+ uint8_t aeAntibandingMode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ aeAntibandingMode);
+
+ uint8_t afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AF_MODE, afMode);
+
+ uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER, afTrigger);
+
+ uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE, awbMode);
+
+ uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+ requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK, awbLock);
+
+ uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_FLASH_MODE, flashMode);
+
+ uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ faceDetectMode);
+
+ uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE,
+ noiseReduction);
+
+ uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ aberrationMode);
+
+ uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+ requestTemplate->addEntry(ANDROID_CONTROL_MODE, controlMode);
+
+ float lensAperture = 2.53 / 100;
+ requestTemplate->addEntry(ANDROID_LENS_APERTURE, lensAperture);
+
+ uint8_t opticalStabilization = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ requestTemplate->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ opticalStabilization);
+
+ uint8_t captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT,
+ captureIntent);
+
+ return requestTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateStill() const
+{
+ std::unique_ptr<CameraMetadata> stillTemplate = requestTemplatePreview();
+ if (!stillTemplate)
+ return nullptr;
+
+ return stillTemplate;
+}
+
+std::unique_ptr<CameraMetadata> CameraCapabilities::requestTemplateVideo() const
+{
+ std::unique_ptr<CameraMetadata> previewTemplate = requestTemplatePreview();
+ if (!previewTemplate)
+ return nullptr;
+
+ /*
+ * The video template requires a fixed FPS range. Everything else
+ * stays the same as the preview template.
+ */
+ camera_metadata_ro_entry_t entry;
+ staticMetadata_->getEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+ &entry);
+
+ /*
+ * Assume the AE_AVAILABLE_TARGET_FPS_RANGE static metadata
+ * has been assembled as {{min, max} {max, max}}.
+ */
+ previewTemplate->updateEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32 + 2, 2);
+
+ return previewTemplate;
+}
diff --git a/src/android/camera_capabilities.h b/src/android/camera_capabilities.h
new file mode 100644
index 00000000..56ac1efe
--- /dev/null
+++ b/src/android/camera_capabilities.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera static properties manager
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include <libcamera/base/class.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+
+#include "camera_metadata.h"
+
+class CameraCapabilities
+{
+public:
+ CameraCapabilities() = default;
+
+ int initialize(std::shared_ptr<libcamera::Camera> camera,
+ int orientation, int facing);
+
+ CameraMetadata *staticMetadata() const { return staticMetadata_.get(); }
+ libcamera::PixelFormat toPixelFormat(int format) const;
+ unsigned int maxJpegBufferSize() const { return maxJpegBufferSize_; }
+
+ std::unique_ptr<CameraMetadata> requestTemplateManual() const;
+ std::unique_ptr<CameraMetadata> requestTemplatePreview() const;
+ std::unique_ptr<CameraMetadata> requestTemplateStill() const;
+ std::unique_ptr<CameraMetadata> requestTemplateVideo() const;
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraCapabilities)
+
+ struct Camera3StreamConfiguration {
+ libcamera::Size resolution;
+ int androidFormat;
+ int64_t minFrameDurationNsec;
+ int64_t maxFrameDurationNsec;
+ };
+
+ bool validateManualSensorCapability();
+ bool validateManualPostProcessingCapability();
+ bool validateBurstCaptureCapability();
+
+ std::set<camera_metadata_enum_android_request_available_capabilities>
+ computeCapabilities();
+
+ void computeHwLevel(
+ const std::set<camera_metadata_enum_android_request_available_capabilities> &caps);
+
+ std::vector<libcamera::Size>
+ initializeYUVResolutions(const libcamera::PixelFormat &pixelFormat,
+ const std::vector<libcamera::Size> &resolutions);
+ std::vector<libcamera::Size>
+ initializeRawResolutions(const libcamera::PixelFormat &pixelFormat);
+ int initializeStreamConfigurations();
+
+ int initializeStaticMetadata();
+
+ std::shared_ptr<libcamera::Camera> camera_;
+
+ int facing_;
+ int orientation_;
+ bool rawStreamAvailable_;
+ int64_t maxFrameDuration_;
+ camera_metadata_enum_android_info_supported_hardware_level hwLevel_;
+ std::set<camera_metadata_enum_android_request_available_capabilities> capabilities_;
+
+ std::vector<Camera3StreamConfiguration> streamConfigurations_;
+ std::map<int, libcamera::PixelFormat> formatsMap_;
+ std::unique_ptr<CameraMetadata> staticMetadata_;
+ unsigned int maxJpegBufferSize_;
+
+ std::set<int32_t> availableCharacteristicsKeys_;
+ std::set<int32_t> availableRequestKeys_;
+ std::set<int32_t> availableResultKeys_;
+};
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index 76af70eb..a038131a 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -2,43 +2,238 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.cpp - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#include "camera_device.h"
-#include "camera_ops.h"
+#include <algorithm>
+#include <fstream>
+#include <set>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/unique_fd.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/fence.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
#include <libcamera/property_ids.h>
-#include "log.h"
-#include "utils.h"
+#include "system/graphics.h"
-#include "camera_metadata.h"
+#include "camera_buffer.h"
+#include "camera_hal_config.h"
+#include "camera_ops.h"
+#include "camera_request.h"
+#include "hal_framebuffer.h"
using namespace libcamera;
-LOG_DECLARE_CATEGORY(HAL);
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
/*
- * \struct Camera3RequestDescriptor
- *
- * A utility structure that groups information about a capture request to be
- * later re-used at request complete time to notify the framework.
+ * \struct Camera3StreamConfig
+ * \brief Data to store StreamConfiguration associated with camera3_stream(s)
+ * \var streams List of the pairs of a stream requested by Android HAL client
+ * and CameraStream::Type associated with the stream
+ * \var config StreamConfiguration for streams
+ */
+struct Camera3StreamConfig {
+ struct Camera3Stream {
+ camera3_stream_t *stream;
+ CameraStream::Type type;
+ };
+
+ std::vector<Camera3Stream> streams;
+ StreamConfiguration config;
+};
+
+/*
+ * Reorder the configurations so that libcamera::Camera can accept them as much
+ * as possible. The sort rule is as follows.
+ * 1.) The configuration for NV12 request whose resolution is the largest.
+ * 2.) The configuration for JPEG request.
+ * 3.) Others. Larger resolutions and different formats are put earlier.
*/
+void sortCamera3StreamConfigs(std::vector<Camera3StreamConfig> &unsortedConfigs,
+ const camera3_stream_t *jpegStream)
+{
+ const Camera3StreamConfig *jpegConfig = nullptr;
+
+ std::map<PixelFormat, std::vector<const Camera3StreamConfig *>> formatToConfigs;
+ for (const auto &streamConfig : unsortedConfigs) {
+ if (jpegStream && !jpegConfig) {
+ const auto &streams = streamConfig.streams;
+ if (std::find_if(streams.begin(), streams.end(),
+ [jpegStream](const auto &stream) {
+ return stream.stream == jpegStream;
+ }) != streams.end()) {
+ jpegConfig = &streamConfig;
+ continue;
+ }
+ }
+ formatToConfigs[streamConfig.config.pixelFormat].push_back(&streamConfig);
+ }
+
+ if (jpegStream && !jpegConfig)
+ LOG(HAL, Fatal) << "No Camera3StreamConfig is found for JPEG";
+
+ for (auto &fmt : formatToConfigs) {
+ auto &streamConfigs = fmt.second;
+
+ /* Sorted by resolution. Smaller is put first. */
+ std::sort(streamConfigs.begin(), streamConfigs.end(),
+ [](const auto *streamConfigA, const auto *streamConfigB) {
+ const Size &sizeA = streamConfigA->config.size;
+ const Size &sizeB = streamConfigB->config.size;
+ return sizeA < sizeB;
+ });
+ }
+
+ std::vector<Camera3StreamConfig> sortedConfigs;
+ sortedConfigs.reserve(unsortedConfigs.size());
+
+ /*
+ * NV12 is the most prioritized format. Put the configuration with NV12
+ * and the largest resolution first.
+ */
+ const auto nv12It = formatToConfigs.find(formats::NV12);
+ if (nv12It != formatToConfigs.end()) {
+ auto &nv12Configs = nv12It->second;
+ const Camera3StreamConfig *nv12Largest = nv12Configs.back();
+
+ /*
+ * If JPEG will be created from NV12 and the size is larger than
+ * the largest NV12 configurations, then put the NV12
+ * configuration for JPEG first.
+ */
+ if (jpegConfig && jpegConfig->config.pixelFormat == formats::NV12) {
+ const Size &nv12SizeForJpeg = jpegConfig->config.size;
+ const Size &nv12LargestSize = nv12Largest->config.size;
+
+ if (nv12LargestSize < nv12SizeForJpeg) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
+ }
+
+ LOG(HAL, Debug) << "Insert " << nv12Largest->config.toString();
+ sortedConfigs.push_back(*nv12Largest);
+ nv12Configs.pop_back();
+
+ if (nv12Configs.empty())
+ formatToConfigs.erase(nv12It);
+ }
+
+ /* If the configuration for JPEG is there, then put it. */
+ if (jpegConfig) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
-CameraDevice::Camera3RequestDescriptor::Camera3RequestDescriptor(
- unsigned int frameNumber, unsigned int numBuffers)
- : frameNumber(frameNumber), numBuffers(numBuffers)
+ /*
+ * Put configurations with different formats and larger resolutions
+ * earlier.
+ */
+ while (!formatToConfigs.empty()) {
+ for (auto it = formatToConfigs.begin(); it != formatToConfigs.end();) {
+ auto &configs = it->second;
+ LOG(HAL, Debug) << "Insert " << configs.back()->config.toString();
+ sortedConfigs.push_back(*configs.back());
+ configs.pop_back();
+
+ if (configs.empty())
+ it = formatToConfigs.erase(it);
+ else
+ it++;
+ }
+ }
+
+ ASSERT(sortedConfigs.size() == unsortedConfigs.size());
+
+ unsortedConfigs = sortedConfigs;
+}
+
+const char *rotationToString(int rotation)
{
- buffers = new camera3_stream_buffer_t[numBuffers];
+ switch (rotation) {
+ case CAMERA3_STREAM_ROTATION_0:
+ return "0";
+ case CAMERA3_STREAM_ROTATION_90:
+ return "90";
+ case CAMERA3_STREAM_ROTATION_180:
+ return "180";
+ case CAMERA3_STREAM_ROTATION_270:
+ return "270";
+ }
+ return "INVALID";
}
-CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
+const char *directionToString(int stream_type)
{
- delete[] buffers;
+ switch (stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ return "Output";
+ case CAMERA3_STREAM_INPUT:
+ return "Input";
+ case CAMERA3_STREAM_BIDIRECTIONAL:
+ return "Bidirectional";
+ default:
+ LOG(HAL, Warning) << "Unknown stream type: " << stream_type;
+ return "Unknown";
+ }
}
+#if defined(OS_CHROMEOS)
+/*
+ * Check whether the crop_rotate_scale_degrees values for all streams in
+ * the list are valid according to the Chrome OS camera HAL API.
+ */
+bool validateCropRotate(const camera3_stream_configuration_t &streamList)
+{
+ ASSERT(streamList.num_streams > 0);
+ const int cropRotateScaleDegrees =
+ streamList.streams[0]->crop_rotate_scale_degrees;
+ for (unsigned int i = 0; i < streamList.num_streams; ++i) {
+ const camera3_stream_t &stream = *streamList.streams[i];
+
+ switch (stream.crop_rotate_scale_degrees) {
+ case CAMERA3_STREAM_ROTATION_0:
+ case CAMERA3_STREAM_ROTATION_90:
+ case CAMERA3_STREAM_ROTATION_270:
+ break;
+
+ /* 180° rotation is specified by Chrome OS as invalid. */
+ case CAMERA3_STREAM_ROTATION_180:
+ default:
+ LOG(HAL, Error) << "Invalid crop_rotate_scale_degrees: "
+ << stream.crop_rotate_scale_degrees;
+ return false;
+ }
+
+ if (cropRotateScaleDegrees != stream.crop_rotate_scale_degrees) {
+ LOG(HAL, Error) << "crop_rotate_scale_degrees in all "
+ << "streams are not identical";
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+} /* namespace */
+
/*
* \class CameraDevice
*
@@ -52,21 +247,143 @@ CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
* back to the framework using the designated callbacks.
*/
-CameraDevice::CameraDevice(unsigned int id, const std::shared_ptr<Camera> &camera)
- : running_(false), camera_(camera), staticMetadata_(nullptr)
+CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
+ : id_(id), state_(State::Stopped), camera_(std::move(camera)),
+ facing_(CAMERA_FACING_FRONT), orientation_(0)
{
camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
+
+ maker_ = "libcamera";
+ model_ = "cameraModel";
+
+ /* \todo Support getting properties on Android */
+ std::ifstream fstream("/var/cache/camera/camera.prop");
+ if (!fstream.is_open())
+ return;
+
+ std::string line;
+ while (std::getline(fstream, line)) {
+ std::string::size_type delimPos = line.find("=");
+ if (delimPos == std::string::npos)
+ continue;
+ std::string key = line.substr(0, delimPos);
+ std::string val = line.substr(delimPos + 1);
+
+ if (!key.compare("ro.product.model"))
+ model_ = val;
+ else if (!key.compare("ro.product.manufacturer"))
+ maker_ = val;
+ }
}
-CameraDevice::~CameraDevice()
+CameraDevice::~CameraDevice() = default;
+
+std::unique_ptr<CameraDevice> CameraDevice::create(unsigned int id,
+ std::shared_ptr<Camera> cam)
+{
+ return std::unique_ptr<CameraDevice>(
+ new CameraDevice(id, std::move(cam)));
+}
+
+/*
+ * Initialize the camera static information retrieved from the
+ * Camera::properties or from the cameraConfigData.
+ *
+ * cameraConfigData is optional for external camera devices and can be
+ * nullptr.
+ *
+ * This function is called before the camera device is opened.
+ */
+int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
{
- if (staticMetadata_)
- delete staticMetadata_;
+ /*
+ * Initialize orientation and facing side of the camera.
+ *
+ * If the libcamera::Camera provides those information as retrieved
+ * from firmware use them, otherwise fallback to values parsed from
+ * the configuration file. If the configuration file is not available
+ * the camera is external so its location and rotation can be safely
+ * defaulted.
+ */
+ const ControlList &properties = camera_->properties();
+
+ const auto &location = properties.get(properties::Location);
+ if (location) {
+ switch (*location) {
+ case properties::CameraLocationFront:
+ facing_ = CAMERA_FACING_FRONT;
+ break;
+ case properties::CameraLocationBack:
+ facing_ = CAMERA_FACING_BACK;
+ break;
+ case properties::CameraLocationExternal:
+ /*
+ * If the camera is reported as external, but the
+ * CameraHalManager has overriden it, use what is
+ * reported in the configuration file. This typically
+ * happens for UVC cameras reported as 'External' by
+ * libcamera but installed in fixed position on the
+ * device.
+ */
+ if (cameraConfigData && cameraConfigData->facing != -1)
+ facing_ = cameraConfigData->facing;
+ else
+ facing_ = CAMERA_FACING_EXTERNAL;
+ break;
+ }
- for (auto &it : requestTemplates_)
- delete it.second;
+ if (cameraConfigData && cameraConfigData->facing != -1 &&
+ facing_ != cameraConfigData->facing) {
+ LOG(HAL, Warning)
+ << "Camera location does not match"
+ << " configuration file. Using " << facing_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->facing == -1) {
+ LOG(HAL, Error)
+ << "Camera facing not in configuration file";
+ return -EINVAL;
+ }
+ facing_ = cameraConfigData->facing;
+ } else {
+ facing_ = CAMERA_FACING_EXTERNAL;
+ }
+
+ /*
+ * The Android orientation metadata specifies its rotation correction
+ * value in clockwise direction whereas libcamera specifies the
+ * rotation property in anticlockwise direction. Read the libcamera's
+ * rotation property (anticlockwise) and compute the corresponding
+ * value for clockwise direction as required by the Android orientation
+ * metadata.
+ */
+ const auto &rotation = properties.get(properties::Rotation);
+ if (rotation) {
+ orientation_ = (360 - *rotation) % 360;
+ if (cameraConfigData && cameraConfigData->rotation != -1 &&
+ orientation_ != cameraConfigData->rotation) {
+ LOG(HAL, Warning)
+ << "Camera orientation does not match"
+ << " configuration file. Using " << orientation_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->rotation == -1) {
+ LOG(HAL, Error)
+ << "Camera rotation not in configuration file";
+ return -EINVAL;
+ }
+ orientation_ = cameraConfigData->rotation;
+ } else {
+ orientation_ = 0;
+ }
+
+ return capabilities_.initialize(camera_, orientation_, facing_);
}
+/*
+ * Open a camera device. The static information on the camera shall have been
+ * initialized with a call to CameraDevice::initialize().
+ */
int CameraDevice::open(const hw_module_t *hardwareModule)
{
int ret = camera_->acquire();
@@ -93,452 +410,56 @@ int CameraDevice::open(const hw_module_t *hardwareModule)
void CameraDevice::close()
{
- camera_->stop();
- camera_->release();
+ stop();
- running_ = false;
-}
-
-void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
-{
- callbacks_ = callbacks;
+ camera_->release();
}
-/*
- * Return static information for the camera.
- */
-const camera_metadata_t *CameraDevice::getStaticMetadata()
+void CameraDevice::flush()
{
- if (staticMetadata_)
- return staticMetadata_->get();
-
- const ControlList &properties = camera_->properties();
+ {
+ MutexLocker stateLock(stateMutex_);
+ if (state_ != State::Running)
+ return;
- /*
- * The here reported metadata are enough to implement a basic capture
- * example application, but a real camera implementation will require
- * more.
- */
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 50 entries, 666 bytes
- */
- staticMetadata_ = new CameraMetadata(50, 700);
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
+ state_ = State::Flushing;
}
- /* Color correction static metadata. */
- std::vector<uint8_t> aberrationModes = {
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- aberrationModes.data(),
- aberrationModes.size());
-
- /* Control static metadata. */
- std::vector<uint8_t> aeAvailableAntiBandingModes = {
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- aeAvailableAntiBandingModes.data(),
- aeAvailableAntiBandingModes.size());
-
- std::vector<uint8_t> aeAvailableModes = {
- ANDROID_CONTROL_AE_MODE_ON,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES,
- aeAvailableModes.data(),
- aeAvailableModes.size());
-
- std::vector<int32_t> availableAeFpsTarget = {
- 15, 30,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- availableAeFpsTarget.data(),
- availableAeFpsTarget.size());
-
- std::vector<int32_t> aeCompensationRange = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- aeCompensationRange.data(),
- aeCompensationRange.size());
-
- const camera_metadata_rational_t aeCompensationStep[] = {
- { 0, 1 }
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP,
- aeCompensationStep, 1);
-
- std::vector<uint8_t> availableAfModes = {
- ANDROID_CONTROL_AF_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES,
- availableAfModes.data(),
- availableAfModes.size());
-
- std::vector<uint8_t> availableEffects = {
- ANDROID_CONTROL_EFFECT_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS,
- availableEffects.data(),
- availableEffects.size());
-
- std::vector<uint8_t> availableSceneModes = {
- ANDROID_CONTROL_SCENE_MODE_DISABLED,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- availableSceneModes.data(),
- availableSceneModes.size());
-
- std::vector<uint8_t> availableStabilizationModes = {
- ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- availableStabilizationModes.data(),
- availableStabilizationModes.size());
-
- std::vector<uint8_t> availableAwbModes = {
- ANDROID_CONTROL_AWB_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- availableAwbModes.data(),
- availableAwbModes.size());
-
- std::vector<int32_t> availableMaxRegions = {
- 0, 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS,
- availableMaxRegions.data(),
- availableMaxRegions.size());
-
- std::vector<uint8_t> sceneModesOverride = {
- ANDROID_CONTROL_AE_MODE_ON,
- ANDROID_CONTROL_AWB_MODE_AUTO,
- ANDROID_CONTROL_AF_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- sceneModesOverride.data(),
- sceneModesOverride.size());
-
- uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- &aeLockAvailable, 1);
-
- uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- &awbLockAvailable, 1);
-
- char availableControlModes = ANDROID_CONTROL_MODE_AUTO;
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES,
- &availableControlModes, 1);
-
- /* JPEG static metadata. */
- std::vector<int32_t> availableThumbnailSizes = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- availableThumbnailSizes.data(),
- availableThumbnailSizes.size());
-
- /* Sensor static metadata. */
- int32_t pixelArraySize[] = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- &pixelArraySize, 2);
-
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- &sensorSizes, 4);
-
- int32_t sensitivityRange[] = {
- 32, 2400,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- &sensitivityRange, 2);
-
- uint16_t filterArr = ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- &filterArr, 1);
+ camera_->stop();
- int64_t exposureTimeRange[] = {
- 100000, 200000000,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- &exposureTimeRange, 2);
+ MutexLocker stateLock(stateMutex_);
+ state_ = State::Stopped;
+}
- /*
- * The Android orientation metadata and libcamera rotation property are
- * defined differently but have identical numerical values for Android
- * devices such as phones and tablets.
- */
- int32_t orientation = 0;
- if (properties.contains(properties::Rotation))
- orientation = properties.get(properties::Rotation);
- staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+void CameraDevice::stop()
+{
+ MutexLocker stateLock(stateMutex_);
- std::vector<int32_t> testPatterModes = {
- ANDROID_SENSOR_TEST_PATTERN_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- testPatterModes.data(),
- testPatterModes.size());
+ camera_->stop();
- std::vector<float> physicalSize = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- physicalSize.data(),
- physicalSize.size());
-
- uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- &timestampSource, 1);
-
- /* Statistics static metadata. */
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- &faceDetectMode, 1);
-
- int32_t maxFaceCount = 0;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- &maxFaceCount, 1);
-
- /* Sync static metadata. */
- int32_t maxLatency = ANDROID_SYNC_MAX_LATENCY_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
-
- /* Flash static metadata. */
- char flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE,
- &flashAvailable, 1);
-
- /* Lens static metadata. */
- std::vector<float> lensApertures = {
- 2.53 / 100,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- lensApertures.data(),
- lensApertures.size());
-
- uint8_t lensFacing = ANDROID_LENS_FACING_FRONT;
- if (properties.contains(properties::Location)) {
- int32_t location = properties.get(properties::Location);
- switch (location) {
- case properties::CameraLocationFront:
- lensFacing = ANDROID_LENS_FACING_FRONT;
- break;
- case properties::CameraLocationBack:
- lensFacing = ANDROID_LENS_FACING_BACK;
- break;
- case properties::CameraLocationExternal:
- lensFacing = ANDROID_LENS_FACING_EXTERNAL;
- break;
- }
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_ = {};
}
- staticMetadata_->addEntry(ANDROID_LENS_FACING, &lensFacing, 1);
-
- std::vector<float> lensFocalLenghts = {
- 1,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- lensFocalLenghts.data(),
- lensFocalLenghts.size());
-
- std::vector<uint8_t> opticalStabilizations = {
- ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- opticalStabilizations.data(),
- opticalStabilizations.size());
-
- float hypeFocalDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- &hypeFocalDistance, 1);
-
- float minFocusDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- &minFocusDistance, 1);
-
- /* Noise reduction modes. */
- uint8_t noiseReductionModes = ANDROID_NOISE_REDUCTION_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- &noiseReductionModes, 1);
-
- /* Scaler static metadata. */
- float maxDigitalZoom = 1;
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- &maxDigitalZoom, 1);
-
- std::vector<uint32_t> availableStreamFormats = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_FORMATS,
- availableStreamFormats.data(),
- availableStreamFormats.size());
-
- std::vector<uint32_t> availableStreamConfigurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- availableStreamConfigurations.data(),
- availableStreamConfigurations.size());
-
- std::vector<int64_t> availableStallDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- availableStallDurations.data(),
- availableStallDurations.size());
-
- std::vector<int64_t> minFrameDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- minFrameDurations.data(),
- minFrameDurations.size());
- uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY;
- staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1);
+ streams_.clear();
- /* Info static metadata. */
- uint8_t supportedHWLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
- staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- &supportedHWLevel, 1);
-
- /* Request static metadata. */
- int32_t partialResultCount = 1;
- staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- &partialResultCount, 1);
+ state_ = State::Stopped;
+}
- uint8_t maxPipelineDepth = 2;
- staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- &maxPipelineDepth, 1);
+unsigned int CameraDevice::maxJpegBufferSize() const
+{
+ return capabilities_.maxJpegBufferSize();
+}
- std::vector<uint8_t> availableCapabilities = {
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- availableCapabilities.data(),
- availableCapabilities.size());
-
- std::vector<int32_t> availableCharacteristicsKeys = {
- ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- ANDROID_CONTROL_AE_COMPENSATION_STEP,
- ANDROID_CONTROL_AF_AVAILABLE_MODES,
- ANDROID_CONTROL_AVAILABLE_EFFECTS,
- ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- ANDROID_CONTROL_MAX_REGIONS,
- ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- ANDROID_CONTROL_AVAILABLE_MODES,
- ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- ANDROID_SENSOR_ORIENTATION,
- ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- ANDROID_SYNC_MAX_LATENCY,
- ANDROID_FLASH_INFO_AVAILABLE,
- ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- ANDROID_LENS_FACING,
- ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- ANDROID_SCALER_AVAILABLE_FORMATS,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- ANDROID_SCALER_CROPPING_TYPE,
- ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
- availableCharacteristicsKeys.data(),
- availableCharacteristicsKeys.size());
-
- std::vector<int32_t> availableRequestKeys = {
- ANDROID_CONTROL_AE_MODE,
- ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_TRIGGER,
- ANDROID_CONTROL_AWB_MODE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_FLASH_MODE,
- ANDROID_STATISTICS_FACE_DETECT_MODE,
- ANDROID_NOISE_REDUCTION_MODE,
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- ANDROID_CONTROL_CAPTURE_INTENT,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
- availableRequestKeys.data(),
- availableRequestKeys.size());
-
- std::vector<int32_t> availableResultKeys = {
- ANDROID_CONTROL_AE_STATE,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_STATE,
- ANDROID_CONTROL_AWB_STATE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_LENS_STATE,
- ANDROID_SCALER_CROP_REGION,
- ANDROID_SENSOR_TIMESTAMP,
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- ANDROID_SENSOR_EXPOSURE_TIME,
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- ANDROID_STATISTICS_SCENE_FLICKER,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
- availableResultKeys.data(),
- availableResultKeys.size());
-
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to construct static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
- }
+void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
+{
+ callbacks_ = callbacks;
+}
- return staticMetadata_->get();
+const camera_metadata_t *CameraDevice::getStaticMetadata()
+{
+ return capabilities_.staticMetadata()->getMetadata();
}
/*
@@ -548,100 +469,53 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
{
auto it = requestTemplates_.find(type);
if (it != requestTemplates_.end())
- return it->second->get();
+ return it->second->getMetadata();
/* Use the capture intent matching the requested template type. */
+ std::unique_ptr<CameraMetadata> requestTemplate;
uint8_t captureIntent;
switch (type) {
case CAMERA3_TEMPLATE_PREVIEW:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ requestTemplate = capabilities_.requestTemplatePreview();
break;
case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ /*
+ * Use the preview template for still capture, they only differ
+ * for the torch mode we currently do not support.
+ */
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ requestTemplate = capabilities_.requestTemplateStill();
break;
case CAMERA3_TEMPLATE_VIDEO_RECORD:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
- break;
- case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
- captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_MANUAL:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+ requestTemplate = capabilities_.requestTemplateManual();
break;
+ /* \todo Implement templates generation for the remaining use cases. */
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
default:
- LOG(HAL, Error) << "Invalid template request type: " << type;
- return nullptr;
- }
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 15 bytes
- */
- CameraMetadata *requestTemplate = new CameraMetadata(15, 20);
- if (!requestTemplate->isValid()) {
- LOG(HAL, Error) << "Failed to allocate template metadata";
- delete requestTemplate;
+ LOG(HAL, Error) << "Unsupported template request type: " << type;
return nullptr;
}
- uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE,
- &aeMode, 1);
-
- int32_t aeExposureCompensation = 0;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- &aeExposureCompensation, 1);
-
- uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- &aePrecaptureTrigger, 1);
-
- uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK,
- &aeLock, 1);
-
- uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER,
- &afTrigger, 1);
-
- uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE,
- &awbMode, 1);
-
- uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK,
- &awbLock, 1);
-
- uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
- requestTemplate->addEntry(ANDROID_FLASH_MODE,
- &flashMode, 1);
-
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
- &faceDetectMode, 1);
-
- uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE,
- &noiseReduction, 1);
-
- uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- &aberrationMode, 1);
-
- requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT,
- &captureIntent, 1);
-
- if (!requestTemplate->isValid()) {
+ if (!requestTemplate || !requestTemplate->isValid()) {
LOG(HAL, Error) << "Failed to construct request template";
- delete requestTemplate;
return nullptr;
}
- requestTemplates_[type] = requestTemplate;
- return requestTemplate->get();
+ requestTemplate->updateEntry(ANDROID_CONTROL_CAPTURE_INTENT,
+ captureIntent);
+
+ requestTemplates_[type] = std::move(requestTemplate);
+ return requestTemplates_[type]->getMetadata();
}
/*
@@ -650,217 +524,873 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
*/
int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list)
{
- for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
- camera3_stream_t *stream = stream_list->streams[i];
+ /* Before any configuration attempt, stop the camera. */
+ stop();
- LOG(HAL, Info) << "Stream #" << i
- << ", direction: " << stream->stream_type
- << ", width: " << stream->width
- << ", height: " << stream->height
- << ", format: " << utils::hex(stream->format);
+ if (stream_list->num_streams == 0) {
+ LOG(HAL, Error) << "No streams in configuration";
+ return -EINVAL;
}
- /* Hardcode viewfinder role, collecting sizes from the stream config. */
- if (stream_list->num_streams != 1) {
- LOG(HAL, Error) << "Only one stream supported";
+#if defined(OS_CHROMEOS)
+ if (!validateCropRotate(*stream_list))
return -EINVAL;
- }
+#endif
- StreamRoles roles = { StreamRole::Viewfinder };
- config_ = camera_->generateConfiguration(roles);
- if (!config_ || config_->empty()) {
+ /*
+ * Generate an empty configuration, and construct a StreamConfiguration
+ * for each camera3_stream to add to it.
+ */
+ std::unique_ptr<CameraConfiguration> config = camera_->generateConfiguration();
+ if (!config) {
LOG(HAL, Error) << "Failed to generate camera configuration";
return -EINVAL;
}
- /* Only one stream is supported. */
- camera3_stream_t *camera3Stream = stream_list->streams[0];
- StreamConfiguration *streamConfiguration = &config_->at(0);
- streamConfiguration->size.width = camera3Stream->width;
- streamConfiguration->size.height = camera3Stream->height;
-
/*
- * \todo We'll need to translate from Android defined pixel format codes
- * to the libcamera image format codes. For now, do not change the
- * format returned from Camera::generateConfiguration().
+ * Clear and remove any existing configuration from previous calls, and
+ * ensure the required entries are available without further
+ * reallocation.
*/
+ streams_.clear();
+ streams_.reserve(stream_list->num_streams);
+
+ std::vector<Camera3StreamConfig> streamConfigs;
+ streamConfigs.reserve(stream_list->num_streams);
+
+ /* First handle all non-MJPEG streams. */
+ camera3_stream_t *jpegStream = nullptr;
+ for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
+ camera3_stream_t *stream = stream_list->streams[i];
+ Size size(stream->width, stream->height);
+
+ PixelFormat format = capabilities_.toPixelFormat(stream->format);
+
+ LOG(HAL, Info) << "Stream #" << i
+ << ", direction: " << directionToString(stream->stream_type)
+ << ", width: " << stream->width
+ << ", height: " << stream->height
+ << ", format: " << utils::hex(stream->format)
+ << ", rotation: " << rotationToString(stream->rotation)
+#if defined(OS_CHROMEOS)
+ << ", crop_rotate_scale_degrees: "
+ << rotationToString(stream->crop_rotate_scale_degrees)
+#endif
+ << " (" << format << ")";
+
+ if (!format.isValid())
+ return -EINVAL;
+
+ /* \todo Support rotation. */
+ if (stream->rotation != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#if defined(OS_CHROMEOS)
+ if (stream->crop_rotate_scale_degrees != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#endif
+
+ /* Defer handling of MJPEG streams until all others are known. */
+ if (stream->format == HAL_PIXEL_FORMAT_BLOB) {
+ if (jpegStream) {
+ LOG(HAL, Error)
+ << "Multiple JPEG streams are not supported";
+ return -EINVAL;
+ }
+
+ jpegStream = stream;
+ continue;
+ }
+
+ /*
+ * While gralloc usage flags are supposed to report usage
+ * patterns to select a suitable buffer allocation strategy, in
+ * practice they're also used to make other decisions, such as
+ * selecting the actual format for the IMPLEMENTATION_DEFINED
+ * HAL pixel format. To avoid issues, we thus have to set the
+ * GRALLOC_USAGE_HW_CAMERA_WRITE flag unconditionally, even for
+ * streams that will be produced in software.
+ */
+ stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;
+
+ /*
+ * If a CameraStream with the same size and format as the
+ * current stream has already been requested, associate the two.
+ */
+ auto iter = std::find_if(
+ streamConfigs.begin(), streamConfigs.end(),
+ [&size, &format](const Camera3StreamConfig &streamConfig) {
+ return streamConfig.config.size == size &&
+ streamConfig.config.pixelFormat == format;
+ });
+ if (iter != streamConfigs.end()) {
+ /* Add usage to copy the buffer in streams[0] to stream. */
+ iter->streams[0].stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ stream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+ iter->streams.push_back({ stream, CameraStream::Type::Mapped });
+ continue;
+ }
+
+ Camera3StreamConfig streamConfig;
+ streamConfig.streams = { { stream, CameraStream::Type::Direct } };
+ streamConfig.config.size = size;
+ streamConfig.config.pixelFormat = format;
+ streamConfigs.push_back(std::move(streamConfig));
+ }
+
+ /* Now handle the MJPEG streams, adding a new stream if required. */
+ if (jpegStream) {
+ CameraStream::Type type;
+ int index = -1;
+
+ /* Search for a compatible stream in the non-JPEG ones. */
+ for (size_t i = 0; i < streamConfigs.size(); ++i) {
+ Camera3StreamConfig &streamConfig = streamConfigs[i];
+ const auto &cfg = streamConfig.config;
+
+ /*
+ * \todo The PixelFormat must also be compatible with
+ * the encoder.
+ */
+ if (cfg.size.width != jpegStream->width ||
+ cfg.size.height != jpegStream->height)
+ continue;
+
+ LOG(HAL, Info)
+ << "Android JPEG stream mapped to libcamera stream " << i;
+
+ type = CameraStream::Type::Mapped;
+ index = i;
+
+ /*
+ * The source stream will be read by software to
+ * produce the JPEG stream.
+ */
+ camera3_stream_t *stream = streamConfig.streams[0].stream;
+ stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ break;
+ }
+
+ /*
+ * Without a compatible match for JPEG encoding we must
+ * introduce a new stream to satisfy the request requirements.
+ */
+ if (index < 0) {
+ /*
+ * \todo The pixelFormat should be a 'best-fit' choice
+ * and may require a validation cycle. This is not yet
+ * handled, and should be considered as part of any
+ * stream configuration reworks.
+ */
+ Camera3StreamConfig streamConfig;
+ streamConfig.config.size.width = jpegStream->width;
+ streamConfig.config.size.height = jpegStream->height;
+ streamConfig.config.pixelFormat = formats::NV12;
+ streamConfigs.push_back(std::move(streamConfig));
+
+ LOG(HAL, Info) << "Adding " << streamConfig.config.toString()
+ << " for MJPEG support";
+
+ type = CameraStream::Type::Internal;
+ index = streamConfigs.size() - 1;
+ }
- switch (config_->validate()) {
+ /* The JPEG stream will be produced by software. */
+ jpegStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+
+ streamConfigs[index].streams.push_back({ jpegStream, type });
+ }
+
+ sortCamera3StreamConfigs(streamConfigs, jpegStream);
+ for (const auto &streamConfig : streamConfigs) {
+ config->addConfiguration(streamConfig.config);
+
+ CameraStream *sourceStream = nullptr;
+ for (auto &stream : streamConfig.streams) {
+ streams_.emplace_back(this, config.get(), stream.type,
+ stream.stream, sourceStream,
+ config->size() - 1);
+ stream.stream->priv = static_cast<void *>(&streams_.back());
+
+ /*
+ * The streamConfig.streams vector contains as its first
+ * element a Direct (or Internal) stream, and then an
+ * optional set of Mapped streams derived from the
+ * Direct stream. Cache the Direct stream pointer, to
+ * be used when constructing the subsequent mapped
+ * streams.
+ */
+ if (stream.type == CameraStream::Type::Direct)
+ sourceStream = &streams_.back();
+ }
+ }
+
+ switch (config->validate()) {
case CameraConfiguration::Valid:
break;
case CameraConfiguration::Adjusted:
LOG(HAL, Info) << "Camera configuration adjusted";
- config_.reset();
+
+ for (const StreamConfiguration &cfg : *config)
+ LOG(HAL, Info) << " - " << cfg.toString();
+
return -EINVAL;
case CameraConfiguration::Invalid:
LOG(HAL, Info) << "Camera configuration invalid";
- config_.reset();
return -EINVAL;
}
- camera3Stream->max_buffers = streamConfiguration->bufferCount;
-
/*
* Once the CameraConfiguration has been adjusted/validated
* it can be applied to the camera.
*/
- int ret = camera_->configure(config_.get());
+ int ret = camera_->configure(config.get());
if (ret) {
LOG(HAL, Error) << "Failed to configure camera '"
- << camera_->name() << "'";
+ << camera_->id() << "'";
return ret;
}
+ /*
+ * Configure the HAL CameraStream instances using the associated
+ * StreamConfiguration and set the number of required buffers in
+ * the Android camera3_stream_t.
+ */
+ for (CameraStream &cameraStream : streams_) {
+ ret = cameraStream.configure();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to configure camera stream";
+ return ret;
+ }
+ }
+
+ config_ = std::move(config);
return 0;
}
-int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+std::unique_ptr<HALFrameBuffer>
+CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer,
+ PixelFormat pixelFormat, const Size &size)
{
- StreamConfiguration *streamConfiguration = &config_->at(0);
- Stream *stream = streamConfiguration->stream();
+ CameraBuffer buf(camera3buffer, pixelFormat, size, PROT_READ);
+ if (!buf.isValid()) {
+ LOG(HAL, Fatal) << "Failed to create CameraBuffer";
+ return nullptr;
+ }
- if (camera3Request->num_output_buffers != 1) {
- LOG(HAL, Error) << "Invalid number of output buffers: "
- << camera3Request->num_output_buffers;
- return -EINVAL;
+ std::vector<FrameBuffer::Plane> planes(buf.numPlanes());
+ for (size_t i = 0; i < buf.numPlanes(); ++i) {
+ SharedFD fd{ camera3buffer->data[i] };
+ if (!fd.isValid()) {
+ LOG(HAL, Fatal) << "No valid fd";
+ return nullptr;
+ }
+
+ planes[i].fd = fd;
+ planes[i].offset = buf.offset(i);
+ planes[i].length = buf.size(i);
}
- /* Start the camera if that's the first request we handle. */
- if (!running_) {
- int ret = camera_->start();
- if (ret) {
- LOG(HAL, Error) << "Failed to start camera";
- return ret;
+ return std::make_unique<HALFrameBuffer>(planes, camera3buffer);
+}
+
+int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
+{
+ const CameraMetadata &settings = descriptor->settings_;
+ if (!settings.isValid())
+ return 0;
+
+ /* Translate the Android request settings to libcamera controls. */
+ ControlList &controls = descriptor->request_->controls();
+ camera_metadata_ro_entry_t entry;
+ if (settings.getEntry(ANDROID_SCALER_CROP_REGION, &entry)) {
+ const int32_t *data = entry.data.i32;
+ Rectangle cropRegion{ data[0], data[1],
+ static_cast<unsigned int>(data[2]),
+ static_cast<unsigned int>(data[3]) };
+ controls.set(controls::ScalerCrop, cropRegion);
+ }
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) {
+ const uint8_t *data = entry.data.u8;
+ controls.set(controls::draft::FaceDetectMode, data[0]);
+ }
+
+ if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) {
+ const int32_t data = *entry.data.i32;
+ int32_t testPatternMode = controls::draft::TestPatternModeOff;
+ switch (data) {
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_OFF:
+ testPatternMode = controls::draft::TestPatternModeOff;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR:
+ testPatternMode = controls::draft::TestPatternModeSolidColor;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS:
+ testPatternMode = controls::draft::TestPatternModeColorBars;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY:
+ testPatternMode = controls::draft::TestPatternModeColorBarsFadeToGray;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_PN9:
+ testPatternMode = controls::draft::TestPatternModePn9;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1:
+ testPatternMode = controls::draft::TestPatternModeCustom1;
+ break;
+
+ default:
+ LOG(HAL, Error)
+ << "Unknown test pattern mode: " << data;
+
+ return -EINVAL;
}
- running_ = true;
+ controls.set(controls::draft::TestPatternMode, testPatternMode);
}
- /*
- * Queue a request for the Camera with the provided dmabuf file
- * descriptors.
- */
- const camera3_stream_buffer_t *camera3Buffers =
- camera3Request->output_buffers;
+ return 0;
+}
+
+void CameraDevice::abortRequest(Camera3RequestDescriptor *descriptor) const
+{
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST);
+
+ for (auto &buffer : descriptor->buffers_)
+ buffer.status = Camera3RequestDescriptor::Status::Error;
+
+ descriptor->status_ = Camera3RequestDescriptor::Status::Error;
+}
+
+bool CameraDevice::isValidRequest(camera3_capture_request_t *camera3Request) const
+{
+ if (!camera3Request) {
+ LOG(HAL, Error) << "No capture request provided";
+ return false;
+ }
+
+ if (!camera3Request->num_output_buffers ||
+ !camera3Request->output_buffers) {
+ LOG(HAL, Error) << "No output buffers provided";
+ return false;
+ }
+
+ /* configureStreams() has not been called or has failed. */
+ if (streams_.empty() || !config_) {
+ LOG(HAL, Error) << "No stream is configured";
+ return false;
+ }
+
+ for (uint32_t i = 0; i < camera3Request->num_output_buffers; i++) {
+ const camera3_stream_buffer_t &outputBuffer =
+ camera3Request->output_buffers[i];
+ if (!outputBuffer.buffer || !(*outputBuffer.buffer)) {
+ LOG(HAL, Error) << "Invalid native handle";
+ return false;
+ }
+
+ const native_handle_t *handle = *outputBuffer.buffer;
+ constexpr int kNativeHandleMaxFds = 1024;
+ if (handle->numFds < 0 || handle->numFds > kNativeHandleMaxFds) {
+ LOG(HAL, Error)
+ << "Invalid number of fds (" << handle->numFds
+ << ") in buffer " << i;
+ return false;
+ }
+
+ constexpr int kNativeHandleMaxInts = 1024;
+ if (handle->numInts < 0 || handle->numInts > kNativeHandleMaxInts) {
+ LOG(HAL, Error)
+ << "Invalid number of ints (" << handle->numInts
+ << ") in buffer " << i;
+ return false;
+ }
+
+ const camera3_stream *camera3Stream = outputBuffer.stream;
+ if (!camera3Stream)
+ return false;
+
+ const CameraStream *cameraStream =
+ static_cast<CameraStream *>(camera3Stream->priv);
+
+ auto found = std::find_if(streams_.begin(), streams_.end(),
+ [cameraStream](const CameraStream &stream) {
+ return &stream == cameraStream;
+ });
+ if (found == streams_.end()) {
+ LOG(HAL, Error)
+ << "No corresponding configured stream found";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+{
+ if (!isValidRequest(camera3Request))
+ return -EINVAL;
/*
* Save the request descriptors for use at completion time.
* The descriptor and the associated memory reserved here are freed
* at request complete time.
*/
- Camera3RequestDescriptor *descriptor =
- new Camera3RequestDescriptor(camera3Request->frame_number,
- camera3Request->num_output_buffers);
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
+ auto descriptor = std::make_unique<Camera3RequestDescriptor>(camera_.get(),
+ camera3Request);
+
+ /*
+ * \todo The Android request model is incremental, settings passed in
+ * previous requests are to be effective until overridden explicitly in
+ * a new request. Do we need to cache settings incrementally here, or is
+ * it handled by the Android camera service ?
+ */
+ if (camera3Request->settings)
+ lastSettings_ = camera3Request->settings;
+
+ descriptor->settings_ = lastSettings_;
+
+ LOG(HAL, Debug) << "Queueing request " << descriptor->request_->cookie()
+ << " with " << descriptor->buffers_.size() << " streams";
+
+ /*
+ * Process all the Direct and Internal streams first, they map directly
+ * to a libcamera stream. Streams of type Mapped will be handled later.
+ *
+ * Collect the CameraStream associated to each requested capture stream.
+ * Since requestedStreams is an std:set<>, no duplications can happen.
+ */
+ std::set<CameraStream *> requestedStreams;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ std::stringstream ss;
+ ss << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]";
+
/*
- * Keep track of which stream the request belongs to and store
- * the native buffer handles.
- *
- * \todo Currently we only support one capture buffer. Copy
- * all of them to be ready once we'll support more.
+ * Inspect the camera stream type, create buffers opportunely
+ * and add them to the Request if required.
*/
- descriptor->buffers[i].stream = camera3Buffers[i].stream;
- descriptor->buffers[i].buffer = camera3Buffers[i].buffer;
+ FrameBuffer *frameBuffer = nullptr;
+ UniqueFD acquireFence;
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+
+ switch (cameraStream->type()) {
+ case CameraStream::Type::Mapped:
+ /* Mapped streams will be handled in the next loop. */
+ continue;
+
+ case CameraStream::Type::Direct:
+ /*
+ * Create a libcamera buffer using the dmabuf
+ * descriptors of the camera3Buffer for each stream and
+ * associate it with the Camera3RequestDescriptor for
+ * lifetime management only.
+ */
+ buffer.frameBuffer =
+ createFrameBuffer(*buffer.camera3Buffer,
+ cameraStream->configuration().pixelFormat,
+ cameraStream->configuration().size);
+ frameBuffer = buffer.frameBuffer.get();
+ acquireFence = std::move(buffer.fence);
+ LOG(HAL, Debug) << ss.str() << " (direct)";
+ break;
+
+ case CameraStream::Type::Internal:
+ /*
+ * Get the frame buffer from the CameraStream internal
+ * buffer pool.
+ *
+ * The buffer has to be returned to the CameraStream
+ * once it has been processed.
+ */
+ frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+ LOG(HAL, Debug) << ss.str() << " (internal)";
+
+ descriptor->pendingStreamsToProcess_.insert(
+ { cameraStream, &buffer });
+ break;
+ }
+
+ if (!frameBuffer) {
+ LOG(HAL, Error) << "Failed to create frame buffer";
+ return -ENOMEM;
+ }
+
+ auto fence = std::make_unique<Fence>(std::move(acquireFence));
+ descriptor->request_->addBuffer(cameraStream->stream(),
+ frameBuffer, std::move(fence));
+
+ requestedStreams.insert(cameraStream);
}
/*
- * Create a libcamera buffer using the dmabuf descriptors of the first
- * and (currently) only supported request buffer.
+ * Now handle the Mapped streams. If no buffer has been added for them
+ * because their corresponding direct source stream is not part of this
+ * particular request, add one here.
*/
- const buffer_handle_t camera3Handle = *camera3Buffers[0].buffer;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ if (cameraStream->type() != CameraStream::Type::Mapped)
+ continue;
+
+ LOG(HAL, Debug) << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]"
+ << " (mapped)";
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+ descriptor->pendingStreamsToProcess_.insert({ cameraStream, &buffer });
+
+ /*
+ * Make sure the CameraStream this stream is mapped on has been
+ * added to the request.
+ */
+ CameraStream *sourceStream = cameraStream->sourceStream();
+ ASSERT(sourceStream);
+ if (requestedStreams.find(sourceStream) != requestedStreams.end())
+ continue;
- std::vector<FrameBuffer::Plane> planes;
- for (int i = 0; i < 3; i++) {
- FrameBuffer::Plane plane;
- plane.fd = FileDescriptor(camera3Handle->data[i]);
/*
- * Setting length to zero here is OK as the length is only used
- * to map the memory of the plane. Libcamera do not need to poke
- * at the memory content queued by the HAL.
+ * If that's not the case, we need to add a buffer to the request
+ * for this stream.
*/
- plane.length = 0;
- planes.push_back(std::move(plane));
+ FrameBuffer *frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+
+ descriptor->request_->addBuffer(sourceStream->stream(),
+ frameBuffer, nullptr);
+
+ requestedStreams.insert(sourceStream);
+ }
+
+ /*
+ * Translate controls from Android to libcamera and queue the request
+ * to the camera.
+ */
+ int ret = processControls(descriptor.get());
+ if (ret)
+ return ret;
+
+ /*
+ * If flush is in progress set the request status to error and place it
+ * on the queue to be later completed. If the camera has been stopped we
+ * have to re-start it to be able to process the request.
+ */
+ MutexLocker stateLock(stateMutex_);
+
+ if (state_ == State::Flushing) {
+ Camera3RequestDescriptor *rawDescriptor = descriptor.get();
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
+ }
+ abortRequest(rawDescriptor);
+ completeDescriptor(rawDescriptor);
+
+ return 0;
}
- FrameBuffer *buffer = new FrameBuffer(std::move(planes));
- if (!buffer) {
- LOG(HAL, Error) << "Failed to create buffer";
- delete descriptor;
- return -ENOMEM;
+ if (state_ == State::Stopped) {
+ lastSettings_ = {};
+
+ ret = camera_->start();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to start camera";
+ return ret;
+ }
+
+ state_ = State::Running;
}
- Request *request =
- camera_->createRequest(reinterpret_cast<uint64_t>(descriptor));
- request->addBuffer(stream, buffer);
+ Request *request = descriptor->request_.get();
- int ret = camera_->queueRequest(request);
- if (ret) {
- LOG(HAL, Error) << "Failed to queue request";
- delete request;
- delete descriptor;
- return ret;
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
}
+ camera_->queueRequest(request);
+
return 0;
}
void CameraDevice::requestComplete(Request *request)
{
- const std::map<Stream *, FrameBuffer *> &buffers = request->buffers();
- FrameBuffer *buffer = buffers.begin()->second;
- camera3_buffer_status status = CAMERA3_BUFFER_STATUS_OK;
- std::unique_ptr<CameraMetadata> resultMetadata;
+ Camera3RequestDescriptor *descriptor =
+ reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+
+ /*
+ * Prepare the capture result for the Android camera stack.
+ *
+ * The buffer status is set to Success and later changed to Error if
+ * post-processing/compression fails.
+ */
+ for (auto &buffer : descriptor->buffers_) {
+ CameraStream *stream = buffer.stream;
+
+ /*
+ * Streams of type Direct have been queued to the
+ * libcamera::Camera and their acquire fences have
+ * already been waited on by the library.
+ *
+ * Acquire fences of streams of type Internal and Mapped
+ * will be handled during post-processing.
+ */
+ if (stream->type() == CameraStream::Type::Direct) {
+ /* If handling of the fence has failed restore buffer.fence. */
+ std::unique_ptr<Fence> fence = buffer.frameBuffer->releaseFence();
+ if (fence)
+ buffer.fence = fence->release();
+ }
+ buffer.status = Camera3RequestDescriptor::Status::Success;
+ }
+ /*
+ * If the Request has failed, abort the request by notifying the error
+ * and complete the request with all buffers in error state.
+ */
if (request->status() != Request::RequestComplete) {
- LOG(HAL, Error) << "Request not succesfully completed: "
+ LOG(HAL, Error) << "Request " << request->cookie()
+ << " not successfully completed: "
<< request->status();
- status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ abortRequest(descriptor);
+ completeDescriptor(descriptor);
+
+ return;
}
- /* Prepare to call back the Android camera stack. */
- Camera3RequestDescriptor *descriptor =
- reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+ /*
+ * Notify shutter as soon as we have verified we have a valid request.
+ *
+ * \todo The shutter event notification should be sent to the framework
+ * as soon as possible, earlier than request completion time.
+ */
+ uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
+ .get(controls::SensorTimestamp)
+ .value_or(0));
+ notifyShutter(descriptor->frameNumber_, sensorTimestamp);
+
+ LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
+ << descriptor->request_->buffers().size() << " streams";
+
+ /*
+ * Generate the metadata associated with the captured buffers.
+ *
+ * Notify if the metadata generation has failed, but continue processing
+ * buffers and return an empty metadata pack.
+ */
+ descriptor->resultMetadata_ = getResultMetadata(*descriptor);
+ if (!descriptor->resultMetadata_) {
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_RESULT);
- camera3_capture_result_t captureResult = {};
- captureResult.frame_number = descriptor->frameNumber;
- captureResult.num_output_buffers = descriptor->numBuffers;
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
/*
- * \todo Currently we only support one capture buffer. Prepare
- * all of them to be ready once we'll support more.
+ * The camera framework expects an empty metadata pack on error.
+ *
+ * \todo Check that the post-processor code handles this situation
+ * correctly.
*/
- descriptor->buffers[i].acquire_fence = -1;
- descriptor->buffers[i].release_fence = -1;
- descriptor->buffers[i].status = status;
+ descriptor->resultMetadata_ = std::make_unique<CameraMetadata>(0, 0);
}
- captureResult.output_buffers =
- const_cast<const camera3_stream_buffer_t *>(descriptor->buffers);
- if (status == CAMERA3_BUFFER_STATUS_OK) {
- notifyShutter(descriptor->frameNumber,
- buffer->metadata().timestamp);
+ /* Handle post-processing. */
+ MutexLocker locker(descriptor->streamsProcessMutex_);
- captureResult.partial_result = 1;
- resultMetadata = getResultMetadata(descriptor->frameNumber,
- buffer->metadata().timestamp);
- captureResult.result = resultMetadata->get();
+ /*
+ * Queue all the post-processing streams request at once. The completion
+ * slot streamProcessingComplete() can only execute when we are out
+ * this critical section. This helps to handle synchronous errors here
+ * itself.
+ */
+ auto iter = descriptor->pendingStreamsToProcess_.begin();
+ while (iter != descriptor->pendingStreamsToProcess_.end()) {
+ CameraStream *stream = iter->first;
+ Camera3RequestDescriptor::StreamBuffer *buffer = iter->second;
+
+ FrameBuffer *src = request->findBuffer(stream->stream());
+ if (!src) {
+ LOG(HAL, Error) << "Failed to find a source stream buffer";
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ iter = descriptor->pendingStreamsToProcess_.erase(iter);
+ continue;
+ }
+
+ buffer->srcBuffer = src;
+
+ ++iter;
+ int ret = stream->process(buffer);
+ if (ret) {
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ descriptor->pendingStreamsToProcess_.erase(stream);
+
+ /*
+ * If the framebuffer is internal to CameraStream return
+ * it back now that we're done processing it.
+ */
+ if (buffer->internalBuffer)
+ stream->putBuffer(buffer->internalBuffer);
+ }
}
- if (status == CAMERA3_BUFFER_STATUS_ERROR || !captureResult.result) {
- /* \todo Improve error handling. In case we notify an error
- * because the metadata generation fails, a shutter event has
- * already been notified for this frame number before the error
- * is here signalled. Make sure the error path plays well with
- * the camera stack state machine.
- */
- notifyError(descriptor->frameNumber,
- descriptor->buffers[0].stream);
+ if (descriptor->pendingStreamsToProcess_.empty()) {
+ locker.unlock();
+ completeDescriptor(descriptor);
}
+}
- callbacks_->process_capture_result(callbacks_, &captureResult);
+/**
+ * \brief Complete the Camera3RequestDescriptor
+ * \param[in] descriptor The Camera3RequestDescriptor that has completed
+ *
+ * The function marks the Camera3RequestDescriptor as 'complete'. It shall be
+ * called when all the streams in the Camera3RequestDescriptor have completed
+ * capture (or have been generated via post-processing) and the request is ready
+ * to be sent back to the framework.
+ *
+ * \context This function is \threadsafe.
+ */
+void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor)
+{
+ MutexLocker lock(descriptorsMutex_);
+ descriptor->complete_ = true;
- delete descriptor;
- delete buffer;
+ sendCaptureResults();
+}
+
+/**
+ * \brief Sequentially send capture results to the framework
+ *
+ * Iterate over the descriptors queue to send completed descriptors back to the
+ * framework, in the same order as they have been queued. For each complete
+ * descriptor, populate a locally-scoped camera3_capture_result_t from the
+ * descriptor, send the capture result back by calling the
+ * process_capture_result() callback, and remove the descriptor from the queue.
+ * Stop iterating if the descriptor at the front of the queue is not complete.
+ *
+ * This function should never be called directly in the codebase. Use
+ * completeDescriptor() instead.
+ */
+void CameraDevice::sendCaptureResults()
+{
+ while (!descriptors_.empty() && !descriptors_.front()->isPending()) {
+ auto descriptor = std::move(descriptors_.front());
+ descriptors_.pop();
+
+ camera3_capture_result_t captureResult = {};
+
+ captureResult.frame_number = descriptor->frameNumber_;
+
+ if (descriptor->resultMetadata_)
+ captureResult.result =
+ descriptor->resultMetadata_->getMetadata();
+
+ std::vector<camera3_stream_buffer_t> resultBuffers;
+ resultBuffers.reserve(descriptor->buffers_.size());
+
+ for (auto &buffer : descriptor->buffers_) {
+ camera3_buffer_status status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ if (buffer.status == Camera3RequestDescriptor::Status::Success)
+ status = CAMERA3_BUFFER_STATUS_OK;
+
+ /*
+ * Pass the buffer fence back to the camera framework as
+ * a release fence. This instructs the framework to wait
+ * on the acquire fence in case we haven't done so
+ * ourselves for any reason.
+ */
+ resultBuffers.push_back({ buffer.stream->camera3Stream(),
+ buffer.camera3Buffer, status,
+ -1, buffer.fence.release() });
+ }
+
+ captureResult.num_output_buffers = resultBuffers.size();
+ captureResult.output_buffers = resultBuffers.data();
+
+ if (descriptor->status_ == Camera3RequestDescriptor::Status::Success)
+ captureResult.partial_result = 1;
+
+ callbacks_->process_capture_result(callbacks_, &captureResult);
+ }
+}
+
+void CameraDevice::setBufferStatus(Camera3RequestDescriptor::StreamBuffer &streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ streamBuffer.status = status;
+ if (status != Camera3RequestDescriptor::Status::Success) {
+ notifyError(streamBuffer.request->frameNumber_,
+ streamBuffer.stream->camera3Stream(),
+ CAMERA3_MSG_ERROR_BUFFER);
+
+ /* Also set error status on entire request descriptor. */
+ streamBuffer.request->status_ =
+ Camera3RequestDescriptor::Status::Error;
+ }
+}
+
+/**
+ * \brief Handle post-processing completion of a stream in a capture request
+ * \param[in] streamBuffer The StreamBuffer for which processing is complete
+ * \param[in] status Stream post-processing status
+ *
+ * This function is called from the post-processor's thread whenever a camera
+ * stream has finished post processing. The corresponding entry is dropped from
+ * the descriptor's pendingStreamsToProcess_ map.
+ *
+ * If the pendingStreamsToProcess_ map is then empty, all streams requiring to
+ * be generated from post-processing have been completed. Mark the descriptor as
+ * complete using completeDescriptor() in that case.
+ */
+void CameraDevice::streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ setBufferStatus(*streamBuffer, status);
+
+ /*
+ * If the framebuffer is internal to CameraStream return it back now
+ * that we're done processing it.
+ */
+ if (streamBuffer->internalBuffer)
+ streamBuffer->stream->putBuffer(streamBuffer->internalBuffer);
+
+ Camera3RequestDescriptor *request = streamBuffer->request;
+
+ {
+ MutexLocker locker(request->streamsProcessMutex_);
+
+ request->pendingStreamsToProcess_.erase(streamBuffer->stream);
+ if (!request->pendingStreamsToProcess_.empty())
+ return;
+ }
+
+ completeDescriptor(streamBuffer->request);
+}
+
+std::string CameraDevice::logPrefix() const
+{
+ return "'" + camera_->id() + "'";
}
void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
@@ -874,14 +1404,15 @@ void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
callbacks_->notify(callbacks_, &notify);
}
-void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
+void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream,
+ camera3_error_msg_code code) const
{
camera3_notify_msg_t notify = {};
notify.type = CAMERA3_MSG_ERROR;
notify.message.error.error_stream = stream;
notify.message.error.frame_number = frameNumber;
- notify.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+ notify.message.error.error_code = code;
callbacks_->notify(callbacks_, &notify);
}
@@ -889,63 +1420,242 @@ void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
/*
* Produce a set of fixed result metadata.
*/
-std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number,
- int64_t timestamp)
+std::unique_ptr<CameraMetadata>
+CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) const
{
+ const ControlList &metadata = descriptor.request_->metadata();
+ const CameraMetadata &settings = descriptor.settings_;
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
/*
* \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 36 bytes
+ * Currently: 40 entries, 156 bytes
+ *
+ * Reserve more space for the JPEG metadata set by the post-processor.
+ * Currently:
+ * ANDROID_JPEG_GPS_COORDINATES (double x 3) = 24 bytes
+ * ANDROID_JPEG_GPS_PROCESSING_METHOD (byte x 32) = 32 bytes
+ * ANDROID_JPEG_GPS_TIMESTAMP (int64) = 8 bytes
+ * ANDROID_JPEG_SIZE (int32_t) = 4 bytes
+ * ANDROID_JPEG_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_ORIENTATION (int32_t) = 4 bytes
+ * ANDROID_JPEG_THUMBNAIL_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_THUMBNAIL_SIZE (int32 x 2) = 8 bytes
+ * Total bytes for JPEG metadata: 82
*/
std::unique_ptr<CameraMetadata> resultMetadata =
- std::make_unique<CameraMetadata>(15, 50);
+ std::make_unique<CameraMetadata>(88, 166);
if (!resultMetadata->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
+ LOG(HAL, Error) << "Failed to allocate result metadata";
return nullptr;
}
- const uint8_t ae_state = ANDROID_CONTROL_AE_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, &ae_state, 1);
+ /*
+ * \todo The value of the results metadata copied from the settings
+ * will have to be passed to the libcamera::Camera and extracted
+ * from libcamera::Request::metadata.
+ */
- const uint8_t ae_lock = ANDROID_CONTROL_AE_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, &ae_lock, 1);
+ uint8_t value = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ value);
- uint8_t af_state = ANDROID_CONTROL_AF_STATE_INACTIVE;
- resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, &af_state, 1);
+ value = ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE, value);
- const uint8_t awb_state = ANDROID_CONTROL_AWB_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, &awb_state, 1);
+ int32_t value32 = 0;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ value32);
- const uint8_t awb_lock = ANDROID_CONTROL_AWB_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, &awb_lock, 1);
+ value = ANDROID_CONTROL_AE_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, value);
- const uint8_t lens_state = ANDROID_LENS_STATE_STATIONARY;
- resultMetadata->addEntry(ANDROID_LENS_STATE, &lens_state, 1);
+ value = ANDROID_CONTROL_AE_MODE_ON;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_MODE, value);
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, sensorSizes, 4);
+ if (settings.getEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry))
+ /*
+ * \todo Retrieve the AE FPS range from the libcamera metadata.
+ * As libcamera does not support that control, as a temporary
+ * workaround return what the framework asked.
+ */
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32, 2);
+
+ found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry);
+ value = found ? *entry.data.u8 :
+ (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AE_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, value);
+
+ value = ANDROID_CONTROL_AF_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_MODE, value);
+
+ value = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, value);
+
+ value = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AWB_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_MODE, value);
+
+ value = ANDROID_CONTROL_AWB_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, value);
+
+ value = ANDROID_CONTROL_AWB_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, value);
+
+ value = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ resultMetadata->addEntry(ANDROID_CONTROL_CAPTURE_INTENT, value);
+
+ value = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_EFFECT_MODE, value);
+
+ value = ANDROID_CONTROL_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_MODE, value);
- resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
+ value = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+ resultMetadata->addEntry(ANDROID_CONTROL_SCENE_MODE, value);
+
+ value = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, value);
+
+ value = ANDROID_FLASH_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_FLASH_MODE, value);
+
+ value = ANDROID_FLASH_STATE_UNAVAILABLE;
+ resultMetadata->addEntry(ANDROID_FLASH_STATE, value);
+
+ if (settings.getEntry(ANDROID_LENS_APERTURE, &entry))
+ resultMetadata->addEntry(ANDROID_LENS_APERTURE, entry.data.f, 1);
+
+ float focal_length = 1.0;
+ resultMetadata->addEntry(ANDROID_LENS_FOCAL_LENGTH, focal_length);
+
+ value = ANDROID_LENS_STATE_STATIONARY;
+ resultMetadata->addEntry(ANDROID_LENS_STATE, value);
+
+ value = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ value);
+
+ value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32);
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry))
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ entry.data.u8, 1);
+
+ value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ value);
+
+ value = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, value);
+
+ value = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
+ resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER, value);
+
+ value = ANDROID_NOISE_REDUCTION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_NOISE_REDUCTION_MODE, value);
/* 33.3 msec */
const int64_t rolling_shutter_skew = 33300000;
resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- &rolling_shutter_skew, 1);
+ rolling_shutter_skew);
+
+ /* Add metadata tags reported by libcamera. */
+ const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
+ resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
+
+ const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+ if (pipelineDepth)
+ resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
+ *pipelineDepth);
+
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+ *exposureTime * 1000ULL);
+
+ const auto &frameDuration = metadata.get(controls::FrameDuration);
+ if (frameDuration)
+ resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
+ *frameDuration * 1000);
+
+ const auto &faceDetectRectangles =
+ metadata.get(controls::draft::FaceDetectFaceRectangles);
+ if (faceDetectRectangles) {
+ std::vector<int32_t> flatRectangles;
+ for (const Rectangle &rect : *faceDetectRectangles) {
+ flatRectangles.push_back(rect.x);
+ flatRectangles.push_back(rect.y);
+ flatRectangles.push_back(rect.x + rect.width);
+ flatRectangles.push_back(rect.y + rect.height);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
+ }
- /* 16.6 msec */
- const int64_t exposure_time = 16600000;
- resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
- &exposure_time, 1);
+ const auto &faceDetectFaceScores =
+ metadata.get(controls::draft::FaceDetectFaceScores);
+ if (faceDetectRectangles && faceDetectFaceScores) {
+ if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceScores->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
+ *faceDetectFaceScores);
+ }
- const uint8_t lens_shading_map_mode =
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
- resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- &lens_shading_map_mode, 1);
+ const auto &faceDetectFaceLandmarks =
+ metadata.get(controls::draft::FaceDetectFaceLandmarks);
+ if (faceDetectRectangles && faceDetectFaceLandmarks) {
+ size_t expectedLandmarks = faceDetectRectangles->size() * 3;
+ if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
+ << "Expected: " << expectedLandmarks
+ << ", got: " << faceDetectFaceLandmarks->size();
+ }
- const uint8_t scene_flicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
- resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER,
- &scene_flicker, 1);
+ std::vector<int32_t> androidLandmarks;
+ for (const Point &landmark : *faceDetectFaceLandmarks) {
+ androidLandmarks.push_back(landmark.x);
+ androidLandmarks.push_back(landmark.y);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
+ }
+
+ const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
+ if (faceDetectRectangles && faceDetectFaceIds) {
+ if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceIds->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
+ }
+
+ const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+ if (scalerCrop) {
+ const Rectangle &crop = *scalerCrop;
+ int32_t cropRect[] = {
+ crop.x, crop.y, static_cast<int32_t>(crop.width),
+ static_cast<int32_t>(crop.height),
+ };
+ resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
+ }
+
+ const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+ if (testPatternMode)
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ *testPatternMode);
/*
* Return the result metadata pack even is not valid: get() will return
@@ -955,5 +1665,12 @@ std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number
LOG(HAL, Error) << "Failed to construct result metadata";
}
+ if (resultMetadata->resized()) {
+ auto [entryCount, dataCount] = resultMetadata->usage();
+ LOG(HAL, Info)
+ << "Result metadata resized: " << entryCount
+ << " entries and " << dataCount << " bytes used";
+ }
+
return resultMetadata;
}
diff --git a/src/android/camera_device.h b/src/android/camera_device.h
index 55eac317..194ca303 100644
--- a/src/android/camera_device.h
+++ b/src/android/camera_device.h
@@ -2,35 +2,62 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.h - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
-#ifndef __ANDROID_CAMERA_DEVICE_H__
-#define __ANDROID_CAMERA_DEVICE_H__
+#pragma once
+
+#include <map>
#include <memory>
+#include <queue>
+#include <vector>
#include <hardware/camera3.h>
-#include <libcamera/buffer.h>
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/message.h>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include "message.h"
+#include "camera_capabilities.h"
+#include "camera_metadata.h"
+#include "camera_stream.h"
+#include "hal_framebuffer.h"
+#include "jpeg/encoder.h"
-class CameraMetadata;
+class Camera3RequestDescriptor;
+struct CameraConfigData;
-class CameraDevice
+class CameraDevice : protected libcamera::Loggable
{
public:
- CameraDevice(unsigned int id, const std::shared_ptr<libcamera::Camera> &camera);
+ static std::unique_ptr<CameraDevice> create(unsigned int id,
+ std::shared_ptr<libcamera::Camera> cam);
~CameraDevice();
+ int initialize(const CameraConfigData *cameraConfigData);
+
int open(const hw_module_t *hardwareModule);
void close();
+ void flush();
unsigned int id() const { return id_; }
camera3_device_t *camera3Device() { return &camera3Device_; }
+ const CameraCapabilities *capabilities() const { return &capabilities_; }
+ const std::shared_ptr<libcamera::Camera> &camera() const { return camera_; }
+
+ const std::string &maker() const { return maker_; }
+ const std::string &model() const { return model_; }
+ int facing() const { return facing_; }
+ int orientation() const { return orientation_; }
+ unsigned int maxJpegBufferSize() const;
void setCallbacks(const camera3_callback_ops_t *callbacks);
const camera_metadata_t *getStaticMetadata();
@@ -38,33 +65,67 @@ public:
int configureStreams(camera3_stream_configuration_t *stream_list);
int processCaptureRequest(camera3_capture_request_t *request);
void requestComplete(libcamera::Request *request);
+ void streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *bufferStream,
+ Camera3RequestDescriptor::Status status);
+
+protected:
+ std::string logPrefix() const override;
private:
- struct Camera3RequestDescriptor {
- Camera3RequestDescriptor(unsigned int frameNumber,
- unsigned int numBuffers);
- ~Camera3RequestDescriptor();
-
- uint32_t frameNumber;
- uint32_t numBuffers;
- camera3_stream_buffer_t *buffers;
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraDevice)
+
+ CameraDevice(unsigned int id, std::shared_ptr<libcamera::Camera> camera);
+
+ enum class State {
+ Stopped,
+ Flushing,
+ Running,
};
+ void stop() LIBCAMERA_TSA_EXCLUDES(stateMutex_);
+
+ std::unique_ptr<HALFrameBuffer>
+ createFrameBuffer(const buffer_handle_t camera3buffer,
+ libcamera::PixelFormat pixelFormat,
+ const libcamera::Size &size);
+ void abortRequest(Camera3RequestDescriptor *descriptor) const;
+ bool isValidRequest(camera3_capture_request_t *request) const;
void notifyShutter(uint32_t frameNumber, uint64_t timestamp);
- void notifyError(uint32_t frameNumber, camera3_stream_t *stream);
- std::unique_ptr<CameraMetadata> getResultMetadata(int frame_number,
- int64_t timestamp);
+ void notifyError(uint32_t frameNumber, camera3_stream_t *stream,
+ camera3_error_msg_code code) const;
+ int processControls(Camera3RequestDescriptor *descriptor);
+ void completeDescriptor(Camera3RequestDescriptor *descriptor)
+ LIBCAMERA_TSA_EXCLUDES(descriptorsMutex_);
+ void sendCaptureResults() LIBCAMERA_TSA_REQUIRES(descriptorsMutex_);
+ void setBufferStatus(Camera3RequestDescriptor::StreamBuffer &buffer,
+ Camera3RequestDescriptor::Status status);
+ std::unique_ptr<CameraMetadata> getResultMetadata(
+ const Camera3RequestDescriptor &descriptor) const;
unsigned int id_;
camera3_device_t camera3Device_;
- bool running_;
+ libcamera::Mutex stateMutex_; /* Protects access to the camera state. */
+ State state_ LIBCAMERA_TSA_GUARDED_BY(stateMutex_);
+
std::shared_ptr<libcamera::Camera> camera_;
std::unique_ptr<libcamera::CameraConfiguration> config_;
+ CameraCapabilities capabilities_;
- CameraMetadata *staticMetadata_;
- std::map<unsigned int, CameraMetadata *> requestTemplates_;
+ std::map<unsigned int, std::unique_ptr<CameraMetadata>> requestTemplates_;
const camera3_callback_ops_t *callbacks_;
-};
-#endif /* __ANDROID_CAMERA_DEVICE_H__ */
+ std::vector<CameraStream> streams_;
+
+ libcamera::Mutex descriptorsMutex_ LIBCAMERA_TSA_ACQUIRED_AFTER(stateMutex_);
+ std::queue<std::unique_ptr<Camera3RequestDescriptor>> descriptors_
+ LIBCAMERA_TSA_GUARDED_BY(descriptorsMutex_);
+
+ std::string maker_;
+ std::string model_;
+
+ int facing_;
+ int orientation_;
+
+ CameraMetadata lastSettings_;
+};
diff --git a/src/android/camera_hal_config.cpp b/src/android/camera_hal_config.cpp
new file mode 100644
index 00000000..7ef451ef
--- /dev/null
+++ b/src/android/camera_hal_config.cpp
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera HAL configuration file manager
+ */
+#include "camera_hal_config.h"
+
+#include <stdlib.h>
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/yaml_parser.h"
+
+#include <hardware/camera3.h>
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(HALConfig)
+
+class CameraHalConfig::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraHalConfig)
+
+public:
+ Private();
+
+ int parseConfigFile(File &file, std::map<std::string, CameraConfigData> *cameras);
+
+private:
+ int parseCameraConfigData(const std::string &cameraId, const YamlObject &);
+ int parseLocation(const YamlObject &, CameraConfigData &cameraConfigData);
+ int parseRotation(const YamlObject &, CameraConfigData &cameraConfigData);
+
+ std::map<std::string, CameraConfigData> *cameras_;
+};
+
+CameraHalConfig::Private::Private()
+{
+}
+
+int CameraHalConfig::Private::parseConfigFile(File &file,
+ std::map<std::string, CameraConfigData> *cameras)
+{
+ /*
+ * Parse the HAL properties.
+ *
+ * Each camera properties block is a list of properties associated
+ * with the ID (as assembled by CameraSensor::generateId()) of the
+ * camera they refer to.
+ *
+ * cameras:
+ * "camera0 id":
+ * location: value
+ * rotation: value
+ * ...
+ *
+ * "camera1 id":
+ * location: value
+ * rotation: value
+ * ...
+ */
+
+ cameras_ = cameras;
+
+ std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+ if (!root)
+ return -EINVAL;
+
+ if (!root->isDictionary())
+ return -EINVAL;
+
+ /* Parse property "cameras" */
+ if (!root->contains("cameras"))
+ return -EINVAL;
+
+ const YamlObject &yamlObjectCameras = (*root)["cameras"];
+
+ if (!yamlObjectCameras.isDictionary())
+ return -EINVAL;
+
+ for (const auto &[cameraId, configData] : yamlObjectCameras.asDict()) {
+ if (parseCameraConfigData(cameraId, configData))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseCameraConfigData(const std::string &cameraId,
+ const YamlObject &cameraObject)
+
+{
+ if (!cameraObject.isDictionary())
+ return -EINVAL;
+
+ CameraConfigData &cameraConfigData = (*cameras_)[cameraId];
+
+ /* Parse property "location" */
+ if (parseLocation(cameraObject, cameraConfigData))
+ return -EINVAL;
+
+ /* Parse property "rotation" */
+ if (parseRotation(cameraObject, cameraConfigData))
+ return -EINVAL;
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseLocation(const YamlObject &cameraObject,
+ CameraConfigData &cameraConfigData)
+{
+ if (!cameraObject.contains("location"))
+ return -EINVAL;
+
+ std::string location = cameraObject["location"].get<std::string>("");
+
+ if (location == "front")
+ cameraConfigData.facing = CAMERA_FACING_FRONT;
+ else if (location == "back")
+ cameraConfigData.facing = CAMERA_FACING_BACK;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int CameraHalConfig::Private::parseRotation(const YamlObject &cameraObject,
+ CameraConfigData &cameraConfigData)
+{
+ if (!cameraObject.contains("rotation"))
+ return -EINVAL;
+
+ int32_t rotation = cameraObject["rotation"].get<int32_t>(-1);
+
+ if (rotation < 0 || rotation >= 360) {
+ LOG(HALConfig, Error)
+ << "Unknown rotation: " << rotation;
+ return -EINVAL;
+ }
+
+ cameraConfigData.rotation = rotation;
+ return 0;
+}
+
+CameraHalConfig::CameraHalConfig()
+ : Extensible(std::make_unique<Private>()), exists_(false), valid_(false)
+{
+ parseConfigurationFile();
+}
+
+/*
+ * Open the HAL configuration file and validate its content.
+ * Return 0 on success, a negative error code otherwise
+ * retval -ENOENT The configuration file is not available
+ * retval -EINVAL The configuration file is available but not valid
+ */
+int CameraHalConfig::parseConfigurationFile()
+{
+ std::string filePath = LIBCAMERA_SYSCONF_DIR "/camera_hal.yaml";
+
+ File file(filePath);
+ if (!file.exists()) {
+ LOG(HALConfig, Debug)
+ << "Configuration file: \"" << filePath << "\" not found";
+ return -ENOENT;
+ }
+
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ int ret = file.error();
+ LOG(HALConfig, Error) << "Failed to open configuration file "
+ << filePath << ": " << strerror(-ret);
+ return ret;
+ }
+
+ exists_ = true;
+
+ int ret = _d()->parseConfigFile(file, &cameras_);
+ if (ret)
+ return -EINVAL;
+
+ valid_ = true;
+
+ for (const auto &c : cameras_) {
+ const std::string &cameraId = c.first;
+ const CameraConfigData &camera = c.second;
+ LOG(HALConfig, Debug) << "'" << cameraId << "' "
+ << "(" << camera.facing << ")["
+ << camera.rotation << "]";
+ }
+
+ return 0;
+}
+
+const CameraConfigData *CameraHalConfig::cameraConfigData(const std::string &cameraId) const
+{
+ const auto &it = cameras_.find(cameraId);
+ if (it == cameras_.end()) {
+ LOG(HALConfig, Error)
+ << "Camera '" << cameraId
+ << "' not described in the HAL configuration file";
+ return nullptr;
+ }
+
+ return &it->second;
+}
diff --git a/src/android/camera_hal_config.h b/src/android/camera_hal_config.h
new file mode 100644
index 00000000..a4bedb6e
--- /dev/null
+++ b/src/android/camera_hal_config.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Camera HAL configuration file manager
+ */
+
+#pragma once
+
+#include <map>
+#include <string>
+
+#include <libcamera/base/class.h>
+
+struct CameraConfigData {
+ int facing = -1;
+ int rotation = -1;
+};
+
+class CameraHalConfig final : public libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ CameraHalConfig();
+
+ bool exists() const { return exists_; }
+ bool isValid() const { return valid_; }
+
+ const CameraConfigData *cameraConfigData(const std::string &cameraId) const;
+
+private:
+ bool exists_;
+ bool valid_;
+ std::map<std::string, CameraConfigData> cameras_;
+
+ int parseConfigurationFile();
+};
diff --git a/src/android/camera_hal_manager.cpp b/src/android/camera_hal_manager.cpp
index 5bd3bdba..7500c749 100644
--- a/src/android/camera_hal_manager.cpp
+++ b/src/android/camera_hal_manager.cpp
@@ -2,20 +2,21 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.cpp - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
#include "camera_hal_manager.h"
-#include <libcamera/camera.h>
+#include <libcamera/base/log.h>
-#include "log.h"
+#include <libcamera/camera.h>
+#include <libcamera/property_ids.h>
#include "camera_device.h"
using namespace libcamera;
-LOG_DECLARE_CATEGORY(HAL);
+LOG_DECLARE_CATEGORY(HAL)
/*
* \class CameraHalManager
@@ -28,71 +29,224 @@ LOG_DECLARE_CATEGORY(HAL);
*/
CameraHalManager::CameraHalManager()
- : cameraManager_(nullptr)
+ : cameraManager_(nullptr), callbacks_(nullptr), numInternalCameras_(0),
+ nextExternalCameraId_(firstExternalCameraId_)
{
}
-CameraHalManager::~CameraHalManager()
-{
- cameras_.clear();
+/* CameraManager calls stop() in the destructor. */
+CameraHalManager::~CameraHalManager() = default;
- if (cameraManager_) {
- cameraManager_->stop();
- delete cameraManager_;
- cameraManager_ = nullptr;
- }
+/* static */
+CameraHalManager *CameraHalManager::instance()
+{
+ static CameraHalManager *cameraHalManager = new CameraHalManager;
+ return cameraHalManager;
}
int CameraHalManager::init()
{
- cameraManager_ = new CameraManager();
+ cameraManager_ = std::make_unique<CameraManager>();
+
+ /*
+ * If the configuration file is not available the HAL only supports
+ * external cameras. If it exists but it's not valid then error out.
+ */
+ if (halConfig_.exists() && !halConfig_.isValid()) {
+ LOG(HAL, Error) << "HAL configuration file is not valid";
+ return -EINVAL;
+ }
+
+ /* Support camera hotplug. */
+ cameraManager_->cameraAdded.connect(this, &CameraHalManager::cameraAdded);
+ cameraManager_->cameraRemoved.connect(this, &CameraHalManager::cameraRemoved);
int ret = cameraManager_->start();
if (ret) {
LOG(HAL, Error) << "Failed to start camera manager: "
<< strerror(-ret);
- delete cameraManager_;
- cameraManager_ = nullptr;
+ cameraManager_.reset();
return ret;
}
+ return 0;
+}
+
+std::tuple<CameraDevice *, int>
+CameraHalManager::open(unsigned int id, const hw_module_t *hardwareModule)
+{
+ MutexLocker locker(mutex_);
+
+ if (!callbacks_) {
+ LOG(HAL, Error) << "Can't open camera before callbacks are set";
+ return { nullptr, -ENODEV };
+ }
+
+ CameraDevice *camera = cameraDeviceFromHalId(id);
+ if (!camera) {
+ LOG(HAL, Error) << "Invalid camera id '" << id << "'";
+ return { nullptr, -ENODEV };
+ }
+
+ int ret = camera->open(hardwareModule);
+ if (ret)
+ return { nullptr, ret };
+
+ LOG(HAL, Info) << "Open camera '" << id << "'";
+
+ return { camera, 0 };
+}
+
+void CameraHalManager::cameraAdded(std::shared_ptr<Camera> cam)
+{
+ unsigned int id;
+ bool isCameraExternal = false;
+ bool isCameraNew = false;
+
+ MutexLocker locker(mutex_);
+
/*
- * For each Camera registered in the system, a CameraDevice
- * gets created here to wraps a libcamera Camera instance.
+ * Each camera is assigned a unique integer ID when it is seen for the
+ * first time. If the camera has been seen before, the previous ID is
+ * re-used.
*
- * \todo Support camera hotplug.
+ * IDs starts from '0' for internal cameras and '1000' for external
+ * cameras.
*/
- unsigned int index = 0;
- for (auto &cam : cameraManager_->cameras()) {
- CameraDevice *camera = new CameraDevice(index, cam);
- cameras_.emplace_back(camera);
+ auto iter = cameraIdsMap_.find(cam->id());
+ if (iter != cameraIdsMap_.end()) {
+ id = iter->second;
+ if (id >= firstExternalCameraId_)
+ isCameraExternal = true;
+ } else {
+ isCameraNew = true;
- ++index;
+ /*
+ * Now check if this is an external camera and assign
+ * its id accordingly.
+ */
+ if (cameraLocation(cam.get()) == properties::CameraLocationExternal) {
+ isCameraExternal = true;
+ id = nextExternalCameraId_;
+ } else {
+ id = numInternalCameras_;
+ }
}
- return 0;
+ /*
+ * The configuration file must be valid, and contain a corresponding
+ * entry for internal cameras. External cameras can be initialized
+ * without configuration file.
+ */
+ if (!isCameraExternal && !halConfig_.exists()) {
+ LOG(HAL, Error)
+ << "HAL configuration file is mandatory for internal cameras."
+ << " Camera " << cam->id() << " failed to load";
+ return;
+ }
+
+ const CameraConfigData *cameraConfigData = halConfig_.cameraConfigData(cam->id());
+
+ /*
+ * Some cameras whose location is reported by libcamera as external may
+ * actually be internal to the device. This is common with UVC cameras
+ * that are integrated in a laptop. In that case the real location
+ * should be specified in the configuration file.
+ *
+ * If the camera location is external and a configuration entry exists
+ * for it, override its location.
+ */
+ if (isCameraNew && isCameraExternal) {
+ if (cameraConfigData && cameraConfigData->facing != -1) {
+ isCameraExternal = false;
+ id = numInternalCameras_;
+ }
+ }
+
+ if (!isCameraExternal && !cameraConfigData) {
+ LOG(HAL, Error)
+ << "HAL configuration entry for internal camera "
+ << cam->id() << " is missing";
+ return;
+ }
+
+ /* Create a CameraDevice instance to wrap the libcamera Camera. */
+ std::unique_ptr<CameraDevice> camera = CameraDevice::create(id, cam);
+
+ int ret = camera->initialize(cameraConfigData);
+ if (ret) {
+ LOG(HAL, Error) << "Failed to initialize camera: " << cam->id();
+ return;
+ }
+
+ if (isCameraNew) {
+ cameraIdsMap_.emplace(cam->id(), id);
+
+ if (isCameraExternal)
+ nextExternalCameraId_++;
+ else
+ numInternalCameras_++;
+ }
+
+ cameras_.emplace_back(std::move(camera));
+
+ if (callbacks_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_PRESENT);
+
+ LOG(HAL, Debug) << "Camera ID: " << id << " added successfully.";
}
-CameraDevice *CameraHalManager::open(unsigned int id,
- const hw_module_t *hardwareModule)
+void CameraHalManager::cameraRemoved(std::shared_ptr<Camera> cam)
{
- if (id >= numCameras()) {
- LOG(HAL, Error) << "Invalid camera id '" << id << "'";
- return nullptr;
- }
+ MutexLocker locker(mutex_);
- CameraDevice *camera = cameras_[id].get();
- if (camera->open(hardwareModule))
- return nullptr;
+ auto iter = std::find_if(cameras_.begin(), cameras_.end(),
+ [&cam](const std::unique_ptr<CameraDevice> &camera) {
+ return cam == camera->camera();
+ });
+ if (iter == cameras_.end())
+ return;
- LOG(HAL, Info) << "Open camera '" << id << "'";
+ /*
+ * CAMERA_DEVICE_STATUS_NOT_PRESENT should be set for external cameras
+ * only.
+ */
+ unsigned int id = (*iter)->id();
+ if (id >= firstExternalCameraId_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_NOT_PRESENT);
+
+ /*
+ * \todo Check if the camera is already open and running.
+ * Inform the framework about its absence before deleting its
+ * reference here.
+ */
+ cameras_.erase(iter);
+
+ LOG(HAL, Debug) << "Camera ID: " << id << " removed successfully.";
+}
+
+int32_t CameraHalManager::cameraLocation(const Camera *cam)
+{
+ return cam->properties().get(properties::Location).value_or(-1);
+}
- return camera;
+CameraDevice *CameraHalManager::cameraDeviceFromHalId(unsigned int id)
+{
+ auto iter = std::find_if(cameras_.begin(), cameras_.end(),
+ [id](const std::unique_ptr<CameraDevice> &camera) {
+ return camera->id() == id;
+ });
+ if (iter == cameras_.end())
+ return nullptr;
+
+ return iter->get();
}
unsigned int CameraHalManager::numCameras() const
{
- return cameraManager_->cameras().size();
+ return numInternalCameras_;
}
int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
@@ -100,17 +254,17 @@ int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
if (!info)
return -EINVAL;
- if (id >= numCameras()) {
+ MutexLocker locker(mutex_);
+
+ CameraDevice *camera = cameraDeviceFromHalId(id);
+ if (!camera) {
LOG(HAL, Error) << "Invalid camera id '" << id << "'";
return -EINVAL;
}
- CameraDevice *camera = cameras_[id].get();
-
- /* \todo Get these info dynamically inspecting the camera module. */
- info->facing = id ? CAMERA_FACING_FRONT : CAMERA_FACING_BACK;
- info->orientation = 0;
- info->device_version = 0;
+ info->facing = camera->facing();
+ info->orientation = camera->orientation();
+ info->device_version = CAMERA_DEVICE_API_VERSION_3_3;
info->resource_cost = 0;
info->static_camera_characteristics = camera->getStaticMetadata();
info->conflicting_devices = nullptr;
@@ -118,3 +272,25 @@ int CameraHalManager::getCameraInfo(unsigned int id, struct camera_info *info)
return 0;
}
+
+void CameraHalManager::setCallbacks(const camera_module_callbacks_t *callbacks)
+{
+ callbacks_ = callbacks;
+
+ MutexLocker locker(mutex_);
+
+ /*
+ * Some external cameras may have been identified before the callbacks_
+ * were set. Iterate all existing external cameras and mark them as
+ * CAMERA_DEVICE_STATUS_PRESENT explicitly.
+ *
+ * Internal cameras are already assumed to be present at module load
+ * time by the Android framework.
+ */
+ for (const std::unique_ptr<CameraDevice> &camera : cameras_) {
+ unsigned int id = camera->id();
+ if (id >= firstExternalCameraId_)
+ callbacks_->camera_device_status_change(callbacks_, id,
+ CAMERA_DEVICE_STATUS_PRESENT);
+ }
+}
diff --git a/src/android/camera_hal_manager.h b/src/android/camera_hal_manager.h
index 94d8f005..836a8daf 100644
--- a/src/android/camera_hal_manager.h
+++ b/src/android/camera_hal_manager.h
@@ -2,40 +2,67 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_hal_manager.h - libcamera Android Camera Manager
+ * libcamera Android Camera Manager
*/
-#ifndef __ANDROID_CAMERA_MANAGER_H__
-#define __ANDROID_CAMERA_MANAGER_H__
+#pragma once
+
+#include <map>
#include <stddef.h>
+#include <tuple>
#include <vector>
+#include <hardware/camera_common.h>
#include <hardware/hardware.h>
#include <system/camera_metadata.h>
+#include <libcamera/base/class.h>
+#include <libcamera/base/mutex.h>
+
#include <libcamera/camera_manager.h>
+#include "camera_hal_config.h"
+
class CameraDevice;
class CameraHalManager
{
public:
- CameraHalManager();
~CameraHalManager();
+ static CameraHalManager *instance();
+
int init();
- CameraDevice *open(unsigned int id, const hw_module_t *module);
+ std::tuple<CameraDevice *, int>
+ open(unsigned int id, const hw_module_t *module);
unsigned int numCameras() const;
int getCameraInfo(unsigned int id, struct camera_info *info);
+ void setCallbacks(const camera_module_callbacks_t *callbacks);
private:
- camera_metadata_t *getStaticMetadata(unsigned int id);
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraHalManager)
- libcamera::CameraManager *cameraManager_;
+ static constexpr unsigned int firstExternalCameraId_ = 1000;
- std::vector<std::unique_ptr<CameraDevice>> cameras_;
-};
+ CameraHalManager();
-#endif /* __ANDROID_CAMERA_MANAGER_H__ */
+ static int32_t cameraLocation(const libcamera::Camera *cam);
+
+ void cameraAdded(std::shared_ptr<libcamera::Camera> cam);
+ void cameraRemoved(std::shared_ptr<libcamera::Camera> cam);
+
+ CameraDevice *cameraDeviceFromHalId(unsigned int id) LIBCAMERA_TSA_REQUIRES(mutex_);
+
+ std::unique_ptr<libcamera::CameraManager> cameraManager_;
+ CameraHalConfig halConfig_;
+
+ const camera_module_callbacks_t *callbacks_;
+ std::vector<std::unique_ptr<CameraDevice>> cameras_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ std::map<std::string, unsigned int> cameraIdsMap_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ libcamera::Mutex mutex_;
+
+ unsigned int numInternalCameras_;
+ unsigned int nextExternalCameraId_;
+};
diff --git a/src/android/camera_metadata.cpp b/src/android/camera_metadata.cpp
index 76965108..99f033f9 100644
--- a/src/android/camera_metadata.cpp
+++ b/src/android/camera_metadata.cpp
@@ -2,34 +2,157 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.cpp - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
#include "camera_metadata.h"
-#include "log.h"
+#include <libcamera/base/log.h>
using namespace libcamera;
-LOG_DEFINE_CATEGORY(CameraMetadata);
+LOG_DEFINE_CATEGORY(CameraMetadata)
+
+CameraMetadata::CameraMetadata()
+ : metadata_(nullptr), valid_(false), resized_(false)
+{
+}
CameraMetadata::CameraMetadata(size_t entryCapacity, size_t dataCapacity)
+ : resized_(false)
{
metadata_ = allocate_camera_metadata(entryCapacity, dataCapacity);
valid_ = metadata_ != nullptr;
}
+CameraMetadata::CameraMetadata(const camera_metadata_t *metadata)
+ : resized_(false)
+{
+ metadata_ = clone_camera_metadata(metadata);
+ valid_ = metadata_ != nullptr;
+}
+
+CameraMetadata::CameraMetadata(const CameraMetadata &other)
+ : CameraMetadata(other.getMetadata())
+{
+}
+
CameraMetadata::~CameraMetadata()
{
if (metadata_)
free_camera_metadata(metadata_);
}
-bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count)
+CameraMetadata &CameraMetadata::operator=(const CameraMetadata &other)
+{
+ if (this == &other)
+ return *this;
+
+ if (metadata_)
+ free_camera_metadata(metadata_);
+
+ metadata_ = clone_camera_metadata(other.getMetadata());
+ valid_ = metadata_ != nullptr;
+
+ return *this;
+}
+
+std::tuple<size_t, size_t> CameraMetadata::usage() const
+{
+ size_t currentEntryCount = get_camera_metadata_entry_count(metadata_);
+ size_t currentDataCount = get_camera_metadata_data_count(metadata_);
+
+ return { currentEntryCount, currentDataCount };
+}
+
+bool CameraMetadata::getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const
+{
+ if (find_camera_metadata_ro_entry(metadata_, tag, entry))
+ return false;
+
+ return true;
+}
+
+/*
+ * \brief Resize the metadata container, if necessary
+ * \param[in] count Number of entries to add to the container
+ * \param[in] size Total size of entries to add, in bytes
+ * \return True if resize was successful or unnecessary, false otherwise
+ */
+bool CameraMetadata::resize(size_t count, size_t size)
+{
+ if (!valid_)
+ return false;
+
+ if (!count && !size)
+ return true;
+
+ size_t currentEntryCount = get_camera_metadata_entry_count(metadata_);
+ size_t currentEntryCapacity = get_camera_metadata_entry_capacity(metadata_);
+ size_t newEntryCapacity = currentEntryCapacity < currentEntryCount + count ?
+ currentEntryCapacity * 2 : currentEntryCapacity;
+
+ size_t currentDataCount = get_camera_metadata_data_count(metadata_);
+ size_t currentDataCapacity = get_camera_metadata_data_capacity(metadata_);
+ size_t newDataCapacity = currentDataCapacity < currentDataCount + size ?
+ currentDataCapacity * 2 : currentDataCapacity;
+
+ if (newEntryCapacity > currentEntryCapacity ||
+ newDataCapacity > currentDataCapacity) {
+ camera_metadata_t *oldMetadata = metadata_;
+ metadata_ = allocate_camera_metadata(newEntryCapacity, newDataCapacity);
+ if (!metadata_) {
+ metadata_ = oldMetadata;
+ return false;
+ }
+
+ LOG(CameraMetadata, Info)
+ << "Resized: old entry capacity " << currentEntryCapacity
+ << ", old data capacity " << currentDataCapacity
+ << ", new entry capacity " << newEntryCapacity
+ << ", new data capacity " << newDataCapacity;
+
+ append_camera_metadata(metadata_, oldMetadata);
+ free_camera_metadata(oldMetadata);
+
+ resized_ = true;
+ }
+
+ return true;
+}
+
+template<> bool CameraMetadata::entryContains(uint32_t tag, uint8_t value) const
+{
+ camera_metadata_ro_entry_t entry;
+ if (!getEntry(tag, &entry))
+ return false;
+
+ for (unsigned int i = 0; i < entry.count; i++) {
+ if (entry.data.u8[i] == value)
+ return true;
+ }
+
+ return false;
+}
+
+bool CameraMetadata::hasEntry(uint32_t tag) const
+{
+ camera_metadata_ro_entry_t entry;
+ return getEntry(tag, &entry);
+}
+
+bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize)
{
if (!valid_)
return false;
+ if (!resize(1, count * elementSize)) {
+ LOG(CameraMetadata, Error) << "Failed to resize";
+ valid_ = false;
+ return false;
+ }
+
if (!add_camera_metadata_entry(metadata_, tag, data, count))
return true;
@@ -46,7 +169,63 @@ bool CameraMetadata::addEntry(uint32_t tag, const void *data, size_t count)
return false;
}
-camera_metadata_t *CameraMetadata::get()
+bool CameraMetadata::updateEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize)
+{
+ if (!valid_)
+ return false;
+
+ camera_metadata_entry_t entry;
+ int ret = find_camera_metadata_entry(metadata_, tag, &entry);
+ if (ret) {
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Error)
+ << "Failed to update tag "
+ << (name ? name : "<unknown>") << ": not present";
+ return false;
+ }
+
+ if (camera_metadata_type_size[entry.type] != elementSize) {
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Fatal)
+ << "Invalid element size for tag "
+ << (name ? name : "<unknown>");
+ return false;
+ }
+
+ size_t oldSize =
+ calculate_camera_metadata_entry_data_size(entry.type,
+ entry.count);
+ size_t newSize =
+ calculate_camera_metadata_entry_data_size(entry.type,
+ count);
+ size_t sizeIncrement = newSize - oldSize > 0 ? newSize - oldSize : 0;
+ if (!resize(0, sizeIncrement)) {
+ LOG(CameraMetadata, Error) << "Failed to resize";
+ valid_ = false;
+ return false;
+ }
+
+ ret = update_camera_metadata_entry(metadata_, entry.index, data,
+ count, nullptr);
+ if (!ret)
+ return true;
+
+ const char *name = get_camera_metadata_tag_name(tag);
+ LOG(CameraMetadata, Error)
+ << "Failed to update tag " << (name ? name : "<unknown>");
+
+ valid_ = false;
+
+ return false;
+}
+
+camera_metadata_t *CameraMetadata::getMetadata()
+{
+ return valid_ ? metadata_ : nullptr;
+}
+
+const camera_metadata_t *CameraMetadata::getMetadata() const
{
return valid_ ? metadata_ : nullptr;
}
diff --git a/src/android/camera_metadata.h b/src/android/camera_metadata.h
index 75a9d706..474f280c 100644
--- a/src/android/camera_metadata.h
+++ b/src/android/camera_metadata.h
@@ -2,29 +2,111 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_metadata.h - libcamera Android Camera Metadata Helper
+ * libcamera Android Camera Metadata Helper
*/
-#ifndef __ANDROID_CAMERA_METADATA_H__
-#define __ANDROID_CAMERA_METADATA_H__
+
+#pragma once
#include <stdint.h>
+#include <vector>
#include <system/camera_metadata.h>
class CameraMetadata
{
public:
+ CameraMetadata();
CameraMetadata(size_t entryCapacity, size_t dataCapacity);
+ CameraMetadata(const camera_metadata_t *metadata);
+ CameraMetadata(const CameraMetadata &other);
~CameraMetadata();
- bool isValid() { return valid_; }
- bool addEntry(uint32_t tag, const void *data, size_t data_count);
+ CameraMetadata &operator=(const CameraMetadata &other);
+
+ std::tuple<size_t, size_t> usage() const;
+ bool resized() const { return resized_; }
+
+ bool isValid() const { return valid_; }
+ bool getEntry(uint32_t tag, camera_metadata_ro_entry_t *entry) const;
+
+ template<typename T> bool entryContains(uint32_t tag, T value) const;
+
+ bool hasEntry(uint32_t tag) const;
+
+ template<typename T,
+ std::enable_if_t<std::is_arithmetic_v<T> ||
+ std::is_enum_v<T>> * = nullptr>
+ bool setEntry(uint32_t tag, const T &data)
+ {
+ if (hasEntry(tag))
+ return updateEntry(tag, &data, 1, sizeof(T));
+ else
+ return addEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T,
+ std::enable_if_t<std::is_arithmetic_v<T> ||
+ std::is_enum_v<T>> * = nullptr>
+ bool addEntry(uint32_t tag, const T &data)
+ {
+ return addEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T, size_t size>
+ bool addEntry(uint32_t tag, const T (&data)[size])
+ {
+ return addEntry(tag, data, size, sizeof(T));
+ }
- camera_metadata_t *get();
+ template<typename S,
+ typename T = typename S::value_type>
+ bool addEntry(uint32_t tag, const S &data)
+ {
+ return addEntry(tag, data.data(), data.size(), sizeof(T));
+ }
+
+ template<typename T>
+ bool addEntry(uint32_t tag, const T *data, size_t count)
+ {
+ return addEntry(tag, data, count, sizeof(T));
+ }
+
+ template<typename T>
+ bool updateEntry(uint32_t tag, const T &data)
+ {
+ return updateEntry(tag, &data, 1, sizeof(T));
+ }
+
+ template<typename T, size_t size>
+ bool updateEntry(uint32_t tag, const T (&data)[size])
+ {
+ return updateEntry(tag, data, size, sizeof(T));
+ }
+
+ template<typename S,
+ typename T = typename S::value_type>
+ bool updateEntry(uint32_t tag, const S &data)
+ {
+ return updateEntry(tag, data.data(), data.size(), sizeof(T));
+ }
+
+ template<typename T>
+ bool updateEntry(uint32_t tag, const T *data, size_t count)
+ {
+ return updateEntry(tag, data, count, sizeof(T));
+ }
+
+ camera_metadata_t *getMetadata();
+ const camera_metadata_t *getMetadata() const;
private:
+ bool resize(size_t count, size_t size);
+ bool addEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize);
+ bool updateEntry(uint32_t tag, const void *data, size_t count,
+ size_t elementSize);
+
camera_metadata_t *metadata_;
bool valid_;
+ bool resized_;
};
-
-#endif /* __ANDROID_CAMERA_METADATA_H__ */
diff --git a/src/android/camera_ops.cpp b/src/android/camera_ops.cpp
index 9dfc2e65..ecaac5a3 100644
--- a/src/android/camera_ops.cpp
+++ b/src/android/camera_ops.cpp
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
#include "camera_ops.h"
@@ -14,7 +14,7 @@
using namespace libcamera;
/*
- * Translatation layer between the Android Camera HAL device operations and the
+ * Translation layer between the Android Camera HAL device operations and the
* CameraDevice.
*/
@@ -61,12 +61,19 @@ static int hal_dev_process_capture_request(const struct camera3_device *dev,
return camera->processCaptureRequest(request);
}
-static void hal_dev_dump(const struct camera3_device *dev, int fd)
+static void hal_dev_dump([[maybe_unused]] const struct camera3_device *dev,
+ [[maybe_unused]] int fd)
{
}
static int hal_dev_flush(const struct camera3_device *dev)
{
+ if (!dev)
+ return -EINVAL;
+
+ CameraDevice *camera = reinterpret_cast<CameraDevice *>(dev->priv);
+ camera->flush();
+
return 0;
}
diff --git a/src/android/camera_ops.h b/src/android/camera_ops.h
index 304e7b85..750dc945 100644
--- a/src/android/camera_ops.h
+++ b/src/android/camera_ops.h
@@ -2,14 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_ops.h - Android Camera HAL Operations
+ * Android Camera HAL Operations
*/
-#ifndef __ANDROID_CAMERA_OPS_H__
-#define __ANDROID_CAMERA_OPS_H__
+
+#pragma once
#include <hardware/camera3.h>
int hal_dev_close(hw_device_t *hw_device);
extern camera3_device_ops hal_dev_ops;
-
-#endif /* __ANDROID_CAMERA_OPS_H__ */
diff --git a/src/android/camera_request.cpp b/src/android/camera_request.cpp
new file mode 100644
index 00000000..0d45960d
--- /dev/null
+++ b/src/android/camera_request.cpp
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2021, Google Inc.
+ *
+ * libcamera Android Camera Request Descriptor
+ */
+
+#include "camera_request.h"
+
+#include <libcamera/base/span.h>
+
+#include "camera_buffer.h"
+
+using namespace libcamera;
+
+/*
+ * \class Camera3RequestDescriptor
+ *
+ * A utility class that groups information about a capture request to be later
+ * reused at request complete time to notify the framework.
+ *
+ *******************************************************************************
+ * Lifetime of a Camera3RequestDescriptor tracking a capture request placed by
+ * Android Framework
+ *******************************************************************************
+ *
+ *
+ * Android Framework
+ * │
+ * │ ┌──────────────────────────────────┐
+ * │ │camera3_capture_request_t │
+ * │ │ │
+ * │ │Requested output streams │
+ * │ │ stream1 stream2 stream3 ... │
+ * │ └──────────────────────────────────┘
+ * ▼
+ * ┌─────────────────────────────────────────────────────────────┐
+ * │ libcamera HAL │
+ * ├─────────────────────────────────────────────────────────────┤
+ * │ CameraDevice │
+ * │ │
+ * │ processCaptureRequest(camera3_capture_request_t request) │
+ * │ │
+ * │ - Create Camera3RequestDescriptor tracking this request │
+ * │ - Streams requiring post-processing are stored in the │
+ * │ pendingStreamsToProcess map │
+ * │ - Add this Camera3RequestDescriptor to descriptors' queue │
+ * │ CameraDevice::descriptors_ │
+ * │ │ ┌─────────────────────────┐
+ * │ - Queue the capture request to libcamera core ────────────┤►│libcamera core │
+ * │ │ ├─────────────────────────┤
+ * │ │ │- Capture from Camera │
+ * │ │ │ │
+ * │ │ │- Emit │
+ * │ │ │ Camera::requestComplete│
+ * │ requestCompleted(Request *request) ◄───────────────────────┼─┼──── │
+ * │ │ │ │
+ * │ - Check request completion status │ └─────────────────────────┘
+ * │ │
+ * │ - if (pendingStreamsToProcess > 0) │
+ * │ Queue all entries from pendingStreamsToProcess │
+ * │ else │ │
+ * │ completeDescriptor() │ └──────────────────────┐
+ * │ │ │
+ * │ ┌──────────────────────────┴───┬──────────────────┐ │
+ * │ │ │ │ │
+ * │ ┌──────────▼────────────┐ ┌───────────▼─────────┐ ▼ │
+ * │ │CameraStream1 │ │CameraStream2 │ .... │
+ * │ ├┬───┬───┬──────────────┤ ├┬───┬───┬────────────┤ │
+ * │ ││ │ │ │ ││ │ │ │ │
+ * │ │▼───▼───▼──────────────┤ │▼───▼───▼────────────┤ │
+ * │ │PostProcessorWorker │ │PostProcessorWorker │ │
+ * │ │ │ │ │ │
+ * │ │ +------------------+ │ │ +------------------+│ │
+ * │ │ | PostProcessor | │ │ | PostProcessor |│ │
+ * │ │ | process() | │ │ | process() |│ │
+ * │ │ | | │ │ | |│ │
+ * │ │ | Emit | │ │ | Emit |│ │
+ * │ │ | processComplete | │ │ | processComplete |│ │
+ * │ │ | | │ │ | |│ │
+ * │ │ +--------------│---+ │ │ +--------------│---+│ │
+ * │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │
+ * │ └────────────────┼──────┘ └────────────────┼────┘ │
+ * │ │ │ │
+ * │ │ │ │
+ * │ │ │ │
+ * │ ▼ ▼ │
+ * │ +---------------------------------------+ +--------------+ │
+ * │ | CameraDevice | | | │
+ * │ | | | | │
+ * │ | streamProcessingComplete() | | | │
+ * │ | | | | │
+ * │ | - Check and set buffer status | | .... | │
 * │ | - Remove post-processing entry | | | │
+ * │ | from pendingStreamsToProcess | | | │
+ * │ | | | | │
+ * │ | - if (pendingStreamsToProcess.empty())| | | │
+ * │ | completeDescriptor | | | │
+ * │ | | | | │
+ * │ +---------------------------------------+ +--------------+ │
+ * │ │
+ * └────────────────────────────────────────────────────────────────────────────────────┘
+ *
+ * +-------------+
+ * | | - PostProcessorWorker's thread
+ * | |
+ * +-------------+
+ */
+
+Camera3RequestDescriptor::Camera3RequestDescriptor(
+ Camera *camera, const camera3_capture_request_t *camera3Request)
+{
+ frameNumber_ = camera3Request->frame_number;
+
+ /* Copy the camera3 request stream information for later access. */
+ const Span<const camera3_stream_buffer_t> buffers{
+ camera3Request->output_buffers,
+ camera3Request->num_output_buffers
+ };
+
+ buffers_.reserve(buffers.size());
+
+ for (const camera3_stream_buffer_t &buffer : buffers) {
+ CameraStream *stream =
+ static_cast<CameraStream *>(buffer.stream->priv);
+
+ buffers_.emplace_back(stream, buffer, this);
+ }
+
+ /* Clone the controls associated with the camera3 request. */
+ settings_ = CameraMetadata(camera3Request->settings);
+
+ /*
+ * Create the CaptureRequest, stored as a unique_ptr<> to tie its
+ * lifetime to the descriptor.
+ */
+ request_ = camera->createRequest(reinterpret_cast<uint64_t>(this));
+}
+
+Camera3RequestDescriptor::~Camera3RequestDescriptor() = default;
+
+/**
+ * \struct Camera3RequestDescriptor::StreamBuffer
+ * \brief Group information for per-stream buffer of Camera3RequestDescriptor
+ *
+ * A capture request placed to the libcamera HAL can contain multiple streams.
+ * Each stream will have an associated buffer to be filled. StreamBuffer
+ * tracks this buffer with contextual information which aids in the stream's
+ * generation. The generation of the stream will depend on its type (refer to
+ * the CameraStream::Type documentation).
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::stream
+ * \brief Pointer to the corresponding CameraStream
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::camera3Buffer
+ * \brief Native handle to the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::frameBuffer
+ * \brief Encapsulate the dmabuf handle inside a libcamera::FrameBuffer for
+ * direct streams
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::fence
+ * \brief Acquire fence of the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::status
+ * \brief Track the status of the buffer
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::internalBuffer
+ * \brief Pointer to a buffer internally handled by CameraStream (if any)
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::srcBuffer
+ * \brief Pointer to the source frame buffer used for post-processing
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::dstBuffer
+ * \brief Pointer to the destination frame buffer used for post-processing
+ *
+ * \var Camera3RequestDescriptor::StreamBuffer::request
+ * \brief Back pointer to the Camera3RequestDescriptor to which the StreamBuffer belongs
+ */
+Camera3RequestDescriptor::StreamBuffer::StreamBuffer(
+ CameraStream *cameraStream, const camera3_stream_buffer_t &buffer,
+ Camera3RequestDescriptor *requestDescriptor)
+ : stream(cameraStream), camera3Buffer(buffer.buffer),
+ fence(buffer.acquire_fence), request(requestDescriptor)
+{
+}
+
+Camera3RequestDescriptor::StreamBuffer::~StreamBuffer() = default;
+
+Camera3RequestDescriptor::StreamBuffer::StreamBuffer(StreamBuffer &&) = default;
+
+Camera3RequestDescriptor::StreamBuffer &
+Camera3RequestDescriptor::StreamBuffer::operator=(Camera3RequestDescriptor::StreamBuffer &&) = default;
diff --git a/src/android/camera_request.h b/src/android/camera_request.h
new file mode 100644
index 00000000..5b479180
--- /dev/null
+++ b/src/android/camera_request.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2021, Google Inc.
+ *
+ * libcamera Android Camera Request Descriptor
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/unique_fd.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+
+#include <hardware/camera3.h>
+
+#include "camera_metadata.h"
+#include "hal_framebuffer.h"
+
+class CameraBuffer;
+class CameraStream;
+
+class Camera3RequestDescriptor
+{
+public:
+ enum class Status {
+ Success,
+ Error,
+ };
+
+ struct StreamBuffer {
+ StreamBuffer(CameraStream *stream,
+ const camera3_stream_buffer_t &buffer,
+ Camera3RequestDescriptor *request);
+ ~StreamBuffer();
+
+ StreamBuffer(StreamBuffer &&);
+ StreamBuffer &operator=(StreamBuffer &&);
+
+ CameraStream *stream;
+ buffer_handle_t *camera3Buffer;
+ std::unique_ptr<HALFrameBuffer> frameBuffer;
+ libcamera::UniqueFD fence;
+ Status status = Status::Success;
+ libcamera::FrameBuffer *internalBuffer = nullptr;
+ const libcamera::FrameBuffer *srcBuffer = nullptr;
+ std::unique_ptr<CameraBuffer> dstBuffer;
+ Camera3RequestDescriptor *request;
+
+ private:
+ LIBCAMERA_DISABLE_COPY(StreamBuffer)
+ };
+
+ /* Keeps track of streams requiring post-processing. */
+ std::map<CameraStream *, StreamBuffer *> pendingStreamsToProcess_
+ LIBCAMERA_TSA_GUARDED_BY(streamsProcessMutex_);
+ libcamera::Mutex streamsProcessMutex_;
+
+ Camera3RequestDescriptor(libcamera::Camera *camera,
+ const camera3_capture_request_t *camera3Request);
+ ~Camera3RequestDescriptor();
+
+ bool isPending() const { return !complete_; }
+
+ uint32_t frameNumber_ = 0;
+
+ std::vector<StreamBuffer> buffers_;
+
+ CameraMetadata settings_;
+ std::unique_ptr<libcamera::Request> request_;
+ std::unique_ptr<CameraMetadata> resultMetadata_;
+
+ bool complete_ = false;
+ Status status_ = Status::Success;
+
+private:
+ LIBCAMERA_DISABLE_COPY(Camera3RequestDescriptor)
+};
diff --git a/src/android/camera_stream.cpp b/src/android/camera_stream.cpp
new file mode 100644
index 00000000..1d68540d
--- /dev/null
+++ b/src/android/camera_stream.cpp
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Camera HAL stream
+ */
+
+#include "camera_stream.h"
+
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <unistd.h>
+
+#include <libcamera/formats.h>
+
+#include "jpeg/post_processor_jpeg.h"
+#include "yuv/post_processor_yuv.h"
+
+#include "camera_buffer.h"
+#include "camera_capabilities.h"
+#include "camera_device.h"
+#include "camera_metadata.h"
+#include "frame_buffer_allocator.h"
+#include "post_processor.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+/*
+ * \class CameraStream
+ * \brief Map a camera3_stream_t to a StreamConfiguration
+ *
+ * The CameraStream class maps a camera3_stream_t provided by Android
+ * camera framework to a libcamera::StreamConfiguration.
+ *
+ * The StreamConfiguration is represented by its index as recorded in the
+ * CameraConfiguration and not by pointer as StreamConfiguration is subject to
+ * relocation.
+ *
+ * A single StreamConfiguration may be used to deliver one or more streams to
+ * the Android framework. The mapping type between a camera3 stream to a
+ * StreamConfiguration is described by the CameraStream::Type.
+ *
+ * CameraStream handles all the aspects of producing a stream with the size
+ * and format requested by the camera3 stream from the data produced by
+ * the associated libcamera::Stream, including the creation of the encoder
+ * and buffer allocation.
+ */
+
+CameraStream::CameraStream(CameraDevice *const cameraDevice,
+ CameraConfiguration *config, Type type,
+ camera3_stream_t *camera3Stream,
+ CameraStream *const sourceStream, unsigned int index)
+ : cameraDevice_(cameraDevice), config_(config), type_(type),
+ camera3Stream_(camera3Stream), sourceStream_(sourceStream),
+ index_(index)
+{
+}
+
+CameraStream::CameraStream(CameraStream &&other) = default;
+
+CameraStream::~CameraStream()
+{
+ /*
+ * Manually delete buffers and then the allocator to make sure buffers
+ * are released while the allocator is still valid.
+ */
+ allocatedBuffers_.clear();
+ allocator_.reset();
+}
+
+const StreamConfiguration &CameraStream::configuration() const
+{
+ return config_->at(index_);
+}
+
+Stream *CameraStream::stream() const
+{
+ return configuration().stream();
+}
+
+int CameraStream::configure()
+{
+ if (type_ == Type::Internal || type_ == Type::Mapped) {
+ const PixelFormat outFormat =
+ cameraDevice_->capabilities()->toPixelFormat(camera3Stream_->format);
+ StreamConfiguration output = configuration();
+ output.pixelFormat = outFormat;
+ output.size.width = camera3Stream_->width;
+ output.size.height = camera3Stream_->height;
+
+ switch (outFormat) {
+ case formats::NV12:
+ postProcessor_ = std::make_unique<PostProcessorYuv>();
+ break;
+
+ case formats::MJPEG:
+ postProcessor_ = std::make_unique<PostProcessorJpeg>(cameraDevice_);
+ break;
+
+ default:
+ LOG(HAL, Error) << "Unsupported format: " << outFormat;
+ return -EINVAL;
+ }
+
+ int ret = postProcessor_->configure(configuration(), output);
+ if (ret)
+ return ret;
+
+ worker_ = std::make_unique<PostProcessorWorker>(postProcessor_.get());
+ postProcessor_->processComplete.connect(
+ this, [&](Camera3RequestDescriptor::StreamBuffer *streamBuffer,
+ PostProcessor::Status status) {
+ Camera3RequestDescriptor::Status bufferStatus;
+
+ if (status == PostProcessor::Status::Success)
+ bufferStatus = Camera3RequestDescriptor::Status::Success;
+ else
+ bufferStatus = Camera3RequestDescriptor::Status::Error;
+
+ cameraDevice_->streamProcessingComplete(streamBuffer,
+ bufferStatus);
+ });
+
+ worker_->start();
+ }
+
+ allocator_ = std::make_unique<PlatformFrameBufferAllocator>(cameraDevice_);
+ mutex_ = std::make_unique<Mutex>();
+
+ camera3Stream_->max_buffers = configuration().bufferCount;
+
+ return 0;
+}
+
+int CameraStream::waitFence(int fence)
+{
+ /*
+ * \todo The implementation here is copied from camera_worker.cpp
+ * and both should be removed once libcamera is instrumented to handle
+ * fences waiting in the core.
+ *
+ * \todo Better characterize the timeout. Currently equal to the one
+ * used by the Rockchip Camera HAL on ChromeOS.
+ */
+ constexpr unsigned int timeoutMs = 300;
+ struct pollfd fds = { fence, POLLIN, 0 };
+
+ do {
+ int ret = poll(&fds, 1, timeoutMs);
+ if (ret == 0)
+ return -ETIME;
+
+ if (ret > 0) {
+ if (fds.revents & (POLLERR | POLLNVAL))
+ return -EINVAL;
+
+ return 0;
+ }
+ } while (errno == EINTR || errno == EAGAIN);
+
+ return -errno;
+}
+
+int CameraStream::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+ ASSERT(type_ != Type::Direct);
+
+ /* Handle waiting on fences on the destination buffer. */
+ if (streamBuffer->fence.isValid()) {
+ int ret = waitFence(streamBuffer->fence.get());
+ if (ret < 0) {
+ LOG(HAL, Error) << "Failed waiting for fence: "
+ << streamBuffer->fence.get() << ": "
+ << strerror(-ret);
+ return ret;
+ }
+
+ streamBuffer->fence.reset();
+ }
+
+ const StreamConfiguration &output = configuration();
+ streamBuffer->dstBuffer = std::make_unique<CameraBuffer>(
+ *streamBuffer->camera3Buffer, output.pixelFormat, output.size,
+ PROT_READ | PROT_WRITE);
+ if (!streamBuffer->dstBuffer->isValid()) {
+ LOG(HAL, Error) << "Failed to create destination buffer";
+ return -EINVAL;
+ }
+
+ worker_->queueRequest(streamBuffer);
+
+ return 0;
+}
+
+void CameraStream::flush()
+{
+ if (!postProcessor_)
+ return;
+
+ worker_->flush();
+}
+
+FrameBuffer *CameraStream::getBuffer()
+{
+ if (!allocator_)
+ return nullptr;
+
+ MutexLocker locker(*mutex_);
+
+ if (buffers_.empty()) {
+ /*
+ * Use HAL_PIXEL_FORMAT_YCBCR_420_888 unconditionally.
+ *
+ * YCBCR_420 is the source format for both the JPEG and the YUV
+ * post-processors.
+ *
+ * \todo Store a reference to the format of the source stream
+ * instead of hardcoding.
+ */
+ auto frameBuffer = allocator_->allocate(HAL_PIXEL_FORMAT_YCBCR_420_888,
+ configuration().size,
+ camera3Stream_->usage);
+ allocatedBuffers_.push_back(std::move(frameBuffer));
+ buffers_.emplace_back(allocatedBuffers_.back().get());
+ }
+
+ FrameBuffer *buffer = buffers_.back();
+ buffers_.pop_back();
+
+ return buffer;
+}
+
+void CameraStream::putBuffer(FrameBuffer *buffer)
+{
+ if (!allocator_)
+ return;
+
+ MutexLocker locker(*mutex_);
+
+ buffers_.push_back(buffer);
+}
+
+/**
+ * \class CameraStream::PostProcessorWorker
+ * \brief Post-process a CameraStream in an internal thread
+ *
+ * If the association between CameraStream and camera3_stream_t dictated by
+ * CameraStream::Type is internal or mapped, the stream is generated by post
+ * processing of a libcamera stream. Such a request is queued to a
+ * PostProcessorWorker in CameraStream::process(). A queue of post-processing
+ * requests is maintained by the PostProcessorWorker and it will run the
+ * post-processing on an internal thread as soon as any request is available on
+ * its queue.
+ */
+CameraStream::PostProcessorWorker::PostProcessorWorker(PostProcessor *postProcessor)
+ : postProcessor_(postProcessor)
+{
+}
+
+CameraStream::PostProcessorWorker::~PostProcessorWorker()
+{
+ {
+ MutexLocker lock(mutex_);
+ state_ = State::Stopped;
+ }
+
+ cv_.notify_one();
+ wait();
+}
+
+void CameraStream::PostProcessorWorker::start()
+{
+ {
+ MutexLocker lock(mutex_);
+ ASSERT(state_ != State::Running);
+ state_ = State::Running;
+ }
+
+ Thread::start();
+}
+
+void CameraStream::PostProcessorWorker::queueRequest(Camera3RequestDescriptor::StreamBuffer *dest)
+{
+ {
+ MutexLocker lock(mutex_);
+ ASSERT(state_ == State::Running);
+ requests_.push(dest);
+ }
+
+ cv_.notify_one();
+}
+
+void CameraStream::PostProcessorWorker::run()
+{
+ MutexLocker locker(mutex_);
+
+ while (1) {
+ cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
+ return state_ != State::Running || !requests_.empty();
+ });
+
+ if (state_ != State::Running)
+ break;
+
+ Camera3RequestDescriptor::StreamBuffer *streamBuffer = requests_.front();
+ requests_.pop();
+ locker.unlock();
+
+ postProcessor_->process(streamBuffer);
+
+ locker.lock();
+ }
+
+ if (state_ == State::Flushing) {
+ std::queue<Camera3RequestDescriptor::StreamBuffer *> requests =
+ std::move(requests_);
+ locker.unlock();
+
+ while (!requests.empty()) {
+ postProcessor_->processComplete.emit(
+ requests.front(), PostProcessor::Status::Error);
+ requests.pop();
+ }
+
+ locker.lock();
+ state_ = State::Stopped;
+ }
+}
+
+void CameraStream::PostProcessorWorker::flush()
+{
+ MutexLocker lock(mutex_);
+ state_ = State::Flushing;
+ lock.unlock();
+
+ cv_.notify_one();
+}
diff --git a/src/android/camera_stream.h b/src/android/camera_stream.h
new file mode 100644
index 00000000..395552da
--- /dev/null
+++ b/src/android/camera_stream.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Camera HAL stream
+ */
+
+#pragma once
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include <hardware/camera3.h>
+
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "camera_request.h"
+#include "post_processor.h"
+
+class CameraDevice;
+class PlatformFrameBufferAllocator;
+
class CameraStream
{
public:
	/*
	 * Enumeration of CameraStream types.
	 *
	 * A camera stream associates an Android stream to a libcamera stream.
	 * This enumeration describes how the two streams are associated and how
	 * and where data produced from libcamera are delivered to the
	 * Android framework.
	 *
	 * Direct:
	 *
	 * The Android stream is directly mapped onto a libcamera stream: frames
	 * are delivered by the library directly in the memory location
	 * specified by the Android stream (buffer_handle_t->data) and provided
	 * to the framework as they are. The Android stream characteristics are
	 * directly translated to the libcamera stream configuration.
	 *
	 * +-----+                +-----+
	 * |  A  |                |  L  |
	 * +-----+                +-----+
	 *    |                      |
	 *    V                      V
	 * +-----+                +------+
	 * |  B  |<---------------|  FB  |
	 * +-----+                +------+
	 *
	 *
	 * Internal:
	 *
	 * Data for the Android stream is produced by processing a libcamera
	 * stream created by the HAL for that purpose. The libcamera stream
	 * needs to be supplied with intermediate buffers where the library
	 * delivers frames to be processed and then provided to the framework.
	 * The libcamera stream configuration is not a direct translation of the
	 * Android stream characteristics, but it describes the format and size
	 * required for the processing procedure to produce frames in the
	 * Android required format.
	 *
	 * +-----+                +-----+
	 * |  A  |                |  L  |
	 * +-----+                +-----+
	 *    |                      |
	 *    V                      V
	 * +-----+                +------+
	 * |  B  |                |  FB  |
	 * +-----+                +------+
	 *   ^                       |
	 *   |-------Processing------|
	 *
	 *
	 * Mapped:
	 *
	 * Data for the Android stream is produced by processing a libcamera
	 * stream associated with another CameraStream. Mapped camera streams do
	 * not need any memory to be reserved for them as they process data
	 * produced by libcamera for a different stream whose format and size
	 * are compatible with the processing procedure requirements to produce
	 * frames in the Android required format.
	 *
	 * +-----+      +-----+          +-----+
	 * |  A  |      |  A' |          |  L  |
	 * +-----+      +-----+          +-----+
	 *    |            |                |
	 *    V            V                V
	 * +-----+      +-----+          +------+
	 * |  B  |      |  B' |<---------|  FB  |
	 * +-----+      +-----+          +------+
	 *   ^            |
	 *   |--Processing--|
	 *
	 *
	 * --------------------------------------------------------------------
	 * A  = Android stream
	 * L  = libcamera stream
	 * B  = memory buffer
	 * FB = libcamera FrameBuffer
	 * "Processing" = Frame processing procedure (Encoding, scaling etc)
	 */
	enum class Type {
		Direct,
		Internal,
		Mapped,
	};
	CameraStream(CameraDevice *const cameraDevice,
		     libcamera::CameraConfiguration *config, Type type,
		     camera3_stream_t *camera3Stream,
		     CameraStream *const sourceStream,
		     unsigned int index);
	CameraStream(CameraStream &&other);
	~CameraStream();

	Type type() const { return type_; }
	camera3_stream_t *camera3Stream() const { return camera3Stream_; }
	const libcamera::StreamConfiguration &configuration() const;
	libcamera::Stream *stream() const;
	CameraStream *sourceStream() const { return sourceStream_; }

	/* Prepare the stream and the resources its type requires. */
	int configure();
	/* Produce the Android-side result for \a streamBuffer. */
	int process(Camera3RequestDescriptor::StreamBuffer *streamBuffer);
	/* Borrow / return a FrameBuffer from the internal pool. */
	libcamera::FrameBuffer *getBuffer();
	void putBuffer(libcamera::FrameBuffer *buffer);
	/* Cancel pending post-processing requests (completed with an error). */
	void flush();

private:
	/*
	 * Worker thread running the post-processor asynchronously on queued
	 * requests (see the implementation in camera_stream.cpp).
	 */
	class PostProcessorWorker : public libcamera::Thread
	{
	public:
		enum class State {
			Stopped,
			Running,
			Flushing,
		};

		PostProcessorWorker(PostProcessor *postProcessor);
		~PostProcessorWorker();

		void start();
		void queueRequest(Camera3RequestDescriptor::StreamBuffer *request);
		void flush();

	protected:
		void run() override;

	private:
		PostProcessor *postProcessor_;

		libcamera::Mutex mutex_;
		libcamera::ConditionVariable cv_;

		std::queue<Camera3RequestDescriptor::StreamBuffer *> requests_
			LIBCAMERA_TSA_GUARDED_BY(mutex_);

		State state_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = State::Stopped;
	};

	int waitFence(int fence);

	CameraDevice *const cameraDevice_;
	const libcamera::CameraConfiguration *config_;
	const Type type_;
	camera3_stream_t *camera3Stream_;
	CameraStream *const sourceStream_;
	const unsigned int index_;

	std::unique_ptr<PlatformFrameBufferAllocator> allocator_;
	std::vector<std::unique_ptr<libcamera::FrameBuffer>> allocatedBuffers_;
	std::vector<libcamera::FrameBuffer *> buffers_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
	/*
	 * The class has to be MoveConstructible as instances are stored in
	 * an std::vector in CameraDevice.
	 */
	std::unique_ptr<libcamera::Mutex> mutex_;
	std::unique_ptr<PostProcessor> postProcessor_;

	std::unique_ptr<PostProcessorWorker> worker_;
};
diff --git a/src/android/cros/camera3_hal.cpp b/src/android/cros/camera3_hal.cpp
new file mode 100644
index 00000000..6010a5ad
--- /dev/null
+++ b/src/android/cros/camera3_hal.cpp
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * cros-specific components of Android Camera HALv3 module
+ */
+
+#include <cros-camera/cros_camera_hal.h>
+
+#include "../camera_hal_manager.h"
+#include "../cros_mojo_token.h"
+
/* Record the Mojo connection token handed over by the cros camera service. */
static void set_up(cros::CameraMojoChannelManagerToken *token)
{
	gCrosMojoToken = token;
}

/* Destroy the HAL manager singleton on service shutdown. */
static void tear_down()
{
	delete CameraHalManager::instance();
}

/* Exported entry points of the cros-specific camera HAL module. */
cros::cros_camera_hal_t CROS_CAMERA_EXPORT CROS_CAMERA_HAL_INFO_SYM = {
	.set_up = set_up,
	.tear_down = tear_down
};
diff --git a/src/android/cros/meson.build b/src/android/cros/meson.build
new file mode 100644
index 00000000..35995dd8
--- /dev/null
+++ b/src/android/cros/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: CC0-1.0
+
# The cros-specific sources are only built when the HAL targets the
# ChromeOS camera service.
if get_option('android_platform') != 'cros'
    subdir_done()
endif

android_hal_sources += files([
    'camera3_hal.cpp',
])

android_deps += dependency('libcros_camera')

android_cpp_args += ['-DOS_CHROMEOS']
diff --git a/src/android/cros_mojo_token.h b/src/android/cros_mojo_token.h
new file mode 100644
index 00000000..d0baa80f
--- /dev/null
+++ b/src/android/cros_mojo_token.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * cros-specific mojo token
+ */
+
#pragma once

#include <cros-camera/cros_camera_hal.h>

/*
 * Token shared with the cros camera service and used to set up Mojo IPC
 * channels (e.g. to acquire the JEA JPEG compressor). It is assigned by the
 * cros HAL's set_up() entry point and remains null on non-cros platforms.
 */
inline cros::CameraMojoChannelManagerToken *gCrosMojoToken = nullptr;
diff --git a/src/android/data/nautilus/camera_hal.yaml b/src/android/data/nautilus/camera_hal.yaml
new file mode 100644
index 00000000..2105fcca
--- /dev/null
+++ b/src/android/data/nautilus/camera_hal.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+cameras:
+ "\\_SB_.PCI0.I2C2.CAM0":
+ location: back
+ rotation: 0
+
+ "\\_SB_.PCI0.XHCI.RHUB.HS09-9:1.0-04f2:b647":
+ location: front
+ rotation: 0
diff --git a/src/android/data/soraka/camera_hal.yaml b/src/android/data/soraka/camera_hal.yaml
new file mode 100644
index 00000000..d886af06
--- /dev/null
+++ b/src/android/data/soraka/camera_hal.yaml
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: CC0-1.0
+
+cameras:
+ "\\_SB_.PCI0.I2C4.CAM1":
+ location: front
+ rotation: 0
+
+ "\\_SB_.PCI0.I2C2.CAM0":
+ location: back
+ rotation: 0
diff --git a/src/android/frame_buffer_allocator.h b/src/android/frame_buffer_allocator.h
new file mode 100644
index 00000000..3e68641c
--- /dev/null
+++ b/src/android/frame_buffer_allocator.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Interface definition to allocate Frame buffer in
+ * platform dependent way.
+ */
+#ifndef __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
+#define __ANDROID_FRAME_BUFFER_ALLOCATOR_H__
+
+#include <memory>
+
+#include <libcamera/base/class.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/geometry.h>
+
+#include "hal_framebuffer.h"
+
+class CameraDevice;
+
+class PlatformFrameBufferAllocator : libcamera::Extensible
+{
+ LIBCAMERA_DECLARE_PRIVATE()
+
+public:
+ explicit PlatformFrameBufferAllocator(CameraDevice *const cameraDevice);
+ ~PlatformFrameBufferAllocator();
+
+ /*
+ * FrameBuffer owns the underlying buffer. Returns nullptr on failure.
+ * Note: The returned FrameBuffer needs to be destroyed before
+ * PlatformFrameBufferAllocator is destroyed.
+ */
+ std::unique_ptr<HALFrameBuffer> allocate(
+ int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+};
+
+#define PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION \
+PlatformFrameBufferAllocator::PlatformFrameBufferAllocator( \
+ CameraDevice *const cameraDevice) \
+ : Extensible(std::make_unique<Private>(cameraDevice)) \
+{ \
+} \
+PlatformFrameBufferAllocator::~PlatformFrameBufferAllocator() \
+{ \
+} \
+std::unique_ptr<HALFrameBuffer> \
+PlatformFrameBufferAllocator::allocate(int halPixelFormat, \
+ const libcamera::Size &size, \
+ uint32_t usage) \
+{ \
+ return _d()->allocate(halPixelFormat, size, usage); \
+}
+
+#endif /* __ANDROID_FRAME_BUFFER_ALLOCATOR_H__ */
diff --git a/src/android/hal_framebuffer.cpp b/src/android/hal_framebuffer.cpp
new file mode 100644
index 00000000..d4899f45
--- /dev/null
+++ b/src/android/hal_framebuffer.cpp
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#include "hal_framebuffer.h"
+
+#include <hardware/camera3.h>
+
/* Construct from a backend-provided Private implementation. */
HALFrameBuffer::HALFrameBuffer(std::unique_ptr<Private> d,
			       buffer_handle_t handle)
	: FrameBuffer(std::move(d)), handle_(handle)
{
}

/* Construct from a plane list, retaining the native Android handle. */
HALFrameBuffer::HALFrameBuffer(const std::vector<Plane> &planes,
			       buffer_handle_t handle)
	: FrameBuffer(planes), handle_(handle)
{
}
diff --git a/src/android/hal_framebuffer.h b/src/android/hal_framebuffer.h
new file mode 100644
index 00000000..cea49e2d
--- /dev/null
+++ b/src/android/hal_framebuffer.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * HAL Frame Buffer Handling
+ */
+
+#pragma once
+
+#include "libcamera/internal/framebuffer.h"
+
+#include <hardware/camera3.h>
+
/*
 * FrameBuffer that remembers the Android buffer_handle_t its planes belong
 * to, so HAL components (e.g. the JEA hardware JPEG encoder) can operate
 * directly on the native handle.
 */
class HALFrameBuffer final : public libcamera::FrameBuffer
{
public:
	HALFrameBuffer(std::unique_ptr<Private> d,
		       buffer_handle_t handle);
	HALFrameBuffer(const std::vector<Plane> &planes,
		       buffer_handle_t handle);

	/* Native Android handle backing this buffer. */
	buffer_handle_t handle() const { return handle_; }

private:
	buffer_handle_t handle_;
};
diff --git a/src/android/jpeg/encoder.h b/src/android/jpeg/encoder.h
new file mode 100644
index 00000000..ed033c19
--- /dev/null
+++ b/src/android/jpeg/encoder.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Image encoding interface
+ */
+
+#pragma once
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+#include "../camera_request.h"
+
/*
 * Abstract image encoder interface.
 *
 * configure() prepares the encoder for a stream configuration; encode()
 * compresses the source buffer of \a buffer into its destination buffer,
 * embedding \a exifData, and returns the number of bytes produced or a
 * negative error code.
 */
class Encoder
{
public:
	virtual ~Encoder() = default;

	virtual int configure(const libcamera::StreamConfiguration &cfg) = 0;
	virtual int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
			   libcamera::Span<const uint8_t> exifData,
			   unsigned int quality) = 0;
};
diff --git a/src/android/jpeg/encoder_jea.cpp b/src/android/jpeg/encoder_jea.cpp
new file mode 100644
index 00000000..25dc4317
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.cpp
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#include "encoder_jea.h"
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include <cros-camera/camera_mojo_channel_manager_token.h>
+
+#include "../cros_mojo_token.h"
+#include "../hal_framebuffer.h"
+
EncoderJea::EncoderJea() = default;

EncoderJea::~EncoderJea() = default;

/*
 * \brief Configure the encoder for the given stream configuration
 *
 * Records the frame size and lazily acquires the JEA compressor through
 * the cros Mojo channel token. Returns -ENOTSUP when no Mojo token has
 * been set (i.e. outside of a cros environment).
 */
int EncoderJea::configure(const libcamera::StreamConfiguration &cfg)
{
	size_ = cfg.size;

	/* The compressor instance only needs to be acquired once. */
	if (jpegCompressor_)
		return 0;

	if (gCrosMojoToken == nullptr)
		return -ENOTSUP;

	jpegCompressor_ = cros::JpegCompressor::GetInstance(gCrosMojoToken);

	return 0;
}
+
+int EncoderJea::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
+ libcamera::Span<const uint8_t> exifData,
+ unsigned int quality)
+{
+ if (!jpegCompressor_)
+ return -ENOTSUP;
+
+ uint32_t outDataSize = 0;
+ const HALFrameBuffer *fb =
+ dynamic_cast<const HALFrameBuffer *>(buffer->srcBuffer);
+
+ if (!jpegCompressor_->CompressImageFromHandle(fb->handle(),
+ *buffer->camera3Buffer,
+ size_.width, size_.height,
+ quality, exifData.data(),
+ exifData.size(),
+ &outDataSize))
+ return -EBUSY;
+
+ return outDataSize;
+}
diff --git a/src/android/jpeg/encoder_jea.h b/src/android/jpeg/encoder_jea.h
new file mode 100644
index 00000000..91115d2e
--- /dev/null
+++ b/src/android/jpeg/encoder_jea.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2022, Google Inc.
+ *
+ * JPEG encoding using CrOS JEA
+ */
+
+#pragma once
+
+#include <libcamera/geometry.h>
+
+#include <cros-camera/jpeg_compressor.h>
+
+#include "encoder.h"
+
/* JPEG encoder backed by the ChromeOS JPEG Encode Accelerator (JEA). */
class EncoderJea : public Encoder
{
public:
	EncoderJea();
	~EncoderJea();

	int configure(const libcamera::StreamConfiguration &cfg) override;
	int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
		   libcamera::Span<const uint8_t> exifData,
		   unsigned int quality) override;

private:
	/* Frame size recorded at configure() time. */
	libcamera::Size size_;

	std::unique_ptr<cros::JpegCompressor> jpegCompressor_;
};
diff --git a/src/android/jpeg/encoder_libjpeg.cpp b/src/android/jpeg/encoder_libjpeg.cpp
new file mode 100644
index 00000000..cb242b5e
--- /dev/null
+++ b/src/android/jpeg/encoder_libjpeg.cpp
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG encoding using libjpeg native API
+ */
+
+#include "encoder_libjpeg.h"
+
+#include <fcntl.h>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/formats.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "../camera_buffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(JPEG)
+
namespace {

/* libjpeg encoding parameters associated with a libcamera pixel format. */
struct JPEGPixelFormatInfo {
	J_COLOR_SPACE colorSpace;
	const PixelFormatInfo &pixelFormatInfo;
	/* True when the chroma samples are stored Cr before Cb (NV21-style). */
	bool nvSwap;
};

const std::map<PixelFormat, JPEGPixelFormatInfo> pixelInfo{
	{ formats::R8, { JCS_GRAYSCALE, PixelFormatInfo::info(formats::R8), false } },

	{ formats::RGB888, { JCS_EXT_BGR, PixelFormatInfo::info(formats::RGB888), false } },
	{ formats::BGR888, { JCS_EXT_RGB, PixelFormatInfo::info(formats::BGR888), false } },

	{ formats::NV12, { JCS_YCbCr, PixelFormatInfo::info(formats::NV12), false } },
	{ formats::NV21, { JCS_YCbCr, PixelFormatInfo::info(formats::NV21), true } },
	{ formats::NV16, { JCS_YCbCr, PixelFormatInfo::info(formats::NV16), false } },
	{ formats::NV61, { JCS_YCbCr, PixelFormatInfo::info(formats::NV61), true } },
	{ formats::NV24, { JCS_YCbCr, PixelFormatInfo::info(formats::NV24), false } },
	{ formats::NV42, { JCS_YCbCr, PixelFormatInfo::info(formats::NV42), true } },
};

/*
 * Look up the JPEG encoding parameters for \a format. Returns a sentinel
 * whose colorSpace is JCS_UNKNOWN when the format is not supported.
 */
const struct JPEGPixelFormatInfo &findPixelInfo(const PixelFormat &format)
{
	static const struct JPEGPixelFormatInfo invalidPixelFormat {
		JCS_UNKNOWN, PixelFormatInfo(), false
	};

	const auto iter = pixelInfo.find(format);
	if (iter == pixelInfo.end()) {
		LOG(JPEG, Error) << "Unsupported pixel format for JPEG encoder: "
				 << format;
		return invalidPixelFormat;
	}

	return iter->second;
}

} /* namespace */
+
/* Initialize the libjpeg compressor object with the default error handler. */
EncoderLibJpeg::EncoderLibJpeg()
{
	/* \todo Expand error handling coverage with a custom handler. */
	compress_.err = jpeg_std_error(&jerr_);

	jpeg_create_compress(&compress_);
}

/* Release all libjpeg resources associated with the compressor. */
EncoderLibJpeg::~EncoderLibJpeg()
{
	jpeg_destroy_compress(&compress_);
}
+
/*
 * \brief Configure libjpeg for the stream's size and pixel format
 *
 * Returns -ENOTSUP when the pixel format is not supported by the encoder.
 */
int EncoderLibJpeg::configure(const StreamConfiguration &cfg)
{
	const struct JPEGPixelFormatInfo info = findPixelInfo(cfg.pixelFormat);
	if (info.colorSpace == JCS_UNKNOWN)
		return -ENOTSUP;

	compress_.image_width = cfg.size.width;
	compress_.image_height = cfg.size.height;
	compress_.in_color_space = info.colorSpace;

	/* Grayscale uses a single component, all other formats use three. */
	compress_.input_components = info.colorSpace == JCS_GRAYSCALE ? 1 : 3;

	jpeg_set_defaults(&compress_);

	pixelFormatInfo_ = &info.pixelFormatInfo;

	/* Two-plane formats are the semi-planar NV variants. */
	nv_ = pixelFormatInfo_->numPlanes() == 2;
	nvSwap_ = info.nvSwap;

	return 0;
}
+
+void EncoderLibJpeg::compressRGB(const std::vector<Span<uint8_t>> &planes)
+{
+ unsigned char *src = const_cast<unsigned char *>(planes[0].data());
+ /* \todo Stride information should come from buffer configuration. */
+ unsigned int stride = pixelFormatInfo_->stride(compress_.image_width, 0);
+
+ JSAMPROW row_pointer[1];
+
+ while (compress_.next_scanline < compress_.image_height) {
+ row_pointer[0] = &src[compress_.next_scanline * stride];
+ jpeg_write_scanlines(&compress_, row_pointer, 1);
+ }
+}
+
/*
 * Compress the incoming buffer from a supported NV format.
 * This naively unpacks the semi-planar NV12 to a YUV888 format for libjpeg.
 */
void EncoderLibJpeg::compressNV(const std::vector<Span<uint8_t>> &planes)
{
	/* Scratch buffer holding one fully interleaved Y/Cb/Cr scanline. */
	std::vector<uint8_t> tmprowbuf(compress_.image_width * 3);

	/*
	 * \todo Use the raw api, and only unpack the cb/cr samples to new line
	 * buffers. If possible, see if we can set appropriate pixel strides
	 * too to save even that copy.
	 *
	 * Possible hints at:
	 * https://sourceforge.net/p/libjpeg/mailman/message/30815123/
	 */
	unsigned int y_stride = pixelFormatInfo_->stride(compress_.image_width, 0);
	unsigned int c_stride = pixelFormatInfo_->stride(compress_.image_width, 1);

	/* Derive the horizontal chroma subsampling from the chroma stride. */
	unsigned int horzSubSample = 2 * compress_.image_width / c_stride;
	unsigned int vertSubSample = pixelFormatInfo_->planes[1].verticalSubSampling;

	/*
	 * Without horizontal subsampling every pixel has its own chroma
	 * pair (advance by 2 bytes); with subsampling the first pixel of
	 * each pair reuses the previous samples (advance by 0).
	 */
	unsigned int c_inc = horzSubSample == 1 ? 2 : 0;
	unsigned int cb_pos = nvSwap_ ? 1 : 0;
	unsigned int cr_pos = nvSwap_ ? 0 : 1;

	const unsigned char *src = planes[0].data();
	const unsigned char *src_c = planes[1].data();

	JSAMPROW row_pointer[1];
	row_pointer[0] = tmprowbuf.data();

	for (unsigned int y = 0; y < compress_.image_height; y++) {
		unsigned char *dst = tmprowbuf.data();

		const unsigned char *src_y = src + y * y_stride;
		const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos;
		const unsigned char *src_cr = src_c + (y / vertSubSample) * c_stride + cr_pos;

		/*
		 * NOTE(review): the loop emits two pixels per iteration and
		 * therefore assumes an even image width — confirm odd widths
		 * cannot reach this path.
		 */
		for (unsigned int x = 0; x < compress_.image_width; x += 2) {
			dst[0] = *src_y;
			dst[1] = *src_cb;
			dst[2] = *src_cr;
			src_y++;
			src_cb += c_inc;
			src_cr += c_inc;
			dst += 3;

			dst[0] = *src_y;
			dst[1] = *src_cb;
			dst[2] = *src_cr;
			src_y++;
			src_cb += 2;
			src_cr += 2;
			dst += 3;
		}

		jpeg_write_scanlines(&compress_, row_pointer, 1);
	}
}
+
/*
 * \brief Encode the source buffer of \a buffer into its destination buffer
 *
 * Maps the source FrameBuffer for CPU reading and delegates to the
 * plane-based encode() overload. Returns the number of bytes produced on
 * success or a negative error code when mapping fails.
 */
int EncoderLibJpeg::encode(Camera3RequestDescriptor::StreamBuffer *buffer,
			   libcamera::Span<const uint8_t> exifData,
			   unsigned int quality)
{
	MappedFrameBuffer frame(buffer->srcBuffer,
				MappedFrameBuffer::MapFlag::Read);
	if (!frame.isValid()) {
		LOG(JPEG, Error) << "Failed to map FrameBuffer : "
				 << strerror(frame.error());
		return frame.error();
	}

	return encode(frame.planes(), buffer->dstBuffer->plane(0),
		      exifData, quality);
}
+
/*
 * \brief Compress \a src into \a dest, embedding \a exifData as an APP1
 * marker
 *
 * Returns the number of bytes written into \a dest.
 */
int EncoderLibJpeg::encode(const std::vector<Span<uint8_t>> &src,
			   Span<uint8_t> dest, Span<const uint8_t> exifData,
			   unsigned int quality)
{
	unsigned char *destination = dest.data();
	unsigned long size = dest.size();

	jpeg_set_quality(&compress_, quality, TRUE);

	/*
	 * The jpeg_mem_dest will reallocate if the required size is not
	 * sufficient. That means the output won't be written to the correct
	 * buffers.
	 *
	 * \todo Implement our own custom memory destination to prevent
	 * reallocation and prefer failure with correct reporting.
	 */
	jpeg_mem_dest(&compress_, &destination, &size);

	jpeg_start_compress(&compress_, TRUE);

	if (exifData.size())
		/* Store Exif data in the JPEG_APP1 data block. */
		jpeg_write_marker(&compress_, JPEG_APP0 + 1,
				  static_cast<const JOCTET *>(exifData.data()),
				  exifData.size());

	LOG(JPEG, Debug) << "JPEG Encode Starting:" << compress_.image_width
			 << "x" << compress_.image_height;

	/* configure() must have been called with a matching pixel format. */
	ASSERT(src.size() == pixelFormatInfo_->numPlanes());

	if (nv_)
		compressNV(src);
	else
		compressRGB(src);

	jpeg_finish_compress(&compress_);

	/* jpeg_mem_dest() updated size with the number of bytes produced. */
	return size;
}
diff --git a/src/android/jpeg/encoder_libjpeg.h b/src/android/jpeg/encoder_libjpeg.h
new file mode 100644
index 00000000..4ac85c22
--- /dev/null
+++ b/src/android/jpeg/encoder_libjpeg.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG encoding using libjpeg
+ */
+
+#pragma once
+
+#include "encoder.h"
+
+#include <vector>
+
+#include "libcamera/internal/formats.h"
+
+#include <jpeglib.h>
+
/* Software JPEG encoder based on the libjpeg native API. */
class EncoderLibJpeg : public Encoder
{
public:
	EncoderLibJpeg();
	~EncoderLibJpeg();

	int configure(const libcamera::StreamConfiguration &cfg) override;
	int encode(Camera3RequestDescriptor::StreamBuffer *buffer,
		   libcamera::Span<const uint8_t> exifData,
		   unsigned int quality) override;
	/* Variant operating directly on mapped planes and an output span. */
	int encode(const std::vector<libcamera::Span<uint8_t>> &planes,
		   libcamera::Span<uint8_t> destination,
		   libcamera::Span<const uint8_t> exifData,
		   unsigned int quality);

private:
	void compressRGB(const std::vector<libcamera::Span<uint8_t>> &planes);
	void compressNV(const std::vector<libcamera::Span<uint8_t>> &planes);

	struct jpeg_compress_struct compress_;
	struct jpeg_error_mgr jerr_;

	/* Format information cached by configure(). */
	const libcamera::PixelFormatInfo *pixelFormatInfo_;

	/* Semi-planar (NV) input; nvSwap_ is set when Cr precedes Cb. */
	bool nv_;
	bool nvSwap_;
};
diff --git a/src/android/jpeg/exif.cpp b/src/android/jpeg/exif.cpp
new file mode 100644
index 00000000..b8c871df
--- /dev/null
+++ b/src/android/jpeg/exif.cpp
@@ -0,0 +1,522 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * EXIF tag creation using libexif
+ */
+
+#include "exif.h"
+
+#include <cmath>
+#include <iomanip>
+#include <map>
+#include <sstream>
+#include <tuple>
+#include <uchar.h>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(EXIF)
+
+/*
+ * List of EXIF tags that we set directly because they are not supported
+ * by libexif version 0.6.21.
+ */
+enum class _ExifTag {
+ OFFSET_TIME = 0x9010,
+ OFFSET_TIME_ORIGINAL = 0x9011,
+ OFFSET_TIME_DIGITIZED = 0x9012,
+};
+
+/*
+ * The Exif class should be instantiated and specific properties set
+ * through the exposed public API.
+ *
+ * Once all desired properties have been set, the user shall call
+ * generate() to process the entries and generate the Exif data.
+ *
+ * Calls to generate() must check the return code to determine if any error
+ * occurred during the construction of the Exif data, and if successful the
+ * data can be obtained using the data() function.
+ */
Exif::Exif()
	: valid_(false), data_(nullptr), order_(EXIF_BYTE_ORDER_INTEL),
	  exifData_(0), size_(0)
{
	/* Create an ExifMem allocator to construct entries. */
	mem_ = exif_mem_new_default();
	if (!mem_) {
		LOG(EXIF, Error) << "Failed to allocate ExifMem Allocator";
		return;
	}

	data_ = exif_data_new_mem(mem_);
	if (!data_) {
		LOG(EXIF, Error) << "Failed to allocate an ExifData structure";
		return;
	}

	valid_ = true;

	exif_data_set_option(data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
	exif_data_set_data_type(data_, EXIF_DATA_TYPE_COMPRESSED);

	/*
	 * Big-Endian: EXIF_BYTE_ORDER_MOTOROLA
	 * Little Endian: EXIF_BYTE_ORDER_INTEL
	 */
	exif_data_set_byte_order(data_, order_);

	/* Declare EXIF version 2.31 ("0231") as an undefined-format blob. */
	setString(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION,
		  EXIF_FORMAT_UNDEFINED, "0231");

	/* Create the mandatory EXIF fields with default data. */
	exif_data_fix(data_);
}
+
Exif::~Exif()
{
	/* Free the serialised blob produced by generate(). */
	if (exifData_)
		free(exifData_);

	if (data_) {
		/*
		 * Reset thumbnail data to avoid getting double-freed by
		 * libexif. It is owned by the caller (i.e. PostProcessorJpeg).
		 */
		data_->data = nullptr;
		data_->size = 0;

		exif_data_unref(data_);
	}

	if (mem_)
		exif_mem_unref(mem_);
}
+
+ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag)
+{
+ ExifContent *content = data_->ifd[ifd];
+ ExifEntry *entry = exif_content_get_entry(content, tag);
+
+ if (entry) {
+ exif_entry_ref(entry);
+ return entry;
+ }
+
+ entry = exif_entry_new_mem(mem_);
+ if (!entry) {
+ LOG(EXIF, Error) << "Failed to allocated new entry";
+ valid_ = false;
+ return nullptr;
+ }
+
+ exif_content_add_entry(content, entry);
+ exif_entry_initialize(entry, tag);
+
+ return entry;
+}
+
+ExifEntry *Exif::createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ unsigned long components, unsigned int size)
+{
+ ExifContent *content = data_->ifd[ifd];
+
+ /* Replace any existing entry with the same tag. */
+ ExifEntry *existing = exif_content_get_entry(content, tag);
+ exif_content_remove_entry(content, existing);
+
+ ExifEntry *entry = exif_entry_new_mem(mem_);
+ if (!entry) {
+ LOG(EXIF, Error) << "Failed to allocated new entry";
+ valid_ = false;
+ return nullptr;
+ }
+
+ void *buffer = exif_mem_alloc(mem_, size);
+ if (!buffer) {
+ LOG(EXIF, Error) << "Failed to allocate buffer for variable entry";
+ exif_mem_unref(mem_);
+ valid_ = false;
+ return nullptr;
+ }
+
+ entry->data = static_cast<unsigned char *>(buffer);
+ entry->components = components;
+ entry->format = format;
+ entry->size = size;
+ entry->tag = tag;
+
+ exif_content_add_entry(content, entry);
+
+ return entry;
+}
+
/* Store a single BYTE value in \a tag. */
void Exif::setByte(ExifIfd ifd, ExifTag tag, uint8_t item)
{
	ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_BYTE, 1, 1);
	if (!entry)
		return;

	entry->data[0] = item;
	exif_entry_unref(entry);
}

/* Store a single SHORT value in \a tag, honouring the byte order. */
void Exif::setShort(ExifIfd ifd, ExifTag tag, uint16_t item)
{
	ExifEntry *entry = createEntry(ifd, tag);
	if (!entry)
		return;

	exif_set_short(entry->data, order_, item);
	exif_entry_unref(entry);
}

/* Store a single LONG value in \a tag, honouring the byte order. */
void Exif::setLong(ExifIfd ifd, ExifTag tag, uint32_t item)
{
	ExifEntry *entry = createEntry(ifd, tag);
	if (!entry)
		return;

	exif_set_long(entry->data, order_, item);
	exif_entry_unref(entry);
}

/* Convenience wrapper storing a single RATIONAL value. */
void Exif::setRational(ExifIfd ifd, ExifTag tag, ExifRational item)
{
	setRational(ifd, tag, { &item, 1 });
}

/* Store an array of RATIONAL values in \a tag. */
void Exif::setRational(ExifIfd ifd, ExifTag tag, Span<const ExifRational> items)
{
	ExifEntry *entry = createEntry(ifd, tag, EXIF_FORMAT_RATIONAL,
				       items.size(),
				       items.size() * sizeof(ExifRational));
	if (!entry)
		return;

	for (size_t i = 0; i < items.size(); i++)
		exif_set_rational(entry->data + i * sizeof(ExifRational),
				  order_, items[i]);
	exif_entry_unref(entry);
}
+
/* 8-byte encoding signatures prefixed to EXIF "user comment"-style strings. */
static const std::map<Exif::StringEncoding, std::array<uint8_t, 8>> stringEncodingCodes = {
	{ Exif::ASCII, { 0x41, 0x53, 0x43, 0x49, 0x49, 0x00, 0x00, 0x00 } },
	{ Exif::Unicode, { 0x55, 0x4e, 0x49, 0x43, 0x4f, 0x44, 0x45, 0x00 } },
};

/*
 * \brief Store \a item as a string entry for \a tag
 *
 * ASCII-format entries are transliterated and null-terminated. Other
 * formats optionally carry an 8-byte encoding signature, with Unicode
 * payloads converted to UTF-16 in the current byte order.
 */
void Exif::setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
		     const std::string &item, StringEncoding encoding)
{
	std::string ascii;
	size_t length;
	const char *str;
	std::vector<uint8_t> buf;

	if (format == EXIF_FORMAT_ASCII) {
		ascii = utils::toAscii(item);
		str = ascii.c_str();

		/* Pad 1 extra byte to null-terminate the ASCII string. */
		length = ascii.length() + 1;
	} else {
		std::u16string u16str;

		/* Prefix the payload with the encoding signature, if any. */
		auto encodingString = stringEncodingCodes.find(encoding);
		if (encodingString != stringEncodingCodes.end()) {
			buf = {
				encodingString->second.begin(),
				encodingString->second.end()
			};
		}

		switch (encoding) {
		case Unicode:
			u16str = utf8ToUtf16(item);

			/* Serialise UTF-16 code units in the EXIF byte order. */
			buf.resize(8 + u16str.size() * 2);
			for (size_t i = 0; i < u16str.size(); i++) {
				if (order_ == EXIF_BYTE_ORDER_INTEL) {
					buf[8 + 2 * i] = u16str[i] & 0xff;
					buf[8 + 2 * i + 1] = (u16str[i] >> 8) & 0xff;
				} else {
					buf[8 + 2 * i] = (u16str[i] >> 8) & 0xff;
					buf[8 + 2 * i + 1] = u16str[i] & 0xff;
				}
			}

			break;

		case ASCII:
		case NoEncoding:
			buf.insert(buf.end(), item.begin(), item.end());
			break;
		}

		str = reinterpret_cast<const char *>(buf.data());

		/*
		 * Strings stored in different formats (EXIF_FORMAT_UNDEFINED)
		 * are not null-terminated.
		 */
		length = buf.size();
	}

	ExifEntry *entry = createEntry(ifd, tag, format, length, length);
	if (!entry)
		return;

	memcpy(entry->data, str, length);
	exif_entry_unref(entry);
}
+
/* Record the camera manufacturer name. */
void Exif::setMake(const std::string &make)
{
	setString(EXIF_IFD_0, EXIF_TAG_MAKE, EXIF_FORMAT_ASCII, make);
}

/* Record the camera model name. */
void Exif::setModel(const std::string &model)
{
	setString(EXIF_IFD_0, EXIF_TAG_MODEL, EXIF_FORMAT_ASCII, model);
}

/* Record the pixel dimensions of the encoded image. */
void Exif::setSize(const Size &size)
{
	setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, size.height);
	setLong(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, size.width);
}
+
/*
 * \brief Record the capture timestamp in local time
 *
 * Sets the DateTime, DateTimeOriginal and DateTimeDigitized tags, the
 * corresponding timezone offsets when available, and the sub-second
 * component of the timestamp.
 */
void Exif::setTimestamp(time_t timestamp, std::chrono::milliseconds msec)
{
	struct tm tm;
	localtime_r(&timestamp, &tm);

	/* 20 bytes fit "YYYY:MM:DD HH:MM:SS" plus the terminating NUL. */
	char str[20];
	strftime(str, sizeof(str), "%Y:%m:%d %H:%M:%S", &tm);
	std::string ts(str);

	setString(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, ts);
	setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, ts);
	setString(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, ts);

	/* Query and set timezone information if available. */
	int r = strftime(str, sizeof(str), "%z", &tm);
	if (r <= 0)
		return;

	/* Turn "+hhmm" into the "+hh:mm" form EXIF expects. */
	std::string tz(str);
	tz.insert(3, 1, ':');
	/* The OFFSET_TIME* tags are set by value (see the _ExifTag enum). */
	setString(EXIF_IFD_EXIF,
		  static_cast<ExifTag>(_ExifTag::OFFSET_TIME),
		  EXIF_FORMAT_ASCII, tz);
	setString(EXIF_IFD_EXIF,
		  static_cast<ExifTag>(_ExifTag::OFFSET_TIME_ORIGINAL),
		  EXIF_FORMAT_ASCII, tz);
	setString(EXIF_IFD_EXIF,
		  static_cast<ExifTag>(_ExifTag::OFFSET_TIME_DIGITIZED),
		  EXIF_FORMAT_ASCII, tz);

	/* Zero-pad the sub-second value to three digits. */
	std::stringstream sstr;
	sstr << std::setfill('0') << std::setw(3) << msec.count();
	std::string subsec = sstr.str();

	setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME,
		  EXIF_FORMAT_ASCII, subsec);
	setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL,
		  EXIF_FORMAT_ASCII, subsec);
	setString(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED,
		  EXIF_FORMAT_ASCII, subsec);
}
+
/*
 * \brief Record the GPS date and time stamps from \a timestamp
 *
 * GPS stamps are expressed in UTC, hence the use of gmtime_r() rather than
 * localtime_r().
 */
void Exif::setGPSDateTimestamp(time_t timestamp)
{
	struct tm tm;
	gmtime_r(&timestamp, &tm);

	/* 11 bytes fit "YYYY:MM:DD" plus the terminating NUL. */
	char str[11];
	strftime(str, sizeof(str), "%Y:%m:%d", &tm);
	std::string tsStr(str);

	setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP),
		  EXIF_FORMAT_ASCII, tsStr);

	/* Set GPS_TIME_STAMP */
	ExifRational ts[] = {
		{ static_cast<ExifLong>(tm.tm_hour), 1 },
		{ static_cast<ExifLong>(tm.tm_min), 1 },
		{ static_cast<ExifLong>(tm.tm_sec), 1 },
	};

	setRational(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP),
		    ts);
}
+
+std::tuple<int, int, int> Exif::degreesToDMS(double decimalDegrees)
+{
+ int degrees = std::trunc(decimalDegrees);
+ double minutes = std::abs((decimalDegrees - degrees) * 60);
+ double seconds = (minutes - std::trunc(minutes)) * 60;
+
+ return { degrees, std::trunc(minutes), std::round(seconds) };
+}
+
/* Store a degrees/minutes/seconds triplet as three RATIONAL values. */
void Exif::setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec)
{
	ExifRational coords[] = {
		{ static_cast<ExifLong>(deg), 1 },
		{ static_cast<ExifLong>(min), 1 },
		{ static_cast<ExifLong>(sec), 1 },
	};

	setRational(ifd, tag, coords);
}

/*
 * \brief Set GPS location (lat, long, alt)
 * \param[in] coords Pointer to coordinates latitude, longitude, and altitude,
 * first two in degrees, the third in meters
 */
void Exif::setGPSLocation(const double *coords)
{
	int deg, min, sec;

	/* The hemisphere reference carries the sign, the DMS value doesn't. */
	std::tie<int, int, int>(deg, min, sec) = degreesToDMS(coords[0]);
	setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF),
		  EXIF_FORMAT_ASCII, deg >= 0 ? "N" : "S");
	setGPSDMS(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE),
		  std::abs(deg), min, sec);

	std::tie<int, int, int>(deg, min, sec) = degreesToDMS(coords[1]);
	setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF),
		  EXIF_FORMAT_ASCII, deg >= 0 ? "E" : "W");
	setGPSDMS(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE),
		  std::abs(deg), min, sec);

	/* Altitude reference: 0 above, 1 below sea level (per EXIF spec). */
	setByte(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF),
		coords[2] >= 0 ? 0 : 1);
	setRational(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE),
		    ExifRational{ static_cast<ExifLong>(std::abs(coords[2])), 1 });
}

/* Record the GPS positioning method as an un-encoded UNDEFINED blob. */
void Exif::setGPSMethod(const std::string &method)
{
	setString(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD),
		  EXIF_FORMAT_UNDEFINED, method, NoEncoding);
}
+
+void Exif::setOrientation(int orientation)
+{
+ int value;
+ switch (orientation) {
+ case 0:
+ default:
+ value = 1;
+ break;
+ case 90:
+ value = 6;
+ break;
+ case 180:
+ value = 3;
+ break;
+ case 270:
+ value = 8;
+ break;
+ }
+
+ setShort(EXIF_IFD_0, EXIF_TAG_ORIENTATION, value);
+}
+
/*
 * \brief Attach a pre-encoded thumbnail to the EXIF data
 *
 * Takes ownership of the thumbnail bytes. The raw pointers handed to
 * libexif are reset in the destructor to avoid a double free.
 */
void Exif::setThumbnail(std::vector<unsigned char> &&thumbnail,
			Compression compression)
{
	thumbnailData_ = std::move(thumbnail);

	data_->data = thumbnailData_.data();
	data_->size = thumbnailData_.size();

	setShort(EXIF_IFD_0, EXIF_TAG_COMPRESSION, compression);
}
+
/* Record the focal length in mm, stored with 1/1000 precision. */
void Exif::setFocalLength(float length)
{
	ExifRational rational = { static_cast<ExifLong>(length * 1000), 1000 };
	setRational(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, rational);
}

/* Record the exposure time, converting nanoseconds to a rational second. */
void Exif::setExposureTime(uint64_t nsec)
{
	ExifRational rational = { static_cast<ExifLong>(nsec), 1000000000 };
	setRational(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME, rational);
}

/* Record the aperture (f-number), stored with 1/10000 precision. */
void Exif::setAperture(float size)
{
	ExifRational rational = { static_cast<ExifLong>(size * 10000), 10000 };
	setRational(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER, rational);
}

/* Record the ISO speed rating. */
void Exif::setISO(uint16_t iso)
{
	setShort(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso);
}

/* Record the flash status bitfield (see the Flash enum). */
void Exif::setFlash(Flash flash)
{
	setShort(EXIF_IFD_EXIF, EXIF_TAG_FLASH, static_cast<ExifShort>(flash));
}

/* Record the white balance mode (auto or manual). */
void Exif::setWhiteBalance(WhiteBalance wb)
{
	setShort(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, static_cast<ExifShort>(wb));
}
+
+/**
+ * \brief Convert UTF-8 string to UTF-16 string
+ * \param[in] str String to convert
+ *
+ * \return \a str in UTF-16
+ */
+std::u16string Exif::utf8ToUtf16(const std::string &str)
+{
+ mbstate_t state{};
+ char16_t c16;
+ const char *ptr = str.data();
+ const char *end = ptr + str.size();
+
+ std::u16string ret;
+ while (size_t rc = mbrtoc16(&c16, ptr, end - ptr + 1, &state)) {
+ if (rc == static_cast<size_t>(-2) ||
+ rc == static_cast<size_t>(-1))
+ break;
+
+ ret.push_back(c16);
+
+ if (rc > 0)
+ ptr += rc;
+ }
+
+ return ret;
+}
+
/*
 * \brief Serialise all entries into the internal EXIF data buffer
 *
 * Returns 0 on success or -1 when any earlier entry creation failed. The
 * resulting blob is accessible through data() until the next generate()
 * call or destruction.
 */
[[nodiscard]] int Exif::generate()
{
	/* Discard the result of any previous generate() call. */
	if (exifData_) {
		free(exifData_);
		exifData_ = nullptr;
	}

	if (!valid_) {
		LOG(EXIF, Error) << "Generated EXIF data is invalid";
		return -1;
	}

	/* libexif allocates exifData_ and reports its size in size_. */
	exif_data_save_data(data_, &exifData_, &size_);

	LOG(EXIF, Debug) << "Created EXIF instance (" << size_ << " bytes)";

	return 0;
}
diff --git a/src/android/jpeg/exif.h b/src/android/jpeg/exif.h
new file mode 100644
index 00000000..446d53f3
--- /dev/null
+++ b/src/android/jpeg/exif.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * EXIF tag creator using libexif
+ */
+
+#pragma once
+
+#include <chrono>
+#include <string>
+#include <time.h>
+#include <vector>
+
+#include <libexif/exif-data.h>
+
+#include <libcamera/base/span.h>
+
+#include <libcamera/geometry.h>
+
+/*
+ * Builder for an EXIF data block using libexif. Setters populate
+ * individual tags; generate() serializes them into the byte stream
+ * exposed through data().
+ */
+class Exif
+{
+public:
+ Exif();
+ ~Exif();
+
+ enum Compression {
+ None = 1,
+ JPEG = 6,
+ };
+
+ enum Flash {
+ /* bit 0 */
+ Fired = 0x01,
+ /* bits 1 and 2 */
+ StrobeDetected = 0x04,
+ StrobeNotDetected = 0x06,
+ /* bits 3 and 4 */
+ ModeCompulsoryFiring = 0x08,
+ ModeCompulsorySuppression = 0x10,
+ ModeAuto = 0x18,
+ /* bit 5 */
+ FlashNotPresent = 0x20,
+ /* bit 6 */
+ RedEye = 0x40,
+ };
+
+ enum WhiteBalance {
+ Auto = 0,
+ Manual = 1,
+ };
+
+ enum StringEncoding {
+ NoEncoding = 0,
+ ASCII = 1,
+ Unicode = 2,
+ };
+
+ void setMake(const std::string &make);
+ void setModel(const std::string &model);
+
+ void setOrientation(int orientation);
+ void setSize(const libcamera::Size &size);
+ void setThumbnail(std::vector<unsigned char> &&thumbnail,
+ Compression compression);
+ void setTimestamp(time_t timestamp, std::chrono::milliseconds msec);
+
+ void setGPSDateTimestamp(time_t timestamp);
+ void setGPSLocation(const double *coords);
+ void setGPSMethod(const std::string &method);
+
+ void setFocalLength(float length);
+ void setExposureTime(uint64_t nsec);
+ void setAperture(float size);
+ void setISO(uint16_t iso);
+ void setFlash(Flash flash);
+ void setWhiteBalance(WhiteBalance wb);
+
+ /* View of the serialized block; only valid after generate(). */
+ libcamera::Span<const uint8_t> data() const { return { exifData_, size_ }; }
+ [[nodiscard]] int generate();
+
+private:
+ ExifEntry *createEntry(ExifIfd ifd, ExifTag tag);
+ ExifEntry *createEntry(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ unsigned long components, unsigned int size);
+
+ void setByte(ExifIfd ifd, ExifTag tag, uint8_t item);
+ void setShort(ExifIfd ifd, ExifTag tag, uint16_t item);
+ void setLong(ExifIfd ifd, ExifTag tag, uint32_t item);
+ void setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string &item,
+ StringEncoding encoding = NoEncoding);
+ void setRational(ExifIfd ifd, ExifTag tag, ExifRational item);
+ void setRational(ExifIfd ifd, ExifTag tag,
+ libcamera::Span<const ExifRational> items);
+
+ std::tuple<int, int, int> degreesToDMS(double decimalDegrees);
+ void setGPSDMS(ExifIfd ifd, ExifTag tag, int deg, int min, int sec);
+
+ std::u16string utf8ToUtf16(const std::string &str);
+
+ bool valid_;
+
+ ExifData *data_;
+ ExifMem *mem_;
+ ExifByteOrder order_;
+
+ /* Serialized output produced by generate(); freed/regenerated there. */
+ unsigned char *exifData_;
+ unsigned int size_;
+
+ std::vector<unsigned char> thumbnailData_;
+};
diff --git a/src/android/jpeg/meson.build b/src/android/jpeg/meson.build
new file mode 100644
index 00000000..3402e614
--- /dev/null
+++ b/src/android/jpeg/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: CC0-1.0
+
+# JPEG sources built for every Android platform.
+android_hal_sources += files([
+ 'encoder_libjpeg.cpp',
+ 'exif.cpp',
+ 'post_processor_jpeg.cpp',
+ 'thumbnailer.cpp'
+])
+
+# On Chrome OS an extra encoder backed by libcros_camera is built
+# (presumably the platform JPEG encode accelerator — confirm).
+platform = get_option('android_platform')
+if platform == 'cros'
+ android_hal_sources += files(['encoder_jea.cpp'])
+ android_deps += [dependency('libcros_camera')]
+endif
diff --git a/src/android/jpeg/post_processor_jpeg.cpp b/src/android/jpeg/post_processor_jpeg.cpp
new file mode 100644
index 00000000..89b8a401
--- /dev/null
+++ b/src/android/jpeg/post_processor_jpeg.cpp
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG Post Processor
+ */
+
+#include "post_processor_jpeg.h"
+
+#include <chrono>
+
+#include "../camera_device.h"
+#include "../camera_metadata.h"
+#include "../camera_request.h"
+#if defined(OS_CHROMEOS)
+#include "encoder_jea.h"
+#else /* !defined(OS_CHROMEOS) */
+#include "encoder_libjpeg.h"
+#endif
+#include "exif.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+
+using namespace libcamera;
+using namespace std::chrono_literals;
+
+LOG_DEFINE_CATEGORY(JPEG)
+
+/* Keep a back-pointer to the camera device for maker/model and buffer sizing. */
+PostProcessorJpeg::PostProcessorJpeg(CameraDevice *const device)
+ : cameraDevice_(device)
+{
+}
+
+/*
+ * Validate the input/output stream pair and instantiate the platform
+ * encoder. Returns 0 on success or a negative errno on failure.
+ */
+int PostProcessorJpeg::configure(const StreamConfiguration &inCfg,
+ const StreamConfiguration &outCfg)
+{
+ /* The JPEG is produced at the input resolution: sizes must match. */
+ if (inCfg.size != outCfg.size) {
+ LOG(JPEG, Error) << "Mismatch of input and output stream sizes";
+ return -EINVAL;
+ }
+
+ if (outCfg.pixelFormat != formats::MJPEG) {
+ LOG(JPEG, Error) << "Output stream pixel format is not JPEG";
+ return -EINVAL;
+ }
+
+ streamSize_ = outCfg.size;
+
+ thumbnailer_.configure(inCfg.size, inCfg.pixelFormat);
+
+ /* Chrome OS delegates encoding to the platform; otherwise libjpeg. */
+#if defined(OS_CHROMEOS)
+ encoder_ = std::make_unique<EncoderJea>();
+#else /* !defined(OS_CHROMEOS) */
+ encoder_ = std::make_unique<EncoderLibJpeg>();
+#endif
+
+ return encoder_->configure(inCfg);
+}
+
+/*
+ * Scale \a source down to \a targetSize and JPEG-compress the result
+ * into \a thumbnail. On any failure \a thumbnail is left empty.
+ */
+void PostProcessorJpeg::generateThumbnail(const FrameBuffer &source,
+ const Size &targetSize,
+ unsigned int quality,
+ std::vector<unsigned char> *thumbnail)
+{
+ /* Stores the raw scaled-down thumbnail bytes. */
+ std::vector<unsigned char> rawThumbnail;
+
+ thumbnailer_.createThumbnail(source, targetSize, &rawThumbnail);
+
+ StreamConfiguration thCfg;
+ thCfg.size = targetSize;
+ thCfg.pixelFormat = thumbnailer_.pixelFormat();
+ int ret = thumbnailEncoder_.configure(thCfg);
+
+ if (!rawThumbnail.empty() && !ret) {
+ /*
+ * \todo Avoid value-initialization of all elements of the
+ * vector.
+ */
+ thumbnail->resize(rawThumbnail.size());
+
+ /*
+ * Split planes manually as the encoder expects a vector of
+ * planes.
+ *
+ * \todo Pass a vector of planes directly to
+ * Thumbnailer::createThumbnailer above and remove the manual
+ * planes split from here.
+ */
+ std::vector<Span<uint8_t>> thumbnailPlanes;
+ const PixelFormatInfo &formatNV12 = PixelFormatInfo::info(formats::NV12);
+ size_t yPlaneSize = formatNV12.planeSize(targetSize, 0);
+ size_t uvPlaneSize = formatNV12.planeSize(targetSize, 1);
+ thumbnailPlanes.push_back({ rawThumbnail.data(), yPlaneSize });
+ thumbnailPlanes.push_back({ rawThumbnail.data() + yPlaneSize, uvPlaneSize });
+
+ int jpeg_size = thumbnailEncoder_.encode(thumbnailPlanes,
+ *thumbnail, {}, quality);
+ /* Shrink to the actual compressed size reported by the encoder. */
+ thumbnail->resize(jpeg_size);
+
+ LOG(JPEG, Debug)
+ << "Thumbnail compress returned "
+ << jpeg_size << " bytes";
+ }
+}
+
+/*
+ * Encode the source frame to JPEG, embedding EXIF data derived from the
+ * Android request settings, and fill in the trailing camera3 JPEG blob
+ * header. Completion (success or error) is signalled via processComplete.
+ */
+void PostProcessorJpeg::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+ ASSERT(encoder_);
+
+ const FrameBuffer &source = *streamBuffer->srcBuffer;
+ CameraBuffer *destination = streamBuffer->dstBuffer.get();
+
+ /* The JPEG output is a single-plane blob buffer. */
+ ASSERT(destination->numPlanes() == 1);
+
+ const CameraMetadata &requestMetadata = streamBuffer->request->settings_;
+ CameraMetadata *resultMetadata = streamBuffer->request->resultMetadata_.get();
+ camera_metadata_ro_entry_t entry;
+ int ret;
+
+ /* Set EXIF metadata for various tags. */
+ Exif exif;
+ exif.setMake(cameraDevice_->maker());
+ exif.setModel(cameraDevice_->model());
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_ORIENTATION, &entry);
+
+ /* Default to no rotation when the request does not specify one. */
+ const uint32_t jpegOrientation = ret ? *entry.data.i32 : 0;
+ resultMetadata->addEntry(ANDROID_JPEG_ORIENTATION, jpegOrientation);
+ exif.setOrientation(jpegOrientation);
+
+ exif.setSize(streamSize_);
+ /*
+ * We set the frame's EXIF timestamp as the time of encode.
+ * Since the precision we need for EXIF timestamp is only one
+ * second, it is good enough.
+ */
+ exif.setTimestamp(std::time(nullptr), 0ms);
+
+ ret = resultMetadata->getEntry(ANDROID_SENSOR_EXPOSURE_TIME, &entry);
+ exif.setExposureTime(ret ? *entry.data.i64 : 0);
+ ret = requestMetadata.getEntry(ANDROID_LENS_APERTURE, &entry);
+ if (ret)
+ exif.setAperture(*entry.data.f);
+
+ /* Fall back to ISO 100 when the result has no sensitivity entry. */
+ ret = resultMetadata->getEntry(ANDROID_SENSOR_SENSITIVITY, &entry);
+ exif.setISO(ret ? *entry.data.i32 : 100);
+
+ exif.setFlash(Exif::Flash::FlashNotPresent);
+ exif.setWhiteBalance(Exif::WhiteBalance::Auto);
+
+ exif.setFocalLength(1.0);
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_TIMESTAMP, &entry);
+ if (ret) {
+ exif.setGPSDateTimestamp(*entry.data.i64);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_TIMESTAMP,
+ *entry.data.i64);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_SIZE, &entry);
+ if (ret) {
+ const int32_t *data = entry.data.i32;
+ Size thumbnailSize = { static_cast<uint32_t>(data[0]),
+ static_cast<uint32_t>(data[1]) };
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, &entry);
+ uint8_t quality = ret ? *entry.data.u8 : 95;
+ resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_QUALITY, quality);
+
+ /* A (0, 0) thumbnail size means no thumbnail was requested. */
+ if (thumbnailSize != Size(0, 0)) {
+ std::vector<unsigned char> thumbnail;
+ generateThumbnail(source, thumbnailSize, quality, &thumbnail);
+ if (!thumbnail.empty())
+ exif.setThumbnail(std::move(thumbnail), Exif::Compression::JPEG);
+ }
+
+ resultMetadata->addEntry(ANDROID_JPEG_THUMBNAIL_SIZE, data, 2);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_COORDINATES, &entry);
+ if (ret) {
+ exif.setGPSLocation(entry.data.d);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_COORDINATES,
+ entry.data.d, 3);
+ }
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD, &entry);
+ if (ret) {
+ std::string method(entry.data.u8, entry.data.u8 + entry.count);
+ exif.setGPSMethod(method);
+ resultMetadata->addEntry(ANDROID_JPEG_GPS_PROCESSING_METHOD,
+ entry.data.u8, entry.count);
+ }
+
+ /* Encoding proceeds even on EXIF failure; exif.data() is then empty. */
+ if (exif.generate() != 0)
+ LOG(JPEG, Error) << "Failed to generate valid EXIF data";
+
+ ret = requestMetadata.getEntry(ANDROID_JPEG_QUALITY, &entry);
+ const uint8_t quality = ret ? *entry.data.u8 : 95;
+ resultMetadata->addEntry(ANDROID_JPEG_QUALITY, quality);
+
+ int jpeg_size = encoder_->encode(streamBuffer, exif.data(), quality);
+ if (jpeg_size < 0) {
+ LOG(JPEG, Error) << "Failed to encode stream image";
+ processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+ return;
+ }
+
+ /*
+ * Fill in the JPEG blob header. Per the camera3 API it sits at the
+ * very end of the output buffer.
+ */
+ uint8_t *resultPtr = destination->plane(0).data()
+ + destination->jpegBufferSize(cameraDevice_->maxJpegBufferSize())
+ - sizeof(struct camera3_jpeg_blob);
+ auto *blob = reinterpret_cast<struct camera3_jpeg_blob *>(resultPtr);
+ blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+ blob->jpeg_size = jpeg_size;
+
+ /* Update the JPEG result Metadata. */
+ resultMetadata->addEntry(ANDROID_JPEG_SIZE, jpeg_size);
+ processComplete.emit(streamBuffer, PostProcessor::Status::Success);
+}
diff --git a/src/android/jpeg/post_processor_jpeg.h b/src/android/jpeg/post_processor_jpeg.h
new file mode 100644
index 00000000..6fe21457
--- /dev/null
+++ b/src/android/jpeg/post_processor_jpeg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * JPEG Post Processor
+ */
+
+#pragma once
+
+#include "../post_processor.h"
+#include "encoder_libjpeg.h"
+#include "thumbnailer.h"
+
+#include <libcamera/geometry.h>
+
+class CameraDevice;
+
+/*
+ * Post-processor converting a configured input stream into an Android
+ * JPEG blob stream, including EXIF tags and an optional thumbnail.
+ */
+class PostProcessorJpeg : public PostProcessor
+{
+public:
+ PostProcessorJpeg(CameraDevice *const device);
+
+ int configure(const libcamera::StreamConfiguration &incfg,
+ const libcamera::StreamConfiguration &outcfg) override;
+ void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override;
+
+private:
+ void generateThumbnail(const libcamera::FrameBuffer &source,
+ const libcamera::Size &targetSize,
+ unsigned int quality,
+ std::vector<unsigned char> *thumbnail);
+
+ CameraDevice *const cameraDevice_;
+ /* Main-image encoder, selected per platform in configure(). */
+ std::unique_ptr<Encoder> encoder_;
+ libcamera::Size streamSize_;
+ /* Thumbnails are always software-encoded with libjpeg. */
+ EncoderLibJpeg thumbnailEncoder_;
+ Thumbnailer thumbnailer_;
+};
diff --git a/src/android/jpeg/thumbnailer.cpp b/src/android/jpeg/thumbnailer.cpp
new file mode 100644
index 00000000..adafc468
--- /dev/null
+++ b/src/android/jpeg/thumbnailer.cpp
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Simple image thumbnailer
+ */
+
+#include "thumbnailer.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(Thumbnailer)
+
+/* Start invalid; configure() must succeed before createThumbnail(). */
+Thumbnailer::Thumbnailer()
+ : valid_(false)
+{
+}
+
+/*
+ * Record the source geometry and format. Only NV12 input is supported;
+ * anything else leaves the thumbnailer invalid.
+ */
+void Thumbnailer::configure(const Size &sourceSize, PixelFormat pixelFormat)
+{
+ sourceSize_ = sourceSize;
+ pixelFormat_ = pixelFormat;
+
+ if (pixelFormat_ != formats::NV12) {
+ LOG(Thumbnailer, Error)
+ << "Failed to configure: Pixel Format "
+ << pixelFormat_ << " unsupported.";
+ return;
+ }
+
+ valid_ = true;
+}
+
+/*
+ * Scale the NV12 \a source down to \a targetSize using nearest-neighbour
+ * sampling, writing packed NV12 output into \a destination. On failure
+ * \a destination is left untouched.
+ */
+void Thumbnailer::createThumbnail(const FrameBuffer &source,
+ const Size &targetSize,
+ std::vector<unsigned char> *destination)
+{
+ MappedFrameBuffer frame(&source, MappedFrameBuffer::MapFlag::Read);
+ if (!frame.isValid()) {
+ LOG(Thumbnailer, Error)
+ << "Failed to map FrameBuffer : "
+ << strerror(frame.error());
+ return;
+ }
+
+ if (!valid_) {
+ LOG(Thumbnailer, Error) << "Config is unconfigured or invalid.";
+ return;
+ }
+
+ const unsigned int sw = sourceSize_.width;
+ const unsigned int sh = sourceSize_.height;
+ const unsigned int tw = targetSize.width;
+ const unsigned int th = targetSize.height;
+
+ /* NV12: a Y plane and an interleaved CbCr plane; even dimensions. */
+ ASSERT(frame.planes().size() == 2);
+ ASSERT(tw % 2 == 0 && th % 2 == 0);
+
+ /* Image scaling block implementing nearest-neighbour algorithm. */
+ unsigned char *src = frame.planes()[0].data();
+ unsigned char *srcC = frame.planes()[1].data();
+ unsigned char *srcCb, *srcCr;
+ unsigned char *dstY, *srcY;
+
+ /* Y plane of th*tw bytes followed by a half-height CbCr plane. */
+ size_t dstSize = (th * tw) + ((th / 2) * tw);
+ destination->resize(dstSize);
+ unsigned char *dst = destination->data();
+ unsigned char *dstC = dst + th * tw;
+
+ /* Process output in 2x2 blocks, as chroma is subsampled 2x2. */
+ for (unsigned int y = 0; y < th; y += 2) {
+ unsigned int sourceY = (sh * y + th / 2) / th;
+
+ dstY = dst + y * tw;
+ srcY = src + sw * sourceY;
+ srcCb = srcC + (sourceY / 2) * sw + 0;
+ srcCr = srcC + (sourceY / 2) * sw + 1;
+
+ for (unsigned int x = 0; x < tw; x += 2) {
+ unsigned int sourceX = (sw * x + tw / 2) / tw;
+
+ dstY[x] = srcY[sourceX];
+ dstY[tw + x] = srcY[sw + sourceX];
+ dstY[x + 1] = srcY[sourceX + 1];
+ dstY[tw + x + 1] = srcY[sw + sourceX + 1];
+
+ dstC[(y / 2) * tw + x + 0] = srcCb[(sourceX / 2) * 2];
+ dstC[(y / 2) * tw + x + 1] = srcCr[(sourceX / 2) * 2];
+ }
+ }
+}
diff --git a/src/android/jpeg/thumbnailer.h b/src/android/jpeg/thumbnailer.h
new file mode 100644
index 00000000..1b836e59
--- /dev/null
+++ b/src/android/jpeg/thumbnailer.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * Simple image thumbnailer
+ */
+
+#pragma once
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/geometry.h>
+
+#include "libcamera/internal/formats.h"
+
+/*
+ * Nearest-neighbour downscaler producing raw NV12 thumbnails from an
+ * NV12 source frame.
+ */
+class Thumbnailer
+{
+public:
+ Thumbnailer();
+
+ void configure(const libcamera::Size &sourceSize,
+ libcamera::PixelFormat pixelFormat);
+ void createThumbnail(const libcamera::FrameBuffer &source,
+ const libcamera::Size &targetSize,
+ std::vector<unsigned char> *dest);
+ const libcamera::PixelFormat &pixelFormat() const { return pixelFormat_; }
+
+private:
+ libcamera::PixelFormat pixelFormat_;
+ libcamera::Size sourceSize_;
+
+ /* Set by configure() when the source format is supported. */
+ bool valid_;
+};
diff --git a/src/android/meson.build b/src/android/meson.build
index 5a5a332e..7b226a4b 100644
--- a/src/android/meson.build
+++ b/src/android/meson.build
@@ -1,15 +1,55 @@
+# SPDX-License-Identifier: CC0-1.0
+
+android_deps = [
+ dependency('libexif', required : get_option('android')),
+ dependency('libjpeg', required : get_option('android')),
+ libcamera_private,
+ libyuv_dep,
+]
+
+android_enabled = true
+
+foreach dep : android_deps
+ if not dep.found()
+ android_enabled = false
+ subdir_done()
+ endif
+endforeach
+
android_hal_sources = files([
'camera3_hal.cpp',
- 'camera_hal_manager.cpp',
+ 'camera_capabilities.cpp',
'camera_device.cpp',
+ 'camera_hal_config.cpp',
+ 'camera_hal_manager.cpp',
'camera_metadata.cpp',
'camera_ops.cpp',
+ 'camera_request.cpp',
+ 'camera_stream.cpp',
+ 'hal_framebuffer.cpp',
+ 'yuv/post_processor_yuv.cpp'
])
+android_cpp_args = []
+
+subdir('cros')
+subdir('jpeg')
+subdir('mm')
+
android_camera_metadata_sources = files([
'metadata/camera_metadata.c',
])
android_camera_metadata = static_library('camera_metadata',
android_camera_metadata_sources,
+ c_args : '-Wno-shadow',
include_directories : android_includes)
+
+libcamera_hal = shared_library('libcamera-hal',
+ android_hal_sources,
+ name_prefix : '',
+ link_with : android_camera_metadata,
+ install : true,
+ cpp_args : android_cpp_args,
+ include_directories : android_includes,
+ dependencies : android_deps)
diff --git a/src/android/mm/cros_camera_buffer.cpp b/src/android/mm/cros_camera_buffer.cpp
new file mode 100644
index 00000000..e2a44a2a
--- /dev/null
+++ b/src/android/mm/cros_camera_buffer.cpp
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Chromium OS buffer backend using CameraBufferManager
+ */
+
+#include "../camera_buffer.h"
+
+#include <libcamera/base/log.h>
+
+#include "cros-camera/camera_buffer_manager.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+/*
+ * Chrome OS backend for CameraBuffer: wraps a gralloc buffer_handle_t
+ * registered with the cros CameraBufferManager and maps it lazily.
+ */
+class CameraBuffer::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+ Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat, const Size &size,
+ int flags);
+ ~Private();
+
+ bool isValid() const { return registered_; }
+
+ unsigned int numPlanes() const;
+
+ Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+ void map();
+
+ cros::CameraBufferManager *bufferManager_;
+ buffer_handle_t handle_;
+ unsigned int numPlanes_;
+ bool mapped_;
+ bool registered_;
+ /* Single-plane buffers map to addr, multi-plane to ycbcr. */
+ union {
+ void *addr;
+ android_ycbcr ycbcr;
+ } mem;
+};
+
+/*
+ * Register the handle with the buffer manager; on failure the buffer
+ * stays invalid (registered_ == false, reported through isValid()).
+ */
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+ buffer_handle_t camera3Buffer,
+ [[maybe_unused]] PixelFormat pixelFormat,
+ [[maybe_unused]] const Size &size,
+ [[maybe_unused]] int flags)
+ : handle_(camera3Buffer), numPlanes_(0), mapped_(false),
+ registered_(false)
+{
+ bufferManager_ = cros::CameraBufferManager::GetInstance();
+ if (!bufferManager_) {
+ LOG(HAL, Fatal)
+ << "Failed to get cros CameraBufferManager instance";
+ return;
+ }
+
+ int ret = bufferManager_->Register(camera3Buffer);
+ if (ret) {
+ LOG(HAL, Error) << "Failed registering a buffer: " << ret;
+ return;
+ }
+
+ registered_ = true;
+ numPlanes_ = bufferManager_->GetNumPlanes(camera3Buffer);
+}
+
+/* Undo Lock() and Register() in reverse order; failures are only logged. */
+CameraBuffer::Private::~Private()
+{
+ int ret;
+ if (mapped_) {
+ ret = bufferManager_->Unlock(handle_);
+ if (ret != 0)
+ LOG(HAL, Error) << "Failed to unlock buffer: "
+ << strerror(-ret);
+ }
+
+ if (registered_) {
+ ret = bufferManager_->Deregister(handle_);
+ if (ret != 0)
+ LOG(HAL, Error) << "Failed to deregister buffer: "
+ << strerror(-ret);
+ }
+}
+
+/* Query the plane count from the buffer manager rather than the cache. */
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+ return bufferManager_->GetNumPlanes(handle_);
+}
+
+/*
+ * Return a span over one plane, mapping the buffer on first access.
+ * An empty span is returned when mapping fails.
+ */
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+ if (!mapped_)
+ map();
+ if (!mapped_)
+ return {};
+
+ void *addr;
+
+ switch (numPlanes()) {
+ case 1:
+ addr = mem.addr;
+ break;
+ default:
+ switch (plane) {
+ case 0:
+ addr = mem.ycbcr.y;
+ break;
+ case 1:
+ addr = mem.ycbcr.cb;
+ break;
+ case 2:
+ addr = mem.ycbcr.cr;
+ break;
+ /*
+ * NOTE(review): no default case — addr stays
+ * uninitialized for plane > 2; confirm callers never
+ * pass an out-of-range plane index.
+ */
+ }
+ }
+
+ return { static_cast<uint8_t *>(addr),
+ bufferManager_->GetPlaneSize(handle_, plane) };
+}
+
+/* Plane stride in bytes, as reported by the buffer manager. */
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneStride(handle_, plane);
+}
+
+/* Plane offset in bytes, as reported by the buffer manager. */
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneOffset(handle_, plane);
+}
+
+/* Plane size in bytes, as reported by the buffer manager. */
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+ return cros::CameraBufferManager::GetPlaneSize(handle_, plane);
+}
+
+/* On Chrome OS the whole first plane is available for the JPEG blob. */
+size_t CameraBuffer::Private::jpegBufferSize([[maybe_unused]] size_t maxJpegBufferSize) const
+{
+ return bufferManager_->GetPlaneSize(handle_, 0);
+}
+
+/*
+ * Lock the buffer through the buffer manager, selecting the single- or
+ * multi-plane lock variant. mapped_ is only set on success.
+ */
+void CameraBuffer::Private::map()
+{
+ int ret;
+ switch (numPlanes_) {
+ case 1: {
+ ret = bufferManager_->Lock(handle_, 0, 0, 0, 0, 0, &mem.addr);
+ if (ret) {
+ LOG(HAL, Error) << "Single plane buffer mapping failed";
+ return;
+ }
+ break;
+ }
+ case 2:
+ case 3: {
+ ret = bufferManager_->LockYCbCr(handle_, 0, 0, 0, 0, 0,
+ &mem.ycbcr);
+ if (ret) {
+ LOG(HAL, Error) << "YCbCr buffer mapping failed";
+ return;
+ }
+ break;
+ }
+ default:
+ LOG(HAL, Error) << "Invalid number of planes: " << numPlanes_;
+ return;
+ }
+
+ mapped_ = true;
+ return;
+}
+
+PUBLIC_CAMERA_BUFFER_IMPLEMENTATION
diff --git a/src/android/mm/cros_frame_buffer_allocator.cpp b/src/android/mm/cros_frame_buffer_allocator.cpp
new file mode 100644
index 00000000..264c0d48
--- /dev/null
+++ b/src/android/mm/cros_frame_buffer_allocator.cpp
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Allocate FrameBuffer for Chromium OS using CameraBufferManager
+ */
+
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include "libcamera/internal/framebuffer.h"
+
+#include "../camera_device.h"
+#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
+#include "cros-camera/camera_buffer_manager.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+/*
+ * FrameBuffer private data keeping the scoped gralloc handle alive for
+ * the lifetime of the FrameBuffer.
+ */
+class CrosFrameBufferData : public FrameBuffer::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
+
+public:
+ CrosFrameBufferData(cros::ScopedBufferHandle scopedHandle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), scopedHandle_(std::move(scopedHandle))
+ {
+ }
+
+private:
+ /* Owns the buffer; frees it automatically on destruction. */
+ cros::ScopedBufferHandle scopedHandle_;
+};
+} /* namespace */
+
+/*
+ * Chrome OS allocator backend; all allocation state lives in the
+ * CameraBufferManager, so no members are needed here.
+ */
+class PlatformFrameBufferAllocator::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator)
+
+public:
+ Private([[maybe_unused]] CameraDevice *const cameraDevice)
+ {
+ }
+
+ std::unique_ptr<HALFrameBuffer>
+ allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+};
+
+/*
+ * Allocate a buffer through the cros CameraBufferManager and wrap it in
+ * a HALFrameBuffer. Returns nullptr on allocation failure.
+ */
+std::unique_ptr<HALFrameBuffer>
+PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
+ const libcamera::Size &size,
+ uint32_t usage)
+{
+ cros::ScopedBufferHandle scopedHandle =
+ cros::CameraBufferManager::AllocateScopedBuffer(
+ size.width, size.height, halPixelFormat, usage);
+ if (!scopedHandle) {
+ LOG(HAL, Error) << "Failed to allocate buffer handle";
+ return nullptr;
+ }
+
+ buffer_handle_t handle = *scopedHandle;
+ /* All planes share the dmabuf behind the handle's first fd. */
+ SharedFD fd{ handle->data[0] };
+ if (!fd.isValid()) {
+ LOG(HAL, Fatal) << "Invalid fd";
+ return nullptr;
+ }
+
+ /* This code assumes all the planes are located in the same buffer. */
+ const size_t numPlanes = cros::CameraBufferManager::GetNumPlanes(handle);
+ std::vector<FrameBuffer::Plane> planes(numPlanes);
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ plane.fd = fd;
+ plane.offset = cros::CameraBufferManager::GetPlaneOffset(handle, i);
+ plane.length = cros::CameraBufferManager::GetPlaneSize(handle, i);
+ }
+
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<CrosFrameBufferData>(std::move(scopedHandle), planes), handle);
+}
+
+PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
diff --git a/src/android/mm/generic_camera_buffer.cpp b/src/android/mm/generic_camera_buffer.cpp
new file mode 100644
index 00000000..0ffcb445
--- /dev/null
+++ b/src/android/mm/generic_camera_buffer.cpp
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Generic Android frame buffer backend
+ */
+
+#include "../camera_buffer.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <libcamera/base/log.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+/*
+ * Generic backend for CameraBuffer: derives the plane layout from the
+ * pixel format and mmaps the single backing dmabuf lazily.
+ */
+class CameraBuffer::Private : public Extensible::Private,
+ public MappedBuffer
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)
+
+public:
+ Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat, const Size &size, int flags);
+ ~Private();
+
+ unsigned int numPlanes() const;
+
+ Span<uint8_t> plane(unsigned int plane);
+
+ unsigned int stride(unsigned int plane) const;
+ unsigned int offset(unsigned int plane) const;
+ unsigned int size(unsigned int plane) const;
+
+ size_t jpegBufferSize(size_t maxJpegBufferSize) const;
+
+private:
+ /* Computed layout of one plane within the single dmabuf. */
+ struct PlaneInfo {
+ unsigned int stride;
+ unsigned int offset;
+ unsigned int size;
+ };
+
+ void map();
+
+ int fd_;
+ int flags_;
+ off_t bufferLength_;
+ bool mapped_;
+ std::vector<PlaneInfo> planeInfo_;
+};
+
+/*
+ * Validate the handle and compute the per-plane layout from the pixel
+ * format, assuming planes are stored contiguously in one dmabuf.
+ * Failures are reported through error_ (MappedBuffer).
+ */
+CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
+ buffer_handle_t camera3Buffer,
+ PixelFormat pixelFormat,
+ const Size &size, int flags)
+ : fd_(-1), flags_(flags), bufferLength_(-1), mapped_(false)
+{
+ error_ = 0;
+
+ const auto &info = PixelFormatInfo::info(pixelFormat);
+ if (!info.isValid()) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "Invalid pixel format: " << pixelFormat;
+ return;
+ }
+
+ /*
+ * As Android doesn't offer an API to query buffer layouts, assume for
+ * now that the buffer is backed by a single dmabuf, with planes being
+ * stored contiguously.
+ */
+ for (int i = 0; i < camera3Buffer->numFds; i++) {
+ if (camera3Buffer->data[i] == -1 || camera3Buffer->data[i] == fd_)
+ continue;
+
+ if (fd_ != -1) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "Discontiguous planes are not supported";
+ return;
+ }
+
+ fd_ = camera3Buffer->data[i];
+ }
+
+ if (fd_ == -1) {
+ error_ = -EINVAL;
+ LOG(HAL, Error) << "No valid file descriptor";
+ return;
+ }
+
+ /* Derive the dmabuf length from its seekable size. */
+ bufferLength_ = lseek(fd_, 0, SEEK_END);
+ if (bufferLength_ < 0) {
+ error_ = -errno;
+ LOG(HAL, Error) << "Failed to get buffer length";
+ return;
+ }
+
+ const unsigned int numPlanes = info.numPlanes();
+ planeInfo_.resize(numPlanes);
+
+ unsigned int offset = 0;
+ for (unsigned int i = 0; i < numPlanes; ++i) {
+ const unsigned int planeSize = info.planeSize(size, i);
+
+ planeInfo_[i].stride = info.stride(size.width, i, 1u);
+ planeInfo_[i].offset = offset;
+ planeInfo_[i].size = planeSize;
+
+ /*
+ * NOTE(review): this early return does not set error_, so
+ * the buffer still reports success — confirm intended.
+ */
+ if (bufferLength_ < offset + planeSize) {
+ LOG(HAL, Error) << "Plane " << i << " is out of buffer:"
+ << " plane offset=" << offset
+ << ", plane size=" << planeSize
+ << ", buffer length=" << bufferLength_;
+ return;
+ }
+
+ offset += planeSize;
+ }
+}
+
+/* MappedBuffer's destructor unmaps; the fd is not owned, so nothing to do. */
+CameraBuffer::Private::~Private()
+{
+}
+
+/* Plane count derived from the pixel format in the constructor. */
+unsigned int CameraBuffer::Private::numPlanes() const
+{
+ return planeInfo_.size();
+}
+
+/*
+ * Return a span over one plane, mmap()ing the buffer on first access.
+ * An empty span is returned when mapping fails.
+ */
+Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
+{
+ if (!mapped_)
+ map();
+ if (!mapped_)
+ return {};
+
+ return planes_[plane];
+}
+
+/* Plane stride in bytes; 0 for an out-of-range plane index. */
+unsigned int CameraBuffer::Private::stride(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].stride;
+}
+
+/* Plane offset in bytes; 0 for an out-of-range plane index. */
+unsigned int CameraBuffer::Private::offset(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].offset;
+}
+
+/* Plane size in bytes; 0 for an out-of-range plane index. */
+unsigned int CameraBuffer::Private::size(unsigned int plane) const
+{
+ if (plane >= planeInfo_.size())
+ return 0;
+
+ return planeInfo_[plane].size;
+}
+
+/* Cap the JPEG blob space to both the dmabuf length and the HAL maximum. */
+size_t CameraBuffer::Private::jpegBufferSize(size_t maxJpegBufferSize) const
+{
+ ASSERT(bufferLength_ >= 0);
+
+ return std::min<unsigned int>(bufferLength_, maxJpegBufferSize);
+}
+
+/*
+ * mmap() the whole dmabuf once and publish per-plane sub-spans.
+ * mapped_ is only set on success; failures set error_.
+ */
+void CameraBuffer::Private::map()
+{
+ ASSERT(fd_ != -1);
+ ASSERT(bufferLength_ >= 0);
+
+ /* flags_ supplies mmap()'s prot argument (presumably PROT_* bits
+ * from the caller — confirm against CameraBuffer construction). */
+ void *address = mmap(nullptr, bufferLength_, flags_, MAP_SHARED, fd_, 0);
+ if (address == MAP_FAILED) {
+ error_ = -errno;
+ LOG(HAL, Error) << "Failed to mmap plane";
+ return;
+ }
+ maps_.emplace_back(static_cast<uint8_t *>(address), bufferLength_);
+
+ planes_.reserve(planeInfo_.size());
+ for (const auto &info : planeInfo_) {
+ planes_.emplace_back(
+ static_cast<uint8_t *>(address) + info.offset, info.size);
+ }
+
+ mapped_ = true;
+}
+
+PUBLIC_CAMERA_BUFFER_IMPLEMENTATION
diff --git a/src/android/mm/generic_frame_buffer_allocator.cpp b/src/android/mm/generic_frame_buffer_allocator.cpp
new file mode 100644
index 00000000..79625a9a
--- /dev/null
+++ b/src/android/mm/generic_frame_buffer_allocator.cpp
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Allocate FrameBuffer using gralloc API
+ */
+
+#include <dlfcn.h>
+#include <memory>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/shared_fd.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/framebuffer.h"
+
+#include <hardware/camera3.h>
+#include <hardware/gralloc.h>
+#include <hardware/hardware.h>
+
+#include "../camera_device.h"
+#include "../frame_buffer_allocator.h"
+#include "../hal_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
+/*
+ * FrameBuffer private data that frees the gralloc handle through the
+ * alloc device when the FrameBuffer is destroyed.
+ */
+class GenericFrameBufferData : public FrameBuffer::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
+
+public:
+ GenericFrameBufferData(struct alloc_device_t *allocDevice,
+ buffer_handle_t handle,
+ const std::vector<FrameBuffer::Plane> &planes)
+ : FrameBuffer::Private(planes), allocDevice_(allocDevice),
+ handle_(handle)
+ {
+ ASSERT(allocDevice_);
+ ASSERT(handle_);
+ }
+
+ ~GenericFrameBufferData() override
+ {
+ /*
+ * allocDevice_ is used to destroy handle_. allocDevice_ is
+ * owned by PlatformFrameBufferAllocator::Private.
+ * GenericFrameBufferData must be destroyed before it is
+ * destroyed.
+ *
+ * \todo Consider managing alloc_device_t with std::shared_ptr
+ * if this is difficult to maintain.
+ *
+ * \todo Thread safety against alloc_device_t is not documented.
+ * Is it no problem to call alloc/free in parallel?
+ */
+ allocDevice_->free(allocDevice_, handle_);
+ }
+
+private:
+ /* Non-owning; see the destructor comment for the lifetime contract. */
+ struct alloc_device_t *allocDevice_;
+ const buffer_handle_t handle_;
+};
+} /* namespace */
+
+/*
+ * Generic allocator backend using the gralloc module; the alloc device
+ * is opened lazily on the first allocate() call.
+ */
+class PlatformFrameBufferAllocator::Private : public Extensible::Private
+{
+ LIBCAMERA_DECLARE_PUBLIC(PlatformFrameBufferAllocator)
+
+public:
+ Private(CameraDevice *const cameraDevice)
+ : cameraDevice_(cameraDevice),
+ hardwareModule_(nullptr),
+ allocDevice_(nullptr)
+ {
+ hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hardwareModule_);
+ ASSERT(hardwareModule_);
+ }
+
+ ~Private() override;
+
+ std::unique_ptr<HALFrameBuffer>
+ allocate(int halPixelFormat, const libcamera::Size &size, uint32_t usage);
+
+private:
+ const CameraDevice *const cameraDevice_;
+ const struct hw_module_t *hardwareModule_;
+ struct alloc_device_t *allocDevice_;
+};
+
+/* Close the alloc device (if opened) and release the gralloc module. */
+PlatformFrameBufferAllocator::Private::~Private()
+{
+ if (allocDevice_)
+ gralloc_close(allocDevice_);
+ dlclose(hardwareModule_->dso);
+}
+
+/*
+ * Allocate a buffer through gralloc and wrap it in a HALFrameBuffer,
+ * deriving the plane layout from the libcamera pixel format and the
+ * stride reported by the allocator. Returns nullptr on failure.
+ */
+std::unique_ptr<HALFrameBuffer>
+PlatformFrameBufferAllocator::Private::allocate(int halPixelFormat,
+ const libcamera::Size &size,
+ uint32_t usage)
+{
+ /* Open the alloc device lazily on the first allocation. */
+ if (!allocDevice_) {
+ int ret = gralloc_open(hardwareModule_, &allocDevice_);
+ if (ret) {
+ LOG(HAL, Fatal) << "gralloc_open() failed: " << ret;
+ return nullptr;
+ }
+ }
+
+ int stride = 0;
+ buffer_handle_t handle = nullptr;
+ int ret = allocDevice_->alloc(allocDevice_, size.width, size.height,
+ halPixelFormat, usage, &handle, &stride);
+ if (ret) {
+ LOG(HAL, Error) << "failed buffer allocation: " << ret;
+ return nullptr;
+ }
+ if (!handle) {
+ LOG(HAL, Fatal) << "invalid buffer_handle_t";
+ return nullptr;
+ }
+
+ /* This code assumes the planes are mapped consecutively. */
+ const libcamera::PixelFormat pixelFormat =
+ cameraDevice_->capabilities()->toPixelFormat(halPixelFormat);
+ const auto &info = PixelFormatInfo::info(pixelFormat);
+ std::vector<FrameBuffer::Plane> planes(info.numPlanes());
+
+ SharedFD fd{ handle->data[0] };
+ size_t offset = 0;
+ for (auto [i, plane] : utils::enumerate(planes)) {
+ const size_t planeSize = info.planeSize(size.height, i, stride);
+
+ plane.fd = fd;
+ plane.offset = offset;
+ plane.length = planeSize;
+ offset += planeSize;
+ }
+
+ return std::make_unique<HALFrameBuffer>(
+ std::make_unique<GenericFrameBufferData>(
+ allocDevice_, handle, planes),
+ handle);
+}
+
+PUBLIC_FRAME_BUFFER_ALLOCATOR_IMPLEMENTATION
diff --git a/src/android/mm/libhardware_stub.c b/src/android/mm/libhardware_stub.c
new file mode 100644
index 00000000..28faa638
--- /dev/null
+++ b/src/android/mm/libhardware_stub.c
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * Copyright (C) 2023, Ideas on Board
+ *
+ * Android libhardware stub for test compilation
+ */
+
+#include <errno.h>
+
+#include <hardware/hardware.h>
+
+int hw_get_module(const char *id, const struct hw_module_t **module)
+{
+	/*
+	 * Stub for test compilation on hosts without Android's libhardware:
+	 * report every module as unavailable.
+	 */
+	(void)id;
+
+	*module = NULL;
+	return -ENOTSUP;
+}
diff --git a/src/android/mm/meson.build b/src/android/mm/meson.build
new file mode 100644
index 00000000..e3e0484c
--- /dev/null
+++ b/src/android/mm/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: CC0-1.0
+
+# Select the memory-management backend matching the Android platform flavour.
+platform = get_option('android_platform')
+if platform == 'generic'
+    # Generic Android platforms allocate buffers through gralloc, loaded
+    # dynamically at runtime (hence the libdl dependency).
+    android_hal_sources += files(['generic_camera_buffer.cpp',
+                                  'generic_frame_buffer_allocator.cpp'])
+    android_deps += [libdl]
+
+    # Fall back to a local libhardware stub when the real library is not
+    # available, so the HAL can still be compile-tested.
+    libhardware = dependency('libhardware', required : false)
+    if libhardware.found()
+        android_deps += [libhardware]
+    else
+        android_hal_sources += files(['libhardware_stub.c'])
+    endif
+elif platform == 'cros'
+    # ChromeOS delegates buffer management to libcros_camera.
+    android_hal_sources += files(['cros_camera_buffer.cpp',
+                                  'cros_frame_buffer_allocator.cpp'])
+    android_deps += [dependency('libcros_camera')]
+endif
diff --git a/src/android/post_processor.h b/src/android/post_processor.h
new file mode 100644
index 00000000..b504a379
--- /dev/null
+++ b/src/android/post_processor.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Google Inc.
+ *
+ * CameraStream Post Processing Interface
+ */
+
+#pragma once
+
+#include <libcamera/base/signal.h>
+
+#include <libcamera/framebuffer.h>
+#include <libcamera/stream.h>
+
+#include "camera_buffer.h"
+#include "camera_request.h"
+
+/*
+ * Abstract interface for CameraStream post-processing stages (e.g. JPEG
+ * encoding, YUV scaling).
+ */
+class PostProcessor
+{
+public:
+	/* Outcome reported through the processComplete signal. */
+	enum class Status {
+		Error,
+		Success
+	};
+
+	virtual ~PostProcessor() = default;
+
+	/*
+	 * Configure the post-processor for the given input and output stream
+	 * configurations. Returns 0 on success or a negative error code.
+	 */
+	virtual int configure(const libcamera::StreamConfiguration &inCfg,
+			      const libcamera::StreamConfiguration &outCfg) = 0;
+	/*
+	 * Process streamBuffer's source buffer into its destination buffer.
+	 * Implementations report completion by emitting processComplete
+	 * exactly once per call.
+	 */
+	virtual void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) = 0;
+
+	/* Emitted on completion with the processed buffer and its status. */
+	libcamera::Signal<Camera3RequestDescriptor::StreamBuffer *, Status> processComplete;
+};
diff --git a/src/android/yuv/post_processor_yuv.cpp b/src/android/yuv/post_processor_yuv.cpp
new file mode 100644
index 00000000..c998807b
--- /dev/null
+++ b/src/android/yuv/post_processor_yuv.cpp
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Post Processor using libyuv
+ */
+
+#include "post_processor_yuv.h"
+
+#include <libyuv/scale.h>
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/mapped_framebuffer.h"
+
+using namespace libcamera;
+
+LOG_DEFINE_CATEGORY(YUV)
+
+/*
+ * Validate the input/output stream configurations and cache the plane
+ * geometry for process(). Only NV12-to-NV12 down-scaling (or a same-size
+ * copy) is supported. Returns 0 on success or -EINVAL otherwise.
+ */
+int PostProcessorYuv::configure(const StreamConfiguration &inCfg,
+				const StreamConfiguration &outCfg)
+{
+	/* Format conversion is out of scope for this post-processor. */
+	if (inCfg.pixelFormat != outCfg.pixelFormat) {
+		LOG(YUV, Error) << "Pixel format conversion is not supported"
+				<< " (from " << inCfg.pixelFormat
+				<< " to " << outCfg.pixelFormat << ")";
+		return -EINVAL;
+	}
+
+	/* libyuv scaling is used for down-scaling only. */
+	if (inCfg.size < outCfg.size) {
+		LOG(YUV, Error) << "Up-scaling is not supported"
+				<< " (from " << inCfg.size
+				<< " to " << outCfg.size << ")";
+		return -EINVAL;
+	}
+
+	if (inCfg.pixelFormat != formats::NV12) {
+		LOG(YUV, Error) << "Unsupported format " << inCfg.pixelFormat
+				<< " (only NV12 is supported)";
+		return -EINVAL;
+	}
+
+	/* Pre-compute per-plane strides and lengths used by process(). */
+	calculateLengths(inCfg, outCfg);
+	return 0;
+}
+
+/*
+ * Scale the NV12 source frame into the destination buffer using libyuv,
+ * then emit processComplete with the result. Completion is signalled
+ * synchronously, before this function returns.
+ */
+void PostProcessorYuv::process(Camera3RequestDescriptor::StreamBuffer *streamBuffer)
+{
+	const FrameBuffer &source = *streamBuffer->srcBuffer;
+	CameraBuffer *destination = streamBuffer->dstBuffer.get();
+
+	/* Reject buffers whose plane count or sizes don't match configure(). */
+	if (!isValidBuffers(source, *destination)) {
+		processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+		return;
+	}
+
+	/*
+	 * Map the source for CPU read access. The destination is accessed
+	 * through CameraBuffer::plane(), which is assumed to be already
+	 * CPU-addressable -- confirm against the CameraBuffer implementation.
+	 */
+	const MappedFrameBuffer sourceMapped(&source, MappedFrameBuffer::MapFlag::Read);
+	if (!sourceMapped.isValid()) {
+		LOG(YUV, Error) << "Failed to mmap camera frame buffer";
+		processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+		return;
+	}
+
+	/* Planes 0/1 are the NV12 Y and interleaved CbCr planes. */
+	int ret = libyuv::NV12Scale(sourceMapped.planes()[0].data(),
+				    sourceStride_[0],
+				    sourceMapped.planes()[1].data(),
+				    sourceStride_[1],
+				    sourceSize_.width, sourceSize_.height,
+				    destination->plane(0).data(),
+				    destinationStride_[0],
+				    destination->plane(1).data(),
+				    destinationStride_[1],
+				    destinationSize_.width,
+				    destinationSize_.height,
+				    libyuv::FilterMode::kFilterBilinear);
+	if (ret) {
+		LOG(YUV, Error) << "Failed NV12 scaling: " << ret;
+		processComplete.emit(streamBuffer, PostProcessor::Status::Error);
+		return;
+	}
+
+	processComplete.emit(streamBuffer, PostProcessor::Status::Success);
+}
+
+/*
+ * Verify that the source and destination buffers have the two planes NV12
+ * requires and that each plane is at least as large as the lengths computed
+ * by calculateLengths(). Logs a descriptive error and returns false on any
+ * mismatch.
+ */
+bool PostProcessorYuv::isValidBuffers(const FrameBuffer &source,
+				      const CameraBuffer &destination) const
+{
+	if (source.planes().size() != 2) {
+		LOG(YUV, Error) << "Invalid number of source planes: "
+				<< source.planes().size();
+		return false;
+	}
+	if (destination.numPlanes() != 2) {
+		LOG(YUV, Error) << "Invalid number of destination planes: "
+				<< destination.numPlanes();
+		return false;
+	}
+
+	if (source.planes()[0].length < sourceLength_[0] ||
+	    source.planes()[1].length < sourceLength_[1]) {
+		LOG(YUV, Error)
+			<< "The source planes lengths are too small, actual size: {"
+			<< source.planes()[0].length << ", "
+			<< source.planes()[1].length
+			<< "}, expected size: {"
+			<< sourceLength_[0] << ", "
+			<< sourceLength_[1] << "}";
+		return false;
+	}
+	if (destination.plane(0).size() < destinationLength_[0] ||
+	    destination.plane(1).size() < destinationLength_[1]) {
+		/*
+		 * Report the destination expected lengths here; the original
+		 * code mistakenly printed the source expectations.
+		 */
+		LOG(YUV, Error)
+			<< "The destination planes lengths are too small, actual size: {"
+			<< destination.plane(0).size() << ", "
+			<< destination.plane(1).size()
+			<< "}, expected size: {"
+			<< destinationLength_[0] << ", "
+			<< destinationLength_[1] << "}";
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Cache the per-plane strides and minimum lengths for the configured input
+ * and output streams, for use by process() and isValidBuffers().
+ */
+void PostProcessorYuv::calculateLengths(const StreamConfiguration &inCfg,
+					const StreamConfiguration &outCfg)
+{
+	sourceSize_ = inCfg.size;
+	destinationSize_ = outCfg.size;
+
+	const PixelFormatInfo &nv12Info = PixelFormatInfo::info(formats::NV12);
+
+	for (unsigned int plane = 0; plane < 2; plane++) {
+		/* Both NV12 planes of the source use the stream stride. */
+		sourceStride_[plane] = inCfg.stride;
+		sourceLength_[plane] =
+			nv12Info.planeSize(sourceSize_.height, plane,
+					   sourceStride_[plane]);
+
+		/* The destination is assumed tightly packed (alignment 1). */
+		destinationStride_[plane] =
+			nv12Info.stride(destinationSize_.width, plane, 1);
+		destinationLength_[plane] =
+			nv12Info.planeSize(destinationSize_.height, plane,
+					   destinationStride_[plane]);
+	}
+}
diff --git a/src/android/yuv/post_processor_yuv.h b/src/android/yuv/post_processor_yuv.h
new file mode 100644
index 00000000..ed7bb1fb
--- /dev/null
+++ b/src/android/yuv/post_processor_yuv.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2021, Google Inc.
+ *
+ * Post Processor using libyuv
+ */
+
+#pragma once
+
+#include "../post_processor.h"
+
+#include <libcamera/geometry.h>
+
+/* NV12 down-scaling post-processor backed by libyuv. */
+class PostProcessorYuv : public PostProcessor
+{
+public:
+	PostProcessorYuv() = default;
+
+	/*
+	 * Parameter names aligned with the definitions in
+	 * post_processor_yuv.cpp (were incfg/outcfg).
+	 */
+	int configure(const libcamera::StreamConfiguration &inCfg,
+		      const libcamera::StreamConfiguration &outCfg) override;
+	void process(Camera3RequestDescriptor::StreamBuffer *streamBuffer) override;
+
+private:
+	/* Check plane counts and minimum plane sizes of both buffers. */
+	bool isValidBuffers(const libcamera::FrameBuffer &source,
+			    const CameraBuffer &destination) const;
+	/* Cache per-plane strides and lengths from the configurations. */
+	void calculateLengths(const libcamera::StreamConfiguration &inCfg,
+			      const libcamera::StreamConfiguration &outCfg);
+
+	libcamera::Size sourceSize_;
+	libcamera::Size destinationSize_;
+	/* Per-plane (Y, CbCr) byte lengths and strides set by configure(). */
+	unsigned int sourceLength_[2] = {};
+	unsigned int destinationLength_[2] = {};
+	unsigned int sourceStride_[2] = {};
+	unsigned int destinationStride_[2] = {};
+};