Diffstat (limited to 'src/android/camera_device.cpp')
-rw-r--r--	src/android/camera_device.cpp	2079
1 file changed, 1398 insertions, 681 deletions
diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp
index 76af70eb..a038131a 100644
--- a/src/android/camera_device.cpp
+++ b/src/android/camera_device.cpp
@@ -2,43 +2,238 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_device.cpp - libcamera Android Camera Device
+ * libcamera Android Camera Device
*/
#include "camera_device.h"
-#include "camera_ops.h"
+#include <algorithm>
+#include <fstream>
+#include <map>
+#include <set>
+#include <sstream>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/unique_fd.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
+#include <libcamera/fence.h>
+#include <libcamera/formats.h>
+#include <libcamera/geometry.h>
#include <libcamera/property_ids.h>
-#include "log.h"
-#include "utils.h"
+#include "system/graphics.h"
-#include "camera_metadata.h"
+#include "camera_buffer.h"
+#include "camera_hal_config.h"
+#include "camera_ops.h"
+#include "camera_request.h"
+#include "hal_framebuffer.h"
using namespace libcamera;
-LOG_DECLARE_CATEGORY(HAL);
+LOG_DECLARE_CATEGORY(HAL)
+
+namespace {
/*
- * \struct Camera3RequestDescriptor
- *
- * A utility structure that groups information about a capture request to be
- * later re-used at request complete time to notify the framework.
+ * \struct Camera3StreamConfig
+ * \brief Data to store StreamConfiguration associated with camera3_stream(s)
+ * \var streams List of pairs of a stream requested by the Android HAL client
+ * and the CameraStream::Type associated with it
+ * \var config The StreamConfiguration for the streams
+ */
+struct Camera3StreamConfig {
+ struct Camera3Stream {
+ camera3_stream_t *stream;
+ CameraStream::Type type;
+ };
+
+ std::vector<Camera3Stream> streams;
+ StreamConfiguration config;
+};
+
+/*
+ * Reorder the configurations so that the libcamera::Camera is as likely as
+ * possible to accept them. The sort rules are as follows.
+ * 1.) The configuration for the NV12 request with the largest resolution.
+ * 2.) The configuration for the JPEG request.
+ * 3.) Others, with larger resolutions and different formats put earlier.
*/
+void sortCamera3StreamConfigs(std::vector<Camera3StreamConfig> &unsortedConfigs,
+ const camera3_stream_t *jpegStream)
+{
+ const Camera3StreamConfig *jpegConfig = nullptr;
+
+ std::map<PixelFormat, std::vector<const Camera3StreamConfig *>> formatToConfigs;
+ for (const auto &streamConfig : unsortedConfigs) {
+ if (jpegStream && !jpegConfig) {
+ const auto &streams = streamConfig.streams;
+ if (std::find_if(streams.begin(), streams.end(),
+ [jpegStream](const auto &stream) {
+ return stream.stream == jpegStream;
+ }) != streams.end()) {
+ jpegConfig = &streamConfig;
+ continue;
+ }
+ }
+ formatToConfigs[streamConfig.config.pixelFormat].push_back(&streamConfig);
+ }
+
+ if (jpegStream && !jpegConfig)
+ LOG(HAL, Fatal) << "No Camera3StreamConfig found for JPEG";
+
+ for (auto &fmt : formatToConfigs) {
+ auto &streamConfigs = fmt.second;
+
+ /* Sort by resolution, smallest first. */
+ std::sort(streamConfigs.begin(), streamConfigs.end(),
+ [](const auto *streamConfigA, const auto *streamConfigB) {
+ const Size &sizeA = streamConfigA->config.size;
+ const Size &sizeB = streamConfigB->config.size;
+ return sizeA < sizeB;
+ });
+ }
+
+ std::vector<Camera3StreamConfig> sortedConfigs;
+ sortedConfigs.reserve(unsortedConfigs.size());
+
+ /*
+ * NV12 is the highest priority format. Put the NV12 configuration
+ * with the largest resolution first.
+ */
+ const auto nv12It = formatToConfigs.find(formats::NV12);
+ if (nv12It != formatToConfigs.end()) {
+ auto &nv12Configs = nv12It->second;
+ const Camera3StreamConfig *nv12Largest = nv12Configs.back();
+
+ /*
+ * If the JPEG stream will be created from NV12 and its size is
+ * larger than the largest NV12 configuration, put the NV12
+ * configuration for JPEG first.
+ */
+ if (jpegConfig && jpegConfig->config.pixelFormat == formats::NV12) {
+ const Size &nv12SizeForJpeg = jpegConfig->config.size;
+ const Size &nv12LargestSize = nv12Largest->config.size;
+
+ if (nv12LargestSize < nv12SizeForJpeg) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
+ }
+
+ LOG(HAL, Debug) << "Insert " << nv12Largest->config.toString();
+ sortedConfigs.push_back(*nv12Largest);
+ nv12Configs.pop_back();
+
+ if (nv12Configs.empty())
+ formatToConfigs.erase(nv12It);
+ }
+
+ /* If a configuration for JPEG remains, insert it next. */
+ if (jpegConfig) {
+ LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
+ sortedConfigs.push_back(std::move(*jpegConfig));
+ jpegConfig = nullptr;
+ }
-CameraDevice::Camera3RequestDescriptor::Camera3RequestDescriptor(
- unsigned int frameNumber, unsigned int numBuffers)
- : frameNumber(frameNumber), numBuffers(numBuffers)
+ /*
+ * Insert the remaining configurations, cycling through the formats
+ * and putting larger resolutions earlier.
+ */
+ while (!formatToConfigs.empty()) {
+ for (auto it = formatToConfigs.begin(); it != formatToConfigs.end();) {
+ auto &configs = it->second;
+ LOG(HAL, Debug) << "Insert " << configs.back()->config.toString();
+ sortedConfigs.push_back(*configs.back());
+ configs.pop_back();
+
+ if (configs.empty())
+ it = formatToConfigs.erase(it);
+ else
+ it++;
+ }
+ }
+
+ ASSERT(sortedConfigs.size() == unsortedConfigs.size());
+
+ unsortedConfigs = sortedConfigs;
+}
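
/*
 * A standalone sketch of the ordering rules above, using a simplified,
 * hypothetical Config type in place of Camera3StreamConfig and collapsing
 * the three rules into a single ranking comparator. Given NV12 640x480,
 * NV12 1920x1080 and a 1280x720 JPEG source, the resulting order is NV12
 * 1920x1080 (rule 1), the JPEG source (rule 2), then NV12 640x480 (rule 3).
 */
#include <algorithm>
#include <string>
#include <vector>

struct Config {
	std::string format;
	unsigned int width;
	unsigned int height;
	bool forJpeg;
};

void sortConfigsSketch(std::vector<Config> &configs)
{
	/* Find the largest NV12 area, which takes absolute priority. */
	unsigned int largestNv12 = 0;
	for (const Config &c : configs)
		if (c.format == "NV12")
			largestNv12 = std::max(largestNv12, c.width * c.height);

	std::sort(configs.begin(), configs.end(),
		  [largestNv12](const Config &a, const Config &b) {
			  auto rank = [largestNv12](const Config &c) {
				  if (c.format == "NV12" &&
				      c.width * c.height == largestNv12)
					  return 0; /* Rule 1 */
				  if (c.forJpeg)
					  return 1; /* Rule 2 */
				  return 2; /* Rule 3 */
			  };
			  if (rank(a) != rank(b))
				  return rank(a) < rank(b);
			  /* Within rule 3, larger resolutions come first. */
			  return a.width * a.height > b.width * b.height;
		  });
}
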
+
+const char *rotationToString(int rotation)
{
- buffers = new camera3_stream_buffer_t[numBuffers];
+ switch (rotation) {
+ case CAMERA3_STREAM_ROTATION_0:
+ return "0";
+ case CAMERA3_STREAM_ROTATION_90:
+ return "90";
+ case CAMERA3_STREAM_ROTATION_180:
+ return "180";
+ case CAMERA3_STREAM_ROTATION_270:
+ return "270";
+ }
+ return "INVALID";
}
-CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
+const char *directionToString(int stream_type)
{
- delete[] buffers;
+ switch (stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ return "Output";
+ case CAMERA3_STREAM_INPUT:
+ return "Input";
+ case CAMERA3_STREAM_BIDIRECTIONAL:
+ return "Bidirectional";
+ default:
+ LOG(HAL, Warning) << "Unknown stream type: " << stream_type;
+ return "Unknown";
+ }
}
+#if defined(OS_CHROMEOS)
+/*
+ * Check whether the crop_rotate_scale_degrees values for all streams in
+ * the list are valid according to the Chrome OS camera HAL API.
+ */
+bool validateCropRotate(const camera3_stream_configuration_t &streamList)
+{
+ ASSERT(streamList.num_streams > 0);
+ const int cropRotateScaleDegrees =
+ streamList.streams[0]->crop_rotate_scale_degrees;
+ for (unsigned int i = 0; i < streamList.num_streams; ++i) {
+ const camera3_stream_t &stream = *streamList.streams[i];
+
+ switch (stream.crop_rotate_scale_degrees) {
+ case CAMERA3_STREAM_ROTATION_0:
+ case CAMERA3_STREAM_ROTATION_90:
+ case CAMERA3_STREAM_ROTATION_270:
+ break;
+
+ /* 180° rotation is specified by Chrome OS as invalid. */
+ case CAMERA3_STREAM_ROTATION_180:
+ default:
+ LOG(HAL, Error) << "Invalid crop_rotate_scale_degrees: "
+ << stream.crop_rotate_scale_degrees;
+ return false;
+ }
+
+ if (cropRotateScaleDegrees != stream.crop_rotate_scale_degrees) {
+ LOG(HAL, Error) << "crop_rotate_scale_degrees in all "
+ << "streams are not identical";
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
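
/*
 * For illustration: assuming a hypothetical two-stream list,
 * crop_rotate_scale_degrees values of { 90, 90 } pass the check above,
 * while { 90, 270 } (not identical) and { 180, 180 } (180° is invalid on
 * Chrome OS) both log an error and return false.
 */
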
+
+} /* namespace */
+
/*
* \class CameraDevice
*
@@ -52,21 +247,143 @@ CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor()
* back to the framework using the designated callbacks.
*/
-CameraDevice::CameraDevice(unsigned int id, const std::shared_ptr<Camera> &camera)
- : running_(false), camera_(camera), staticMetadata_(nullptr)
+CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
+ : id_(id), state_(State::Stopped), camera_(std::move(camera)),
+ facing_(CAMERA_FACING_FRONT), orientation_(0)
{
camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
+
+ maker_ = "libcamera";
+ model_ = "cameraModel";
+
+ /* \todo Support getting properties on Android */
+ std::ifstream fstream("/var/cache/camera/camera.prop");
+ if (!fstream.is_open())
+ return;
+
+ std::string line;
+ while (std::getline(fstream, line)) {
+ std::string::size_type delimPos = line.find("=");
+ if (delimPos == std::string::npos)
+ continue;
+ std::string key = line.substr(0, delimPos);
+ std::string val = line.substr(delimPos + 1);
+
+ if (!key.compare("ro.product.model"))
+ model_ = val;
+ else if (!key.compare("ro.product.manufacturer"))
+ maker_ = val;
+ }
}
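
/*
 * A hypothetical /var/cache/camera/camera.prop accepted by the parser
 * above; the keys are standard Android property names, the values are
 * examples only:
 *
 *   ro.product.manufacturer=ExampleVendor
 *   ro.product.model=ExampleModel
 *
 * Lines without an '=' are skipped, and unrecognized keys are ignored.
 */
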
-CameraDevice::~CameraDevice()
+CameraDevice::~CameraDevice() = default;
+
+std::unique_ptr<CameraDevice> CameraDevice::create(unsigned int id,
+ std::shared_ptr<Camera> cam)
+{
+ return std::unique_ptr<CameraDevice>(
+ new CameraDevice(id, std::move(cam)));
+}
+
+/*
+ * Initialize the camera static information retrieved from the
+ * Camera::properties or from the cameraConfigData.
+ *
+ * cameraConfigData is optional for external camera devices and can be
+ * nullptr.
+ *
+ * This function is called before the camera device is opened.
+ */
+int CameraDevice::initialize(const CameraConfigData *cameraConfigData)
{
- if (staticMetadata_)
- delete staticMetadata_;
+ /*
+ * Initialize orientation and facing side of the camera.
+ *
+ * If the libcamera::Camera provides that information as retrieved
+ * from the firmware, use it; otherwise fall back to the values
+ * parsed from the configuration file. If the configuration file is
+ * not available, the camera is external, so its location and
+ * rotation can safely be defaulted.
+ */
+ const ControlList &properties = camera_->properties();
+
+ const auto &location = properties.get(properties::Location);
+ if (location) {
+ switch (*location) {
+ case properties::CameraLocationFront:
+ facing_ = CAMERA_FACING_FRONT;
+ break;
+ case properties::CameraLocationBack:
+ facing_ = CAMERA_FACING_BACK;
+ break;
+ case properties::CameraLocationExternal:
+ /*
+ * If the camera is reported as external, but the
+ * CameraHalManager has overridden it, use what is
+ * reported in the configuration file. This typically
+ * happens for UVC cameras reported as 'External' by
+ * libcamera but installed in fixed position on the
+ * device.
+ */
+ if (cameraConfigData && cameraConfigData->facing != -1)
+ facing_ = cameraConfigData->facing;
+ else
+ facing_ = CAMERA_FACING_EXTERNAL;
+ break;
+ }
- for (auto &it : requestTemplates_)
- delete it.second;
+ if (cameraConfigData && cameraConfigData->facing != -1 &&
+ facing_ != cameraConfigData->facing) {
+ LOG(HAL, Warning)
+ << "Camera location does not match"
+ << " configuration file. Using " << facing_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->facing == -1) {
+ LOG(HAL, Error)
+ << "Camera facing not in configuration file";
+ return -EINVAL;
+ }
+ facing_ = cameraConfigData->facing;
+ } else {
+ facing_ = CAMERA_FACING_EXTERNAL;
+ }
+
+ /*
+ * The Android orientation metadata specifies its rotation correction
+ * value in the clockwise direction, whereas libcamera specifies its
+ * rotation property in the anticlockwise direction. Read libcamera's
+ * rotation property (anticlockwise) and compute the corresponding
+ * clockwise value as required by the Android orientation metadata.
+ */
+ const auto &rotation = properties.get(properties::Rotation);
+ if (rotation) {
+ orientation_ = (360 - *rotation) % 360;
+ if (cameraConfigData && cameraConfigData->rotation != -1 &&
+ orientation_ != cameraConfigData->rotation) {
+ LOG(HAL, Warning)
+ << "Camera orientation does not match"
+ << " configuration file. Using " << orientation_;
+ }
+ } else if (cameraConfigData) {
+ if (cameraConfigData->rotation == -1) {
+ LOG(HAL, Error)
+ << "Camera rotation not in configuration file";
+ return -EINVAL;
+ }
+ orientation_ = cameraConfigData->rotation;
+ } else {
+ orientation_ = 0;
+ }
+
+ return capabilities_.initialize(camera_, orientation_, facing_);
}
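
/*
 * Worked example of the conversion above: a libcamera Rotation property
 * of 90 (anticlockwise) yields an Android orientation of 270 (clockwise),
 * while 0 maps to 0 and 180 to 180.
 */
static_assert((360 - 90) % 360 == 270, "90 deg CCW corresponds to 270 deg CW");
static_assert((360 - 180) % 360 == 180, "180 deg is its own inverse");
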
+/*
+ * Open a camera device. The static information on the camera shall have been
+ * initialized with a call to CameraDevice::initialize().
+ */
int CameraDevice::open(const hw_module_t *hardwareModule)
{
int ret = camera_->acquire();
@@ -93,452 +410,56 @@ int CameraDevice::open(const hw_module_t *hardwareModule)
void CameraDevice::close()
{
- camera_->stop();
- camera_->release();
+ stop();
- running_ = false;
-}
-
-void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
-{
- callbacks_ = callbacks;
+ camera_->release();
}
-/*
- * Return static information for the camera.
- */
-const camera_metadata_t *CameraDevice::getStaticMetadata()
+void CameraDevice::flush()
{
- if (staticMetadata_)
- return staticMetadata_->get();
-
- const ControlList &properties = camera_->properties();
+ {
+ MutexLocker stateLock(stateMutex_);
+ if (state_ != State::Running)
+ return;
- /*
- * The here reported metadata are enough to implement a basic capture
- * example application, but a real camera implementation will require
- * more.
- */
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 50 entries, 666 bytes
- */
- staticMetadata_ = new CameraMetadata(50, 700);
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
+ state_ = State::Flushing;
}
- /* Color correction static metadata. */
- std::vector<uint8_t> aberrationModes = {
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- aberrationModes.data(),
- aberrationModes.size());
-
- /* Control static metadata. */
- std::vector<uint8_t> aeAvailableAntiBandingModes = {
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ,
- ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- aeAvailableAntiBandingModes.data(),
- aeAvailableAntiBandingModes.size());
-
- std::vector<uint8_t> aeAvailableModes = {
- ANDROID_CONTROL_AE_MODE_ON,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_MODES,
- aeAvailableModes.data(),
- aeAvailableModes.size());
-
- std::vector<int32_t> availableAeFpsTarget = {
- 15, 30,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- availableAeFpsTarget.data(),
- availableAeFpsTarget.size());
-
- std::vector<int32_t> aeCompensationRange = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- aeCompensationRange.data(),
- aeCompensationRange.size());
-
- const camera_metadata_rational_t aeCompensationStep[] = {
- { 0, 1 }
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_COMPENSATION_STEP,
- aeCompensationStep, 1);
-
- std::vector<uint8_t> availableAfModes = {
- ANDROID_CONTROL_AF_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AF_AVAILABLE_MODES,
- availableAfModes.data(),
- availableAfModes.size());
-
- std::vector<uint8_t> availableEffects = {
- ANDROID_CONTROL_EFFECT_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_EFFECTS,
- availableEffects.data(),
- availableEffects.size());
-
- std::vector<uint8_t> availableSceneModes = {
- ANDROID_CONTROL_SCENE_MODE_DISABLED,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- availableSceneModes.data(),
- availableSceneModes.size());
-
- std::vector<uint8_t> availableStabilizationModes = {
- ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- availableStabilizationModes.data(),
- availableStabilizationModes.size());
-
- std::vector<uint8_t> availableAwbModes = {
- ANDROID_CONTROL_AWB_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- availableAwbModes.data(),
- availableAwbModes.size());
-
- std::vector<int32_t> availableMaxRegions = {
- 0, 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_MAX_REGIONS,
- availableMaxRegions.data(),
- availableMaxRegions.size());
-
- std::vector<uint8_t> sceneModesOverride = {
- ANDROID_CONTROL_AE_MODE_ON,
- ANDROID_CONTROL_AWB_MODE_AUTO,
- ANDROID_CONTROL_AF_MODE_AUTO,
- };
- staticMetadata_->addEntry(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- sceneModesOverride.data(),
- sceneModesOverride.size());
-
- uint8_t aeLockAvailable = ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- &aeLockAvailable, 1);
-
- uint8_t awbLockAvailable = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- &awbLockAvailable, 1);
-
- char availableControlModes = ANDROID_CONTROL_MODE_AUTO;
- staticMetadata_->addEntry(ANDROID_CONTROL_AVAILABLE_MODES,
- &availableControlModes, 1);
-
- /* JPEG static metadata. */
- std::vector<int32_t> availableThumbnailSizes = {
- 0, 0,
- };
- staticMetadata_->addEntry(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- availableThumbnailSizes.data(),
- availableThumbnailSizes.size());
-
- /* Sensor static metadata. */
- int32_t pixelArraySize[] = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- &pixelArraySize, 2);
-
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- &sensorSizes, 4);
-
- int32_t sensitivityRange[] = {
- 32, 2400,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- &sensitivityRange, 2);
-
- uint16_t filterArr = ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- &filterArr, 1);
+ camera_->stop();
- int64_t exposureTimeRange[] = {
- 100000, 200000000,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- &exposureTimeRange, 2);
+ MutexLocker stateLock(stateMutex_);
+ state_ = State::Stopped;
+}
- /*
- * The Android orientation metadata and libcamera rotation property are
- * defined differently but have identical numerical values for Android
- * devices such as phones and tablets.
- */
- int32_t orientation = 0;
- if (properties.contains(properties::Rotation))
- orientation = properties.get(properties::Rotation);
- staticMetadata_->addEntry(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+void CameraDevice::stop()
+{
+ MutexLocker stateLock(stateMutex_);
- std::vector<int32_t> testPatterModes = {
- ANDROID_SENSOR_TEST_PATTERN_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- testPatterModes.data(),
- testPatterModes.size());
+ camera_->stop();
- std::vector<float> physicalSize = {
- 2592, 1944,
- };
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- physicalSize.data(),
- physicalSize.size());
-
- uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- &timestampSource, 1);
-
- /* Statistics static metadata. */
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- &faceDetectMode, 1);
-
- int32_t maxFaceCount = 0;
- staticMetadata_->addEntry(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- &maxFaceCount, 1);
-
- /* Sync static metadata. */
- int32_t maxLatency = ANDROID_SYNC_MAX_LATENCY_UNKNOWN;
- staticMetadata_->addEntry(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
-
- /* Flash static metadata. */
- char flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
- staticMetadata_->addEntry(ANDROID_FLASH_INFO_AVAILABLE,
- &flashAvailable, 1);
-
- /* Lens static metadata. */
- std::vector<float> lensApertures = {
- 2.53 / 100,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- lensApertures.data(),
- lensApertures.size());
-
- uint8_t lensFacing = ANDROID_LENS_FACING_FRONT;
- if (properties.contains(properties::Location)) {
- int32_t location = properties.get(properties::Location);
- switch (location) {
- case properties::CameraLocationFront:
- lensFacing = ANDROID_LENS_FACING_FRONT;
- break;
- case properties::CameraLocationBack:
- lensFacing = ANDROID_LENS_FACING_BACK;
- break;
- case properties::CameraLocationExternal:
- lensFacing = ANDROID_LENS_FACING_EXTERNAL;
- break;
- }
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_ = {};
}
- staticMetadata_->addEntry(ANDROID_LENS_FACING, &lensFacing, 1);
-
- std::vector<float> lensFocalLenghts = {
- 1,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- lensFocalLenghts.data(),
- lensFocalLenghts.size());
-
- std::vector<uint8_t> opticalStabilizations = {
- ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF,
- };
- staticMetadata_->addEntry(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- opticalStabilizations.data(),
- opticalStabilizations.size());
-
- float hypeFocalDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- &hypeFocalDistance, 1);
-
- float minFocusDistance = 0;
- staticMetadata_->addEntry(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- &minFocusDistance, 1);
-
- /* Noise reduction modes. */
- uint8_t noiseReductionModes = ANDROID_NOISE_REDUCTION_MODE_OFF;
- staticMetadata_->addEntry(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- &noiseReductionModes, 1);
-
- /* Scaler static metadata. */
- float maxDigitalZoom = 1;
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- &maxDigitalZoom, 1);
-
- std::vector<uint32_t> availableStreamFormats = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_FORMATS,
- availableStreamFormats.data(),
- availableStreamFormats.size());
-
- std::vector<uint32_t> availableStreamConfigurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- availableStreamConfigurations.data(),
- availableStreamConfigurations.size());
-
- std::vector<int64_t> availableStallDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- availableStallDurations.data(),
- availableStallDurations.size());
-
- std::vector<int64_t> minFrameDurations = {
- ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED, 2560, 1920, 33333333,
- ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, 2560, 1920, 33333333,
- };
- staticMetadata_->addEntry(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- minFrameDurations.data(),
- minFrameDurations.size());
- uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY;
- staticMetadata_->addEntry(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1);
+ streams_.clear();
- /* Info static metadata. */
- uint8_t supportedHWLevel = ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
- staticMetadata_->addEntry(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- &supportedHWLevel, 1);
-
- /* Request static metadata. */
- int32_t partialResultCount = 1;
- staticMetadata_->addEntry(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- &partialResultCount, 1);
+ state_ = State::Stopped;
+}
- uint8_t maxPipelineDepth = 2;
- staticMetadata_->addEntry(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- &maxPipelineDepth, 1);
+unsigned int CameraDevice::maxJpegBufferSize() const
+{
+ return capabilities_.maxJpegBufferSize();
+}
- std::vector<uint8_t> availableCapabilities = {
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- availableCapabilities.data(),
- availableCapabilities.size());
-
- std::vector<int32_t> availableCharacteristicsKeys = {
- ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_MODES,
- ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
- ANDROID_CONTROL_AE_COMPENSATION_RANGE,
- ANDROID_CONTROL_AE_COMPENSATION_STEP,
- ANDROID_CONTROL_AF_AVAILABLE_MODES,
- ANDROID_CONTROL_AVAILABLE_EFFECTS,
- ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
- ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
- ANDROID_CONTROL_AWB_AVAILABLE_MODES,
- ANDROID_CONTROL_MAX_REGIONS,
- ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
- ANDROID_CONTROL_AE_LOCK_AVAILABLE,
- ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
- ANDROID_CONTROL_AVAILABLE_MODES,
- ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
- ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
- ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
- ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
- ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
- ANDROID_SENSOR_ORIENTATION,
- ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
- ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
- ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
- ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
- ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
- ANDROID_SYNC_MAX_LATENCY,
- ANDROID_FLASH_INFO_AVAILABLE,
- ANDROID_LENS_INFO_AVAILABLE_APERTURES,
- ANDROID_LENS_FACING,
- ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
- ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
- ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
- ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
- ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
- ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
- ANDROID_SCALER_AVAILABLE_FORMATS,
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- ANDROID_SCALER_CROPPING_TYPE,
- ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
- ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
- ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
- availableCharacteristicsKeys.data(),
- availableCharacteristicsKeys.size());
-
- std::vector<int32_t> availableRequestKeys = {
- ANDROID_CONTROL_AE_MODE,
- ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_TRIGGER,
- ANDROID_CONTROL_AWB_MODE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_FLASH_MODE,
- ANDROID_STATISTICS_FACE_DETECT_MODE,
- ANDROID_NOISE_REDUCTION_MODE,
- ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- ANDROID_CONTROL_CAPTURE_INTENT,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
- availableRequestKeys.data(),
- availableRequestKeys.size());
-
- std::vector<int32_t> availableResultKeys = {
- ANDROID_CONTROL_AE_STATE,
- ANDROID_CONTROL_AE_LOCK,
- ANDROID_CONTROL_AF_STATE,
- ANDROID_CONTROL_AWB_STATE,
- ANDROID_CONTROL_AWB_LOCK,
- ANDROID_LENS_STATE,
- ANDROID_SCALER_CROP_REGION,
- ANDROID_SENSOR_TIMESTAMP,
- ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- ANDROID_SENSOR_EXPOSURE_TIME,
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- ANDROID_STATISTICS_SCENE_FLICKER,
- };
- staticMetadata_->addEntry(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
- availableResultKeys.data(),
- availableResultKeys.size());
-
- if (!staticMetadata_->isValid()) {
- LOG(HAL, Error) << "Failed to construct static metadata";
- delete staticMetadata_;
- staticMetadata_ = nullptr;
- return nullptr;
- }
+void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks)
+{
+ callbacks_ = callbacks;
+}
- return staticMetadata_->get();
+const camera_metadata_t *CameraDevice::getStaticMetadata()
+{
+ return capabilities_.staticMetadata()->getMetadata();
}
/*
@@ -548,100 +469,53 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
{
auto it = requestTemplates_.find(type);
if (it != requestTemplates_.end())
- return it->second->get();
+ return it->second->getMetadata();
/* Use the capture intent matching the requested template type. */
+ std::unique_ptr<CameraMetadata> requestTemplate;
uint8_t captureIntent;
switch (type) {
case CAMERA3_TEMPLATE_PREVIEW:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ requestTemplate = capabilities_.requestTemplatePreview();
break;
case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ /*
+ * Use the preview template for still capture; they differ only in
+ * the torch mode, which we currently do not support.
+ */
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+ requestTemplate = capabilities_.requestTemplateStill();
break;
case CAMERA3_TEMPLATE_VIDEO_RECORD:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
- break;
- case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
- captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+ requestTemplate = capabilities_.requestTemplateVideo();
break;
case CAMERA3_TEMPLATE_MANUAL:
captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+ requestTemplate = capabilities_.requestTemplateManual();
break;
+ /* \todo Implement template generation for the remaining use cases. */
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
default:
- LOG(HAL, Error) << "Invalid template request type: " << type;
- return nullptr;
- }
-
- /*
- * \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 15 bytes
- */
- CameraMetadata *requestTemplate = new CameraMetadata(15, 20);
- if (!requestTemplate->isValid()) {
- LOG(HAL, Error) << "Failed to allocate template metadata";
- delete requestTemplate;
+ LOG(HAL, Error) << "Unsupported template request type: " << type;
return nullptr;
}
- uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_MODE,
- &aeMode, 1);
-
- int32_t aeExposureCompensation = 0;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
- &aeExposureCompensation, 1);
-
- uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- &aePrecaptureTrigger, 1);
-
- uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AE_LOCK,
- &aeLock, 1);
-
- uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
- requestTemplate->addEntry(ANDROID_CONTROL_AF_TRIGGER,
- &afTrigger, 1);
-
- uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_MODE,
- &awbMode, 1);
-
- uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
- requestTemplate->addEntry(ANDROID_CONTROL_AWB_LOCK,
- &awbLock, 1);
-
- uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
- requestTemplate->addEntry(ANDROID_FLASH_MODE,
- &flashMode, 1);
-
- uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
- requestTemplate->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
- &faceDetectMode, 1);
-
- uint8_t noiseReduction = ANDROID_NOISE_REDUCTION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_NOISE_REDUCTION_MODE,
- &noiseReduction, 1);
-
- uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
- requestTemplate->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
- &aberrationMode, 1);
-
- requestTemplate->addEntry(ANDROID_CONTROL_CAPTURE_INTENT,
- &captureIntent, 1);
-
- if (!requestTemplate->isValid()) {
+ if (!requestTemplate || !requestTemplate->isValid()) {
LOG(HAL, Error) << "Failed to construct request template";
- delete requestTemplate;
return nullptr;
}
- requestTemplates_[type] = requestTemplate;
- return requestTemplate->get();
+ requestTemplate->updateEntry(ANDROID_CONTROL_CAPTURE_INTENT,
+ captureIntent);
+
+ requestTemplates_[type] = std::move(requestTemplate);
+ return requestTemplates_[type]->getMetadata();
}
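
/*
 * A minimal usage sketch of the template caching above; `device` stands
 * for a hypothetical CameraDevice pointer. The first call per template
 * type builds the metadata, subsequent calls return the cached instance.
 */
void requestTemplateUsage(CameraDevice *device)
{
	const camera_metadata_t *first =
		device->constructDefaultRequestSettings(CAMERA3_TEMPLATE_PREVIEW);
	const camera_metadata_t *second =
		device->constructDefaultRequestSettings(CAMERA3_TEMPLATE_PREVIEW);
	ASSERT(first && first == second);
}
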
/*
@@ -650,217 +524,873 @@ const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
*/
int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list)
{
- for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
- camera3_stream_t *stream = stream_list->streams[i];
+ /* Before any configuration attempt, stop the camera. */
+ stop();
- LOG(HAL, Info) << "Stream #" << i
- << ", direction: " << stream->stream_type
- << ", width: " << stream->width
- << ", height: " << stream->height
- << ", format: " << utils::hex(stream->format);
+ if (stream_list->num_streams == 0) {
+ LOG(HAL, Error) << "No streams in configuration";
+ return -EINVAL;
}
- /* Hardcode viewfinder role, collecting sizes from the stream config. */
- if (stream_list->num_streams != 1) {
- LOG(HAL, Error) << "Only one stream supported";
+#if defined(OS_CHROMEOS)
+ if (!validateCropRotate(*stream_list))
return -EINVAL;
- }
+#endif
- StreamRoles roles = { StreamRole::Viewfinder };
- config_ = camera_->generateConfiguration(roles);
- if (!config_ || config_->empty()) {
+ /*
+ * Generate an empty configuration, and construct a StreamConfiguration
+ * for each camera3_stream to add to it.
+ */
+ std::unique_ptr<CameraConfiguration> config = camera_->generateConfiguration();
+ if (!config) {
LOG(HAL, Error) << "Failed to generate camera configuration";
return -EINVAL;
}
- /* Only one stream is supported. */
- camera3_stream_t *camera3Stream = stream_list->streams[0];
- StreamConfiguration *streamConfiguration = &config_->at(0);
- streamConfiguration->size.width = camera3Stream->width;
- streamConfiguration->size.height = camera3Stream->height;
-
/*
- * \todo We'll need to translate from Android defined pixel format codes
- * to the libcamera image format codes. For now, do not change the
- * format returned from Camera::generateConfiguration().
+ * Clear any existing configuration from previous calls, and ensure
+ * the required entries are available without further reallocation.
*/
+ streams_.clear();
+ streams_.reserve(stream_list->num_streams);
+
+ std::vector<Camera3StreamConfig> streamConfigs;
+ streamConfigs.reserve(stream_list->num_streams);
+
+ /* First handle all non-MJPEG streams. */
+ camera3_stream_t *jpegStream = nullptr;
+ for (unsigned int i = 0; i < stream_list->num_streams; ++i) {
+ camera3_stream_t *stream = stream_list->streams[i];
+ Size size(stream->width, stream->height);
+
+ PixelFormat format = capabilities_.toPixelFormat(stream->format);
+
+ LOG(HAL, Info) << "Stream #" << i
+ << ", direction: " << directionToString(stream->stream_type)
+ << ", width: " << stream->width
+ << ", height: " << stream->height
+ << ", format: " << utils::hex(stream->format)
+ << ", rotation: " << rotationToString(stream->rotation)
+#if defined(OS_CHROMEOS)
+ << ", crop_rotate_scale_degrees: "
+ << rotationToString(stream->crop_rotate_scale_degrees)
+#endif
+ << " (" << format << ")";
+
+ if (!format.isValid())
+ return -EINVAL;
+
+ /* \todo Support rotation. */
+ if (stream->rotation != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#if defined(OS_CHROMEOS)
+ if (stream->crop_rotate_scale_degrees != CAMERA3_STREAM_ROTATION_0) {
+ LOG(HAL, Error) << "Rotation is not supported";
+ return -EINVAL;
+ }
+#endif
+
+ /* Defer handling of MJPEG streams until all others are known. */
+ if (stream->format == HAL_PIXEL_FORMAT_BLOB) {
+ if (jpegStream) {
+ LOG(HAL, Error)
+ << "Multiple JPEG streams are not supported";
+ return -EINVAL;
+ }
+
+ jpegStream = stream;
+ continue;
+ }
+
+ /*
+ * While gralloc usage flags are supposed to report usage
+ * patterns to select a suitable buffer allocation strategy, in
+ * practice they're also used to make other decisions, such as
+ * selecting the actual format for the IMPLEMENTATION_DEFINED
+ * HAL pixel format. To avoid issues, we thus have to set the
+ * GRALLOC_USAGE_HW_CAMERA_WRITE flag unconditionally, even for
+ * streams that will be produced in software.
+ */
+ stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;
+
+ /*
+ * If a CameraStream with the same size and format as the
+ * current stream has already been requested, associate the two.
+ */
+ auto iter = std::find_if(
+ streamConfigs.begin(), streamConfigs.end(),
+ [&size, &format](const Camera3StreamConfig &streamConfig) {
+ return streamConfig.config.size == size &&
+ streamConfig.config.pixelFormat == format;
+ });
+ if (iter != streamConfigs.end()) {
+ /* Add usage flags to allow copying the buffer in streams[0] to stream. */
+ iter->streams[0].stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ stream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+ iter->streams.push_back({ stream, CameraStream::Type::Mapped });
+ continue;
+ }
+
+ Camera3StreamConfig streamConfig;
+ streamConfig.streams = { { stream, CameraStream::Type::Direct } };
+ streamConfig.config.size = size;
+ streamConfig.config.pixelFormat = format;
+ streamConfigs.push_back(std::move(streamConfig));
+ }
+
+ /* Now handle the MJPEG streams, adding a new stream if required. */
+ if (jpegStream) {
+ CameraStream::Type type;
+ int index = -1;
+
+ /* Search for a compatible stream in the non-JPEG ones. */
+ for (size_t i = 0; i < streamConfigs.size(); ++i) {
+ Camera3StreamConfig &streamConfig = streamConfigs[i];
+ const auto &cfg = streamConfig.config;
+
+ /*
+ * \todo The PixelFormat must also be compatible with
+ * the encoder.
+ */
+ if (cfg.size.width != jpegStream->width ||
+ cfg.size.height != jpegStream->height)
+ continue;
+
+ LOG(HAL, Info)
+ << "Android JPEG stream mapped to libcamera stream " << i;
+
+ type = CameraStream::Type::Mapped;
+ index = i;
+
+ /*
+ * The source stream will be read by software to
+ * produce the JPEG stream.
+ */
+ camera3_stream_t *stream = streamConfig.streams[0].stream;
+ stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN;
+ break;
+ }
+
+ /*
+ * Without a compatible match for JPEG encoding we must
+ * introduce a new stream to satisfy the request requirements.
+ */
+ if (index < 0) {
+ /*
+ * \todo The pixelFormat should be a 'best-fit' choice
+ * and may require a validation cycle. This is not yet
+ * handled, and should be considered as part of any
+ * stream configuration reworks.
+ */
+ Camera3StreamConfig streamConfig;
+ streamConfig.config.size.width = jpegStream->width;
+ streamConfig.config.size.height = jpegStream->height;
+ streamConfig.config.pixelFormat = formats::NV12;
+ LOG(HAL, Info) << "Adding " << streamConfig.config.toString()
+ << " for MJPEG support";
+
+ streamConfigs.push_back(std::move(streamConfig));
+
+ type = CameraStream::Type::Internal;
+ index = streamConfigs.size() - 1;
+ }
- switch (config_->validate()) {
+ /* The JPEG stream will be produced by software. */
+ jpegStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN;
+
+ streamConfigs[index].streams.push_back({ jpegStream, type });
+ }
+
+ sortCamera3StreamConfigs(streamConfigs, jpegStream);
+ for (const auto &streamConfig : streamConfigs) {
+ config->addConfiguration(streamConfig.config);
+
+ CameraStream *sourceStream = nullptr;
+ for (auto &stream : streamConfig.streams) {
+ streams_.emplace_back(this, config.get(), stream.type,
+ stream.stream, sourceStream,
+ config->size() - 1);
+ stream.stream->priv = static_cast<void *>(&streams_.back());
+
+ /*
+ * The streamConfig.streams vector contains as its first
+ * element a Direct (or Internal) stream, and then an
+ * optional set of Mapped streams derived from the
+ * Direct stream. Cache the Direct stream pointer, to
+ * be used when constructing the subsequent mapped
+ * streams.
+ */
+ if (stream.type == CameraStream::Type::Direct)
+ sourceStream = &streams_.back();
+ }
+ }
+
+ switch (config->validate()) {
case CameraConfiguration::Valid:
break;
case CameraConfiguration::Adjusted:
LOG(HAL, Info) << "Camera configuration adjusted";
- config_.reset();
+
+ for (const StreamConfiguration &cfg : *config)
+ LOG(HAL, Info) << " - " << cfg.toString();
+
return -EINVAL;
case CameraConfiguration::Invalid:
LOG(HAL, Info) << "Camera configuration invalid";
- config_.reset();
return -EINVAL;
}
- camera3Stream->max_buffers = streamConfiguration->bufferCount;
-
/*
* Once the CameraConfiguration has been adjusted/validated
* it can be applied to the camera.
*/
- int ret = camera_->configure(config_.get());
+ int ret = camera_->configure(config.get());
if (ret) {
LOG(HAL, Error) << "Failed to configure camera '"
- << camera_->name() << "'";
+ << camera_->id() << "'";
return ret;
}
+ /*
+ * Configure the HAL CameraStream instances using the associated
+ * StreamConfiguration and set the number of required buffers in
+ * the Android camera3_stream_t.
+ */
+ for (CameraStream &cameraStream : streams_) {
+ ret = cameraStream.configure();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to configure camera stream";
+ return ret;
+ }
+ }
+
+ config_ = std::move(config);
return 0;
}
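
/*
 * A hypothetical stream list the framework could pass to
 * configureStreams(): one YCbCr preview stream plus one JPEG (BLOB)
 * capture stream of the same size. All field values are illustrative.
 * With this input the BLOB stream is mapped onto the YCbCr stream
 * (CameraStream::Type::Mapped) instead of adding a new libcamera stream.
 */
void exampleStreamList(CameraDevice *device)
{
	camera3_stream_t preview{};
	preview.stream_type = CAMERA3_STREAM_OUTPUT;
	preview.width = 1920;
	preview.height = 1080;
	preview.format = HAL_PIXEL_FORMAT_YCbCr_420_888;

	camera3_stream_t jpeg{};
	jpeg.stream_type = CAMERA3_STREAM_OUTPUT;
	jpeg.width = 1920;
	jpeg.height = 1080;
	jpeg.format = HAL_PIXEL_FORMAT_BLOB;

	camera3_stream_t *streamPtrs[] = { &preview, &jpeg };
	camera3_stream_configuration_t streamList{};
	streamList.num_streams = 2;
	streamList.streams = streamPtrs;

	device->configureStreams(&streamList);
}
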
-int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+std::unique_ptr<HALFrameBuffer>
+CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer,
+ PixelFormat pixelFormat, const Size &size)
{
- StreamConfiguration *streamConfiguration = &config_->at(0);
- Stream *stream = streamConfiguration->stream();
+ CameraBuffer buf(camera3buffer, pixelFormat, size, PROT_READ);
+ if (!buf.isValid()) {
+ LOG(HAL, Fatal) << "Failed to create CameraBuffer";
+ return nullptr;
+ }
- if (camera3Request->num_output_buffers != 1) {
- LOG(HAL, Error) << "Invalid number of output buffers: "
- << camera3Request->num_output_buffers;
- return -EINVAL;
+ std::vector<FrameBuffer::Plane> planes(buf.numPlanes());
+ for (size_t i = 0; i < buf.numPlanes(); ++i) {
+ SharedFD fd{ camera3buffer->data[i] };
+ if (!fd.isValid()) {
+ LOG(HAL, Fatal) << "No valid fd";
+ return nullptr;
+ }
+
+ planes[i].fd = fd;
+ planes[i].offset = buf.offset(i);
+ planes[i].length = buf.size(i);
}
- /* Start the camera if that's the first request we handle. */
- if (!running_) {
- int ret = camera_->start();
- if (ret) {
- LOG(HAL, Error) << "Failed to start camera";
- return ret;
+ return std::make_unique<HALFrameBuffer>(planes, camera3buffer);
+}
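
/*
 * Worked layout example for the plane construction above, assuming a
 * hypothetical 1920x1080 NV12 gralloc buffer with a stride equal to the
 * width and two planes: plane 0 (Y) at offset 0 with a length of
 * 1920 * 1080 bytes, and plane 1 (interleaved CbCr) following it with a
 * length of 1920 * 1080 / 2 bytes. The actual offsets and lengths are
 * reported by CameraBuffer, which queries the buffer implementation.
 */
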
+
+int CameraDevice::processControls(Camera3RequestDescriptor *descriptor)
+{
+ const CameraMetadata &settings = descriptor->settings_;
+ if (!settings.isValid())
+ return 0;
+
+ /* Translate the Android request settings to libcamera controls. */
+ ControlList &controls = descriptor->request_->controls();
+ camera_metadata_ro_entry_t entry;
+ if (settings.getEntry(ANDROID_SCALER_CROP_REGION, &entry)) {
+ const int32_t *data = entry.data.i32;
+ Rectangle cropRegion{ data[0], data[1],
+ static_cast<unsigned int>(data[2]),
+ static_cast<unsigned int>(data[3]) };
+ controls.set(controls::ScalerCrop, cropRegion);
+ }
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry)) {
+ const uint8_t *data = entry.data.u8;
+ controls.set(controls::draft::FaceDetectMode, data[0]);
+ }
+
+ if (settings.getEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, &entry)) {
+ const int32_t data = *entry.data.i32;
+ int32_t testPatternMode = controls::draft::TestPatternModeOff;
+ switch (data) {
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_OFF:
+ testPatternMode = controls::draft::TestPatternModeOff;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR:
+ testPatternMode = controls::draft::TestPatternModeSolidColor;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS:
+ testPatternMode = controls::draft::TestPatternModeColorBars;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY:
+ testPatternMode = controls::draft::TestPatternModeColorBarsFadeToGray;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_PN9:
+ testPatternMode = controls::draft::TestPatternModePn9;
+ break;
+
+ case ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1:
+ testPatternMode = controls::draft::TestPatternModeCustom1;
+ break;
+
+ default:
+ LOG(HAL, Error)
+ << "Unknown test pattern mode: " << data;
+
+ return -EINVAL;
}
- running_ = true;
+ controls.set(controls::draft::TestPatternMode, testPatternMode);
}
- /*
- * Queue a request for the Camera with the provided dmabuf file
- * descriptors.
- */
- const camera3_stream_buffer_t *camera3Buffers =
- camera3Request->output_buffers;
+ return 0;
+}
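
/*
 * Worked example of the crop region translation above: an Android
 * ANDROID_SCALER_CROP_REGION entry of { 100, 50, 1280, 720 }, laid out
 * as (x, y, width, height), becomes a 1280x720 libcamera::Rectangle
 * positioned at (100, 50) in controls::ScalerCrop.
 */
const int32_t exampleCrop[] = { 100, 50, 1280, 720 };
const Rectangle exampleRegion{ exampleCrop[0], exampleCrop[1],
			       static_cast<unsigned int>(exampleCrop[2]),
			       static_cast<unsigned int>(exampleCrop[3]) };
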
+
+void CameraDevice::abortRequest(Camera3RequestDescriptor *descriptor) const
+{
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_REQUEST);
+
+ for (auto &buffer : descriptor->buffers_)
+ buffer.status = Camera3RequestDescriptor::Status::Error;
+
+ descriptor->status_ = Camera3RequestDescriptor::Status::Error;
+}
+
+bool CameraDevice::isValidRequest(camera3_capture_request_t *camera3Request) const
+{
+ if (!camera3Request) {
+ LOG(HAL, Error) << "No capture request provided";
+ return false;
+ }
+
+ if (!camera3Request->num_output_buffers ||
+ !camera3Request->output_buffers) {
+ LOG(HAL, Error) << "No output buffers provided";
+ return false;
+ }
+
+ /* configureStreams() has not been called or has failed. */
+ if (streams_.empty() || !config_) {
+ LOG(HAL, Error) << "No stream is configured";
+ return false;
+ }
+
+ for (uint32_t i = 0; i < camera3Request->num_output_buffers; i++) {
+ const camera3_stream_buffer_t &outputBuffer =
+ camera3Request->output_buffers[i];
+ if (!outputBuffer.buffer || !(*outputBuffer.buffer)) {
+ LOG(HAL, Error) << "Invalid native handle";
+ return false;
+ }
+
+ const native_handle_t *handle = *outputBuffer.buffer;
+ constexpr int kNativeHandleMaxFds = 1024;
+ if (handle->numFds < 0 || handle->numFds > kNativeHandleMaxFds) {
+ LOG(HAL, Error)
+ << "Invalid number of fds (" << handle->numFds
+ << ") in buffer " << i;
+ return false;
+ }
+
+ constexpr int kNativeHandleMaxInts = 1024;
+ if (handle->numInts < 0 || handle->numInts > kNativeHandleMaxInts) {
+ LOG(HAL, Error)
+ << "Invalid number of ints (" << handle->numInts
+ << ") in buffer " << i;
+ return false;
+ }
+
+ const camera3_stream *camera3Stream = outputBuffer.stream;
+ if (!camera3Stream)
+ return false;
+
+ const CameraStream *cameraStream =
+ static_cast<CameraStream *>(camera3Stream->priv);
+
+ auto found = std::find_if(streams_.begin(), streams_.end(),
+ [cameraStream](const CameraStream &stream) {
+ return &stream == cameraStream;
+ });
+ if (found == streams_.end()) {
+ LOG(HAL, Error)
+ << "No corresponding configured stream found";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Request)
+{
+ if (!isValidRequest(camera3Request))
+ return -EINVAL;
/*
* Save the request descriptors for use at completion time.
* The descriptor and the associated memory reserved here are freed
* at request complete time.
*/
- Camera3RequestDescriptor *descriptor =
- new Camera3RequestDescriptor(camera3Request->frame_number,
- camera3Request->num_output_buffers);
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
+ auto descriptor = std::make_unique<Camera3RequestDescriptor>(camera_.get(),
+ camera3Request);
+
+ /*
+ * \todo The Android request model is incremental, settings passed in
+ * previous requests are to be effective until overridden explicitly in
+ * a new request. Do we need to cache settings incrementally here, or is
+ * it handled by the Android camera service?
+ */
+ if (camera3Request->settings)
+ lastSettings_ = camera3Request->settings;
+
+ descriptor->settings_ = lastSettings_;
+
+ LOG(HAL, Debug) << "Queueing request " << descriptor->request_->cookie()
+ << " with " << descriptor->buffers_.size() << " streams";
+
+ /*
+ * Process all the Direct and Internal streams first, as they map
+ * directly to a libcamera stream. Streams of type Mapped will be
+ * handled later.
+ *
+ * Collect the CameraStream associated with each requested capture
+ * stream. Since requestedStreams is a std::set<>, duplicates are
+ * avoided.
+ */
+ std::set<CameraStream *> requestedStreams;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ std::stringstream ss;
+ ss << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]";
+
/*
- * Keep track of which stream the request belongs to and store
- * the native buffer handles.
- *
- * \todo Currently we only support one capture buffer. Copy
- * all of them to be ready once we'll support more.
+ * Inspect the camera stream type, create buffers as appropriate,
+ * and add them to the Request if required.
*/
- descriptor->buffers[i].stream = camera3Buffers[i].stream;
- descriptor->buffers[i].buffer = camera3Buffers[i].buffer;
+ FrameBuffer *frameBuffer = nullptr;
+ UniqueFD acquireFence;
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+
+ switch (cameraStream->type()) {
+ case CameraStream::Type::Mapped:
+ /* Mapped streams will be handled in the next loop. */
+ continue;
+
+ case CameraStream::Type::Direct:
+ /*
+ * Create a libcamera buffer using the dmabuf
+ * descriptors of the camera3Buffer for each stream and
+ * associate it with the Camera3RequestDescriptor for
+ * lifetime management only.
+ */
+ buffer.frameBuffer =
+ createFrameBuffer(*buffer.camera3Buffer,
+ cameraStream->configuration().pixelFormat,
+ cameraStream->configuration().size);
+ frameBuffer = buffer.frameBuffer.get();
+ acquireFence = std::move(buffer.fence);
+ LOG(HAL, Debug) << ss.str() << " (direct)";
+ break;
+
+ case CameraStream::Type::Internal:
+ /*
+ * Get the frame buffer from the CameraStream internal
+ * buffer pool.
+ *
+ * The buffer has to be returned to the CameraStream
+ * once it has been processed.
+ */
+ frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+ LOG(HAL, Debug) << ss.str() << " (internal)";
+
+ descriptor->pendingStreamsToProcess_.insert(
+ { cameraStream, &buffer });
+ break;
+ }
+
+ if (!frameBuffer) {
+ LOG(HAL, Error) << "Failed to create frame buffer";
+ return -ENOMEM;
+ }
+
+ auto fence = std::make_unique<Fence>(std::move(acquireFence));
+ descriptor->request_->addBuffer(cameraStream->stream(),
+ frameBuffer, std::move(fence));
+
+ requestedStreams.insert(cameraStream);
}
/*
- * Create a libcamera buffer using the dmabuf descriptors of the first
- * and (currently) only supported request buffer.
+ * Now handle the Mapped streams. If no buffer has been added for them
+ * because their corresponding direct source stream is not part of this
+ * particular request, add one here.
*/
- const buffer_handle_t camera3Handle = *camera3Buffers[0].buffer;
+ for (const auto &[i, buffer] : utils::enumerate(descriptor->buffers_)) {
+ CameraStream *cameraStream = buffer.stream;
+ camera3_stream_t *camera3Stream = cameraStream->camera3Stream();
+
+ if (cameraStream->type() != CameraStream::Type::Mapped)
+ continue;
+
+ LOG(HAL, Debug) << i << " - (" << camera3Stream->width << "x"
+ << camera3Stream->height << ")"
+ << "[" << utils::hex(camera3Stream->format) << "] -> "
+ << "(" << cameraStream->configuration().size << ")["
+ << cameraStream->configuration().pixelFormat << "]"
+ << " (mapped)";
+
+ MutexLocker lock(descriptor->streamsProcessMutex_);
+ descriptor->pendingStreamsToProcess_.insert({ cameraStream, &buffer });
+
+ /*
+ * Make sure the CameraStream this stream is mapped on has been
+ * added to the request.
+ */
+ CameraStream *sourceStream = cameraStream->sourceStream();
+ ASSERT(sourceStream);
+ if (requestedStreams.find(sourceStream) != requestedStreams.end())
+ continue;
- std::vector<FrameBuffer::Plane> planes;
- for (int i = 0; i < 3; i++) {
- FrameBuffer::Plane plane;
- plane.fd = FileDescriptor(camera3Handle->data[i]);
/*
- * Setting length to zero here is OK as the length is only used
- * to map the memory of the plane. Libcamera do not need to poke
- * at the memory content queued by the HAL.
+ * If that's not the case, we need to add a buffer to the request
+ * for this stream.
*/
- plane.length = 0;
- planes.push_back(std::move(plane));
+ FrameBuffer *frameBuffer = cameraStream->getBuffer();
+ buffer.internalBuffer = frameBuffer;
+
+ descriptor->request_->addBuffer(sourceStream->stream(),
+ frameBuffer, nullptr);
+
+ requestedStreams.insert(sourceStream);
+ }
+
+ /*
+ * Translate controls from Android to libcamera and queue the request
+ * to the camera.
+ */
+ int ret = processControls(descriptor.get());
+ if (ret)
+ return ret;
+
+ /*
+ * If a flush is in progress, set the request status to error and
+ * place it on the queue to be completed later. If the camera has been
+ * stopped, we have to restart it to be able to process the request.
+ */
+ MutexLocker stateLock(stateMutex_);
+
+ if (state_ == State::Flushing) {
+ Camera3RequestDescriptor *rawDescriptor = descriptor.get();
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
+ }
+ abortRequest(rawDescriptor);
+ completeDescriptor(rawDescriptor);
+
+ return 0;
}
- FrameBuffer *buffer = new FrameBuffer(std::move(planes));
- if (!buffer) {
- LOG(HAL, Error) << "Failed to create buffer";
- delete descriptor;
- return -ENOMEM;
+ if (state_ == State::Stopped) {
+ lastSettings_ = {};
+
+ ret = camera_->start();
+ if (ret) {
+ LOG(HAL, Error) << "Failed to start camera";
+ return ret;
+ }
+
+ state_ = State::Running;
}
- Request *request =
- camera_->createRequest(reinterpret_cast<uint64_t>(descriptor));
- request->addBuffer(stream, buffer);
+ Request *request = descriptor->request_.get();
- int ret = camera_->queueRequest(request);
- if (ret) {
- LOG(HAL, Error) << "Failed to queue request";
- delete request;
- delete descriptor;
- return ret;
+ {
+ MutexLocker descriptorsLock(descriptorsMutex_);
+ descriptors_.push(std::move(descriptor));
}
+ camera_->queueRequest(request);
+
return 0;
}
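
/*
 * Sketch of the descriptor round-trip between processCaptureRequest()
 * and requestComplete(): the descriptor address travels through the
 * libcamera::Request as an opaque 64-bit cookie and is recovered at
 * completion time. `desc` stands for a hypothetical descriptor pointer.
 */
void cookieRoundTrip(Camera3RequestDescriptor *desc)
{
	uint64_t cookie = reinterpret_cast<uint64_t>(desc);
	auto *restored = reinterpret_cast<Camera3RequestDescriptor *>(cookie);
	ASSERT(restored == desc);
}
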
void CameraDevice::requestComplete(Request *request)
{
- const std::map<Stream *, FrameBuffer *> &buffers = request->buffers();
- FrameBuffer *buffer = buffers.begin()->second;
- camera3_buffer_status status = CAMERA3_BUFFER_STATUS_OK;
- std::unique_ptr<CameraMetadata> resultMetadata;
+ Camera3RequestDescriptor *descriptor =
+ reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+
+ /*
+ * Prepare the capture result for the Android camera stack.
+ *
+ * The buffer status is set to Success and later changed to Error if
+ * post-processing/compression fails.
+ */
+ for (auto &buffer : descriptor->buffers_) {
+ CameraStream *stream = buffer.stream;
+
+ /*
+ * Streams of type Direct have been queued to the
+ * libcamera::Camera and their acquire fences have
+ * already been waited on by the library.
+ *
+ * Acquire fences of streams of type Internal and Mapped
+ * will be handled during post-processing.
+ */
+ if (stream->type() == CameraStream::Type::Direct) {
+ /* If handling of the fence has failed, restore buffer.fence. */
+ std::unique_ptr<Fence> fence = buffer.frameBuffer->releaseFence();
+ if (fence)
+ buffer.fence = fence->release();
+ }
+ buffer.status = Camera3RequestDescriptor::Status::Success;
+ }
+ /*
+ * If the Request has failed, abort the request by notifying the error
+ * and complete the request with all buffers in error state.
+ */
if (request->status() != Request::RequestComplete) {
- LOG(HAL, Error) << "Request not succesfully completed: "
+ LOG(HAL, Error) << "Request " << request->cookie()
+ << " not successfully completed: "
<< request->status();
- status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ abortRequest(descriptor);
+ completeDescriptor(descriptor);
+
+ return;
}
- /* Prepare to call back the Android camera stack. */
- Camera3RequestDescriptor *descriptor =
- reinterpret_cast<Camera3RequestDescriptor *>(request->cookie());
+ /*
+ * Notify shutter as soon as we have verified we have a valid request.
+ *
+ * \todo The shutter event notification should be sent to the framework
+ * as soon as possible, earlier than request completion time.
+ */
+ uint64_t sensorTimestamp = static_cast<uint64_t>(request->metadata()
+ .get(controls::SensorTimestamp)
+ .value_or(0));
+ notifyShutter(descriptor->frameNumber_, sensorTimestamp);
+
+ LOG(HAL, Debug) << "Request " << request->cookie() << " completed with "
+ << descriptor->request_->buffers().size() << " streams";
+
+ /*
+ * Generate the metadata associated with the captured buffers.
+ *
+ * Notify if the metadata generation has failed, but continue processing
+ * buffers and return an empty metadata pack.
+ */
+ descriptor->resultMetadata_ = getResultMetadata(*descriptor);
+ if (!descriptor->resultMetadata_) {
+ notifyError(descriptor->frameNumber_, nullptr, CAMERA3_MSG_ERROR_RESULT);
- camera3_capture_result_t captureResult = {};
- captureResult.frame_number = descriptor->frameNumber;
- captureResult.num_output_buffers = descriptor->numBuffers;
- for (unsigned int i = 0; i < descriptor->numBuffers; ++i) {
/*
- * \todo Currently we only support one capture buffer. Prepare
- * all of them to be ready once we'll support more.
+ * The camera framework expects an empty metadata pack on error.
+ *
+ * \todo Check that the post-processor code handles this situation
+ * correctly.
*/
- descriptor->buffers[i].acquire_fence = -1;
- descriptor->buffers[i].release_fence = -1;
- descriptor->buffers[i].status = status;
+ descriptor->resultMetadata_ = std::make_unique<CameraMetadata>(0, 0);
}
- captureResult.output_buffers =
- const_cast<const camera3_stream_buffer_t *>(descriptor->buffers);
- if (status == CAMERA3_BUFFER_STATUS_OK) {
- notifyShutter(descriptor->frameNumber,
- buffer->metadata().timestamp);
+ /* Handle post-processing. */
+ MutexLocker locker(descriptor->streamsProcessMutex_);
- captureResult.partial_result = 1;
- resultMetadata = getResultMetadata(descriptor->frameNumber,
- buffer->metadata().timestamp);
- captureResult.result = resultMetadata->get();
+ /*
+	 * Queue all the post-processing stream requests at once. The
+	 * completion slot streamProcessingComplete() can only execute once we
+	 * are out of this critical section, which allows synchronous errors to
+	 * be handled here.
+ */
+ auto iter = descriptor->pendingStreamsToProcess_.begin();
+ while (iter != descriptor->pendingStreamsToProcess_.end()) {
+ CameraStream *stream = iter->first;
+ Camera3RequestDescriptor::StreamBuffer *buffer = iter->second;
+
+ FrameBuffer *src = request->findBuffer(stream->stream());
+ if (!src) {
+ LOG(HAL, Error) << "Failed to find a source stream buffer";
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ iter = descriptor->pendingStreamsToProcess_.erase(iter);
+ continue;
+ }
+
+ buffer->srcBuffer = src;
+
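+		/*
+		 * Advance the iterator before calling process(), as a failure
+		 * below erases the current entry from the map.
+		 */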
+ ++iter;
+ int ret = stream->process(buffer);
+ if (ret) {
+ setBufferStatus(*buffer, Camera3RequestDescriptor::Status::Error);
+ descriptor->pendingStreamsToProcess_.erase(stream);
+
+ /*
+			 * If the framebuffer is internal to CameraStream,
+			 * return it now that we're done processing it.
+ */
+ if (buffer->internalBuffer)
+ stream->putBuffer(buffer->internalBuffer);
+ }
}
- if (status == CAMERA3_BUFFER_STATUS_ERROR || !captureResult.result) {
- /* \todo Improve error handling. In case we notify an error
- * because the metadata generation fails, a shutter event has
- * already been notified for this frame number before the error
- * is here signalled. Make sure the error path plays well with
- * the camera stack state machine.
- */
- notifyError(descriptor->frameNumber,
- descriptor->buffers[0].stream);
+ if (descriptor->pendingStreamsToProcess_.empty()) {
+ locker.unlock();
+ completeDescriptor(descriptor);
}
+}
- callbacks_->process_capture_result(callbacks_, &captureResult);
+/**
+ * \brief Complete the Camera3RequestDescriptor
+ * \param[in] descriptor The Camera3RequestDescriptor that has completed
+ *
+ * The function marks the Camera3RequestDescriptor as 'complete'. It shall be
+ * called when all the streams in the Camera3RequestDescriptor have completed
+ * capture (or have been generated via post-processing) and the request is ready
+ * to be sent back to the framework.
+ *
+ * \context This function is \threadsafe.
+ */
+void CameraDevice::completeDescriptor(Camera3RequestDescriptor *descriptor)
+{
+ MutexLocker lock(descriptorsMutex_);
+ descriptor->complete_ = true;
- delete descriptor;
- delete buffer;
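+
+	/* Drain all consecutive completed descriptors from the queue head. */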
+ sendCaptureResults();
+}
+
+/**
+ * \brief Sequentially send capture results to the framework
+ *
+ * Iterate over the descriptors queue to send completed descriptors back to the
+ * framework, in the same order as they have been queued. For each complete
+ * descriptor, populate a locally-scoped camera3_capture_result_t from the
+ * descriptor, send the capture result back by calling the
+ * process_capture_result() callback, and remove the descriptor from the queue.
+ * Stop iterating if the descriptor at the front of the queue is not complete.
+ *
+ * This function should never be called directly. Use completeDescriptor()
+ * instead.
+ */
+void CameraDevice::sendCaptureResults()
+{
+ while (!descriptors_.empty() && !descriptors_.front()->isPending()) {
+ auto descriptor = std::move(descriptors_.front());
+ descriptors_.pop();
+
+ camera3_capture_result_t captureResult = {};
+
+ captureResult.frame_number = descriptor->frameNumber_;
+
+ if (descriptor->resultMetadata_)
+ captureResult.result =
+ descriptor->resultMetadata_->getMetadata();
+
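+		/*
+		 * The framework consumes the capture result synchronously in
+		 * the process_capture_result() callback, so locally-scoped
+		 * storage for the output buffers is sufficient.
+		 */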
+ std::vector<camera3_stream_buffer_t> resultBuffers;
+ resultBuffers.reserve(descriptor->buffers_.size());
+
+ for (auto &buffer : descriptor->buffers_) {
+ camera3_buffer_status status = CAMERA3_BUFFER_STATUS_ERROR;
+
+ if (buffer.status == Camera3RequestDescriptor::Status::Success)
+ status = CAMERA3_BUFFER_STATUS_OK;
+
+ /*
+ * Pass the buffer fence back to the camera framework as
+ * a release fence. This instructs the framework to wait
+ * on the acquire fence in case we haven't done so
+ * ourselves for any reason.
+ */
+ resultBuffers.push_back({ buffer.stream->camera3Stream(),
+ buffer.camera3Buffer, status,
+ -1, buffer.fence.release() });
+ }
+
+ captureResult.num_output_buffers = resultBuffers.size();
+ captureResult.output_buffers = resultBuffers.data();
+
+ if (descriptor->status_ == Camera3RequestDescriptor::Status::Success)
+ captureResult.partial_result = 1;
+
+ callbacks_->process_capture_result(callbacks_, &captureResult);
+ }
+}
+
+void CameraDevice::setBufferStatus(Camera3RequestDescriptor::StreamBuffer &streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ streamBuffer.status = status;
+ if (status != Camera3RequestDescriptor::Status::Success) {
+ notifyError(streamBuffer.request->frameNumber_,
+ streamBuffer.stream->camera3Stream(),
+ CAMERA3_MSG_ERROR_BUFFER);
+
+ /* Also set error status on entire request descriptor. */
+ streamBuffer.request->status_ =
+ Camera3RequestDescriptor::Status::Error;
+ }
+}
+
+/**
+ * \brief Handle post-processing completion of a stream in a capture request
+ * \param[in] streamBuffer The StreamBuffer for which processing is complete
+ * \param[in] status Stream post-processing status
+ *
+ * This function is called from the post-processor's thread whenever a camera
+ * stream has finished post-processing. The corresponding entry is dropped from
+ * the descriptor's pendingStreamsToProcess_ map.
+ *
+ * If the pendingStreamsToProcess_ map is then empty, all the streams that
+ * required post-processing have completed, and the descriptor is marked as
+ * complete using completeDescriptor().
+ */
+void CameraDevice::streamProcessingComplete(Camera3RequestDescriptor::StreamBuffer *streamBuffer,
+ Camera3RequestDescriptor::Status status)
+{
+ setBufferStatus(*streamBuffer, status);
+
+ /*
+	 * If the framebuffer is internal to CameraStream, return it now that
+	 * we're done processing it.
+ */
+ if (streamBuffer->internalBuffer)
+ streamBuffer->stream->putBuffer(streamBuffer->internalBuffer);
+
+ Camera3RequestDescriptor *request = streamBuffer->request;
+
+ {
+ MutexLocker locker(request->streamsProcessMutex_);
+
+ request->pendingStreamsToProcess_.erase(streamBuffer->stream);
+ if (!request->pendingStreamsToProcess_.empty())
+ return;
+ }
+
+ completeDescriptor(streamBuffer->request);
+}
+
+std::string CameraDevice::logPrefix() const
+{
+ return "'" + camera_->id() + "'";
}
void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
@@ -874,14 +1404,15 @@ void CameraDevice::notifyShutter(uint32_t frameNumber, uint64_t timestamp)
callbacks_->notify(callbacks_, &notify);
}
-void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
+void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream,
+ camera3_error_msg_code code) const
{
camera3_notify_msg_t notify = {};
notify.type = CAMERA3_MSG_ERROR;
notify.message.error.error_stream = stream;
notify.message.error.frame_number = frameNumber;
- notify.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+ notify.message.error.error_code = code;
callbacks_->notify(callbacks_, &notify);
}
@@ -889,63 +1420,242 @@ void CameraDevice::notifyError(uint32_t frameNumber, camera3_stream_t *stream)
/*
* Produce a set of fixed result metadata.
*/
-std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number,
- int64_t timestamp)
+std::unique_ptr<CameraMetadata>
+CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor) const
{
+ const ControlList &metadata = descriptor.request_->metadata();
+ const CameraMetadata &settings = descriptor.settings_;
+ camera_metadata_ro_entry_t entry;
+ bool found;
+
/*
* \todo Keep this in sync with the actual number of entries.
- * Currently: 12 entries, 36 bytes
+ * Currently: 40 entries, 156 bytes
+ *
+ * Reserve more space for the JPEG metadata set by the post-processor.
+ * Currently:
+ * ANDROID_JPEG_GPS_COORDINATES (double x 3) = 24 bytes
+ * ANDROID_JPEG_GPS_PROCESSING_METHOD (byte x 32) = 32 bytes
+ * ANDROID_JPEG_GPS_TIMESTAMP (int64) = 8 bytes
+ * ANDROID_JPEG_SIZE (int32_t) = 4 bytes
+ * ANDROID_JPEG_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_ORIENTATION (int32_t) = 4 bytes
+ * ANDROID_JPEG_THUMBNAIL_QUALITY (byte) = 1 byte
+ * ANDROID_JPEG_THUMBNAIL_SIZE (int32 x 2) = 8 bytes
+ * Total bytes for JPEG metadata: 82
*/
std::unique_ptr<CameraMetadata> resultMetadata =
- std::make_unique<CameraMetadata>(15, 50);
+ std::make_unique<CameraMetadata>(88, 166);
if (!resultMetadata->isValid()) {
- LOG(HAL, Error) << "Failed to allocate static metadata";
+ LOG(HAL, Error) << "Failed to allocate result metadata";
return nullptr;
}
- const uint8_t ae_state = ANDROID_CONTROL_AE_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, &ae_state, 1);
+ /*
+	 * \todo The values of the result metadata copied from the settings
+ * will have to be passed to the libcamera::Camera and extracted
+ * from libcamera::Request::metadata.
+ */
- const uint8_t ae_lock = ANDROID_CONTROL_AE_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, &ae_lock, 1);
+ uint8_t value = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ value);
- uint8_t af_state = ANDROID_CONTROL_AF_STATE_INACTIVE;
- resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, &af_state, 1);
+ value = ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_ANTIBANDING_MODE, value);
- const uint8_t awb_state = ANDROID_CONTROL_AWB_STATE_CONVERGED;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, &awb_state, 1);
+ int32_t value32 = 0;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ value32);
- const uint8_t awb_lock = ANDROID_CONTROL_AWB_LOCK_OFF;
- resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, &awb_lock, 1);
+ value = ANDROID_CONTROL_AE_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_LOCK, value);
- const uint8_t lens_state = ANDROID_LENS_STATE_STATIONARY;
- resultMetadata->addEntry(ANDROID_LENS_STATE, &lens_state, 1);
+ value = ANDROID_CONTROL_AE_MODE_ON;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_MODE, value);
- int32_t sensorSizes[] = {
- 0, 0, 2560, 1920,
- };
- resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, sensorSizes, 4);
+ if (settings.getEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry))
+ /*
+ * \todo Retrieve the AE FPS range from the libcamera metadata.
+		 * Since libcamera does not support that control, return what
+		 * the framework asked for as a temporary workaround.
+ */
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ entry.data.i32, 2);
+
+ found = settings.getEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &entry);
+ value = found ? *entry.data.u8 :
+ (uint8_t)ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AE_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AE_STATE, value);
+
+ value = ANDROID_CONTROL_AF_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_MODE, value);
+
+ value = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_STATE, value);
+
+ value = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+ resultMetadata->addEntry(ANDROID_CONTROL_AF_TRIGGER, value);
+
+ value = ANDROID_CONTROL_AWB_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_MODE, value);
+
+ value = ANDROID_CONTROL_AWB_LOCK_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_LOCK, value);
+
+ value = ANDROID_CONTROL_AWB_STATE_CONVERGED;
+ resultMetadata->addEntry(ANDROID_CONTROL_AWB_STATE, value);
+
+ value = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ resultMetadata->addEntry(ANDROID_CONTROL_CAPTURE_INTENT, value);
+
+ value = ANDROID_CONTROL_EFFECT_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_EFFECT_MODE, value);
+
+ value = ANDROID_CONTROL_MODE_AUTO;
+ resultMetadata->addEntry(ANDROID_CONTROL_MODE, value);
- resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
+ value = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+ resultMetadata->addEntry(ANDROID_CONTROL_SCENE_MODE, value);
+
+ value = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, value);
+
+ value = ANDROID_FLASH_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_FLASH_MODE, value);
+
+ value = ANDROID_FLASH_STATE_UNAVAILABLE;
+ resultMetadata->addEntry(ANDROID_FLASH_STATE, value);
+
+ if (settings.getEntry(ANDROID_LENS_APERTURE, &entry))
+ resultMetadata->addEntry(ANDROID_LENS_APERTURE, entry.data.f, 1);
+
+ float focal_length = 1.0;
+ resultMetadata->addEntry(ANDROID_LENS_FOCAL_LENGTH, focal_length);
+
+ value = ANDROID_LENS_STATE_STATIONARY;
+ resultMetadata->addEntry(ANDROID_LENS_STATE, value);
+
+ value = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ value);
+
+ value32 = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE, value32);
+
+ if (settings.getEntry(ANDROID_STATISTICS_FACE_DETECT_MODE, &entry))
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_DETECT_MODE,
+ entry.data.u8, 1);
+
+ value = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ value);
+
+ value = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, value);
+
+ value = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
+ resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER, value);
+
+ value = ANDROID_NOISE_REDUCTION_MODE_OFF;
+ resultMetadata->addEntry(ANDROID_NOISE_REDUCTION_MODE, value);
/* 33.3 msec */
const int64_t rolling_shutter_skew = 33300000;
resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
- &rolling_shutter_skew, 1);
+ rolling_shutter_skew);
+
+ /* Add metadata tags reported by libcamera. */
+ const int64_t timestamp = metadata.get(controls::SensorTimestamp).value_or(0);
+ resultMetadata->addEntry(ANDROID_SENSOR_TIMESTAMP, timestamp);
+
+ const auto &pipelineDepth = metadata.get(controls::draft::PipelineDepth);
+ if (pipelineDepth)
+ resultMetadata->addEntry(ANDROID_REQUEST_PIPELINE_DEPTH,
+ *pipelineDepth);
+
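+	/*
+	 * controls::ExposureTime and controls::FrameDuration are expressed in
+	 * microseconds, while Android expects nanoseconds, hence the
+	 * conversions below.
+	 */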
+ const auto &exposureTime = metadata.get(controls::ExposureTime);
+ if (exposureTime)
+ resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
+ *exposureTime * 1000ULL);
+
+ const auto &frameDuration = metadata.get(controls::FrameDuration);
+ if (frameDuration)
+ resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION,
+ *frameDuration * 1000);
+
+ const auto &faceDetectRectangles =
+ metadata.get(controls::draft::FaceDetectFaceRectangles);
+ if (faceDetectRectangles) {
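+		/*
+		 * Convert from libcamera's (x, y, width, height) rectangles to
+		 * the (left, top, right, bottom) representation expected by
+		 * Android.
+		 */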
+ std::vector<int32_t> flatRectangles;
+ for (const Rectangle &rect : *faceDetectRectangles) {
+ flatRectangles.push_back(rect.x);
+ flatRectangles.push_back(rect.y);
+ flatRectangles.push_back(rect.x + rect.width);
+ flatRectangles.push_back(rect.y + rect.height);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_RECTANGLES, flatRectangles);
+ }
- /* 16.6 msec */
- const int64_t exposure_time = 16600000;
- resultMetadata->addEntry(ANDROID_SENSOR_EXPOSURE_TIME,
- &exposure_time, 1);
+ const auto &faceDetectFaceScores =
+ metadata.get(controls::draft::FaceDetectFaceScores);
+ if (faceDetectRectangles && faceDetectFaceScores) {
+ if (faceDetectFaceScores->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face scores; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceScores->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_SCORES,
+ *faceDetectFaceScores);
+ }
- const uint8_t lens_shading_map_mode =
- ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
- resultMetadata->addEntry(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
- &lens_shading_map_mode, 1);
+ const auto &faceDetectFaceLandmarks =
+ metadata.get(controls::draft::FaceDetectFaceLandmarks);
+ if (faceDetectRectangles && faceDetectFaceLandmarks) {
+ size_t expectedLandmarks = faceDetectRectangles->size() * 3;
+ if (faceDetectFaceLandmarks->size() != expectedLandmarks) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face landmarks; "
+ << "Expected: " << expectedLandmarks
+ << ", got: " << faceDetectFaceLandmarks->size();
+ }
- const uint8_t scene_flicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
- resultMetadata->addEntry(ANDROID_STATISTICS_SCENE_FLICKER,
- &scene_flicker, 1);
+ std::vector<int32_t> androidLandmarks;
+ for (const Point &landmark : *faceDetectFaceLandmarks) {
+ androidLandmarks.push_back(landmark.x);
+ androidLandmarks.push_back(landmark.y);
+ }
+ resultMetadata->addEntry(
+ ANDROID_STATISTICS_FACE_LANDMARKS, androidLandmarks);
+ }
+
+ const auto &faceDetectFaceIds = metadata.get(controls::draft::FaceDetectFaceIds);
+ if (faceDetectRectangles && faceDetectFaceIds) {
+ if (faceDetectFaceIds->size() != faceDetectRectangles->size()) {
+ LOG(HAL, Error) << "Pipeline returned wrong number of face ids; "
+ << "Expected: " << faceDetectRectangles->size()
+ << ", got: " << faceDetectFaceIds->size();
+ }
+ resultMetadata->addEntry(ANDROID_STATISTICS_FACE_IDS, *faceDetectFaceIds);
+ }
+
+ const auto &scalerCrop = metadata.get(controls::ScalerCrop);
+ if (scalerCrop) {
+ const Rectangle &crop = *scalerCrop;
+ int32_t cropRect[] = {
+ crop.x, crop.y, static_cast<int32_t>(crop.width),
+ static_cast<int32_t>(crop.height),
+ };
+ resultMetadata->addEntry(ANDROID_SCALER_CROP_REGION, cropRect);
+ }
+
+ const auto &testPatternMode = metadata.get(controls::draft::TestPatternMode);
+ if (testPatternMode)
+ resultMetadata->addEntry(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ *testPatternMode);
/*
	 * Return the result metadata pack even if it is not valid: get() will return
@@ -955,5 +1665,12 @@ std::unique_ptr<CameraMetadata> CameraDevice::getResultMetadata(int frame_number
LOG(HAL, Error) << "Failed to construct result metadata";
}
+ if (resultMetadata->resized()) {
+ auto [entryCount, dataCount] = resultMetadata->usage();
+ LOG(HAL, Info)
+ << "Result metadata resized: " << entryCount
+ << " entries and " << dataCount << " bytes used";
+ }
+
return resultMetadata;
}