summaryrefslogtreecommitdiff
path: root/test/v4l2_videodevice/v4l2_m2mdevice.cpp
blob: ebf3e245f86b19a065b2fcc86904233a30d403d8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2019, Google Inc.
 *
 * libcamera V4L2 M2M video device tests
 */

#include <iostream>

#include <libcamera/framebuffer.h>

#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>

#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_videodevice.h"

#include "test.h"

using namespace std;
using namespace libcamera;

/*
 * Exercise the V4L2 memory-to-memory device wrapper against the vim2m
 * virtual driver: configure formats, allocate and queue buffers on both
 * queues, stream until 30 frames have been captured, then stop cleanly.
 */
class V4L2M2MDeviceTest : public Test
{
public:
	V4L2M2MDeviceTest()
		: vim2m_(nullptr), outputFrames_(0), captureFrames_(0)
	{
	}

	/* Signal handler for the output queue: count and requeue. */
	void outputBufferComplete(FrameBuffer *buffer)
	{
		cout << "Received output buffer" << endl;

		outputFrames_++;

		/* Requeue the buffer for further use. */
		vim2m_->output()->queueBuffer(buffer);
	}

	/* Signal handler for the capture queue: count and requeue. */
	void receiveCaptureBuffer(FrameBuffer *buffer)
	{
		cout << "Received capture buffer" << endl;

		captureFrames_++;

		/* Requeue the buffer for further use. */
		vim2m_->capture()->queueBuffer(buffer);
	}

protected:
	/*
	 * Locate the vim2m media device. Returns TestSkip when the driver
	 * isn't present, so the test doesn't fail on kernels without it.
	 */
	int init()
	{
		enumerator_ = DeviceEnumerator::create();
		if (!enumerator_) {
			cerr << "Failed to create device enumerator" << endl;
			return TestFail;
		}

		if (enumerator_->enumerate()) {
			cerr << "Failed to enumerate media devices" << endl;
			return TestFail;
		}

		DeviceMatch dm("vim2m");
		dm.add("vim2m-source");
		dm.add("vim2m-sink");

		media_ = enumerator_->search(dm);
		if (!media_) {
			cerr << "No vim2m device found" << endl;
			return TestSkip;
		}

		return TestPass;
	}

	int run()
	{
		constexpr unsigned int bufferCount = 4;

		EventDispatcher *dispatcher = Thread::current()->eventDispatcher();
		int ret;

		/*
		 * Both vim2m entities share the same video device node; either
		 * entity name resolves to the M2M device.
		 */
		MediaEntity *entity = media_->getEntityByName("vim2m-source");
		vim2m_ = new V4L2M2MDevice(entity->deviceNode());
		if (vim2m_->open()) {
			cerr << "Failed to open VIM2M device" << endl;
			return TestFail;
		}

		V4L2VideoDevice *capture = vim2m_->capture();
		V4L2VideoDevice *output = vim2m_->output();

		/* Reuse the device's current format, only forcing the size. */
		V4L2DeviceFormat format = {};
		if (capture->getFormat(&format)) {
			cerr << "Failed to get capture format" << endl;
			return TestFail;
		}

		format.size.width = 640;
		format.size.height = 480;

		if (capture->setFormat(&format)) {
			cerr << "Failed to set capture format" << endl;
			return TestFail;
		}

		if (output->setFormat(&format)) {
			cerr << "Failed to set output format" << endl;
			return TestFail;
		}

		ret = capture->allocateBuffers(bufferCount, &captureBuffers_);
		if (ret < 0) {
			cerr << "Failed to allocate Capture Buffers" << endl;
			return TestFail;
		}

		ret = output->allocateBuffers(bufferCount, &outputBuffers_);
		if (ret < 0) {
			cerr << "Failed to allocate Output Buffers" << endl;
			return TestFail;
		}

		capture->bufferReady.connect(this, &V4L2M2MDeviceTest::receiveCaptureBuffer);
		output->bufferReady.connect(this, &V4L2M2MDeviceTest::outputBufferComplete);

		/* Prime both queues before starting the streams. */
		for (const std::unique_ptr<FrameBuffer> &buffer : captureBuffers_) {
			if (capture->queueBuffer(buffer.get())) {
				cerr << "Failed to queue capture buffer" << endl;
				return TestFail;
			}
		}

		for (const std::unique_ptr<FrameBuffer> &buffer : outputBuffers_) {
			if (output->queueBuffer(buffer.get())) {
				cerr << "Failed to queue output buffer" << endl;
				return TestFail;
			}
		}

		ret = capture->streamOn();
		if (ret) {
			cerr << "Failed to streamOn capture" << endl;
			return TestFail;
		}

		ret = output->streamOn();
		if (ret) {
			cerr << "Failed to streamOn output" << endl;
			return TestFail;
		}

		/*
		 * Pump events until 30 frames have been captured, matching the
		 * pass criterion below, or until the 5 second timeout expires.
		 */
		Timer timeout;
		timeout.start(5000);
		while (timeout.isRunning()) {
			dispatcher->processEvents();
			if (captureFrames_ >= 30)
				break;
		}

		cerr << "Output " << outputFrames_ << " frames" << endl;
		cerr << "Captured " << captureFrames_ << " frames" << endl;

		if (captureFrames_ < 30) {
			cerr << "Failed to capture 30 frames within timeout." << endl;
			return TestFail;
		}

		ret = capture->streamOff();
		if (ret) {
			cerr << "Failed to StreamOff the capture device." << endl;
			return TestFail;
		}

		ret = output->streamOff();
		if (ret) {
			cerr << "Failed to StreamOff the output device." << endl;
			return TestFail;
		}

		return TestPass;
	}

	void cleanup()
	{
		/* Closing the device also releases its queues; safe on nullptr. */
		delete vim2m_;
	}

private:
	std::unique_ptr<DeviceEnumerator> enumerator_;
	std::shared_ptr<MediaDevice> media_;
	V4L2M2MDevice *vim2m_;

	std::vector<std::unique_ptr<FrameBuffer>> captureBuffers_;
	std::vector<std::unique_ptr<FrameBuffer>> outputBuffers_;

	/* Frame counters updated from the bufferReady signal handlers. */
	unsigned int outputFrames_;
	unsigned int captureFrames_;
};

TEST_REGISTER(V4L2M2MDeviceTest)
n class="hl opt">(unsigned int id); MediaDevice *media_; std::unique_ptr<CameraSensor> sensor_; std::unique_ptr<V4L2Subdevice> debayer_; std::unique_ptr<V4L2Subdevice> scaler_; std::unique_ptr<V4L2VideoDevice> video_; std::unique_ptr<V4L2VideoDevice> raw_; Stream stream_; std::unique_ptr<ipa::vimc::IPAProxyVimc> ipa_; std::vector<std::unique_ptr<FrameBuffer>> mockIPABufs_; }; class VimcCameraConfiguration : public CameraConfiguration { public: VimcCameraConfiguration(VimcCameraData *data); Status validate() override; private: VimcCameraData *data_; }; class PipelineHandlerVimc : public PipelineHandler { public: PipelineHandlerVimc(CameraManager *manager); CameraConfiguration *generateConfiguration(Camera *camera, const StreamRoles &roles) override; int configure(Camera *camera, CameraConfiguration *config) override; int exportFrameBuffers(Camera *camera, Stream *stream, std::vector<std::unique_ptr<FrameBuffer>> *buffers) override; int start(Camera *camera, const ControlList *controls) override; void stopDevice(Camera *camera) override; int queueRequestDevice(Camera *camera, Request *request) override; bool match(DeviceEnumerator *enumerator) override; private: int processControls(VimcCameraData *data, Request *request); VimcCameraData *cameraData(Camera *camera) { return static_cast<VimcCameraData *>(camera->_d()); } }; namespace { static const std::map<PixelFormat, uint32_t> pixelformats{ { formats::RGB888, MEDIA_BUS_FMT_BGR888_1X24 }, { formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 }, }; } /* namespace */ VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data) : CameraConfiguration(), data_(data) { } CameraConfiguration::Status VimcCameraConfiguration::validate() { Status status = Valid; if (config_.empty()) return Invalid; if (transform != Transform::Identity) { transform = Transform::Identity; status = Adjusted; } /* Cap the number of entries to the available streams. 
*/ if (config_.size() > 1) { config_.resize(1); status = Adjusted; } StreamConfiguration &cfg = config_[0]; /* Adjust the pixel format. */ const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats(); if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) { LOG(VIMC, Debug) << "Adjusting format to BGR888"; cfg.pixelFormat = formats::BGR888; status = Adjusted; } /* Clamp the size based on the device limits. */ const Size size = cfg.size; /* * The scaler hardcodes a x3 scale-up ratio, and the sensor output size * is aligned to two pixels in both directions. The output width and * height thus have to be multiples of 6. */ cfg.size.width = std::max(48U, std::min(4096U, cfg.size.width)); cfg.size.height = std::max(48U, std::min(2160U, cfg.size.height)); cfg.size.width -= cfg.size.width % 6; cfg.size.height -= cfg.size.height % 6; if (cfg.size != size) { LOG(VIMC, Debug) << "Adjusting size to " << cfg.size.toString(); status = Adjusted; } cfg.bufferCount = 4; V4L2DeviceFormat format; format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat); format.size = cfg.size; int ret = data_->video_->tryFormat(&format); if (ret) return Invalid; cfg.stride = format.planes[0].bpl; cfg.frameSize = format.planes[0].size; return status; } PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager) : PipelineHandler(manager) { } CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera, const StreamRoles &roles) { VimcCameraData *data = cameraData(camera); CameraConfiguration *config = new VimcCameraConfiguration(data); if (roles.empty()) return config; std::map<PixelFormat, std::vector<SizeRange>> formats; for (const auto &pixelformat : pixelformats) { /* * Kernels prior to v5.7 incorrectly report support for RGB888, * but it isn't functional within the pipeline. 
*/ if (data->media_->version() < KERNEL_VERSION(5, 7, 0)) { if (pixelformat.first != formats::BGR888) { LOG(VIMC, Info) << "Skipping unsupported pixel format " << pixelformat.first.toString(); continue; } } /* The scaler hardcodes a x3 scale-up ratio. */ std::vector<SizeRange> sizes{ SizeRange{ { 48, 48 }, { 4096, 2160 } } }; formats[pixelformat.first] = sizes; } StreamConfiguration cfg(formats); cfg.pixelFormat = formats::BGR888; cfg.size = { 1920, 1080 }; cfg.bufferCount = 4; config->addConfiguration(cfg); config->validate(); return config; } int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config) { VimcCameraData *data = cameraData(camera); StreamConfiguration &cfg = config->at(0); int ret; /* The scaler hardcodes a x3 scale-up ratio. */ V4L2SubdeviceFormat subformat = {}; subformat.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8; subformat.size = { cfg.size.width / 3, cfg.size.height / 3 }; ret = data->sensor_->setFormat(&subformat); if (ret) return ret; ret = data->debayer_->setFormat(0, &subformat); if (ret) return ret; subformat.mbus_code = pixelformats.find(cfg.pixelFormat)->second; ret = data->debayer_->setFormat(1, &subformat); if (ret) return ret; ret = data->scaler_->setFormat(0, &subformat); if (ret) return ret; if (data->media_->version() >= KERNEL_VERSION(5, 6, 0)) { Rectangle crop{ 0, 0, subformat.size }; ret = data->scaler_->setSelection(0, V4L2_SEL_TGT_CROP, &crop); if (ret) return ret; } subformat.size = cfg.size; ret = data->scaler_->setFormat(1, &subformat); if (ret) return ret; V4L2DeviceFormat format; format.fourcc = V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat); format.size = cfg.size; ret = data->video_->setFormat(&format); if (ret) return ret; if (format.size != cfg.size || format.fourcc != V4L2PixelFormat::fromPixelFormat(cfg.pixelFormat)) return -EINVAL; /* * Format has to be set on the raw capture video node, otherwise the * vimc driver will fail pipeline validation. 
*/ format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8); format.size = { cfg.size.width / 3, cfg.size.height / 3 }; ret = data->raw_->setFormat(&format); if (ret) return ret; cfg.setStream(&data->stream_); if (data->ipa_) { /* Inform IPA of stream configuration and sensor controls. */ std::map<unsigned int, IPAStream> streamConfig; streamConfig.emplace(std::piecewise_construct, std::forward_as_tuple(0), std::forward_as_tuple(cfg.pixelFormat, cfg.size)); std::map<unsigned int, ControlInfoMap> entityControls; entityControls.emplace(0, data->sensor_->controls()); IPACameraSensorInfo sensorInfo; data->sensor_->sensorInfo(&sensorInfo); data->ipa_->configure(sensorInfo, streamConfig, entityControls); } return 0; } int PipelineHandlerVimc::exportFrameBuffers(Camera *camera, Stream *stream, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { VimcCameraData *data = cameraData(camera); unsigned int count = stream->configuration().bufferCount; return data->video_->exportBuffers(count, buffers); } int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls) { VimcCameraData *data = cameraData(camera); unsigned int count = data->stream_.configuration().bufferCount; int ret = data->video_->importBuffers(count); if (ret < 0) return ret; /* Map the mock IPA buffers to VIMC IPA to exercise IPC code paths. 
*/ std::vector<IPABuffer> ipaBuffers; for (auto [i, buffer] : utils::enumerate(data->mockIPABufs_)) { buffer->setCookie(i + 1); ipaBuffers.emplace_back(buffer->cookie(), buffer->planes()); } data->ipa_->mapBuffers(ipaBuffers); ret = data->ipa_->start(); if (ret) { data->video_->releaseBuffers(); return ret; } ret = data->video_->streamOn(); if (ret < 0) { data->ipa_->stop(); data->video_->releaseBuffers(); return ret; } return 0; } void PipelineHandlerVimc::stopDevice(Camera *camera) { VimcCameraData *data = cameraData(camera); data->video_->streamOff(); std::vector<unsigned int> ids; for (const std::unique_ptr<FrameBuffer> &buffer : data->mockIPABufs_) ids.push_back(buffer->cookie()); data->ipa_->unmapBuffers(ids); data->ipa_->stop(); data->video_->releaseBuffers(); } int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request) { ControlList controls(data->sensor_->controls()); for (auto it : request->controls()) { unsigned int id = it.first; unsigned int offset; uint32_t cid; if (id == controls::Brightness) { cid = V4L2_CID_BRIGHTNESS; offset = 128; } else if (id == controls::Contrast) { cid = V4L2_CID_CONTRAST; offset = 0; } else if (id == controls::Saturation) { cid = V4L2_CID_SATURATION; offset = 0; } else { continue; } int32_t value = lroundf(it.second.get<float>() * 128 + offset); controls.set(cid, std::clamp(value, 0, 255)); } for (const auto &ctrl : controls) LOG(VIMC, Debug) << "Setting control " << utils::hex(ctrl.first) << " to " << ctrl.second.toString(); int ret = data->sensor_->setControls(&controls); if (ret) { LOG(VIMC, Error) << "Failed to set controls: " << ret; return ret < 0 ? 
ret : -EINVAL; } return ret; } int PipelineHandlerVimc::queueRequestDevice(Camera *camera, Request *request) { VimcCameraData *data = cameraData(camera); FrameBuffer *buffer = request->findBuffer(&data->stream_); if (!buffer) { LOG(VIMC, Error) << "Attempt to queue request with invalid stream"; return -ENOENT; } int ret = processControls(data, request); if (ret < 0) return ret; ret = data->video_->queueBuffer(buffer); if (ret < 0) return ret; data->ipa_->processControls(request->sequence(), request->controls()); return 0; } bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator) { DeviceMatch dm("vimc"); dm.add("Raw Capture 0"); dm.add("Raw Capture 1"); dm.add("RGB/YUV Capture"); dm.add("Sensor A"); dm.add("Sensor B"); dm.add("Debayer A"); dm.add("Debayer B"); dm.add("RGB/YUV Input"); dm.add("Scaler"); MediaDevice *media = acquireMediaDevice(enumerator, dm); if (!media) return false; std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this, media); /* Locate and open the capture video node. */ if (data->init()) return false; data->ipa_ = IPAManager::createIPA<ipa::vimc::IPAProxyVimc>(this, 0, 0); if (!data->ipa_) { LOG(VIMC, Error) << "no matching IPA found"; return false; } data->ipa_->paramsFilled.connect(data.get(), &VimcCameraData::paramsFilled); std::string conf = data->ipa_->configurationFile("vimc.conf"); data->ipa_->init(IPASettings{ conf, data->sensor_->model() }); /* Create and register the camera. */ std::set<Stream *> streams{ &data->stream_ }; const std::string &id = data->sensor_->id(); std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams); registerCamera(std::move(camera)); return true; } int VimcCameraData::init() { int ret; ret = media_->disableLinks(); if (ret < 0) return ret; MediaLink *link = media_->link("Debayer B", 1, "Scaler", 0); if (!link) return -ENODEV; ret = link->setEnabled(true); if (ret < 0) return ret; /* Create and open the camera sensor, debayer, scaler and video device. 
*/ sensor_ = std::make_unique<CameraSensor>(media_->getEntityByName("Sensor B")); ret = sensor_->init(); if (ret) return ret; debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B"); if (debayer_->open()) return -ENODEV; scaler_ = V4L2Subdevice::fromEntityName(media_, "Scaler"); if (scaler_->open()) return -ENODEV; video_ = V4L2VideoDevice::fromEntityName(media_, "RGB/YUV Capture"); if (video_->open()) return -ENODEV; video_->bufferReady.connect(this, &VimcCameraData::bufferReady); raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1"); if (raw_->open()) return -ENODEV; ret = allocateMockIPABuffers(); if (ret < 0) { LOG(VIMC, Warning) << "Cannot allocate mock IPA buffers"; return ret; } /* Initialise the supported controls. */ const ControlInfoMap &controls = sensor_->controls(); ControlInfoMap::Map ctrls; for (const auto &ctrl : controls) { const ControlId *id; ControlInfo info; switch (ctrl.first->id()) { case V4L2_CID_BRIGHTNESS: id = &controls::Brightness; info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } }; break; case V4L2_CID_CONTRAST: id = &controls::Contrast; info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } }; break; case V4L2_CID_SATURATION: id = &controls::Saturation; info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } }; break; default: continue; } ctrls.emplace(id, info); } controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls); /* Initialize the camera properties. */ properties_ = sensor_->properties(); return 0; } void VimcCameraData::bufferReady(FrameBuffer *buffer) { PipelineHandlerVimc *pipe = static_cast<PipelineHandlerVimc *>(this->pipe()); Request *request = buffer->request(); /* If the buffer is cancelled force a complete of the whole request. 
*/ if (buffer->metadata().status == FrameMetadata::FrameCancelled) { for (auto it : request->buffers()) { FrameBuffer *b = it.second; b->cancel(); pipe->completeBuffer(request, b); } pipe->completeRequest(request); return; } /* Record the sensor's timestamp in the request metadata. */ request->metadata().set(controls::SensorTimestamp, buffer->metadata().timestamp); pipe->completeBuffer(request, buffer); pipe->completeRequest(request); ipa_->fillParams(request->sequence(), mockIPABufs_[0]->cookie()); } int VimcCameraData::allocateMockIPABuffers() { constexpr unsigned int kBufCount = 2; V4L2DeviceFormat format; format.fourcc = V4L2PixelFormat::fromPixelFormat(formats::BGR888); format.size = Size (160, 120); int ret = video_->setFormat(&format); if (ret < 0) return ret; return video_->exportBuffers(kBufCount, &mockIPABufs_); } void VimcCameraData::paramsFilled([[maybe_unused]] unsigned int id) { } REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc) } /* namespace libcamera */