summaryrefslogtreecommitdiff
path: root/src/qcam/dng_writer.cpp
blob: d04a8e1612183014ae46ebbeb6006968e9a14d83 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
 *
 * dng_writer.cpp - DNG writer
 */

#include "dng_writer.h"

#include <algorithm>
#include <iostream>
#include <map>
#include <vector>

#include <tiffio.h>

#include <libcamera/control_ids.h>

using namespace libcamera;

/*
 * Colour indices used in the TIFF CFAPattern and CFAPlaneColor tags. The
 * values follow the TIFF/EP convention: 0 = red, 1 = green, 2 = blue.
 */
enum CFAPatternColour : uint8_t {
	CFAPatternRed = 0,
	CFAPatternGreen = 1,
	CFAPatternBlue = 2,
};

/*
 * Per-pixel-format parameters needed to write a DNG file: the sample depth,
 * the 2x2 CFA pattern layout, and the scanline conversion helpers for the
 * raw image and the greyscale thumbnail.
 */
struct FormatInfo {
	/* Bits per raw sample (e.g. 10 or 12 for the packed formats below). */
	uint8_t bitsPerSample;
	/* 2x2 CFA pattern in row-major order, using CFAPatternColour values. */
	CFAPatternColour pattern[4];
	/* Repack one raw scanline from the capture format to DNG layout. */
	void (*packScanline)(void *output, const void *input,
			     unsigned int width);
	/* Produce one scanline of the 16x-downscaled greyscale thumbnail. */
	void (*thumbScanline)(const FormatInfo &info, void *output,
			      const void *input, unsigned int width,
			      unsigned int stride);
};

/*
 * Repack a scanline of CSI-2 packed 10-bit raw Bayer data into the MSB-first
 * 10-bit layout stored in the DNG file.
 *
 * The CSI-2 packing stores four pixels in five bytes: the first four bytes
 * hold the eight most significant bits of each pixel, and the fifth byte
 * gathers the two least significant bits of all four pixels.
 */
void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
{
	const uint8_t *src = static_cast<const uint8_t *>(input);
	uint8_t *dst = static_cast<uint8_t *>(output);

	/* \todo Can this be made more efficient? */
	for (unsigned int pixel = 0; pixel < width; pixel += 4) {
		/* Reassemble the four 10-bit samples of the group. */
		const uint16_t p0 = (src[0] << 2) | (src[4] & 0x03);
		const uint16_t p1 = (src[1] << 2) | ((src[4] >> 2) & 0x03);
		const uint16_t p2 = (src[2] << 2) | ((src[4] >> 4) & 0x03);
		const uint16_t p3 = (src[3] << 2) | ((src[4] >> 6) & 0x03);

		/* Store them MSB-first across five output bytes. */
		*dst++ = p0 >> 2;
		*dst++ = ((p0 & 0x03) << 6) | (p1 >> 4);
		*dst++ = ((p1 & 0x0f) << 4) | (p2 >> 6);
		*dst++ = ((p2 & 0x3f) << 2) | (p3 >> 8);
		*dst++ = p3 & 0xff;

		src += 5;
	}
}

/*
 * Repack a scanline of CSI-2 packed 12-bit raw Bayer data into the MSB-first
 * 12-bit layout stored in the DNG file.
 *
 * The CSI-2 packing stores two pixels in three bytes: the first two bytes
 * hold the eight most significant bits of each pixel, and the third byte
 * gathers the four least significant bits of both pixels.
 */
void packScanlineSBGGR12P(void *output, const void *input, unsigned int width)
{
	const uint8_t *src = static_cast<const uint8_t *>(input);
	uint8_t *dst = static_cast<uint8_t *>(output);

	/* \todo Can this be made more efficient? */
	for (unsigned int pixel = 0; pixel < width; pixel += 2) {
		/* Reassemble the two 12-bit samples of the group. */
		const uint16_t p0 = (src[0] << 4) | (src[2] & 0x0f);
		const uint16_t p1 = (src[1] << 4) | (src[2] >> 4);

		/* Store them MSB-first across three output bytes. */
		*dst++ = p0 >> 4;
		*dst++ = ((p0 & 0x0f) << 4) | (p1 >> 8);
		*dst++ = p1 & 0xff;

		src += 3;
	}
}

/*
 * Produce one scanline of the 8-bit greyscale thumbnail from packed raw
 * Bayer data, downscaling by 16 horizontally.
 *
 * For each output pixel, average the first two bytes of the current line and
 * of the following line. As the input is still in its packed form, these are
 * the most significant bits of the leading pixels of a 2x2 quad, which is a
 * coarse but sufficient approximation for a preview image.
 */
void thumbScanlineSBGGRxxP(const FormatInfo &info, void *output,
			   const void *input, unsigned int width,
			   unsigned int stride)
{
	const uint8_t *src = static_cast<const uint8_t *>(input);
	uint8_t *dst = static_cast<uint8_t *>(output);

	/* Number of bytes that encode 16 pixels in the packed format. */
	const unsigned int bytesPer16Pixels = info.bitsPerSample * 16 / 8;

	for (unsigned int i = 0; i < width; i++) {
		const unsigned int sum = src[0] + src[1]
				       + src[stride] + src[stride + 1];
		*dst++ = sum / 4;
		src += bytesPer16Pixels;
	}
}

/*
 * Table of supported pixel formats, mapping each CSI-2 packed raw Bayer
 * PixelFormat to its DNG writing parameters. The four 10-bit and four 12-bit
 * Bayer orders share the same packing helpers and differ only in the CFA
 * pattern stored in the file.
 */
static const std::map<PixelFormat, FormatInfo> formatInfo = {
	{ PixelFormat(DRM_FORMAT_SBGGR10, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 10,
		.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
		.packScanline = packScanlineSBGGR10P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SGBRG10, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 10,
		.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
		.packScanline = packScanlineSBGGR10P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SGRBG10, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 10,
		.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
		.packScanline = packScanlineSBGGR10P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SRGGB10, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 10,
		.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
		.packScanline = packScanlineSBGGR10P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SBGGR12, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 12,
		.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
		.packScanline = packScanlineSBGGR12P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SGBRG12, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 12,
		.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
		.packScanline = packScanlineSBGGR12P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SGRBG12, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 12,
		.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
		.packScanline = packScanlineSBGGR12P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
	{ PixelFormat(DRM_FORMAT_SRGGB12, MIPI_FORMAT_MOD_CSI2_PACKED), {
		.bitsPerSample = 12,
		.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
		.packScanline = packScanlineSBGGR12P,
		.thumbScanline = thumbScanlineSBGGRxxP,
	} },
};

/*
 * Write the raw frame in \a data to a DNG file named \a filename.
 *
 * The file is laid out as required by the TIFF/EP and DNG specifications:
 * IFD 0 holds a greyscale thumbnail (1/16th resolution) plus the file-wide
 * tags, a SubIFD holds the raw CFA image, and an EXIF IFD holds the capture
 * metadata extracted from \a metadata. The \a buffer parameter is currently
 * unused; the pixel data is read from \a data using the width, height and
 * stride from \a config.
 *
 * Returns 0 on success or -EINVAL on error (unsupported pixel format, file
 * creation failure, or scanline write failure).
 */
int DNGWriter::write(const char *filename, const Camera *camera,
		     const StreamConfiguration &config,
		     const ControlList &metadata,
		     const FrameBuffer *buffer, const void *data)
{
	const auto it = formatInfo.find(config.pixelFormat);
	if (it == formatInfo.cend()) {
		std::cerr << "Unsupported pixel format" << std::endl;
		return -EINVAL;
	}
	const FormatInfo *info = &it->second;

	TIFF *tif = TIFFOpen(filename, "w");
	if (!tif) {
		std::cerr << "Failed to open tiff file" << std::endl;
		return -EINVAL;
	}

	/*
	 * Scanline buffer, has to be large enough to store both a RAW scanline
	 * or a thumbnail scanline. The latter will always be much smaller than
	 * the former as we downscale by 16 in both directions.
	 *
	 * Use a std::vector rather than a variable-length array, which is a
	 * compiler extension and not standard C++.
	 */
	std::vector<uint8_t> scanline((config.size.width * info->bitsPerSample + 7) / 8);

	toff_t rawIFDOffset = 0;
	toff_t exifIFDOffset = 0;

	/*
	 * Start with a thumbnail in IFD 0 for compatibility with TIFF baseline
	 * readers, as required by the TIFF/EP specification. Tags that apply to
	 * the whole file are stored here.
	 */
	const uint8_t version[] = { 1, 2, 0, 0 };

	TIFFSetField(tif, TIFFTAG_DNGVERSION, version);
	TIFFSetField(tif, TIFFTAG_DNGBACKWARDVERSION, version);
	TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB);
	TIFFSetField(tif, TIFFTAG_MAKE, "libcamera");
	TIFFSetField(tif, TIFFTAG_MODEL, camera->name().c_str());
	TIFFSetField(tif, TIFFTAG_UNIQUECAMERAMODEL, camera->name().c_str());
	TIFFSetField(tif, TIFFTAG_SOFTWARE, "qcam");
	TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);

	/*
	 * Thumbnail-specific tags. The thumbnail is stored as a greyscale image
	 * with 1/16 of the raw image resolution.
	 */
	TIFFSetField(tif, TIFFTAG_SUBFILETYPE, FILETYPE_REDUCEDIMAGE);
	TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width / 16);
	TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height / 16);
	TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
	TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
	TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK);
	TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1);
	TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
	TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);

	/*
	 * Reserve space for the SubIFD and ExifIFD tags, pointing to the IFD
	 * for the raw image and EXIF data respectively. The real offsets will
	 * be set later.
	 */
	TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset);
	TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset);

	/* Write the thumbnail. */
	const uint8_t *row = static_cast<const uint8_t *>(data);
	for (unsigned int y = 0; y < config.size.height / 16; y++) {
		info->thumbScanline(*info, scanline.data(), row,
				    config.size.width / 16, config.stride);

		if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
			std::cerr << "Failed to write thumbnail scanline"
				  << std::endl;
			TIFFClose(tif);
			return -EINVAL;
		}

		row += config.stride * 16;
	}

	TIFFWriteDirectory(tif);

	/* Create a new IFD for the RAW image. */
	const uint16_t cfaRepeatPatternDim[] = { 2, 2 };
	const uint8_t cfaPlaneColor[] = {
		CFAPatternRed,
		CFAPatternGreen,
		CFAPatternBlue
	};

	TIFFSetField(tif, TIFFTAG_SUBFILETYPE, 0);
	TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, config.size.width);
	TIFFSetField(tif, TIFFTAG_IMAGELENGTH, config.size.height);
	TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, info->bitsPerSample);
	TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_NONE);
	TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_CFA);
	TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1);
	TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
	TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT);
	TIFFSetField(tif, TIFFTAG_CFAREPEATPATTERNDIM, cfaRepeatPatternDim);
	/*
	 * NOTE(review): the argument convention for TIFFTAG_CFAPATTERN differs
	 * between libtiff versions (some expect an explicit count) — confirm
	 * against the libtiff version in use.
	 */
	TIFFSetField(tif, TIFFTAG_CFAPATTERN, info->pattern);
	TIFFSetField(tif, TIFFTAG_CFAPLANECOLOR, 3, cfaPlaneColor);
	TIFFSetField(tif, TIFFTAG_CFALAYOUT, 1);

	const uint16_t blackLevelRepeatDim[] = { 2, 2 };
	float blackLevel[] = { 0.0f, 0.0f, 0.0f, 0.0f };
	uint32_t whiteLevel = (1 << info->bitsPerSample) - 1;

	if (metadata.contains(controls::SensorBlackLevels)) {
		Span<const int32_t> levels = metadata.get(controls::SensorBlackLevels);

		/*
		 * The black levels control is specified in R, Gr, Gb, B order.
		 * Map it to the TIFF tag that is specified in CFA pattern
		 * order.
		 */
		unsigned int green = (info->pattern[0] == CFAPatternRed ||
				      info->pattern[1] == CFAPatternRed)
				   ? 0 : 1;

		for (unsigned int i = 0; i < 4; ++i) {
			unsigned int level;

			switch (info->pattern[i]) {
			case CFAPatternRed:
				level = levels[0];
				break;
			case CFAPatternGreen:
				/* Alternate between Gr and Gb on each use. */
				level = levels[green + 1];
				green = (green + 1) % 2;
				break;
			case CFAPatternBlue:
			default:
				level = levels[3];
				break;
			}

			/* Map the 16-bit value to the bits per sample range. */
			blackLevel[i] = level >> (16 - info->bitsPerSample);
		}
	}

	TIFFSetField(tif, TIFFTAG_BLACKLEVELREPEATDIM, &blackLevelRepeatDim);
	TIFFSetField(tif, TIFFTAG_BLACKLEVEL, 4, &blackLevel);
	TIFFSetField(tif, TIFFTAG_WHITELEVEL, 1, &whiteLevel);

	/* Write RAW content. */
	row = static_cast<const uint8_t *>(data);
	for (unsigned int y = 0; y < config.size.height; y++) {
		info->packScanline(scanline.data(), row, config.size.width);

		if (TIFFWriteScanline(tif, scanline.data(), y, 0) != 1) {
			std::cerr << "Failed to write RAW scanline"
				  << std::endl;
			TIFFClose(tif);
			return -EINVAL;
		}

		row += config.stride;
	}

	/* Checkpoint the IFD to retrieve its offset, and write it out. */
	TIFFCheckpointDirectory(tif);
	rawIFDOffset = TIFFCurrentDirOffset(tif);
	TIFFWriteDirectory(tif);

	/* Create a new IFD for the EXIF data and fill it. */
	TIFFCreateEXIFDirectory(tif);

	if (metadata.contains(controls::AnalogueGain)) {
		float gain = metadata.get(controls::AnalogueGain);
		/* ISO is approximated as 100x the analogue gain, clamped. */
		uint16_t iso = std::min(std::max(gain * 100, 0.0f), 65535.0f);
		TIFFSetField(tif, EXIFTAG_ISOSPEEDRATINGS, 1, &iso);
	}

	if (metadata.contains(controls::ExposureTime)) {
		/* ExposureTime is in microseconds; EXIF expects seconds. */
		float exposureTime = metadata.get(controls::ExposureTime) / 1e6;
		TIFFSetField(tif, EXIFTAG_EXPOSURETIME, exposureTime);
	}

	TIFFCheckpointDirectory(tif);
	exifIFDOffset = TIFFCurrentDirOffset(tif);
	TIFFWriteDirectory(tif);

	/* Update the IFD offsets and close the file. */
	TIFFSetDirectory(tif, 0);
	TIFFSetField(tif, TIFFTAG_SUBIFD, 1, &rawIFDOffset);
	TIFFSetField(tif, TIFFTAG_EXIFIFD, exifIFDOffset);
	TIFFWriteDirectory(tif);

	TIFFClose(tif);

	return 0;
}
an class="hl kwd">toString(); }) << " ]"; /* * Store the configuration in the formats_ map, mapping the * PixelFormat to the corresponding configuration. Any * previously stored value is overwritten, as the pipeline * handler currently doesn't care about how a particular * PixelFormat is achieved. */ for (const auto &videoFormat : videoFormats) { PixelFormat pixelFormat = videoFormat.first.toPixelFormat(); if (!pixelFormat) continue; Configuration config; config.code = code; config.pixelFormat = pixelFormat; config.captureSize = format.size; if (!converter) { config.outputSizes = config.captureSize; formats_[pixelFormat] = config; continue; } config.outputSizes = converter->sizes(format.size); for (PixelFormat fmt : converter->formats(pixelFormat)) formats_[fmt] = config; } } if (formats_.empty()) { LOG(SimplePipeline, Error) << "No valid configuration found"; return -EINVAL; } properties_ = sensor_->properties(); return 0; } int SimpleCameraData::setupLinks() { int ret; /* * Configure all links along the pipeline. Some entities may not allow * multiple sink links to be enabled together, even on different sink * pads. We must thus start by disabling all sink links (but the one we * want to enable) before enabling the pipeline link. 
*/ for (SimpleCameraData::Entity &e : entities_) { MediaEntity *remote = e.link->sink()->entity(); for (MediaPad *pad : remote->pads()) { for (MediaLink *link : pad->links()) { if (link == e.link) continue; if ((link->flags() & MEDIA_LNK_FL_ENABLED) && !(link->flags() & MEDIA_LNK_FL_IMMUTABLE)) { ret = link->setEnabled(false); if (ret < 0) return ret; } } } if (!(e.link->flags() & MEDIA_LNK_FL_ENABLED)) { ret = e.link->setEnabled(true); if (ret < 0) return ret; } } return 0; } int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format, V4L2Subdevice::Whence whence) { SimplePipelineHandler *pipe = static_cast<SimplePipelineHandler *>(pipe_); int ret; /* * Configure the format on the sensor output and propagate it through * the pipeline. */ ret = sensor_->setFormat(format); if (ret < 0) return ret; for (const Entity &e : entities_) { MediaLink *link = e.link; MediaPad *source = link->source(); MediaPad *sink = link->sink(); if (source->entity() != sensor_->entity()) { V4L2Subdevice *subdev = pipe->subdev(source->entity()); ret = subdev->getFormat(source->index(), format, whence); if (ret < 0) return ret; } if (sink->entity()->function() != MEDIA_ENT_F_IO_V4L) { V4L2SubdeviceFormat sourceFormat = *format; V4L2Subdevice *subdev = pipe->subdev(sink->entity()); ret = subdev->setFormat(sink->index(), format, whence); if (ret < 0) return ret; if (format->mbus_code != sourceFormat.mbus_code || format->size != sourceFormat.size) { LOG(SimplePipeline, Debug) << "Source '" << source->entity()->name() << "':" << source->index() << " produces " << sourceFormat.toString() << ", sink '" << sink->entity()->name() << "':" << sink->index() << " requires " << format->toString(); return -EINVAL; } } LOG(SimplePipeline, Debug) << "Link '" << source->entity()->name() << "':" << source->index() << " -> '" << sink->entity()->name() << "':" << sink->index() << " configured with format " << format->toString(); } return 0; } /* 
----------------------------------------------------------------------------- * Camera Configuration */ SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera, SimpleCameraData *data) : CameraConfiguration(), camera_(camera->shared_from_this()), data_(data) { } CameraConfiguration::Status SimpleCameraConfiguration::validate() { Status status = Valid; if (config_.empty()) return Invalid; if (transform != Transform::Identity) { transform = Transform::Identity; status = Adjusted; } /* Cap the number of entries to the available streams. */ if (config_.size() > 1) { config_.resize(1); status = Adjusted; } StreamConfiguration &cfg = config_[0]; /* Adjust the pixel format. */ auto it = data_->formats_.find(cfg.pixelFormat); if (it == data_->formats_.end()) it = data_->formats_.begin(); PixelFormat pixelFormat = it->first; if (cfg.pixelFormat != pixelFormat) { LOG(SimplePipeline, Debug) << "Adjusting pixel format"; cfg.pixelFormat = pixelFormat; status = Adjusted; } const SimpleCameraData::Configuration &pipeConfig = it->second; if (!pipeConfig.outputSizes.contains(cfg.size)) { LOG(SimplePipeline, Debug) << "Adjusting size from " << cfg.size.toString() << " to " << pipeConfig.captureSize.toString(); cfg.size = pipeConfig.captureSize; status = Adjusted; } needConversion_ = cfg.pixelFormat != pipeConfig.pixelFormat || cfg.size != pipeConfig.captureSize; cfg.bufferCount = 3; /* Set the stride and frameSize. 
*/ if (!needConversion_) { V4L2DeviceFormat format; format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat); format.size = cfg.size; int ret = data_->video_->tryFormat(&format); if (ret < 0) return Invalid; cfg.stride = format.planes[0].bpl; cfg.frameSize = format.planes[0].size; return status; } SimplePipelineHandler *pipe = static_cast<SimplePipelineHandler *>(data_->pipe_); SimpleConverter *converter = pipe->converter(); std::tie(cfg.stride, cfg.frameSize) = converter->strideAndFrameSize(cfg.pixelFormat, cfg.size); if (cfg.stride == 0) return Invalid; return status; } /* ----------------------------------------------------------------------------- * Pipeline Handler */ SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager) : PipelineHandler(manager) { } CameraConfiguration *SimplePipelineHandler::generateConfiguration(Camera *camera, const StreamRoles &roles) { SimpleCameraData *data = cameraData(camera); CameraConfiguration *config = new SimpleCameraConfiguration(camera, data); if (roles.empty()) return config; /* Create the formats map. */ std::map<PixelFormat, std::vector<SizeRange>> formats; std::transform(data->formats_.begin(), data->formats_.end(), std::inserter(formats, formats.end()), [](const auto &format) -> decltype(formats)::value_type { const PixelFormat &pixelFormat = format.first; const Size &size = format.second.captureSize; return { pixelFormat, { size } }; }); /* * Create the stream configuration. Take the first entry in the formats * map as the default, for lack of a better option. 
* * \todo Implement a better way to pick the default format */ StreamConfiguration cfg{ StreamFormats{ formats } }; cfg.pixelFormat = formats.begin()->first; cfg.size = formats.begin()->second[0].max; config->addConfiguration(cfg); config->validate(); return config; } int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c) { SimpleCameraConfiguration *config = static_cast<SimpleCameraConfiguration *>(c); SimpleCameraData *data = cameraData(camera); V4L2VideoDevice *video = data->video_; StreamConfiguration &cfg = config->at(0); int ret; /* * Configure links on the pipeline and propagate formats from the * sensor to the video node. */ ret = data->setupLinks(); if (ret < 0) return ret; const SimpleCameraData::Configuration &pipeConfig = data->formats_[cfg.pixelFormat]; V4L2SubdeviceFormat format{ pipeConfig.code, data->sensor_->resolution() }; ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat); if (ret < 0) return ret; /* Configure the video node. */ V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig.pixelFormat); V4L2DeviceFormat captureFormat; captureFormat.fourcc = videoFormat; captureFormat.size = pipeConfig.captureSize; ret = video->setFormat(&captureFormat); if (ret) return ret; if (captureFormat.planesCount != 1) { LOG(SimplePipeline, Error) << "Planar formats using non-contiguous memory not supported"; return -EINVAL; } if (captureFormat.fourcc != videoFormat || captureFormat.size != pipeConfig.captureSize) { LOG(SimplePipeline, Error) << "Unable to configure capture in " << pipeConfig.captureSize.toString() << "-" << videoFormat.toString(); return -EINVAL; } /* Configure the converter if required. 
*/ useConverter_ = config->needConversion(); if (useConverter_) { StreamConfiguration inputCfg; inputCfg.pixelFormat = pipeConfig.pixelFormat; inputCfg.size = pipeConfig.captureSize; inputCfg.stride = captureFormat.planes[0].bpl; inputCfg.bufferCount = cfg.bufferCount; ret = converter_->configure(inputCfg, cfg); if (ret < 0) { LOG(SimplePipeline, Error) << "Unable to configure converter"; return ret; } LOG(SimplePipeline, Debug) << "Using format converter"; } cfg.setStream(&data->stream_); return 0; } int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream, std::vector<std::unique_ptr<FrameBuffer>> *buffers) { SimpleCameraData *data = cameraData(camera); unsigned int count = stream->configuration().bufferCount; /* * Export buffers on the converter or capture video node, depending on * whether the converter is used or not. */ if (useConverter_) return converter_->exportBuffers(count, buffers); else return data->video_->exportBuffers(count, buffers); } int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlList *controls) { SimpleCameraData *data = cameraData(camera); V4L2VideoDevice *video = data->video_; unsigned int count = data->stream_.configuration().bufferCount; int ret; if (useConverter_) ret = video->allocateBuffers(count, &converterBuffers_); else ret = video->importBuffers(count); if (ret < 0) return ret; ret = video->streamOn(); if (ret < 0) { stop(camera); return ret; } if (useConverter_) { ret = converter_->start(); if (ret < 0) { stop(camera); return ret; } /* Queue all internal buffers for capture. 
*/ for (std::unique_ptr<FrameBuffer> &buffer : converterBuffers_) video->queueBuffer(buffer.get()); } activeCamera_ = camera; return 0; } void SimplePipelineHandler::stop(Camera *camera) { SimpleCameraData *data = cameraData(camera); V4L2VideoDevice *video = data->video_; if (useConverter_) converter_->stop(); video->streamOff(); video->releaseBuffers(); converterBuffers_.clear(); activeCamera_ = nullptr; } int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request) { SimpleCameraData *data = cameraData(camera); Stream *stream = &data->stream_; FrameBuffer *buffer = request->findBuffer(stream); if (!buffer) { LOG(SimplePipeline, Error) << "Attempt to queue request with invalid stream"; return -ENOENT; } /* * If conversion is needed, push the buffer to the converter queue, it * will be handed to the converter in the capture completion handler. */ if (useConverter_) { converterQueue_.push(buffer); return 0; } return data->video_->queueBuffer(buffer); } /* ----------------------------------------------------------------------------- * Match and Setup */ bool SimplePipelineHandler::match(DeviceEnumerator *enumerator) { MediaDevice *converter = nullptr; for (const SimplePipelineInfo &info : supportedDevices) { DeviceMatch dm(info.driver); media_ = acquireMediaDevice(enumerator, dm); if (!media_) continue; if (!info.converter) break; DeviceMatch converterMatch(info.converter); converter = acquireMediaDevice(enumerator, converterMatch); break; } if (!media_) return false; /* Locate the sensors. */ std::vector<MediaEntity *> sensors; for (MediaEntity *entity : media_->entities()) { switch (entity->function()) { case MEDIA_ENT_F_CAM_SENSOR: sensors.push_back(entity); break; default: break; } } if (sensors.empty()) { LOG(SimplePipeline, Error) << "No sensor found"; return false; } /* Open the converter, if any. 
*/ if (converter) { converter_ = std::make_unique<SimpleConverter>(converter); if (converter_->open() < 0) { LOG(SimplePipeline, Warning) << "Failed to open converter, disabling format conversion"; converter_.reset(); } else { converter_->bufferReady.connect(this, &SimplePipelineHandler::converterDone); } } /* * Create one camera data instance for each sensor and gather all * entities in all pipelines. */ std::vector<std::unique_ptr<SimpleCameraData>> pipelines; std::set<MediaEntity *> entities; pipelines.reserve(sensors.size()); for (MediaEntity *sensor : sensors) { std::unique_ptr<SimpleCameraData> data = std::make_unique<SimpleCameraData>(this, sensor); if (!data->isValid()) { LOG(SimplePipeline, Error) << "No valid pipeline for sensor '" << sensor->name() << "', skipping"; continue; } for (SimpleCameraData::Entity &entity : data->entities_) entities.insert(entity.entity); pipelines.push_back(std::move(data)); } if (entities.empty()) return false; /* Create and open V4L2Subdev instances for all the entities. */ for (MediaEntity *entity : entities) { auto elem = subdevs_.emplace(std::piecewise_construct, std::forward_as_tuple(entity), std::forward_as_tuple(entity)); V4L2Subdevice *subdev = &elem.first->second; int ret = subdev->open(); if (ret < 0) { LOG(SimplePipeline, Error) << "Failed to open " << subdev->deviceNode() << ": " << strerror(-ret); return false; } } /* Initialize each pipeline and register a corresponding camera. 
*/ bool registered = false; for (std::unique_ptr<SimpleCameraData> &data : pipelines) { int ret = data->init(); if (ret < 0) continue; std::shared_ptr<Camera> camera = Camera::create(this, data->sensor_->id(), data->streams()); registerCamera(std::move(camera), std::move(data)); registered = true; } return registered; } V4L2VideoDevice *SimplePipelineHandler::video(const MediaEntity *entity) { /* * Return the V4L2VideoDevice corresponding to the media entity, either * as a previously constructed device if available from the cache, or * by constructing a new one. */ auto iter = videos_.find(entity); if (iter != videos_.end()) return iter->second.get(); std::unique_ptr<V4L2VideoDevice> video = std::make_unique<V4L2VideoDevice>(entity); if (video->open() < 0) return nullptr; video->bufferReady.connect(this, &SimplePipelineHandler::bufferReady); auto element = videos_.emplace(entity, std::move(video)); return element.first->second.get(); } V4L2Subdevice *SimplePipelineHandler::subdev(const MediaEntity *entity) { auto iter = subdevs_.find(entity); if (iter == subdevs_.end()) return nullptr; return &iter->second; } /* ----------------------------------------------------------------------------- * Buffer Handling */ void SimplePipelineHandler::bufferReady(FrameBuffer *buffer) { ASSERT(activeCamera_); SimpleCameraData *data = cameraData(activeCamera_); /* * If an error occurred during capture, or if the buffer was cancelled, * complete the request, even if the converter is in use as there's no * point converting an erroneous buffer. */ if (buffer->metadata().status != FrameMetadata::FrameSuccess) { if (useConverter_) { /* Requeue the buffer for capture. */ data->video_->queueBuffer(buffer); /* * Get the next user-facing buffer to complete the * request. 
*/ if (converterQueue_.empty()) return; buffer = converterQueue_.front(); converterQueue_.pop(); } Request *request = buffer->request(); completeBuffer(request, buffer); completeRequest(request); return; } /* * Queue the captured and the request buffer to the converter if format * conversion is needed. If there's no queued request, just requeue the * captured buffer for capture. */ if (useConverter_) { if (converterQueue_.empty()) { data->video_->queueBuffer(buffer); return; } FrameBuffer *output = converterQueue_.front(); converterQueue_.pop(); converter_->queueBuffers(buffer, output); return; } /* Otherwise simply complete the request. */ Request *request = buffer->request(); completeBuffer(request, buffer); completeRequest(request); } void SimplePipelineHandler::converterDone(FrameBuffer *input, FrameBuffer *output) { ASSERT(activeCamera_); SimpleCameraData *data = cameraData(activeCamera_); /* Complete the request. */ Request *request = output->request(); completeBuffer(request, output); completeRequest(request); /* Queue the input buffer back for capture. */ data->video_->queueBuffer(input); } REGISTER_PIPELINE_HANDLER(SimplePipelineHandler) } /* namespace libcamera */