summaryrefslogtreecommitdiff
path: root/test/v4l2_videodevice/buffer_cache.cpp
blob: b3f2bec117834221631ea91f11291700ff4fee02 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020, Google Inc.
 *
 * Test the buffer cache different operation modes
 */

#include <iostream>
#include <random>
#include <vector>

#include <libcamera/formats.h>
#include <libcamera/stream.h>

#include "buffer_source.h"

#include "test.h"

using namespace libcamera;

namespace {

class BufferCacheTest : public Test
{
public:
	/*
	 * Test that a cache with the same size as there are buffers results in
	 * a sequential run over; 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...
	 *
	 * The test is only valid when the cache size is at least as big as the
	 * number of buffers.
	 */
	int testSequential(V4L2BufferCache *cache,
			   const std::vector<std::unique_ptr<FrameBuffer>> &buffers)
	{
		for (unsigned int i = 0; i < buffers.size() * 100; i++) {
			int nBuffer = i % buffers.size();
			/* Dereference the unique_ptr directly, .get() is redundant. */
			int index = cache->get(*buffers[nBuffer]);

			if (index != nBuffer) {
				std::cout << "Expected index " << nBuffer
					  << " got " << index << std::endl;
				return TestFail;
			}

			cache->put(index);
		}

		return TestPass;
	}

	/*
	 * Test that randomly putting buffers to the cache always results in a
	 * valid index.
	 */
	int testRandom(V4L2BufferCache *cache,
		       const std::vector<std::unique_ptr<FrameBuffer>> &buffers)
	{
		std::uniform_int_distribution<> dist(0, buffers.size() - 1);

		for (unsigned int i = 0; i < buffers.size() * 100; i++) {
			int nBuffer = dist(generator_);
			int index = cache->get(*buffers[nBuffer]);

			if (index < 0) {
				std::cout << "Failed lookup from cache"
					  << std::endl;
				return TestFail;
			}

			cache->put(index);
		}

		return TestPass;
	}

	/*
	 * Test that using a buffer more frequently keeps it hot in the cache at
	 * all times.
	 */
	int testHot(V4L2BufferCache *cache,
		    const std::vector<std::unique_ptr<FrameBuffer>> &buffers,
		    unsigned int hotFrequency)
	{
		/* Run the random test on the cache to make it messy. */
		if (testRandom(cache, buffers) != TestPass)
			return TestFail;

		std::uniform_int_distribution<> dist(0, buffers.size() - 1);

		/* Pick a hot buffer at random and store its index. */
		int hotBuffer = dist(generator_);
		int hotIndex = cache->get(*buffers[hotBuffer]);
		cache->put(hotIndex);

		/*
		 * Queue the hot buffer at the requested frequency and make
		 * sure it keeps resolving to the same index, i.e. stays hot.
		 */
		for (unsigned int i = 0; i < buffers.size() * 100; i++) {
			int nBuffer, index;
			bool hotQueue = i % hotFrequency == 0;

			if (hotQueue)
				nBuffer = hotBuffer;
			else
				nBuffer = dist(generator_);

			index = cache->get(*buffers[nBuffer]);

			if (index < 0) {
				std::cout << "Failed lookup from cache"
					  << std::endl;
				return TestFail;
			}

			if (hotQueue && index != hotIndex) {
				std::cout << "Hot buffer got cold"
					  << std::endl;
				return TestFail;
			}

			cache->put(index);
		}

		return TestPass;
	}

	int init() override
	{
		/*
		 * Seed the PRNG from a true random source, and log the seed so
		 * that a failing run can be reproduced.
		 */
		std::random_device rd;
		unsigned int seed = rd();

		std::cout << "Random seed is " << seed << std::endl;

		generator_.seed(seed);

		return TestPass;
	}

	int run() override
	{
		const unsigned int numBuffers = 8;

		StreamConfiguration cfg;
		cfg.pixelFormat = formats::YUYV;
		cfg.size = Size(600, 800);
		cfg.bufferCount = numBuffers;

		/* Allocate real frame buffers to exercise the cache with. */
		BufferSource source;
		int ret = source.allocate(cfg);
		if (ret != TestPass)
			return ret;

		const std::vector<std::unique_ptr<FrameBuffer>> &buffers =
			source.buffers();

		if (buffers.size() != numBuffers) {
			std::cout << "Got " << buffers.size()
				  << " buffers, expected " << numBuffers
				  << std::endl;
			return TestFail;
		}

		/*
		 * Test cache of same size as there are buffers, the cache is
		 * created from a list of buffers and will be pre-populated.
		 */
		V4L2BufferCache cacheFromBuffers(buffers);

		if (testSequential(&cacheFromBuffers, buffers) != TestPass)
			return TestFail;

		if (testRandom(&cacheFromBuffers, buffers) != TestPass)
			return TestFail;

		if (testHot(&cacheFromBuffers, buffers, numBuffers) != TestPass)
			return TestFail;

		/*
		 * Test cache of same size as there are buffers, the cache is
		 * not pre-populated.
		 */
		V4L2BufferCache cacheFromNumbers(numBuffers);

		if (testSequential(&cacheFromNumbers, buffers) != TestPass)
			return TestFail;

		if (testRandom(&cacheFromNumbers, buffers) != TestPass)
			return TestFail;

		if (testHot(&cacheFromNumbers, buffers, numBuffers) != TestPass)
			return TestFail;

		/*
		 * Test cache half the size of number of buffers used, the cache
		 * is not pre-populated. The sequential test is skipped as it is
		 * only valid when the cache holds at least as many entries as
		 * there are buffers.
		 */
		V4L2BufferCache cacheHalf(numBuffers / 2);

		if (testRandom(&cacheHalf, buffers) != TestPass)
			return TestFail;

		if (testHot(&cacheHalf, buffers, numBuffers / 2) != TestPass)
			return TestFail;

		return TestPass;
	}

private:
	/* Mersenne Twister engine, seeded once in init() with a logged seed. */
	std::mt19937 generator_;
};

} /* namespace */

/* Register the test so the framework's runner can discover and execute it. */
TEST_REGISTER(BufferCacheTest)
s="hl str">'Rs' is a 2-dimensional reference system on the focal plane of the camera module. The origin is placed on the top-left corner of the visible scene, the X-axis points towards the right, and the Y-axis points towards the bottom of the scene. The top, bottom, left and right directions are intentionally not defined and depend on the environment in which the camera is used. A typical example of a (very common) picture of a shark swimming from left to right, as seen from the camera, is 0 X-axis 0 +-------------------------------------> ! ! ! ! |\____)\___ ! ) _____ __`< ! |/ )/ ! ! ! V Y-axis With the reference system 'Rs' placed on the camera focal plane. ¸.·˙! ¸.·˙ ! _ ¸.·˙ ! +-/ \-+¸.·˙ ! | (o) | ! Camera focal plane +-----+˙·.¸ ! ˙·.¸ ! ˙·.¸ ! ˙·.¸! When projected on the sensor's pixel array, the image and the associated reference system 'Rs' are typically (but not always) inverted, due to the camera module's lens optical inversion effect. Assuming the above represented scene of the swimming shark, the lens inversion projects the scene and its reference system onto the sensor pixel array, seen from the front of the camera sensor, as follow Y-axis ^ ! ! ! ! |\_____)\__ ! ) ____ ___.< ! |/ )/ ! ! ! 0 +-------------------------------------> 0 X-axis Note the shark being upside-down. The resulting projected reference system is named 'Rp'. The camera rotation property is then defined as the angular difference in the counter-clockwise direction between the camera reference system 'Rc' and the projected scene reference system 'Rp'. It is expressed in degrees as a number in the range [0, 360[. Examples 0 degrees camera rotation Y-Rp ^ Y-Rc ! ^ ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! 0 +-------------------------------------> ! 0 X-Rp 0 +-------------------------------------> 0 X-Rc X-Rc 0 <------------------------------------+ 0 X-Rp 0 ! <------------------------------------+ 0 ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! V ! 
Y-Rc V Y-Rp 90 degrees camera rotation 0 Y-Rc 0 +--------------------> ! Y-Rp ! ^ ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! 0 +-------------------------------------> ! 0 X-Rp ! ! ! ! V X-Rc 180 degrees camera rotation 0 <------------------------------------+ 0 X-Rc ! Y-Rp ! ^ ! ! ! ! ! ! ! ! ! ! ! ! ! ! V ! Y-Rc 0 +-------------------------------------> 0 X-Rp 270 degrees camera rotation 0 Y-Rc 0 +--------------------> ! 0 ! <-----------------------------------+ 0 ! X-Rp ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! V ! Y-Rp ! ! ! ! V X-Rc Example one - Webcam A camera module installed on the user facing part of a laptop screen casing used for video calls. The captured images are meant to be displayed in landscape mode (width > height) on the laptop screen. The camera is typically mounted upside-down to compensate the lens optical inversion effect. Y-Rp Y-Rc ^ ^ ! ! ! ! ! |\_____)\__ ! ! ) ____ ___.< ! ! |/ )/ ! ! ! ! ! ! ! 0 +-------------------------------------> ! 0 X-Rp 0 +-------------------------------------> 0 X-Rc The two reference systems are aligned, the resulting camera rotation is 0 degrees, no rotation correction needs to be applied to the resulting image once captured to memory buffers to correctly display it to users. +--------------------------------------+ ! ! ! ! ! ! ! |\____)\___ ! ! ) _____ __`< ! ! |/ )/ ! ! ! ! ! ! ! +--------------------------------------+ If the camera sensor is not mounted upside-down to compensate for the lens optical inversion, the two reference systems will not be aligned, with 'Rp' being rotated 180 degrees relatively to 'Rc'. X-Rc 0 <------------------------------------+ 0 ! Y-Rp ! ^ ! ! ! ! |\_____)\__ ! ! ) ____ ___.< ! ! |/ )/ ! ! ! ! ! ! V ! Y-Rc 0 +-------------------------------------> 0 X-Rp The image once captured to memory will then be rotated by 180 degrees +--------------------------------------+ ! ! ! ! ! ! ! __/(_____/| ! ! >.___ ____ ( ! ! \( \| ! ! ! ! ! ! ! 
+--------------------------------------+ A software rotation correction of 180 degrees should be applied to correctly display the image. +--------------------------------------+ ! ! ! ! ! ! ! |\____)\___ ! ! ) _____ __`< ! ! |/ )/ ! ! ! ! ! ! ! +--------------------------------------+ Example two - Phone camera A camera installed on the back side of a mobile device facing away from the user. The captured images are meant to be displayed in portrait mode (height > width) to match the device screen orientation and the device usage orientation used when taking the picture. The camera sensor is typically mounted with its pixel array longer side aligned to the device longer side, upside-down mounted to compensate for the lens optical inversion effect. 0 Y-Rc 0 +--------------------> ! Y-Rp ! ^ ! ! ! ! ! ! ! ! |\_____)\__ ! ! ) ____ ___.< ! ! |/ )/ ! ! ! ! ! ! ! 0 +-------------------------------------> ! 0 X-Rp ! ! ! ! V X-Rc The two reference systems are not aligned and the 'Rp' reference system is rotated by 90 degrees in the counter-clockwise direction relatively to the 'Rc' reference system. The image once captured to memory will be rotated. +-------------------------------------+ | _ _ | | \ / | | | | | | | | | | | > | | < | | | | | | | . | | V | +-------------------------------------+ A correction of 90 degrees in counter-clockwise direction has to be applied to correctly display the image in portrait mode on the device screen. +--------------------+ | | | | | | | | | | | | | |\____)\___ | | ) _____ __`< | | |/ )/ | | | | | | | | | | | +--------------------+ - Model: type: string description: | The model name shall to the extent possible describe the sensor. For most devices this is the model name of the sensor. While for some devices the sensor model is unavailable as the sensor or the entire camera is part of a larger unit and exposed as a black-box to the system. In such cases the model name of the smallest device that contains the camera sensor shall be used. 
The model name is not meant to be a camera name displayed to the end-user, but may be combined with other camera information to create a camera name. The model name is not guaranteed to be unique in the system nor is it guaranteed to be stable or have any other properties required to make it a good candidate to be used as a permanent identifier of a camera. The model name shall describe the camera in a human readable format and shall be encoded in ASCII. Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'. - UnitCellSize: type: Size description: | The pixel unit cell physical size, in nanometers. The UnitCellSize properties defines the horizontal and vertical sizes of a single pixel unit, including its active and non-active parts. In other words, it expresses the horizontal and vertical distance between the top-left corners of adjacent pixels. The property can be used to calculate the physical size of the sensor's pixel array area and for calibration purposes. - PixelArraySize: type: Size description: | The camera sensor pixel array readable area vertical and horizontal sizes, in pixels. The PixelArraySize property defines the size in pixel units of the readable part of full pixel array matrix, including optical black pixels used for calibration, pixels which are not considered valid for capture and active pixels containing valid image data. The property describes the maximum size of the raw data captured by the camera, which might not correspond to the physical size of the sensor pixel array matrix, as some portions of the physical pixel array matrix are not accessible and cannot be transmitted out. 
For example, let's consider a pixel array matrix assembled as follows +--------------------------------------------------+ |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| ... ... ... ... ... ... ... ... ... ... |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx| |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| |xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx| +--------------------------------------------------+ starting with two lines of non-readable pixels (x), followed by N lines of readable data (D) surrounded by two columns of non-readable pixels on each side, and ending with two more lines of non-readable pixels. Only the readable portion is transmitted to the receiving side, defining the sizes of the largest possible buffer of raw data that can be presented to applications. PixelArraySize.width /----------------------------------------------/ +----------------------------------------------+ / |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height ... ... ... ... ... ... ... ... ... ... 
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | |DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | +----------------------------------------------+ / This defines a rectangle whose top-left corner is placed in position (0, 0) and whose vertical and horizontal sizes are defined by this property. All other rectangles that describe portions of the pixel array, such as the optical black pixels rectangles and active pixel areas, are defined relatively to this rectangle. All the coordinates are expressed relative to the default sensor readout direction, without any transformation (such as horizontal and vertical flipping) applied. When mapping them to the raw pixel buffer, applications shall take any configured transformation into account. \todo Rename this property to Size once we will have property categories (i.e. Properties::PixelArray::Size) - PixelArrayOpticalBlackRectangles: type: Rectangle size: [n] description: | The pixel array region(s) which contain optical black pixels considered valid for calibration purposes. This property describes the position and size of optical black pixel regions in the raw data buffer as stored in memory, which might differ from their actual physical location in the pixel array matrix. It is important to note, in fact, that camera sensors might automatically reorder or skip portions of their pixels array matrix when transmitting data to the receiver. For instance, a sensor may merge the top and bottom optical black rectangles into a single rectangle, transmitted at the beginning of the frame. The pixel array contains several areas with different purposes, interleaved by lines and columns which are said not to be valid for capturing purposes. Invalid lines and columns are defined as invalid as they could be positioned too close to the chip margins or to the optical black shielding placed on top of optical black pixels. 
PixelArraySize.width /----------------------------------------------/ x1 x2 +--o---------------------------------------o---+ / |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | y1 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | |IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | y2 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| | |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | y3 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | PixelArraySize.height |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | ... ... ... ... ... ... ... ... ... ... y4 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | |IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| | +----------------------------------------------+ / The readable pixel array matrix is composed by 2 invalid lines (I) 4 lines of valid optical black pixels (O) 2 invalid lines (I) n lines of valid pixel data (P) 2 invalid lines (I) And the position of the optical black pixel rectangles is defined by PixelArrayOpticalBlackRectangles = { { x1, y1, x2 - x1 + 1, y2 - y1 + 1 }, { x1, y3, 2, y4 - y3 + 1 }, { x2, y3, 2, y4 - y3 + 1 }, }; If the camera, when capturing the full pixel array matrix, automatically skips the invalid lines and columns, producing the following data buffer, when captured to memory PixelArraySize.width /----------------------------------------------/ x1 +--------------------------------------------o-+ / |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | |OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| | y1 oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | 
|OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | PixelArraySize.height ... ... ... ... ... | ... ... ... ... ... | |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | |OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | +----------------------------------------------+ / then the invalid lines and columns should not be reported as part of the PixelArraySize property in first place. In this case, the position of the black pixel rectangles will be PixelArrayOpticalBlackRectangles = {