path: root/src/android/mm/cros_camera_buffer.cpp
blob: e2a44a2a3437415ff01193212eab18acc9714dde (plain)
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2021, Google Inc.
 *
 * Chromium OS buffer backend using CameraBufferManager
 */

#include "../camera_buffer.h"

#include <cstring>

#include <libcamera/base/log.h>

#include "cros-camera/camera_buffer_manager.h"

using namespace libcamera;

LOG_DECLARE_CATEGORY(HAL)

class CameraBuffer::Private : public Extensible::Private
{
	LIBCAMERA_DECLARE_PUBLIC(CameraBuffer)

public:
	Private(CameraBuffer *cameraBuffer, buffer_handle_t camera3Buffer,
		PixelFormat pixelFormat, const Size &size,
		int flags);
	~Private();

	bool isValid() const { return registered_; }

	unsigned int numPlanes() const;

	Span<uint8_t> plane(unsigned int plane);

	unsigned int stride(unsigned int plane) const;
	unsigned int offset(unsigned int plane) const;
	unsigned int size(unsigned int plane) const;

	size_t jpegBufferSize(size_t maxJpegBufferSize) const;

private:
	void map();

	cros::CameraBufferManager *bufferManager_;
	buffer_handle_t handle_;
	unsigned int numPlanes_;
	bool mapped_;
	bool registered_;
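	/*
	 * Single-plane buffers are mapped to a plain virtual address, while
	 * multi-plane buffers are described by an android_ycbcr structure.
	 */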
	union {
		void *addr;
		android_ycbcr ycbcr;
	} mem;
};

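/*
 * Register the buffer with the CameraBufferManager and cache its number of
 * planes. The buffer is only mapped later, on first access to its planes.
 */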
CameraBuffer::Private::Private([[maybe_unused]] CameraBuffer *cameraBuffer,
			       buffer_handle_t camera3Buffer,
			       [[maybe_unused]] PixelFormat pixelFormat,
			       [[maybe_unused]] const Size &size,
			       [[maybe_unused]] int flags)
	: handle_(camera3Buffer), numPlanes_(0), mapped_(false),
	  registered_(false)
{
	bufferManager_ = cros::CameraBufferManager::GetInstance();
	if (!bufferManager_) {
		LOG(HAL, Fatal)
			<< "Failed to get cros CameraBufferManager instance";
		return;
	}

	int ret = bufferManager_->Register(camera3Buffer);
	if (ret) {
		LOG(HAL, Error) << "Failed registering a buffer: " << ret;
		return;
	}

	registered_ = true;
	numPlanes_ = bufferManager_->GetNumPlanes(camera3Buffer);
}

CameraBuffer::Private::~Private()
{
	int ret;
	if (mapped_) {
		ret = bufferManager_->Unlock(handle_);
		if (ret != 0)
			LOG(HAL, Error) << "Failed to unlock buffer: "
					<< strerror(-ret);
	}

	if (registered_) {
		ret = bufferManager_->Deregister(handle_);
		if (ret != 0)
			LOG(HAL, Error) << "Failed to deregister buffer: "
					<< strerror(-ret);
	}
}

unsigned int CameraBuffer::Private::numPlanes() const
{
	return bufferManager_->GetNumPlanes(handle_);
}

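/*
 * Return a span over the data of the requested plane, mapping the buffer on
 * first access.
 */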
Span<uint8_t> CameraBuffer::Private::plane(unsigned int plane)
{
	if (!mapped_)
		map();
	if (!mapped_)
		return {};

	void *addr;

	switch (numPlanes()) {
	case 1:
		addr = mem.addr;
		break;
	default:
		switch (plane) {
		case 0:
			addr = mem.ycbcr.y;
			break;
		case 1:
			addr = mem.ycbcr.cb;
			break;
		case 2:
			addr = mem.ycbcr.cr;
			break;
		default:
			return {};
		}
	}

	return { static_cast<uint8_t *>(addr),
		 bufferManager_->GetPlaneSize(handle_, plane) };
}

unsigned int CameraBuffer::Private::stride(unsigned int plane) const
{
	return cros::CameraBufferManager::GetPlaneStride(handle_, plane);
}

unsigned int CameraBuffer::Private::offset(unsigned int plane) const
{
	return cros::CameraBufferManager::GetPlaneOffset(handle_, plane);
}

unsigned int CameraBuffer::Private::size(unsigned int plane) const
{
	return cros::CameraBufferManager::GetPlaneSize(handle_, plane);
}

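/*
 * The JPEG blob is stored in the first plane of the buffer, so report that
 * plane's size as the available JPEG buffer size.
 */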
size_t CameraBuffer::Private::jpegBufferSize([[maybe_unused]] size_t maxJpegBufferSize) const
{
	return bufferManager_->GetPlaneSize(handle_, 0);
}

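/*
 * Map the buffer through the CameraBufferManager, using Lock() for
 * single-plane buffers and LockYCbCr() for 2- and 3-plane YCbCr buffers.
 */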
void CameraBuffer::Private::map()
{
	int ret;
	switch (numPlanes_) {
	case 1: {
		ret = bufferManager_->Lock(handle_, 0, 0, 0, 0, 0, &mem.addr);
		if (ret) {
			LOG(HAL, Error) << "Single plane buffer mapping failed";
			return;
		}
		break;
	}
	case 2:
	case 3: {
		ret = bufferManager_->LockYCbCr(handle_, 0, 0, 0, 0, 0,
						&mem.ycbcr);
		if (ret) {
			LOG(HAL, Error) << "YCbCr buffer mapping failed";
			return;
		}
		break;
	}
	default:
		LOG(HAL, Error) << "Invalid number of planes: " << numPlanes_;
		return;
	}

	mapped_ = true;
}

PUBLIC_CAMERA_BUFFER_IMPLEMENTATION