author     David Plowman <david.plowman@raspberrypi.com>      2020-11-23 07:37:58 +0000
committer  Kieran Bingham <kieran.bingham@ideasonboard.com>   2020-11-23 14:24:24 +0000
commit     9db94a3635b8cc0963fdbc8e33c07890ce177359 (patch)
tree       11743b34587a39b1fb288600e3e3c6e2e750f617 /src
parent     6af665992d48eb30bda901af73294f83b17876e7 (diff)
libcamera: ipa: raspberrypi: agc: Improve centre-weighted luminance calculation
Previously the calculation computed Y for each region before returning the
weighted average, which "baked in" the over-importance of small statistics
regions. The revised calculation will treat all pixels equally when the
region weights are the same, making it easier to use. With the previous
scheme, proper "average" metering was difficult to implement.

Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
Reviewed-by: Naushir Patuck <naush@raspberrypi.com>
Acked-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
Signed-off-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
Diffstat (limited to 'src')
-rw-r--r--  src/ipa/raspberrypi/controller/rpi/agc.cpp | 25
1 file changed, 15 insertions(+), 10 deletions(-)
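For illustration only (not part of the patch), below is a minimal standalone C++ sketch contrasting the two metering schemes described in the commit message. It uses two hypothetical regions with toy pixel counts, equal weights, and omits the AWB gains and the final PIPELINE_BITS scaling for brevity; the Region struct and the two helper functions are illustrative, not libcamera API.

#include <cstdio>

struct Region {
	double r_sum, g_sum, b_sum, counted;
};

/*
 * Old scheme: average Y per region, then weight the per-region averages.
 * A region with very few counted pixels contributes as much as a full one.
 */
static double oldY(const Region *regions, const double *weights, int n)
{
	double y_sum = 0, weight_sum = 0;
	for (int i = 0; i < n; i++) {
		if (regions[i].counted == 0)
			continue;
		double y = (regions[i].r_sum * .299 + regions[i].g_sum * .587 +
			    regions[i].b_sum * .114) / regions[i].counted;
		y_sum += y * weights[i];
		weight_sum += weights[i];
	}
	return y_sum / weight_sum;
}

/*
 * New scheme: weight the raw channel sums and pixel counts, so with equal
 * weights every counted pixel contributes equally ("average" metering).
 */
static double newY(const Region *regions, const double *weights, int n)
{
	double r_sum = 0, g_sum = 0, b_sum = 0, pixel_sum = 0;
	for (int i = 0; i < n; i++) {
		r_sum += regions[i].r_sum * weights[i];
		g_sum += regions[i].g_sum * weights[i];
		b_sum += regions[i].b_sum * weights[i];
		pixel_sum += regions[i].counted * weights[i];
	}
	return (r_sum * .299 + g_sum * .587 + b_sum * .114) / pixel_sum;
}

int main()
{
	/* A large dark region and a tiny bright one, equal weights. */
	Region regions[2] = { { 1000, 1000, 1000, 100 }, { 50, 50, 50, 1 } };
	double weights[2] = { 1.0, 1.0 };
	printf("old %.1f new %.1f\n", oldY(regions, weights, 2),
	       newY(regions, weights, 2));
	return 0;
}

With these toy numbers the old scheme returns 30.0 (the single bright pixel in the tiny region dominates the result), while the new scheme returns about 10.4, which is essentially the per-pixel average.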
diff --git a/src/ipa/raspberrypi/controller/rpi/agc.cpp b/src/ipa/raspberrypi/controller/rpi/agc.cpp
index 9a5d84f7..f0c70a0a 100644
--- a/src/ipa/raspberrypi/controller/rpi/agc.cpp
+++ b/src/ipa/raspberrypi/controller/rpi/agc.cpp
@@ -385,18 +385,23 @@ static double compute_initial_Y(bcm2835_isp_stats *stats, Metadata *image_metada
awb.gain_r = awb.gain_g = awb.gain_b = 1.0; // in case no metadata
if (image_metadata->Get("awb.status", awb) != 0)
LOG(RPiAgc, Warning) << "Agc: no AWB status found";
- double Y_sum = 0, weight_sum = 0;
+ // Note how the calculation below means that equal weights give you
+ // "average" metering (i.e. all pixels equally important).
+ double R_sum = 0, G_sum = 0, B_sum = 0, pixel_sum = 0;
for (int i = 0; i < AGC_STATS_SIZE; i++) {
- if (regions[i].counted == 0)
- continue;
- weight_sum += weights[i];
- double Y = regions[i].r_sum * awb.gain_r * .299 +
- regions[i].g_sum * awb.gain_g * .587 +
- regions[i].b_sum * awb.gain_b * .114;
- Y /= regions[i].counted;
- Y_sum += Y * weights[i];
+ R_sum += regions[i].r_sum * weights[i];
+ G_sum += regions[i].g_sum * weights[i];
+ B_sum += regions[i].b_sum * weights[i];
+ pixel_sum += regions[i].counted * weights[i];
}
- return Y_sum / weight_sum / (1 << PIPELINE_BITS);
+ if (pixel_sum == 0.0) {
+ LOG(RPiAgc, Warning) << "compute_initial_Y: pixel_sum is zero";
+ return 0;
+ }
+ double Y_sum = R_sum * awb.gain_r * .299 +
+ G_sum * awb.gain_g * .587 +
+ B_sum * awb.gain_b * .114;
+ return Y_sum / pixel_sum / (1 << PIPELINE_BITS);
}
// We handle extra gain through EV by adjusting our Y targets. However, you
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020, Google Inc.
 *
 * encoder_libjpeg.cpp - JPEG encoding using libjpeg native API
 */

#include "encoder_libjpeg.h"

#include <fcntl.h>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <vector>

#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/pixel_format.h>

#include "libcamera/internal/formats.h"
#include "libcamera/internal/log.h"

using namespace libcamera;

LOG_DEFINE_CATEGORY(JPEG)

namespace {

struct JPEGPixelFormatInfo {
	J_COLOR_SPACE colorSpace;
	const PixelFormatInfo &pixelFormatInfo;
	bool nvSwap;
};

const std::map<PixelFormat, JPEGPixelFormatInfo> pixelInfo{
	{ formats::R8, { JCS_GRAYSCALE, PixelFormatInfo::info(formats::R8), false } },

	{ formats::RGB888, { JCS_EXT_BGR, PixelFormatInfo::info(formats::RGB888), false } },
	{ formats::BGR888, { JCS_EXT_RGB, PixelFormatInfo::info(formats::BGR888), false } },

	{ formats::NV12, { JCS_YCbCr, PixelFormatInfo::info(formats::NV12), false } },
	{ formats::NV21, { JCS_YCbCr, PixelFormatInfo::info(formats::NV21), true } },
	{ formats::NV16, { JCS_YCbCr, PixelFormatInfo::info(formats::NV16), false } },
	{ formats::NV61, { JCS_YCbCr, PixelFormatInfo::info(formats::NV61), true } },
	{ formats::NV24, { JCS_YCbCr, PixelFormatInfo::info(formats::NV24), false } },
	{ formats::NV42, { JCS_YCbCr, PixelFormatInfo::info(formats::NV42), true } },
};

const struct JPEGPixelFormatInfo &findPixelInfo(const PixelFormat &format)
{
	static const struct JPEGPixelFormatInfo invalidPixelFormat {
		JCS_UNKNOWN, PixelFormatInfo(), false
	};

	const auto iter = pixelInfo.find(format);
	if (iter == pixelInfo.end()) {
		LOG(JPEG, Error) << "Unsupported pixel format for JPEG encoder: "
				 << format.toString();
		return invalidPixelFormat;
	}

	return iter->second;
}

} /* namespace */

EncoderLibJpeg::EncoderLibJpeg()
	: quality_(95)
{
	/* \todo Expand error handling coverage with a custom handler. */
	compress_.err = jpeg_std_error(&jerr_);

	jpeg_create_compress(&compress_);
}

EncoderLibJpeg::~EncoderLibJpeg()
{
	jpeg_destroy_compress(&compress_);
}

int EncoderLibJpeg::configure(const StreamConfiguration &cfg)
{
	const struct JPEGPixelFormatInfo info = findPixelInfo(cfg.pixelFormat);
	if (info.colorSpace == JCS_UNKNOWN)
		return -ENOTSUP;

	compress_.image_width = cfg.size.width;
	compress_.image_height = cfg.size.height;
	compress_.in_color_space = info.colorSpace;

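	/* libjpeg expects one component for grayscale input and three for colour input. */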
	compress_.input_components = info.colorSpace == JCS_GRAYSCALE ? 1 : 3;

	jpeg_set_defaults(&compress_);
	jpeg_set_quality(&compress_, quality_, TRUE);

	pixelFormatInfo_ = &info.pixelFormatInfo;

	nv_ = pixelFormatInfo_->numPlanes() == 2;
	nvSwap_ = info.nvSwap;

	return 0;
}

void EncoderLibJpeg::compressRGB(const libcamera::MappedBuffer *frame)
{
	unsigned char *src = static_cast<unsigned char *>(frame->maps()[0].data());
	/* \todo Stride information should come from buffer configuration. */
	unsigned int stride = pixelFormatInfo_->stride(compress_.image_width, 0);

	JSAMPROW row_pointer[1];

	while (compress_.next_scanline < compress_.image_height) {
		row_pointer[0] = &src[compress_.next_scanline * stride];
		jpeg_write_scanlines(&compress_, row_pointer, 1);
	}
}

/*
 * Compress the incoming buffer from a supported NV format.
 * This naively unpacks the semi-planar NV12 to a YUV888 format for libjpeg.
 */
void EncoderLibJpeg::compressNV(const libcamera::MappedBuffer *frame)
{
	uint8_t tmprowbuf[compress_.image_width * 3];

	/*
	 * \todo Use the raw api, and only unpack the cb/cr samples to new line
	 * buffers. If possible, see if we can set appropriate pixel strides
	 * too to save even that copy.
	 *
	 * Possible hints at:
	 * https://sourceforge.net/p/libjpeg/mailman/message/30815123/
	 */
	unsigned int y_stride = pixelFormatInfo_->stride(compress_.image_width, 0);
	unsigned int c_stride = pixelFormatInfo_->stride(compress_.image_width, 1);

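	/*
	 * The chroma plane interleaves Cb and Cr samples, so its stride is
	 * 2 * image_width divided by the horizontal subsampling factor:
	 * c_stride equals image_width for the 2x subsampled NV12/NV21/NV16/NV61
	 * formats and 2 * image_width for NV24/NV42.
	 */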
	unsigned int horzSubSample = 2 * compress_.image_width / c_stride;
	unsigned int vertSubSample = pixelFormatInfo_->planes[1].verticalSubSampling;

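	/*
	 * When the chroma is horizontally subsampled, two luma pixels share one
	 * Cb/Cr pair: the first pixel of each pair steps the chroma pointers by
	 * c_inc (0 when subsampled, 2 otherwise) and the second always steps by
	 * a full pair. nvSwap_ selects whether Cb or Cr comes first in the pair.
	 */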
	unsigned int c_inc = horzSubSample == 1 ? 2 : 0;
	unsigned int cb_pos = nvSwap_ ? 1 : 0;
	unsigned int cr_pos = nvSwap_ ? 0 : 1;

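	/* The interleaved Cb/Cr plane immediately follows the luma plane. */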
	const unsigned char *src = static_cast<unsigned char *>(frame->maps()[0].data());
	const unsigned char *src_c = src + y_stride * compress_.image_height;

	JSAMPROW row_pointer[1];
	row_pointer[0] = &tmprowbuf[0];

	for (unsigned int y = 0; y < compress_.image_height; y++) {
		unsigned char *dst = &tmprowbuf[0];

		const unsigned char *src_y = src + y * compress_.image_width;
		const unsigned char *src_cb = src_c + (y / vertSubSample) * c_stride + cb_pos;
		const unsigned char *src_cr = src_c + (y / vertSubSample) * c_stride + cr_pos;

		for (unsigned int x = 0; x < compress_.image_width; x += 2) {
			dst[0] = *src_y;
			dst[1] = *src_cb;
			dst[2] = *src_cr;
			src_y++;
			src_cb += c_inc;
			src_cr += c_inc;
			dst += 3;

			dst[0] = *src_y;
			dst[1] = *src_cb;
			dst[2] = *src_cr;
			src_y++;
			src_cb += 2;
			src_cr += 2;
			dst += 3;
		}

		jpeg_write_scanlines(&compress_, row_pointer, 1);
	}
}

int EncoderLibJpeg::encode(const FrameBuffer *source,
			   const libcamera::Span<uint8_t> &dest,
			   const libcamera::Span<const uint8_t> &exifData)
{
	MappedFrameBuffer frame(source, PROT_READ);
	if (!frame.isValid()) {
		LOG(JPEG, Error) << "Failed to map FrameBuffer : "
				 << strerror(frame.error());
		return frame.error();
	}

	unsigned char *destination = dest.data();
	unsigned long size = dest.size();

	/*
	 * The jpeg_mem_dest will reallocate if the required size is not
	 * sufficient. That means the output won't be written to the correct
	 * buffers.
	 *
	 * \todo Implement our own custom memory destination to prevent
	 * reallocation and prefer failure with correct reporting.
	 */
	jpeg_mem_dest(&compress_, &destination, &size);

	jpeg_start_compress(&compress_, TRUE);

	if (exifData.size())
		/* Store Exif data in the JPEG_APP1 data block. */
		jpeg_write_marker(&compress_, JPEG_APP0 + 1,
				  static_cast<const JOCTET *>(exifData.data()),
				  exifData.size());

	LOG(JPEG, Debug) << "JPEG Encode Starting:" << compress_.image_width
			 << "x" << compress_.image_height;

	if (nv_)
		compressNV(&frame);
	else
		compressRGB(&frame);

	jpeg_finish_compress(&compress_);

	return size;
}