/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Copyright (C) 2019, Google Inc.
 *
 * camera_device.cpp - libcamera Android Camera Device
 */

#include "camera_device.h"

/*
 * NOTE(review): the targets of the following includes were lost in transit
 * (all angle-bracket content was stripped, likely by an HTML-unaware tool).
 * The list below is reconstructed from the identifiers used in this file —
 * verify it against the upstream libcamera sources.
 */
#include <algorithm>
#include <fstream>
#include <set>
#include <sys/mman.h>
#include <unistd.h>
#include <vector>

#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/formats.h>
#include <libcamera/property_ids.h>

#include "system/graphics.h"

#include "camera_buffer.h"
#include "camera_hal_config.h"
#include "camera_ops.h"
#include "camera_request.h"

using namespace libcamera;

LOG_DECLARE_CATEGORY(HAL)

namespace {

/*
 * \struct Camera3StreamConfig
 * \brief Data to store StreamConfiguration associated with camera3_stream(s)
 * \var streams List of the pairs of a stream requested by Android HAL client
 * and CameraStream::Type associated with the stream
 * \var config StreamConfiguration for streams
 */
struct Camera3StreamConfig {
	struct Camera3Stream {
		camera3_stream_t *stream;
		CameraStream::Type type;
	};

	/* Template argument restored: it was stripped from the source. */
	std::vector<Camera3Stream> streams;
	StreamConfiguration config;
};

/*
 * Reorder the configurations so that libcamera::Camera can accept them as much
 * as possible. The sort rule is as follows.
 * 1.) The configuration for NV12 request whose resolution is the largest.
 * 2.) The configuration for JPEG request.
 * 3.) Others. Larger resolutions and different formats are put earlier.
 */
void sortCamera3StreamConfigs(std::vector<Camera3StreamConfig> &unsortedConfigs,
			      const camera3_stream_t *jpegStream)
{
	const Camera3StreamConfig *jpegConfig = nullptr;

	/* Group configurations by pixel format; keep the JPEG one aside. */
	std::map<PixelFormat, std::vector<const Camera3StreamConfig *>> formatToConfigs;
	for (const auto &streamConfig : unsortedConfigs) {
		if (jpegStream && !jpegConfig) {
			const auto &streams = streamConfig.streams;
			if (std::find_if(streams.begin(), streams.end(),
					 [jpegStream](const auto &stream) {
						 return stream.stream == jpegStream;
					 }) != streams.end()) {
				jpegConfig = &streamConfig;
				continue;
			}
		}
		formatToConfigs[streamConfig.config.pixelFormat].push_back(&streamConfig);
	}

	if (jpegStream && !jpegConfig)
		LOG(HAL, Fatal) << "No Camera3StreamConfig is found for JPEG";

	for (auto &fmt : formatToConfigs) {
		auto &streamConfigs = fmt.second;

		/* Sorted by resolution. Smaller is put first. */
		std::sort(streamConfigs.begin(), streamConfigs.end(),
			  [](const auto *streamConfigA, const auto *streamConfigB) {
				  const Size &sizeA = streamConfigA->config.size;
				  const Size &sizeB = streamConfigB->config.size;
				  return sizeA < sizeB;
			  });
	}

	std::vector<Camera3StreamConfig> sortedConfigs;
	sortedConfigs.reserve(unsortedConfigs.size());

	/*
	 * NV12 is the most prioritized format. Put the configuration with NV12
	 * and the largest resolution first.
	 */
	const auto nv12It = formatToConfigs.find(formats::NV12);
	if (nv12It != formatToConfigs.end()) {
		auto &nv12Configs = nv12It->second;
		const Camera3StreamConfig *nv12Largest = nv12Configs.back();

		/*
		 * If JPEG will be created from NV12 and the size is larger than
		 * the largest NV12 configurations, then put the NV12
		 * configuration for JPEG first.
		 */
		if (jpegConfig && jpegConfig->config.pixelFormat == formats::NV12) {
			const Size &nv12SizeForJpeg = jpegConfig->config.size;
			const Size &nv12LargestSize = nv12Largest->config.size;

			if (nv12LargestSize < nv12SizeForJpeg) {
				LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
				sortedConfigs.push_back(std::move(*jpegConfig));
				jpegConfig = nullptr;
			}
		}

		LOG(HAL, Debug) << "Insert " << nv12Largest->config.toString();
		sortedConfigs.push_back(*nv12Largest);
		nv12Configs.pop_back();

		if (nv12Configs.empty())
			formatToConfigs.erase(nv12It);
	}

	/* If the configuration for JPEG is there, then put it. */
	if (jpegConfig) {
		LOG(HAL, Debug) << "Insert " << jpegConfig->config.toString();
		sortedConfigs.push_back(std::move(*jpegConfig));
		jpegConfig = nullptr;
	}

	/*
	 * Put configurations with different formats and larger resolutions
	 * earlier.
	 */
	while (!formatToConfigs.empty()) {
		for (auto it = formatToConfigs.begin(); it != formatToConfigs.end();) {
			auto &configs = it->second;
			LOG(HAL, Debug) << "Insert " << configs.back()->config.toString();
			sortedConfigs.push_back(*configs.back());
			configs.pop_back();

			if (configs.empty())
				it = formatToConfigs.erase(it);
			else
				it++;
		}
	}

	ASSERT(sortedConfigs.size() == unsortedConfigs.size());

	unsortedConfigs = sortedConfigs;
}

/* Map an Android stream rotation constant to a printable string. */
const char *rotationToString(int rotation)
{
	switch (rotation) {
	case CAMERA3_STREAM_ROTATION_0:
		return "0";
	case CAMERA3_STREAM_ROTATION_90:
		return "90";
	case CAMERA3_STREAM_ROTATION_180:
		return "180";
	case CAMERA3_STREAM_ROTATION_270:
		return "270";
	}
	return "INVALID";
}

#if defined(OS_CHROMEOS)
/*
 * Check whether the crop_rotate_scale_degrees values for all streams in
 * the list are valid according to the Chrome OS camera HAL API.
 */
bool validateCropRotate(const camera3_stream_configuration_t &streamList)
{
	ASSERT(streamList.num_streams > 0);
	const int cropRotateScaleDegrees =
		streamList.streams[0]->crop_rotate_scale_degrees;
	for (unsigned int i = 0; i < streamList.num_streams; ++i) {
		const camera3_stream_t &stream = *streamList.streams[i];

		switch (stream.crop_rotate_scale_degrees) {
		case CAMERA3_STREAM_ROTATION_0:
		case CAMERA3_STREAM_ROTATION_90:
		case CAMERA3_STREAM_ROTATION_270:
			break;

		/* 180° rotation is specified by Chrome OS as invalid. */
		case CAMERA3_STREAM_ROTATION_180:
		default:
			LOG(HAL, Error) << "Invalid crop_rotate_scale_degrees: "
					<< stream.crop_rotate_scale_degrees;
			return false;
		}

		if (cropRotateScaleDegrees != stream.crop_rotate_scale_degrees) {
			LOG(HAL, Error) << "crop_rotate_scale_degrees in all "
					<< "streams are not identical";
			return false;
		}
	}

	return true;
}
#endif

} /* namespace */

/*
 * \class CameraDevice
 *
 * The CameraDevice class wraps a libcamera::Camera instance, and implements
 * the camera3_device_t interface, bridging calls received from the Android
 * camera service to the CameraDevice.
* * The class translates parameters and operations from the Camera HALv3 API to * the libcamera API to provide static information for a Camera, create request * templates for it, process capture requests and then deliver capture results * back to the framework using the designated callbacks. */ CameraDevice::CameraDevice(unsigned int id, std::shared_ptr camera) : id_(id), state_(State::Stopped), camera_(std::move(camera)), facing_(CAMERA_FACING_FRONT), orientation_(0) { camera_->requestCompleted.connect(this, &CameraDevice::requestComplete); maker_ = "libcamera"; model_ = "cameraModel"; /* \todo Support getting properties on Android */ std::ifstream fstream("/var/cache/camera/camera.prop"); if (!fstream.is_open()) return; std::string line; while (std::getline(fstream, line)) { std::string::size_type delimPos = line.find("="); if (delimPos == std::string::npos) continue; std::string key = line.substr(0, delimPos); std::string val = line.substr(delimPos + 1); if (!key.compare("ro.product.model")) model_ = val; else if (!key.compare("ro.product.manufacturer")) maker_ = val; } } CameraDevice::~CameraDevice() = default; std::unique_ptr CameraDevice::create(unsigned int id, std::shared_ptr cam) { return std::unique_ptr( new CameraDevice(id, std::move(cam))); } /* * Initialize the camera static information retrieved from the * Camera::properties or from the cameraConfigData. * * cameraConfigData is optional for external camera devices and can be * nullptr. * * This function is called before the camera device is opened. */ int CameraDevice::initialize(const CameraConfigData *cameraConfigData) { /* * Initialize orientation and facing side of the camera. * * If the libcamera::Camera provides those information as retrieved * from firmware use them, otherwise fallback to values parsed from * the configuration file. If the configuration file is not available * the camera is external so its location and rotation can be safely * defaulted. 
*/ const ControlList &properties = camera_->properties(); if (properties.contains(properties::Location)) { int32_t location = properties.get(properties::Location); switch (location) { case properties::CameraLocationFront: facing_ = CAMERA_FACING_FRONT; break; case properties::CameraLocationBack: facing_ = CAMERA_FACING_BACK; break; case properties::CameraLocationExternal: /* * If the camera is reported as external, but the * CameraHalManager has overriden it, use what is * reported in the configuration file. This typically * happens for UVC cameras reported as 'External' by * libcamera but installed in fixed position on the * device. */ if (cameraConfigData && cameraConfigData->facing != -1) facing_ = cameraConfigData->facing; else facing_ = CAMERA_FACING_EXTERNAL; break; } if (cameraConfigData && cameraConfigData->facing != -1 && facing_ != cameraConfigData->facing) { LOG(HAL, Warning) << "Camera location does not match" << " configuration file. Using " << facing_; } } else if (cameraConfigData) { if (cameraConfigData->facing == -1) { LOG(HAL, Error) << "Camera facing not in configuration file"; return -EINVAL; } facing_ = cameraConfigData->facing; } else { facing_ = CAMERA_FACING_EXTERNAL; } /* * The Android orientation metadata specifies its rotation correction * value in clockwise direction whereas libcamera specifies the * rotation property in anticlockwise direction. Read the libcamera's * rotation property (anticlockwise) and compute the corresponding * value for clockwise direction as required by the Android orientation * metadata. */ if (properties.contains(properties::Rotation)) { int rotation = properties.get(properties::Rotation); orientation_ = (360 - rotation) % 360; if (cameraConfigData && cameraConfigData->rotation != -1 && orientation_ != cameraConfigData->rotation) { LOG(HAL, Warning) << "Camera orientation does not match" << " configuration file. 
Using " << orientation_; } } else if (cameraConfigData) { if (cameraConfigData->rotation == -1) { LOG(HAL, Error) << "Camera rotation not in configuration file"; return -EINVAL; } orientation_ = cameraConfigData->rotation; } else { orientation_ = 0; } return capabilities_.initialize(camera_, orientation_, facing_); } /* * Open a camera device. The static information on the camera shall have been * initialized with a call to CameraDevice::initialize(). */ int CameraDevice::open(const hw_module_t *hardwareModule) { int ret = camera_->acquire(); if (ret) { LOG(HAL, Error) << "Failed to acquire the camera"; return ret; } /* Initialize the hw_device_t in the instance camera3_module_t. */ camera3Device_.common.tag = HARDWARE_DEVICE_TAG; camera3Device_.common.version = CAMERA_DEVICE_API_VERSION_3_3; camera3Device_.common.module = (hw_module_t *)hardwareModule; camera3Device_.common.close = hal_dev_close; /* * The camera device operations. These actually implement * the Android Camera HALv3 interface. */ camera3Device_.ops = &hal_dev_ops; camera3Device_.priv = this; return 0; } void CameraDevice::close() { stop(); camera_->release(); } void CameraDevice::flush() { { MutexLocker stateLock(stateMutex_); if (state_ != State::Running) return; state_ = State::Flushing; } worker_.stop(); camera_->stop(); MutexLocker stateLock(stateMutex_); state_ = State::Stopped; } void CameraDevice::stop() { MutexLocker stateLock(stateMutex_); if (state_ == State::Stopped) return; worker_.stop(); camera_->stop(); descriptors_ = {}; streams_.clear(); state_ = State::Stopped; } unsigned int CameraDevice::maxJpegBufferSize() const { return capabilities_.maxJpegBufferSize(); } void CameraDevice::setCallbacks(const camera3_callback_ops_t *callbacks) { callbacks_ = callbacks; } const camera_metadata_t *CameraDevice::getStaticMetadata() { return capabilities_.staticMetadata()->getMetadata(); } /* * Produce a metadata pack to be used as template for a capture request. 
*/
const camera_metadata_t *CameraDevice::constructDefaultRequestSettings(int type)
{
	/* Return the cached template if this type was already built. */
	auto it = requestTemplates_.find(type);
	if (it != requestTemplates_.end())
		return it->second->getMetadata();

	/* Use the capture intent matching the requested template type. */
	std::unique_ptr<CameraMetadata> requestTemplate;
	uint8_t captureIntent;
	switch (type) {
	case CAMERA3_TEMPLATE_PREVIEW:
		captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
		requestTemplate = capabilities_.requestTemplatePreview();
		break;
	case CAMERA3_TEMPLATE_STILL_CAPTURE:
		/*
		 * Use the preview template for still capture, they only differ
		 * for the torch mode we currently do not support.
		 */
		captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
		requestTemplate = capabilities_.requestTemplateStill();
		break;
	case CAMERA3_TEMPLATE_VIDEO_RECORD:
		captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
		requestTemplate = capabilities_.requestTemplateVideo();
		break;
	case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
		captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
		requestTemplate = capabilities_.requestTemplateVideo();
		break;
	case CAMERA3_TEMPLATE_MANUAL:
		captureIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
		requestTemplate = capabilities_.requestTemplateManual();
		break;
	/* \todo Implement templates generation for the remaining use cases. */
	case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
	default:
		LOG(HAL, Error) << "Unsupported template request type: " << type;
		return nullptr;
	}

	if (!requestTemplate || !requestTemplate->isValid()) {
		LOG(HAL, Error) << "Failed to construct request template";
		return nullptr;
	}

	requestTemplate->updateEntry(ANDROID_CONTROL_CAPTURE_INTENT,
				     captureIntent);

	requestTemplates_[type] = std::move(requestTemplate);
	return requestTemplates_[type]->getMetadata();
}

/*
 * Inspect the stream_list to produce a list of StreamConfiguration to
 * be used to configure the Camera.
*/ int CameraDevice::configureStreams(camera3_stream_configuration_t *stream_list) { /* Before any configuration attempt, stop the camera. */ stop(); if (stream_list->num_streams == 0) { LOG(HAL, Error) << "No streams in configuration"; return -EINVAL; } #if defined(OS_CHROMEOS) if (!validateCropRotate(*stream_list)) return -EINVAL; #endif /* * Generate an empty configuration, and construct a StreamConfiguration * for each camera3_stream to add to it. */ std::unique_ptr config = camera_->generateConfiguration(); if (!config) { LOG(HAL, Error) << "Failed to generate camera configuration"; return -EINVAL; } /* * Clear and remove any existing configuration from previous calls, and * ensure the required entries are available without further * reallocation. */ streams_.clear(); streams_.reserve(stream_list->num_streams); std::vector streamConfigs; streamConfigs.reserve(stream_list->num_streams); /* First handle all non-MJPEG streams. */ camera3_stream_t *jpegStream = nullptr; for (unsigned int i = 0; i < stream_list->num_streams; ++i) { camera3_stream_t *stream = stream_list->streams[i]; Size size(stream->width, stream->height); PixelFormat format = capabilities_.toPixelFormat(stream->format); LOG(HAL, Info) << "Stream #" << i << ", direction: " << stream->stream_type << ", width: " << stream->width << ", height: " << stream->height << ", format: " << utils::hex(stream->format) << ", rotation: " << rotationToString(stream->rotation) #if defined(OS_CHROMEOS) << ", crop_rotate_scale_degrees: " << rotationToString(stream->crop_rotate_scale_degrees) #endif << " (" << format.toString() << ")"; if (!format.isValid()) return -EINVAL; /* \todo Support rotation. 
*/ if (stream->rotation != CAMERA3_STREAM_ROTATION_0) { LOG(HAL, Error) << "Rotation is not supported"; return -EINVAL; } #if defined(OS_CHROMEOS) if (stream->crop_rotate_scale_degrees != CAMERA3_STREAM_ROTATION_0) { LOG(HAL, Error) << "Rotation is not supported"; return -EINVAL; } #endif /* Defer handling of MJPEG streams until all others are known. */ if (stream->format == HAL_PIXEL_FORMAT_BLOB) { if (jpegStream) { LOG(HAL, Error) << "Multiple JPEG streams are not supported"; return -EINVAL; } jpegStream = stream; continue; } Camera3StreamConfig streamConfig; streamConfig.streams = { { stream, CameraStream::Type::Direct } }; streamConfig.config.size = size; streamConfig.config.pixelFormat = format; streamConfigs.push_back(std::move(streamConfig)); /* This stream will be produced by hardware. */ stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; } /* Now handle the MJPEG streams, adding a new stream if required. */ if (jpegStream) { CameraStream::Type type; int index = -1; /* Search for a compatible stream in the non-JPEG ones. */ for (size_t i = 0; i < streamConfigs.size(); ++i) { Camera3StreamConfig &streamConfig = streamConfigs[i]; const auto &cfg = streamConfig.config; /* * \todo The PixelFormat must also be compatible with * the encoder. */ if (cfg.size.width != jpegStream->width || cfg.size.height != jpegStream->height) continue; LOG(HAL, Info) << "Android JPEG stream mapped to libcamera stream " << i; type = CameraStream::Type::Mapped; index = i; /* * The source stream will be read by software to * produce the JPEG stream. */ camera3_stream_t *stream = streamConfig.streams[0].stream; stream->usage |= GRALLOC_USAGE_SW_READ_OFTEN; break; } /* * Without a compatible match for JPEG encoding we must * introduce a new stream to satisfy the request requirements. */ if (index < 0) { /* * \todo The pixelFormat should be a 'best-fit' choice * and may require a validation cycle. 
This is not yet * handled, and should be considered as part of any * stream configuration reworks. */ Camera3StreamConfig streamConfig; streamConfig.config.size.width = jpegStream->width; streamConfig.config.size.height = jpegStream->height; streamConfig.config.pixelFormat = formats::NV12; streamConfigs.push_back(std::move(streamConfig)); LOG(HAL, Info) << "Adding " << streamConfig.config.toString() << " for MJPEG support"; type = CameraStream::Type::Internal; index = streamConfigs.size() - 1; } /* The JPEG stream will be produced by software. */ jpegStream->usage |= GRALLOC_USAGE_SW_WRITE_OFTEN; streamConfigs[index].streams.push_back({ jpegStream, type }); } sortCamera3StreamConfigs(streamConfigs, jpegStream); for (const auto &streamConfig : streamConfigs) { config->addConfiguration(streamConfig.config); for (auto &stream : streamConfig.streams) { streams_.emplace_back(this, config.get(), stream.type, stream.stream, config->size() - 1); stream.stream->priv = static_cast(&streams_.back()); } } switch (config->validate()) { case CameraConfiguration::Valid: break; case CameraConfiguration::Adjusted: LOG(HAL, Info) << "Camera configuration adjusted"; for (const StreamConfiguration &cfg : *config) LOG(HAL, Info) << " - " << cfg.toString(); return -EINVAL; case CameraConfiguration::Invalid: LOG(HAL, Info) << "Camera configuration invalid"; return -EINVAL; } /* * Once the CameraConfiguration has been adjusted/validated * it can be applied to the camera. */ int ret = camera_->configure(config.get()); if (ret) { LOG(HAL, Error) << "Failed to configure camera '" << camera_->id() << "'"; return ret; } /* * Configure the HAL CameraStream instances using the associated * StreamConfiguration and set the number of required buffers in * the Android camera3_stream_t. 
*/ for (CameraStream &cameraStream : streams_) { ret = cameraStream.configure(); if (ret) { LOG(HAL, Error) << "Failed to configure camera stream"; return ret; } } config_ = std::move(config); return 0; } std::unique_ptr CameraDevice::createFrameBuffer(const buffer_handle_t camera3buffer, PixelFormat pixelFormat, const Size &size) { CameraB for ct in sorted(set(list_col)): Cam.log += '\nColour temperature: {} K'.format(ct) """ average tables for the same colour temperature """ indices = np.where(list_col == ct) ct = int(ct) t_r = np.mean(list_cr[indices], axis=0) t_b = np.mean(list_cb[indices], axis=0) """ force numbers to be stored to 3dp.... :( """ t_r = np.where((100*t_r) % 1 <= 0.05, t_r+0.001, t_r) t_b = np.where((100*t_b) % 1 <= 0.05, t_b+0.001, t_b) t_r = np.where((100*t_r) % 1 >= 0.95, t_r-0.001, t_r) t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b) t_r = np.round(t_r, 3) t_b = np.round(t_b, 3) r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16]) b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16]) r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8] r_cen = round(r_cen/4, 3) b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8] b_cen = round(b_cen/4, 3) Cam.log += '\nRed table corners: {}'.format(r_corners) Cam.log += '\nRed table centre: {}'.format(r_cen) Cam.log += '\nBlue table corners: {}'.format(b_corners) Cam.log += '\nBlue table centre: {}'.format(b_cen) cr_dict = { 'ct': ct, 'table': list(t_r) } cb_dict = { 'ct': ct, 'table': list(t_b) } cal_cr_list.append(cr_dict) cal_cb_list.append(cb_dict) Cam.log += '\n' else: cal_cr_list, cal_cb_list = None, None """ average all values for luminance shading and return one table for all temperatures """ lum_lut = np.mean(list_cg, axis=0) lum_lut = np.where((100*lum_lut) % 1 <= 0.05, lum_lut+0.001, lum_lut) lum_lut = np.where((100*lum_lut) % 1 >= 0.95, lum_lut-0.001, lum_lut) lum_lut = list(np.round(lum_lut, 3)) """ calculate average corner for lsc gain calculation further on """ corners = (lum_lut[0], 
lum_lut[15], lum_lut[-1], lum_lut[-16])
    Cam.log += '\nLuminance table corners: {}'.format(corners)
    # Centre value: mean of the 4 central cells of the 16x12 grid.
    l_cen = lum_lut[5*16+7]+lum_lut[5*16+8]+lum_lut[6*16+7]+lum_lut[6*16+8]
    l_cen = round(l_cen/4, 3)
    Cam.log += '\nLuminance table centre: {}'.format(l_cen)
    # Average corner value, used later for the lsc gain calculation.
    av_corn = np.sum(corners)/4

    return cal_cr_list, cal_cb_list, lum_lut, av_corn


# NOTE(review): the grid produced below is 16x12, not 32x32; the comment text
# appears to predate a grid-size change.
"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
def alsc(Cam, Img, do_alsc_colour, plot=False):
    """
    Compute per-cell shading ratios for a single image.

    Returns (Img.col, cr, cb, cg, (w, h, dx, dy)) where cr/cb are the
    flattened g/r and g/b ratio grids (None when do_alsc_colour is False)
    and cg is the flattened normalised inverse-green (luminance) grid.
    """
    Cam.log += '\nProcessing image: ' + Img.name
    """
    get channel in correct order
    """
    channels = [Img.channels[i] for i in Img.order]
    """
    calculate size of single rectangle.
    -(-(w-1)//32) is a ceiling division. w-1 is to deal robustly with the case
    where w is a multiple of 32.
    """
    # NOTE(review): the comment above says //32 but the code uses //16 and
    # //12 (16x12 grid) — the comment looks stale.
    w, h = Img.w/2, Img.h/2
    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
    """
    average the green channels into one
    """
    # NOTE(review): channels[1:2] selects a single green plane, making the
    # mean a no-op; if both greens (indices 1 and 2) were intended this
    # should be channels[1:3] — confirm against the Bayer order in Img.order.
    av_ch_g = np.mean((channels[1:2]), axis=0)
    if do_alsc_colour:
        """
        obtain 16x12 grid of intensities for each channel and subtract black level
        """
        g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
        r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
        b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
        """
        calculate ratios as 32 bit in order to be supported by medianBlur function
        """
        cr = np.reshape(g/r, (12, 16)).astype('float32')
        cb = np.reshape(g/b, (12, 16)).astype('float32')
        cg = np.reshape(1/g, (12, 16)).astype('float32')
        """
        median blur to remove peaks and save as float 64
        """
        cr = cv2.medianBlur(cr, 3).astype('float64')
        cb = cv2.medianBlur(cb, 3).astype('float64')
        cg = cv2.medianBlur(cg, 3).astype('float64')
        # Normalise the luminance grid so its minimum gain is 1.
        cg = cg/np.min(cg)

        """
        debugging code showing 2D surface plot of vignetting.
Quite useful for for sanity check """ if plot: hf = plt.figure(figsize=(8, 8)) ha = hf.add_subplot(311, projection='3d') """ note Y is plotted as -Y so plot has same axes as image """ X, Y = np.meshgrid(range(16), range(12)) ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0) ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str)) hb = hf.add_subplot(312, projection='3d') hb.plot_surface(X, -Y, cb, cmap=cm.coolwarm, linewidth=0) hb.set_title('cb') hc = hf.add_subplot(313, projection='3d') hc.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0) hc.set_title('g') # print(Img.str) plt.show() return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy) else: """ only perform calculations for luminance shading """ g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16 cg = np.reshape(1/g, (12, 16)).astype('float32') cg = cv2.medianBlur(cg, 3).astype('float64') cg = cg/np.min(cg) if plot: hf = plt.figure(figssize=(8, 8)) ha = hf.add_subplot(1, 1, 1, projection='3d') X, Y = np.meashgrid(range(16), range(12)) ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0) ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg').format(Img.str) plt.show() return Img.col, None, None, cg.flatten(), (w, h, dx, dy) """ Compresses channel down to a 16x12 grid """ def get_16x12_grid(chan, dx, dy): grid = [] """ since left and bottom border will not necessarily have rectangles of dimension dx x dy, the 32nd iteration has to be handled separately. 
""" for i in range(11): for j in range(15): grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)])) grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:])) for j in range(15): grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)])) grid.append(np.mean(chan[11*dy:, 15*dx:])) """ return as np.array, ready for further manipulation """ return np.array(grid) """ obtains sigmas for red and blue, effectively a measure of the 'error' """ def get_sigma(Cam, cal_cr_list, cal_cb_list): Cam.log += '\nCalculating sigmas' """ provided colour alsc tables were generated for two different colour temperatures sigma is calculated by comparing two calibration temperatures adjacent in colour space """ """ create list of colour temperatures """ cts = [cal['ct'] for cal in cal_cr_list] # print(cts) """ calculate sigmas for each adjacent cts and return worst one """ sigma_rs = [] sigma_bs = [] for i in range(len(cts)-1): sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'])) sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'])) Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1]) Cam.log += '\nSigma red: {}'.format(sigma_rs[-1]) Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1]) """ return maximum sigmas, not necessarily from the same colour temperature interval """ sigma_r = max(sigma_rs) if sigma_rs else 0.005 sigma_b = max(sigma_bs) if sigma_bs else 0.005 Cam.log += '\nMaximum sigmas: Red = {} Blue = {}'.format(sigma_r, sigma_b) # print(sigma_rs, sigma_bs) # print(sigma_r, sigma_b) return sigma_r, sigma_b """ calculate sigma from two adjacent gain tables """ def calc_sigma(g1, g2): """ reshape into 16x12 matrix """ g1 = np.reshape(g1, (12, 16)) g2 = np.reshape(g2, (12, 16)) """ apply gains to gain table """ gg = g1/g2 if np.mean(gg) < 1: gg = 1/gg """ for each internal patch, compute average difference between it and its 4 neighbours, then append to list """ diffs = [] for i in range(10): for j in range(14): """ note 
indexing is incremented by 1 since all patches on borders are not
            counted
            """
            # Mean absolute difference between the patch and its 4-neighbours
            # (up, down, left, right).
            diff = np.abs(gg[i+1][j+1]-gg[i][j+1])
            diff += np.abs(gg[i+1][j+1]-gg[i+2][j+1])
            diff += np.abs(gg[i+1][j+1]-gg[i+1][j])
            diff += np.abs(gg[i+1][j+1]-gg[i+1][j+2])
            diffs.append(diff/4)

    """
    return mean difference
    """
    # Overall sigma: mean of the per-patch neighbour differences, rounded
    # to 5 decimal places.
    mean_diff = np.mean(diffs)
    return(np.round(mean_diff, 5))