/* SPDX-License-Identifier: BSD-2-Clause */ /* * Copyright (C) 2019, Raspberry Pi (Trading) Limited * * agc.hpp - AGC/AEC control algorithm */ #pragma once #include #include #include #include "../agc_algorithm.hpp" #include "../agc_status.h" #include "../pwl.hpp" // This is our implementation of AGC. // This is the number actually set up by the firmware, not the maximum possible // number (which is 16). #define AGC_STATS_SIZE 15 namespace RPiController { using namespace std::literals::chrono_literals; struct AgcMeteringMode { double weights[AGC_STATS_SIZE]; void Read(boost::property_tree::ptree const ¶ms); }; struct AgcExposureMode { std::vector shutter; std::vector gain; void Read(boost::property_tree::ptree const ¶ms); }; struct AgcConstraint { enum class Bound { LOWER = 0, UPPER = 1 }; Bound bound; double q_lo; double q_hi; Pwl Y_target; void Read(boost::property_tree::ptree const ¶ms); }; typedef std::vector AgcConstraintMode; struct AgcConfig { void Read(boost::property_tree::ptree const ¶ms); std::map metering_modes; std::map exposure_modes; std::map constraint_modes; Pwl Y_target; double speed; uint16_t startup_frames; unsigned int convergence_frames; double max_change; double min_change; double fast_reduce_threshold; double speed_up_threshold; std::string default_metering_mode; std::string default_exposure_mode; std::string default_constraint_mode; double base_ev; libcamera::utils::Duration default_exposure_time; double default_analogue_gain; }; class Agc : public AgcAlgorithm { public: Agc(Controller *controller); char const *Name() const override; void Read(boost::property_tree::ptree const ¶ms) override; // AGC handles "pausing" for itself. 
bool IsPaused() const override; void Pause() override; void Resume() override; unsigned int GetConvergenceFrames() const override; void SetEv(double ev) override; void SetFlickerPeriod(libcamera::utils::Duration flicker_period) override; void SetMaxShutter(libcamera::utils::Duration max_shutter) override; void SetFixedShutter(libcamera::utils::Duration fixed_shutter) override; void SetFixedAnalogueGain(double fixed_analogue_gain) override; void SetMeteringMode(std::string const &metering_mode_name) override; void SetExposureMode(std::string const &exposure_mode_name) override; void SetConstraintMode(std::string const &contraint_mode_name) override; void SwitchMode(CameraMode const &camera_mode, Metadata *metadata) override; void Prepare(Metadata *image_metadata) override; void Process(StatisticsPtr &stats, Metadata *image_metadata) override; private: void updateLockStatus(DeviceStatus const &device_status); AgcConfig config_; void housekeepConfig(); void fetchCurrentExposure(Metadata *image_metadata); void fetchAwbStatus(Metadata *image_metadata); void computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata, double &gain, double &target_Y); void computeTargetExposure(double gain); bool applyDigitalGain(double gain, double target_Y); void filterExposure(bool desaturate); void divideUpExposure(); void writeAndFinish(Metadata *image_metadata, bool desaturate); libcamera::utils::Duration clipShutter(libcamera::utils::Duration shutter); AgcMeteringMode *metering_mode_; AgcExposureMode *exposure_mode_; AgcConstraintMode *constraint_mode_; uint64_t frame_count_; AwbStatus awb_; struct ExposureValues { ExposureValues() : shutter(0s), analogue_gain(0), total_exposure(0s), total_exposure_no_dg(0s) {} libcamera::utils::Duration shutter; double analogue_gain; libcamera::utils::Duration total_exposure; libcamera::utils::Duration total_exposure_no_dg; // without digital gain }; ExposureValues current_; // values for the current frame ExposureValues target_; // 
calculate the values we want here ExposureValues filtered_; // these values are filtered towards target AgcStatus status_; int lock_count_; DeviceStatus last_device_status_; libcamera::utils::Duration last_target_exposure_; // Below here the "settings" that applications can change. std::string metering_mode_name_; std::string exposure_mode_name_; std::string constraint_mode_name_; double ev_; libcamera::utils::Duration flicker_period_; libcamera::utils::Duration max_shutter_; libcamera::utils::Duration fixed_shutter_; double fixed_analogue_gain_; }; } // namespace RPiController '>37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi (Trading) Limited
#
# ctt_tools.py - camera tuning tool miscellaneous

import time
import re
import binascii
import os
import cv2
import numpy as np
import imutils
import sys
import matplotlib.pyplot as plt
from sklearn import cluster as cluster
from sklearn.neighbors import NearestCentroid as get_centroids

"""
This file contains some useful tools, the details of which aren't important to
understanding of the code. They are collated here to attempt to improve code
readability in the main files.
"""


"""
obtain config values, unless the key doesn't exist, in which case pick the default
Furthermore, it can check if the input is the correct type
"""
def get_config(dictt, key, default, ttype):
    """
    Fetch dictt[key], falling back to default when the key is missing or the
    value fails the check/conversion requested by ttype.

    ttype may be:
      'string' - coerce the value with str()
      'num'    - value must already be an int or float (numpy variants pass
                 too, since the check is on the type's name)
      'dict'   - value must be a dict
      'list'   - value must be a list
      'bool'   - coerce the value with bool()
    Any other ttype returns the raw value unchanged.
    """
    try:
        val = dictt[key]
        if ttype == 'string':
            val = str(val)
        elif ttype == 'num':
            # Name-based check deliberately accepts numpy int/float types but
            # rejects bool (str(type(True)) contains neither 'int' nor 'float').
            type_name = str(type(val))
            if 'int' not in type_name and 'float' not in type_name:
                raise ValueError
        elif ttype == 'dict':
            if not isinstance(val, dict):
                raise ValueError
        elif ttype == 'list':
            if not isinstance(val, list):
                raise ValueError
        elif ttype == 'bool':
            # Bug fix: the previous code reassigned ttype (int(bool(ttype)))
            # and left val unconverted; coerce the value itself.
            val = bool(val)
    except (KeyError, ValueError):
        val = default
    return val


"""
argument parser
"""
def parse_input():
    """
    Parse the command line as flag/value pairs.

    Recognised flags: -o (output json, required), -i (input directory,
    required), -c (config file), -l (log path).

    Returns (json_output, directory, config, log_path).
    Raises ArgError when a flag is missing its value or a required flag is
    absent.
    """
    arguments = sys.argv[1:]
    if len(arguments) % 2 != 0:
        # Bug fix: corrected "arguent" typo in the user-facing message.
        raise ArgError('\n\nERROR! Enter value for each argument passed.')
    params = arguments[0::2]
    vals = arguments[1::2]
    args_dict = dict(zip(params, vals))
    json_output = get_config(args_dict, '-o', None, 'string')
    directory = get_config(args_dict, '-i', None, 'string')
    config = get_config(args_dict, '-c', None, 'string')
    log_path = get_config(args_dict, '-l', None, 'string')
    if directory is None:
        raise ArgError('\n\nERROR! No input directory given.')
    if json_output is None:
        raise ArgError('\n\nERROR! No output json given.')
    return json_output, directory, config, log_path


"""
custom arg and macbeth error class
"""
class ArgError(Exception):
    """Raised when the command-line arguments are missing or malformed."""
class MacbethError(Exception):
    """Raised when Macbeth chart detection/processing fails."""


"""
correlation function to quantify match
"""
def correlate(im1, im2):
    """Return the Pearson correlation coefficient between two images."""
    flat_a = im1.flatten()
    flat_b = im2.flatten()
    # np.corrcoef returns the 2x2 correlation matrix; the off-diagonal
    # entry is the coefficient between the two inputs.
    return np.corrcoef(flat_a, flat_b)[0][1]


"""
get list of files from directory
"""
def get_photos(directory='photos'):
    """
    Return the image filenames (.jpg/.jpeg/.dng) found in directory.

    Bug fix: the previous substring test ('jp' in filename or '.dng' in
    filename) also picked up unrelated files such as 'notes_jp.txt' or
    'x.dng.bak'; match real extensions (case-insensitively) instead.
    """
    filename_list = []
    for filename in os.listdir(directory):
        if filename.lower().endswith(('.jpg', '.jpeg', '.dng')):
            filename_list.append(filename)
    return filename_list


"""
display image for debugging... read at your own risk...
"""
def represent(img, name='image'):
    """
    Display an image for debugging and block until the window is dismissed.

    matplotlib is used rather than cv2.imshow because opencv cannot detect
    the user closing the window with the mouse, which would leave the tool
    hanging. The `name` parameter is accepted for interface compatibility
    but unused by the matplotlib path.
    """
    # A tall grid leaves a thin strip at the bottom for the instruction text.
    layout = plt.GridSpec(22, 1)
    plt.subplot(layout[:19, 0])
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.subplot(layout[21, 0])
    plt.title('press \'q\' to continue')
    plt.axis('off')
    plt.show()


"""
reshape image to fixed width without distorting
returns image and scale factor
"""
def reshape(img, width):
    """
    Rescale an image uniformly (no distortion) and return
    (resized_image, scale_factor).

    NOTE(review): the factor is computed from img.shape[0], which is the
    row count (height) in numpy/cv2, so it is the new height that becomes
    equal to `width` — confirm this matches callers' expectations.
    """
    scale = width / img.shape[0]
    resized = cv2.resize(img, None, fx=scale, fy=scale)
    return resized, scale