path: root/utils/raspberrypi/ctt/ctt_tools.py
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool miscellaneous

import time
import re
import binascii
import os
import cv2
import numpy as np
import imutils
import sys
import matplotlib.pyplot as plt
from sklearn import cluster as cluster
from sklearn.neighbors import NearestCentroid as get_centroids

"""
This file contains some useful tools, the details of which aren't important to
understanding of the code. They ar collated here to attempt to improve code
readability in the main files.
"""


"""
obtain config values, unless it doesnt exist, in which case pick default
Furthermore, it can check if the input is the correct type
"""
def get_config(dictt, key, default, ttype):
    try:
        val = dictt[key]
        if ttype == 'string':
            val = str(val)
        elif ttype == 'num':
            if not isinstance(val, (int, float)):
                raise ValueError
        elif ttype == 'dict':
            if not isinstance(val, dict):
                raise ValueError
        elif ttype == 'list':
            if not isinstance(val, list):
                raise ValueError
        elif ttype == 'bool':
            # coerce the value (not the type name) to a canonical 0/1 flag
            val = int(bool(val))
    except (KeyError, ValueError):
        val = default
    return val
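
# A minimal usage sketch for get_config() (values are hypothetical; kept as a
# comment so that importing this module stays side-effect free):
#
#     conf = {'plot': 1, 'alsc': {'do_alsc_colour': 1}}
#     get_config(conf, 'plot', 0, 'num')        # -> 1
#     get_config(conf, 'alsc', {}, 'dict')      # -> {'do_alsc_colour': 1}
#     get_config(conf, 'missing', -1, 'num')    # -> -1 (key absent)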


"""
argument parser
"""
def parse_input():
    arguments = sys.argv[1:]
    if len(arguments) % 2 != 0:
        raise ArgError('\n\nERROR! Enter a value for each argument passed.')
    params = arguments[0::2]
    vals = arguments[1::2]
    args_dict = dict(zip(params, vals))
    json_output = get_config(args_dict, '-o', None, 'string')
    directory = get_config(args_dict, '-i', None, 'string')
    config = get_config(args_dict, '-c', None, 'string')
    log_path = get_config(args_dict, '-l', None, 'string')
    target = get_config(args_dict, '-t', "vc4", 'string')
    if directory is None:
        raise ArgError('\n\nERROR! No input directory given.')
    if json_output is None:
        raise ArgError('\n\nERROR! No output json given.')
    return json_output, directory, config, log_path, target
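
# The parser above expects flag/value pairs on the command line, e.g. (paths
# are hypothetical):
#
#     ctt.py -i photos/ -o tuning.json -c config.json -l ctt_log.txt -t vc4
#
# -i and -o are mandatory; -c, -l and -t fall back to the defaults passed to
# get_config() above ('vc4' for the target).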


"""
custom arg and macbeth error class
"""
class ArgError(Exception):
    pass
class MacbethError(Exception):
    pass


"""
correlation function to quantify match
"""
def correlate(im1, im2):
    f1 = im1.flatten()
    f2 = im2.flatten()
    cor = np.corrcoef(f1, f2)
    return cor[0][1]
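
# Sanity-check sketch (hypothetical arrays): identical images correlate to
# 1.0, and a negated copy to -1.0:
#
#     a = np.arange(12, dtype=np.float64).reshape(3, 4)
#     correlate(a, a)    # -> 1.0
#     correlate(a, -a)   # -> -1.0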


"""
get list of files from directory
"""
def get_photos(directory='photos'):
    filename_list = []
    for filename in os.listdir(directory):
        # accept JPEG and DNG images (case-insensitive extension check)
        if filename.lower().endswith(('.jpg', '.jpeg', '.dng')):
            filename_list.append(filename)
    return filename_list
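
# e.g. (hypothetical directory contents):
#
#     get_photos('photos')  # -> ['alsc_3000k.dng', 'macbeth_3000k.jpg']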


"""
display image for debugging... read at your own risk...
"""
def represent(img, name='image'):
    # if type(img) == tuple or type(img) == list:
    #     for i in range(len(img)):
    #         name = 'image {}'.format(i)
    #         cv2.imshow(name, img[i])
    # else:
    #     cv2.imshow(name, img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # return 0
    """
    code above displays using opencv, but this doesn't catch users pressing 'x'
    with their mouse to close the window....  therefore matplotlib is used....
    (thanks a lot opencv)
    """
    grid = plt.GridSpec(22, 1)
    plt.subplot(grid[:19, 0])
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.subplot(grid[21, 0])
    plt.title('press \'q\' to continue')
    plt.axis('off')
    plt.show()


"""
reshape image to fixed width without distorting
returns image and scale factor
"""
def reshape(img, width):
    factor = width/img.shape[0]
    return cv2.resize(img, None, fx=factor, fy=factor), factor
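
# For instance (hypothetical sizes): an image of shape (1000, 2000) passed
# with size 500 comes back as (500, 1000) with factor 0.5, so coordinates
# measured on the rescaled image map back via division by the factor:
#
#     small, factor = reshape(img, 500)
#     x_original = x_small / factor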