#!/usr/bin/env python3 # # SPDX-License-Identifier: BSD-2-Clause # # Copyright (C) 2019, Raspberry Pi (Trading) Limited # # ctt.py - camera tuning tool import os import sys from ctt_image_load import * from ctt_ccm import * from ctt_awb import * from ctt_alsc import * from ctt_lux import * from ctt_noise import * from ctt_geq import * from ctt_pretty_print_json import * import random import json import re """ This file houses the camera object, which is used to perform the calibrations. The camera object houses all the calibration images as attributes in two lists: - imgs (macbeth charts) - imgs_alsc (alsc correction images) Various calibrations are methods of the camera object, and the output is stored in a dictionary called self.json. Once all the caibration has been completed, the Camera.json is written into a json file. The camera object initialises its json dictionary by reading from a pre-written blank json file. This has been done to avoid reproducing the entire json file in the code here, thereby avoiding unecessary clutter. """ """ Get the colour and lux values from the strings of each inidvidual image """ def get_col_lux(string): """ Extract colour and lux values from filename """ col = re.search(r'([0-9]+)[kK](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string) lux = re.search(r'([0-9]+)[lL](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string) try: col = col.group(1) except AttributeError: """ Catch error if images labelled incorrectly and pass reasonable defaults """ return None, None try: lux = lux.group(1) except AttributeError: """ Catch error if images labelled incorrectly and pass reasonable defaults Still returns colour if that has been found. """ return col, None return int(col), int(lux) """ Camera object that is the backbone of the tuning tool. Input is the desired path of the output json. """ class Camera: def __init__(self, jfile): self.path = os.path.dirname(os.path.expanduser(__file__)) + '/' if self.path == '/': self.path = '' self.imgs = [] self.imgs_alsc = [] self.log = 'Log created : ' + time.asctime(time.localtime(time.time())) self.log_separator = '\n'+'-'*70+'\n' self.jf = jfile """ initial json dict populated by uncalibrated values """ self.json = { "rpi.black_level": { "black_level": 4096 }, "rpi.dpc": { }, "rpi.lux": { "reference_shutter_speed": 10000, "reference_gain": 1, "reference_aperture": 1.0 }, "rpi.noise": { }, "rpi.geq": { }, "rpi.sdn": { }, "rpi.awb": { "priors": [ {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]}, {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]}, {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]} ], "modes": { "auto": {"lo": 2500, "hi": 8000}, "incandescent": {"lo": 2500, "hi": 3000}, "tungsten": {"lo": 3000, "hi": 3500}, "fluorescent": {"lo": 4000, "hi": 4700}, "indoor": {"lo": 3000, "hi": 5000}, "daylight": {"lo": 5500, "hi": 6500}, "cloudy": {"lo": 7000, "hi": 8600} }, "bayes": 1 }, "rpi.agc": { "metering_modes": { "centre-weighted": { "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0] }, "spot": { "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] }, "matrix": { "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] } }, "exposure_modes": { "normal": { "shutter": [100, 10000, 30000, 60000, 120000], "gain": [1.0, 2.0, 4.0, 6.0, 6.0] }, "short": { "shutter": [100, 5000, 10000, 20000, 120000], "gain": [1.0, 2.0, 4.0, 6.0, 6.0] } }, "constraint_modes": { "normal": [ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]} ], "highlight": [ {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}, {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]} ] }, "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17] }, "rpi.alsc": { 'omega': 1.3, 'n_iter': 100, 'luminance_strength': 0.7, }, "rpi.contrast": { "ce_enable": 1, "gamma_curve": [ 0, 0, 1024, 5040, 2048, 9338, 3072, 12356, 4096, 15312, 5120, 18051, 6144, 20790, 7168, 23193, 8192, 25744, 9216, 27942, 10240, 30035, 11264, 32005, 12288, 33975, 13312, 35815, 14336, 37600, 15360, 39168, 16384, 40642, 18432, 43379, 20480, 45749, 22528, 47753, 24576, 49621, 26624, 51253, 28672, 52698, 30720, 53796, 32768, 54876, 36864, 57012, 40960, 58656, 45056, 59954, 49152, 61183, 53248, 62355, 57344, 63419, 61440, 64476, 65535, 65535 ] }, "rpi.ccm": { }, "rpi.sharpen": { } } """ Perform colour correction calibrations by comparing macbeth patch colours to standard macbeth chart colours. """ def ccm_cal(self, do_alsc_colour): if 'rpi.ccm' in self.disable: return 1 print('\nStarting CCM calibration') self.log_new_sec('CCM') """ if image is greyscale then CCm makes no sense """ if self.grey: print('\nERROR: Can\'t do CCM on greyscale image!') self.log += '\nERROR: Cannot perform CCM calibration ' self.log += 'on greyscale image!\nCCM aborted!' del self.json['rpi.ccm'] return 0 a = time.time() """ Check if alsc tables have been generated, if not then do ccm without alsc """ if ("rpi.alsc" not in self.disable) and do_alsc_colour: """ case where ALSC colour has been done, so no errors should be expected... """ try: cal_cr_list = self.json['rpi.alsc']['calibrations_Cr'] cal_cb_list = self.json['rpi.alsc']['calibrations_Cb'] self.log += '\nALSC tables found successfully' except KeyError: cal_cr_list, cal_cb_list = None, None print('WARNING! No ALSC tables found for CCM!') print('Performing CCM calibrations without ALSC correction...') self.log += '\nWARNING: No ALSC tables found.\nCCM calibration ' self.log += 'performed without ALSC correction...' else: """ case where config options result in CCM done without ALSC colour tables """ cal_cr_list, cal_cb_list = None, None self.log += '\nWARNING: No ALSC tables found.\nCCM calibration ' self.log += 'performed without ALSC correction...' """ Do CCM calibration """ try: ccms = ccm(self, cal_cr_list, cal_cb_list) except ArithmeticError: print('ERROR: Matrix is singular!\nTake new pictures and try again...') self.log += '\nERROR: Singular matrix encountered during fit!' self.log += '\nCCM aborted!' return 1 """ Write output to json """ self.json['rpi.ccm']['ccms'] = ccms self.log += '\nCCM calibration written to json file' print('Finished CCM calibration') """ Auto white balance calibration produces a colour curve for various colour temperatures, as well as providing a maximum 'wiggle room' distance from this curve (transverse_neg/pos). """ def awb_cal(self, greyworld, do_alsc_colour): if 'rpi.awb' in self.disable: return 1 print('\nStarting AWB calibration') self.log_new_sec('AWB') """ if image is greyscale then AWB makes no sense """ if self.grey: print('\nERROR: Can\'t do AWB on greyscale image!') self.log += '\nERROR: Cannot perform AWB calibration ' self.log += 'on greyscale image!\nAWB aborted!' del self.json['rpi.awb'] return 0 """ optional set greyworld (e.g. for noir cameras) """ if greyworld: self.json['rpi.awb']['bayes'] = 0 self.log += '\nGreyworld set' """ Check if alsc tables have been generated, if not then do awb without alsc correction """ if ("rpi.alsc" not in self.disable) and do_alsc_colour: try: cal_cr_list = self.json['rpi.alsc']['calibrations_Cr'] cal_cb_list = self.json['rpi.alsc']['calibrations_Cb'] self.log += '\nALSC tables found successfully' except KeyError: cal_cr_list, cal_cb_list = None, None print('ERROR, no ALSC calibrations found for AWB') print('Performing AWB without ALSC tables') self.log += '\nWARNING: No ALSC tables found.\nAWB calibration ' self.log += 'performed without ALSC correction...' else: cal_cr_list, cal_cb_list = None, None self.log += '\nWARNING: No ALSC tables found.\nAWB calibration ' self.log += 'performed without ALSC correction...' """ call calibration function """ plot = "rpi.awb" in self.plot awb_out = awb(self, cal_cr_list, cal_cb_list, plot) ct_curve, transverse_neg, transverse_pos = awb_out """ write output to json """ self.json['rpi.awb']['ct_curve'] = ct_curve self.json['rpi.awb']['sensitivity_r'] = 1.0 self.json['rpi.awb']['sensitivity_b'] = 1.0 self.json['rpi.awb']['transverse_pos'] = transverse_pos self.json['rpi.awb']['transverse_neg'] = transverse_neg self.log += '\nAWB calibration written to json file' print('Finished AWB calibration') """ Auto lens shading correction completely mitigates the effects of lens shading for ech colour channel seperately, and then partially corrects for vignetting. The extent of the correction depends on the 'luminance_strength' parameter. """ def alsc_cal(self, luminance_strength, do_alsc_colour): if 'rpi.alsc' in self.disable: return 1 print('\nStarting ALSC calibration') self.log_new_sec('ALSC') """ check if alsc images have been taken """ if len(self.imgs_alsc) == 0: print('\nError:\nNo alsc calibration images found') self.log += '\nERROR: No ALSC calibration images found!' self.log += '\nALSC calibration aborted!' return 1 self.json['rpi.alsc']['luminance_strength'] = luminance_strength if self.grey and do_alsc_colour: print('Greyscale camera so only luminance_lut calculated') do_alsc_colour = False self.log += '\nWARNING: ALSC colour correction cannot be done on ' self.log += 'greyscale image!\nALSC colour corrections forced off!' """ call calibration function """ plot = "rpi.alsc" in self.plot alsc_out = alsc_all(self, do_alsc_colour, plot) cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out """ write ouput to json and finish if not do_alsc_colour """ if not do_alsc_colour: self.json['rpi.alsc']['luminance_lut'] = luminance_lut self.json['rpi.alsc']['n_iter'] = 0 self.log += '\nALSC calibrations written to json file' self.log += '\nNo colour calibrations performed' print('Finished ALSC calibrations') return 1 self.json['rpi.alsc']['calibrations_Cr'] = cal_cr_list self.json['rpi.alsc']['calibrations_Cb'] = cal_cb_list self.json['rpi.alsc']['luminance_lut'] = luminance_lut self.log += '\nALSC colour and luminance tables written to json file' """ The sigmas determine the strength of the adaptive algorithm, that cleans up any lens shading that has slipped through the alsc. These are determined by measuring a 'worst-case' difference between two alsc tables that are adjacent in colour space. If, however, only one colour temperature has been provided, then this difference can not be computed as only one table is available. To determine the sigmas you would have to estimate the error of an alsc table with only the image it was taken on as a check. To avoid circularity, dfault exaggerated sigmas are used, which can result in too much alsc and is therefore not advised. In general, just take another alsc picture at another colour temperature! """ if len(self.imgs_alsc) == 1: self.json['rpi.alsc']['sigma'] = 0.005 self.json['rpi.alsc']['sigma_Cb'] = 0.005 print('\nWarning:\nOnly one alsc calibration found' '\nStandard sigmas used for adaptive algorithm.') print('Finished ALSC calibrations') self.log += '\nWARNING: Only one colour temperature found in ' self.log += 'calibration images.\nStandard sigmas used for adaptive ' self.log += 'algorithm!' return 1 """ obtain worst-case scenario residual sigmas """ sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list) """ write output to json """ self.json['rpi.alsc']['sigma'] = np.round(sigma_r, 5) self.json['rpi.alsc']['sigma_Cb'] = np.round(sigma_b, 5) self.log += '\nCalibrated sigmas written to json file' print('Finished ALSC calibrations') """ Green equalisation fixes problems caused by discrepancies in green channels. This is done by measuring the effect on macbeth chart patches, which ideally would have the same green values throughout. An upper bound linear model is fit, fixing a threshold for the green differences that are corrected. """ def geq_cal(self): if 'rpi.geq' in self.disable: return 1 print('\nStarting GEQ calibrations') self.log_new_sec('GEQ') """ perform calibration """ plot = 'rpi.geq' in self.plot slope, offset = geq_fit(self, plot) """ write output to json """ self.json['rpi.geq']['offset'] = offset self.json['rpi.geq']['slope'] = slope self.log += '\nGEQ calibrations written to json file' print('Finished GEQ calibrations') """ Lux calibrations allow the lux level of a scene to be estimated by a ratio calculation. Lux values are used in the pipeline for algorithms such as AGC and AWB """ def lux_cal(self): if 'rpi.lux' in self.disable: return 1 print('\nStarting LUX calibrations') self.log_new_sec('LUX') """ The lux calibration is done on a single image. For best effects, the image with lux level closest to 1000 is chosen. """ luxes = [Img.lux for Img in self.imgs] argmax = luxes.index(min(luxes, key=lambda l: abs(1000-l))) Img = self.imgs[argmax] self.log += '\nLux found closest to 1000: {} lx'.format(Img.lux) self.log += '\nImage used: ' + Img.name if Img.lux < 50: self.log += '\nWARNING: Low lux could cause inaccurate calibrations!' """ do calibration """ lux_out, shutter_speed, gain = lux(self, Img) """ write output to json """ self.json['rpi.lux']['reference_shutter_speed'] = shutter_speed self.json['rpi.lux']['reference_gain'] = gain self.json['rpi.lux']['reference_lux'] = Img.lux self.json['rpi.lux']['reference_Y'] = lux_out self.log += '\nLUX calibrations written to json file' print('Finished LUX calibrations') """ Noise alibration attempts to describe the noise profile of the sensor. The calibration is run on macbeth images and the final output is taken as the average """ def noise_cal(self): if 'rpi.noise' in self.disable: return 1 print('\nStarting NOISE calibrations') self.log_new_sec('NOISE') """ run calibration on all images and sort by slope. """ plot = "rpi.noise" in self.plot noise_out = sorted([noise(self, Img, plot) for Img in self.imgs], key=lambda x: x[0]) self.log += '\nFinished processing images' """ take the average of the interquartile """ length = len(noise_out) noise_out = np.mean(noise_out[length//4:1+3*length//4], axis=0) self.log += '\nAverage noise profile: constant = {} '.format(int(noise_out[1])) self.log += 'slope = {:.3f}'.format(noise_out[0]) """ write to json """ self.json['rpi.noise']['reference_constant'] = int(noise_out[1]) self.json['rpi.noise']['reference_slope'] = round(noise_out[0], 3) self.log += '\nNOISE calibrations written to json' print('Finished NOISE calibrations') """ Removes json entries that are turned off """ def json_remove(self, disable): self.log_new_sec('Disabling Options', cal=False) if len(self.disable) == 0: self.log += '\nNothing disabled!' return 1 for key in disable: try: del self.json[key] self.log += '\nDisabled: ' + key except KeyError: self.log += '\nERROR: ' + key + ' not found!' """ writes the json dictionary to the raw json file then make pretty """ def write_json(self): """ Write json dictionary to file """ jstring = json.dumps(self.json, sort_keys=False) """ make it pretty :) """ pretty_print_json(jstring, self.jf) """ add a new section to the log file """ def log_new_sec(self, section, cal=True): self.log += '\n'+self.log_separator self.log += section if cal: self.log += ' Calibration' self.log += self.log_separator """ write script arguments to log file """ def log_user_input(self, json_output, directory, config, log_output): self.log_new_sec('User Arguments', cal=False) self.log += '\nJson file output: ' + json_output self.log += '\nCalibration images directory: ' + directory if config is None: self.log += '\nNo configuration file input... using default options' elif config is False: self.log += '\nWARNING: Invalid configuration file path...' self.log += ' using default options' elif config is True: self.log += '\nWARNING: Invalid syntax in configuration file...' self.log += ' using default options' else: self.log += '\nConfiguration file: ' + config if log_output is None: self.log += '\nNo log file path input... using default: ctt_log.txt' else: self.log += '\nLog file output: ' + log_output # if log_output """ write log file """ def write_log(self, filename): if filename is None: filename = 'ctt_log.txt' self.log += '\n' + self.log_separator with open(filename, 'w') as logfile: logfile.write(self.log) """ Add all images from directory, pass into relevant list of images and extrace lux and temperature values. """ def add_imgs(self, directory, mac_config, blacklevel=-1): self.log_new_sec('Image Loading', cal=False) img_suc_msg = 'Image loaded successfully!' print('\n\nLoading images from '+directory) self.log += '\nDirectory: ' + directory """ get list of files """ filename_list = get_photos(directory) print("Files found: {}".format(len(filename_list))) self.log += '\nFiles found: {}'.format(len(filename_list)) """ iterate over files """ filename_list.sort() for filename in filename_list: address = directory + filename print('\nLoading image: '+filename) self.log += '\n\nImage: ' + filename """ obtain colour and lux value """ col, lux = get_col_lux(filename) """ Check if image is an alsc calibration image """ if 'alsc' in filename: Img = load_image(self, address, mac=False) self.log += '\nIdentified as an ALSC image' """ check if imagae data has been successfully unpacked """ if Img == 0: print('\nDISCARDED') self.log += '\nImage discarded!' continue """ check that image colour temperature has been successfuly obtained """ elif col is not None: """ if successful, append to list and continue to next image """ Img.col = col Img.name = filename self.log += '\nColour temperature: {} K'.format(col) self.imgs_alsc.append(Img) if blacklevel != -1: Img.blacklevel_16 = blacklevel print(img_suc_msg) continue else: print('Error! No colour temperature found!') self.log += '\nWARNING: Error reading colour temperature' self.log += '\nImage discarded!' print('DISCARDED') else: self.log += '\nIdentified as macbeth chart image' """ if image isn't an alsc correction then it must have a lux and a colour temperature value to be useful """ if lux is None: print('DISCARDED') self.log += '\nWARNING: Error reading lux value' self.log += '\nImage discarded!' continue Img = load_image(self, address, mac_config) """ check that image data has been successfuly unpacked """ if Img == 0: print('DISCARDED') self.log += '\nImage discarded!' continue else: """ if successful, append to list and continue to next image """ Img.col, Img.lux = col, lux Img.name = filename self.log += '\nColour temperature: {} K'.format(col) self.log += '\nLux value: {} lx'.format(lux) if blacklevel != -1: Img.blacklevel_16 = blacklevel print(img_suc_msg) self.imgs.append(Img) print('\nFinished loading images') """ Check that usable images have been found Possible errors include: - no macbeth chart - incorrect filename/extension - images from different cameras """ def check_imgs(self): self.log += '\n\nImages found:' self.log += '\nMacbeth : {}'.format(len(self.imgs)) self.log += '\nALSC : {} '.format(len(self.imgs_alsc)) self.log += '\n\nCamera metadata' """ check usable images found """ if len(self.imgs) == 0: print('\nERROR: No usable macbeth chart images found') self.log += '\nERROR: No usable macbeth chart images found' return 0 """ Double check that every image has come from the same camera... """ all_imgs = self.imgs + self.imgs_alsc camNames = list(set([Img.camName for Img in all_imgs])) patterns = list(set([Img.pattern for Img in all_imgs])) sigbitss = list(set([Img.sigbits for Img in all_imgs])) blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs])) sizes = list(set([(Img.w, Img.h) for Img in all_imgs])) if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \ len(blacklevels) == 1 and len(sizes) == 1: self.grey = (patterns[0] == 128) self.blacklevel_16 = blacklevels[0] self.log += '\nName: {}'.format(camNames[0]) self.log += '\nBayer pattern case: {}'.format(patterns[0]) if self.grey: self.log += '\nGreyscale camera identified' self.log += '\nSignificant bits: {}'.format(sigbitss[0]) self.log += '\nBlacklevel: {}'.format(blacklevels[0]) self.log += '\nImage size: w = {} h = {}'.format(sizes[0][0], sizes[0][1]) return 1 else: print('\nERROR: Images from different cameras') self.log += '\nERROR: Images are from different cameras' return 0 def run_ctt(json_output, directory, config, log_output): """ check input files are jsons """ if json_output[-5:] != '.json': raise ArgError('\n\nError: Output must be a json file!') if config is not None: """ check if config file is actually a json """ if config[-5:] != '.json': raise ArgError('\n\nError: Config file must be a json file!') """ read configurations """ try: with open(config, 'r') as config_json: configs = json.load(config_json) except FileNotFoundError: configs = {} config = False except json.decoder.JSONDecodeError: configs = {} config = True else: configs = {} """ load configurations from config file, if not given then set default """ disable = get_config(configs, "disable", [], 'list') plot = get_config(configs, "plot", [], 'list') awb_d = get_config(configs, "awb", {}, 'dict') greyworld = get_config(awb_d, "greyworld", 0, 'bool') alsc_d = get_config(configs, "alsc", {}, 'dict') do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool') luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num') blacklevel = get_config(configs, "blacklevel", -1, 'num') macbeth_d = get_config(configs, "macbeth", {}, 'dict') mac_small = get_config(macbeth_d, "small", 0, 'bool') mac_show = get_config(macbeth_d, "show", 0, 'bool') mac_config = (mac_small, mac_show) if blacklevel < -1 or blacklevel >= 2**16: print('\nInvalid blacklevel, defaulted to 64') blacklevel = -1 if luminance_strength < 0 or luminance_strength > 1: print('\nInvalid luminance_strength strength, defaulted to 0.5') luminance_strength = 0.5 """ sanitise directory path """ if directory[-1] != '/': directory += '/' """ initialise tuning tool and load images """ try: Cam = Camera(json_output) Cam.log_user_input(json_output, directory, config, log_output) Cam.disable = disable Cam.plot = plot Cam.add_imgs(directory, mac_config, blacklevel) except FileNotFoundError: raise ArgError('\n\nError: Input image directory not found!') """ preform calibrations as long as check_imgs returns True If alsc is activated then it must be done before awb and ccm since the alsc tables are used in awb and ccm calibrations ccm also technically does an awb but it measures this from the macbeth chart in the image rather than using calibration data """ if Cam.check_imgs(): Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16 Cam.json_remove(disable) print('\nSTARTING CALIBRATIONS') Cam.alsc_cal(luminance_strength, do_alsc_colour) Cam.geq_cal() Cam.lux_cal() Cam.noise_cal() Cam.awb_cal(greyworld, do_alsc_colour) Cam.ccm_cal(do_alsc_colour) print('\nFINISHED CALIBRATIONS') Cam.write_json() Cam.write_log(log_output) print('\nCalibrations written to: '+json_output) if log_output is None: log_output = 'ctt_log.txt' print('Log file written to: '+log_output) pass else: Cam.write_log(log_output) if __name__ == '__main__': """ initialise calibration """ if len(sys.argv) == 1: print(""" Pisp Camera Tuning Tool version 1.0 Required Arguments: '-i' : Calibration image directory. '-o' : Name of output json file. Optional Arguments: '-c' : Config file for the CTT. If not passed, default parameters used. '-l' : Name of output log file. If not passed, 'ctt_log.txt' used. """) quit(0) else: """ parse input arguments """ json_output, directory, config, log_output = parse_input() run_ctt(json_output, directory, config, log_output) 6' href='#n836'>836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* camera.cpp - Camera device
*/
#include <libcamera/camera.h>
#include <array>
#include <atomic>
#include <iomanip>
#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/pipeline_handler.h"
/**
* \file camera.h
* \brief Camera device handling
*
* \page camera-model Camera Model
*
* libcamera acts as a middleware between applications and camera hardware. It
* provides a solution to an unsolvable problem: reconciling applications,
* which need to run on different systems without dealing with device-specific
* details, and camera hardware, which exhibits a wide variety of features,
* limitations and architecture variations. In order to do so, it creates an
* abstract camera model that hides the camera hardware from applications. The
* model is designed to strike the right balance between genericity, to please
* generic applications, and flexibility, to expose even the most specific
* hardware features to the most demanding applications.
*
* In libcamera, a Camera is defined as a device that can capture frames
* continuously from a camera sensor and store them in memory. If supported by
* the device and desired by the application, the camera may store each
* captured frame in multiple copies, possibly in different formats and sizes.
* Each of these memory outputs of the camera is called a Stream.
*
* A camera contains a single image source, and separate camera instances
* relate to different image sources. For instance, a phone containing front
* and back image sensors will be modelled with two cameras, one for each
* sensor. When multiple streams can be produced from the same image source,
* all those streams are guaranteed to be part of the same camera.
*
* While not sharing image sources, separate cameras can share other system
* resources, such as ISPs. For this reason camera instances may not be fully
* independent, in which case usage restrictions may apply. For instance, a
* phone with a front and a back camera may not allow usage of the two cameras
* simultaneously.
*
* The camera model defines an implicit pipeline, whose input is the camera
* sensor, and whose outputs are the streams. Along the pipeline, the frames
* produced by the camera sensor are transformed by the camera into a format
* suitable for applications, with image processing that improves the quality
* of the captured frames. The camera exposes a set of controls that
* applications may use to manually control the processing steps. This
* high-level camera model is the minimum baseline that all cameras must
* conform to.
*
* \section camera-pipeline-model Pipeline Model
*
* Camera hardware differs in the supported image processing operations and the
* order in which they are applied. The libcamera pipelines abstract the
* hardware differences and expose a logical view of the processing operations
* with a fixed order. This offers low-level control of those operations to
* applications, while keeping application code generic.
*
* Starting from the camera sensor, a pipeline applies the following
* operations, in that order.
*
* - Pixel exposure
* - Analog to digital conversion and readout
* - Black level subtraction
* - Defective pixel correction
* - Lens shading correction
* - Spatial noise filtering
* - Per-channel gains (white balance)
* - Demosaicing (color filter array interpolation)
* - Color correction matrix (typically RGB to RGB)
* - Gamma correction
* - Color space transformation (typically RGB to YUV)
* - Cropping
* - Scaling
*
* Not all cameras implement all operations, and they are not necessarily
* implemented in the above order at the hardware level. The libcamera pipeline
* handlers translate the pipeline model to the real hardware configuration.
*
* \subsection digital-zoom Digital Zoom
*
* Digital zoom is implemented as a combination of the cropping and scaling
* stages of the pipeline. Cropping is controlled explicitly through the
* controls::ScalerCrop control, while scaling is controlled implicitly based
* on the crop rectangle and the output stream size. The crop rectangle is
* expressed relatively to the full pixel array size and indicates how the field
* of view is affected by the pipeline.
*/
namespace libcamera {
LOG_DECLARE_CATEGORY(Camera)
/**
* \class CameraConfiguration
* \brief Hold configuration for streams of the camera
* The CameraConfiguration holds an ordered list of stream configurations. It
* supports iterators and operates as a vector of StreamConfiguration instances.
* The stream configurations are inserted by addConfiguration(), and the
* operator[](int) returns a reference to the StreamConfiguration based on its
* insertion index. Accessing a stream configuration with an invalid index
* results in undefined behaviour.
*
* CameraConfiguration instances are retrieved from the camera with
* Camera::generateConfiguration(). Applications may then inspect the
* configuration, modify it, and possibly add new stream configuration entries
* with addConfiguration(). Once the camera configuration satisfies the
* application, it shall be validated by a call to validate(). The validation
* implements "try" semantics: it adjusts invalid configurations to the closest
* achievable parameters instead of rejecting them completely. Applications
* then decide whether to accept the modified configuration, or try again with
* a different set of parameters. Once the configuration is valid, it is passed
* to Camera::configure().
*/
/**
* \enum CameraConfiguration::Status
* \brief Validity of a camera configuration
* \var CameraConfiguration::Valid
* The configuration is fully valid
* \var CameraConfiguration::Adjusted
* The configuration has been adjusted to a valid configuration
* \var CameraConfiguration::Invalid
* The configuration is invalid and can't be adjusted automatically
*/
/**
* \typedef CameraConfiguration::iterator
* \brief Iterator for the stream configurations in the camera configuration
*/
/**
* \typedef CameraConfiguration::const_iterator
* \brief Const iterator for the stream configuration in the camera
* configuration
*/
/**
* \brief Create an empty camera configuration
*/
CameraConfiguration::CameraConfiguration()
: transform(Transform::Identity), config_({})
{
}
CameraConfiguration::~CameraConfiguration()
{
}
/**
* \brief Add a stream configuration to the camera configuration
* \param[in] cfg The stream configuration
*/
void CameraConfiguration::addConfiguration(const StreamConfiguration &cfg)
{
config_.push_back(cfg);
}
/**
* \fn CameraConfiguration::validate()
* \brief Validate and possibly adjust the camera configuration
*
* This method adjusts the camera configuration to the closest valid
* configuration and returns the validation status.
*
* \todo: Define exactly when to return each status code. Should stream
* parameters set to 0 by the caller be adjusted without returning Adjusted ?
* This would potentially be useful for applications but would get in the way
* in Camera::configure(). Do we need an extra status code to signal this ?
*
* \todo: Handle validation of buffers count when refactoring the buffers API.
*
* \return A CameraConfiguration::Status value that describes the validation
* status.
* \retval CameraConfiguration::Invalid The configuration is invalid and can't
* be adjusted. This may only occur in extreme cases such as when the
* configuration is empty.
* \retval CameraConfigutation::Adjusted The configuration has been adjusted
* and is now valid. Parameters may have changed for any stream, and stream
* configurations may have been removed. The caller shall check the
* configuration carefully.
* \retval CameraConfiguration::Valid The configuration was already valid and
* hasn't been adjusted.
*/
/**
* \brief Retrieve a reference to a stream configuration
* \param[in] index Numerical index
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
* this method with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
StreamConfiguration &CameraConfiguration::at(unsigned int index)
{
return config_[index];
}
/**
* \brief Retrieve a const reference to a stream configuration
* \param[in] index Numerical index
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
* this method with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
const StreamConfiguration &CameraConfiguration::at(unsigned int index) const
{
return config_[index];
}
/**
* \fn StreamConfiguration &CameraConfiguration::operator[](unsigned int)
* \brief Retrieve a reference to a stream configuration
* \param[in] index Numerical index
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
* this method with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
/**
* \fn const StreamConfiguration &CameraConfiguration::operator[](unsigned int) const
* \brief Retrieve a const reference to a stream configuration
* \param[in] index Numerical index
*
* The \a index represents the zero based insertion order of stream
* configuration into the camera configuration with addConfiguration(). Calling
* this method with an invalid index results in undefined behaviour.
*
* \return The stream configuration
*/
/**
* \brief Retrieve an iterator to the first stream configuration in the
* sequence
* \return An iterator to the first stream configuration
*/
CameraConfiguration::iterator CameraConfiguration::begin()
{
return config_.begin();
}
/**
* \brief Retrieve a const iterator to the first element of the stream
* configurations
* \return A const iterator to the first stream configuration
*/
CameraConfiguration::const_iterator CameraConfiguration::begin() const
{
return config_.begin();
}
/**
* \brief Retrieve an iterator pointing to the past-the-end stream
* configuration in the sequence
* \return An iterator to the element following the last stream configuration
*/
CameraConfiguration::iterator CameraConfiguration::end()
{
return config_.end();
}
/**
* \brief Retrieve a const iterator pointing to the past-the-end stream
* configuration in the sequence
* \return A const iterator to the element following the last stream
* configuration
*/
CameraConfiguration::const_iterator CameraConfiguration::end() const
{
return config_.end();
}
/**
* \brief Check if the camera configuration is empty
* \return True if the configuration is empty
*/
bool CameraConfiguration::empty() const
{
return config_.empty();
}
/**
* \brief Retrieve the number of stream configurations
* \return Number of stream configurations
*/
std::size_t CameraConfiguration::size() const
{
return config_.size();
}
/**
* \var CameraConfiguration::transform
* \brief User-specified transform to be applied to the image
*
* The transform is a user-specified 2D plane transform that will be applied
* to the camera images by the processing pipeline before being handed to
* the application. This is subsequent to any transform that is already
* required to fix up any platform-defined rotation.
*
* The usual 2D plane transforms are allowed here (horizontal/vertical
* flips, multiple of 90-degree rotations etc.), but the validate() function
* may adjust this field at its discretion if the selection is not supported.
*/
/**
* \var CameraConfiguration::config_
* \brief The vector of stream configurations
*/
class Camera::Private : public Extensible::Private
{
LIBCAMERA_DECLARE_PUBLIC(Camera)
public:
enum State {
CameraAvailable,
CameraAcquired,
CameraConfigured,
CameraStopping,
CameraRunning,
};
Private(Camera *camera, PipelineHandler *pipe, const std::string &id,
const std::set<Stream *> &streams);
~Private();
int isAccessAllowed(State state, bool allowDisconnected = false,
const char *from = __builtin_FUNCTION()) const;
int isAccessAllowed(State low, State high,
bool allowDisconnected = false,
const char *from = __builtin_FUNCTION()) const;
void disconnect();
void setState(State state);
std::shared_ptr<PipelineHandler> pipe_;
std::string id_;
std::set<Stream *> streams_;
std::set<const Stream *> activeStreams_;
private:
bool disconnected_;
std::atomic<State> state_;
};
Camera::Private::Private(Camera *camera, PipelineHandler *pipe,
const std::string &id,
const std::set<Stream *> &streams)
: Extensible::Private(camera), pipe_(pipe->shared_from_this()), id_(id),
streams_(streams), disconnected_(false), state_(CameraAvailable)
{
}
Camera::Private::~Private()
{
if (state_.load(std::memory_order_acquire) != Private::CameraAvailable)
LOG(Camera, Error) << "Removing camera while still in use";
}
static const char *const camera_state_names[] = {
"Available",
"Acquired",
"Configured",
"Stopping",
"Running",
};
int Camera::Private::isAccessAllowed(State state, bool allowDisconnected,
const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
State currentState = state_.load(std::memory_order_acquire);
if (currentState == state)
return 0;
ASSERT(static_cast<unsigned int>(state) < std::size(camera_state_names));
LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
<< " state trying " << from << "() requiring state "
<< camera_state_names[state];
return -EACCES;
}
int Camera::Private::isAccessAllowed(State low, State high,
bool allowDisconnected,
const char *from) const
{
if (!allowDisconnected && disconnected_)
return -ENODEV;
State currentState = state_.load(std::memory_order_acquire);
if (currentState >= low && currentState <= high)
return 0;
ASSERT(static_cast<unsigned int>(low) < std::size(camera_state_names) &&
static_cast<unsigned int>(high) < std::size(camera_state_names));
LOG(Camera, Error) << "Camera in " << camera_state_names[currentState]
<< " state trying " << from
<< "() requiring state between "
<< camera_state_names[low] << " and "
<< camera_state_names[high];
return -EACCES;
}
void Camera::Private::disconnect()
{
/*
* If the camera was running when the hardware was removed force the
* state to Configured state to allow applications to free resources
* and call release() before deleting the camera.
*/
if (state_.load(std::memory_order_acquire) == Private::CameraRunning)
state_.store(Private::CameraConfigured, std::memory_order_release);
disconnected_ = true;
}
void Camera::Private::setState(State state)
{
state_.store(state, std::memory_order_release);
}
/**
* \class Camera
* \brief Camera device
*
* \todo Add documentation for camera start timings. What exactly does the
* camera expect the pipeline handler to do when start() is called?
*
* The Camera class models a camera capable of producing one or more image
* streams from a single image source. It provides the main interface to
* configuring and controlling the device, and capturing image streams. It is
* the central object exposed by libcamera.
*
* To support the central nature of Camera objects, libcamera manages the
* lifetime of camera instances with std::shared_ptr<>. Instances shall be
* created with the create() function which returns a shared pointer. The
* Camera constructors and destructor are private, to prevent instances from
* being constructed and destroyed manually.
*
* \section camera_operation Operating the Camera
*
* An application needs to perform a sequence of operations on a camera before
* it is ready to process requests. The camera needs to be acquired and
* configured to prepare the camera for capture. Once started the camera can
* process requests until it is stopped. When an application is done with a
* camera, the camera needs to be released.
*
* An application may start and stop a camera multiple times as long as it is
* not released. The camera may also be reconfigured.
*
* Functions that affect the camera state as defined below are generally not
* synchronized with each other by the Camera class. The caller is responsible
* for ensuring their synchronization if necessary.
*
* \subsection Camera States
*
* To help manage the sequence of operations needed to control the camera a set
* of states are defined. Each state describes which operations may be performed
* on the camera. Performing an operation not allowed in the camera state
* results in undefined behaviour. Operations not listed at all in the state
* diagram are allowed in all states.
*
* \dot
* digraph camera_state_machine {
* node [shape = doublecircle ]; Available;
* node [shape = circle ]; Acquired;
* node [shape = circle ]; Configured;
* node [shape = circle ]; Stopping;
* node [shape = circle ]; Running;
*
* Available -> Available [label = "release()"];
* Available -> Acquired [label = "acquire()"];
*
* Acquired -> Available [label = "release()"];
* Acquired -> Configured [label = "configure()"];
*
* Configured -> Available [label = "release()"];
* Configured -> Configured [label = "configure(), createRequest()"];
* Configured -> Running [label = "start()"];
*
* Running -> Stopping [label = "stop()"];
* Stopping -> Configured;
* Running -> Running [label = "createRequest(), queueRequest()"];
* }
* \enddot
*
* \subsubsection Available
* The base state of a camera, an application can inspect the properties of the
* camera to determine if it wishes to use it. If an application wishes to use
* a camera it should acquire() it to proceed to the Acquired state.
*
* \subsubsection Acquired
* In the acquired state an application has exclusive access to the camera and
* may modify the camera's parameters to configure it and proceed to the
* Configured state.
*
* \subsubsection Configured
* The camera is configured and ready to be started. The application may
* release() the camera and to get back to the Available state or start()
* it to progress to the Running state.
*
* \subsubsection Stopping
* The camera has been asked to stop. Pending requests are being completed or
* cancelled, and no new requests are permitted to be queued. The camera will
* transition to the Configured state when all queued requests have been
* returned to the application.
*
* \subsubsection Running
* The camera is running and ready to process requests queued by the
* application. The camera remains in this state until it is stopped and moved
* to the Configured state.
*/
/**
* \brief Create a camera instance
* \param[in] pipe The pipeline handler responsible for the camera device
* \param[in] id The ID of the camera device
* \param[in] streams Array of streams the camera provides
*
* The caller is responsible for guaranteeing a stable and unique camera ID
* matching the constraints described by Camera::id(). Parameters that are
* allocated dynamically at system startup, such as bus numbers that may be
* enumerated differently, are therefore not suitable to use in the ID.
*
* Pipeline handlers that use a CameraSensor may use the CameraSensor::id() to
* generate an ID that satisfies the criteria of a stable and unique camera ID.
*
* \return A shared pointer to the newly created camera object
*/
std::shared_ptr<Camera> Camera::create(PipelineHandler *pipe,
const std::string &id,
const std::set<Stream *> &streams)
{
struct Deleter : std::default_delete<Camera> {
void operator()(Camera *camera)
{
if (Thread::current() == camera->thread())
delete camera;
else
camera->deleteLater();
}
};
Camera *camera = new Camera(pipe, id, streams);
return std::shared_ptr<Camera>(camera, Deleter());
}
/**
* \brief Retrieve the ID of the camera
*
* The camera ID is a free-form string that identifies a camera in the system.
* IDs are guaranteed to be unique and stable: the same camera, when connected
* to the system in the same way (e.g. in the same USB port), will have the same
* ID across both unplug/replug and system reboots.
*
* Applications may store the camera ID and use it later to acquire the same
* camera. They shall treat the ID as an opaque identifier, without interpreting
* its value.
*
* Camera IDs may change when the system hardware or firmware is modified, for
* instance when replacing a PCI USB controller or moving it to another PCI
* slot, or updating the ACPI tables or Device Tree.
*
* \context This function is \threadsafe.
*
* \return ID of the camera device
*/
const std::string &Camera::id() const
{
const Private *const d = LIBCAMERA_D_PTR();
return d->id_;
}
/**
* \var Camera::bufferCompleted
* \brief Signal emitted when a buffer for a request queued to the camera has
* completed
*/
/**
* \var Camera::requestCompleted
* \brief Signal emitted when a request queued to the camera has completed
*/
/**
* \var Camera::disconnected
* \brief Signal emitted when the camera is disconnected from the system
*
* This signal is emitted when libcamera detects that the camera has been
* removed from the system. For hot-pluggable devices this is usually caused by
* physical device disconnection. The media device is passed as a parameter.
*
* As soon as this signal is emitted the camera instance will refuse all new
* application API calls by returning errors immediately.
*/
Camera::Camera(PipelineHandler *pipe, const std::string &id,
const std::set<Stream *> &streams)
: Extensible(new Private(this, pipe, id, streams))
{
}
Camera::~Camera()
{
}
/**
* \brief Notify camera disconnection
*
* This method is used to notify the camera instance that the underlying
* hardware has been unplugged. In response to the disconnection the camera
* instance notifies the application by emitting the #disconnected signal, and
* ensures that all new calls to the application-facing Camera API return an
* error immediately.
*
* \todo Deal with pending requests if the camera is disconnected in a
* running state.
*/
void Camera::disconnect()
{
Private *const d = LIBCAMERA_D_PTR();
LOG(Camera, Debug) << "Disconnecting camera " << id();
d->disconnect();
disconnected.emit(this);
}
int Camera::exportFrameBuffers(Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraConfigured);
if (ret < 0)
return ret;
if (streams().find(stream) == streams().end())
return -EINVAL;
if (d->activeStreams_.find(stream) == d->activeStreams_.end())
return -EINVAL;
return d->pipe_->invokeMethod(&PipelineHandler::exportFrameBuffers,
ConnectionTypeBlocking, this, stream,
buffers);
}
/**
* \brief Acquire the camera device for exclusive access
*
* After opening the device with open(), exclusive access must be obtained
* before performing operations that change the device state. This function is
* not blocking, if the device has already been acquired (by the same or another
* process) the -EBUSY error code is returned.
*
* Acquiring a camera will limit usage of any other camera(s) provided by the
* same pipeline handler to the same instance of libcamera. The limit is in
* effect until all cameras from the pipeline handler are released. Other
* instances of libcamera can still list and examine the cameras but will fail
* if they attempt to acquire() any of them.
*
* Once exclusive access isn't needed anymore, the device should be released
* with a call to the release() function.
*
* \context This function is \threadsafe. It may only be called when the camera
* is in the Available state as defined in \ref camera_operation.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EBUSY The camera is not free and can't be acquired by the caller
*/
int Camera::acquire()
{
Private *const d = LIBCAMERA_D_PTR();
/*
* No manual locking is required as PipelineHandler::lock() is
* thread-safe.
*/
int ret = d->isAccessAllowed(Private::CameraAvailable);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
if (!d->pipe_->lock()) {
LOG(Camera, Info)
<< "Pipeline handler in use by another process";
return -EBUSY;
}
d->setState(Private::CameraAcquired);
return 0;
}
/**
* \brief Release exclusive access to the camera device
*
* Releasing the camera device allows other users to acquire exclusive access
* with the acquire() function.
*
* \context This function may only be called when the camera is in the
* Available or Configured state as defined in \ref camera_operation, and shall
* be synchronized by the caller with other functions that affect the camera
* state.
*
* \return 0 on success or a negative error code otherwise
* \retval -EBUSY The camera is running and can't be released
*/
int Camera::release()
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraAvailable,
Private::CameraConfigured, true);
if (ret < 0)
return ret == -EACCES ? -EBUSY : ret;
d->pipe_->unlock();
d->setState(Private::CameraAvailable);
return 0;
}
/**
* \brief Retrieve the list of controls supported by the camera
*
* The list of controls supported by the camera and their associated
* constraints remain constant through the lifetime of the Camera object.
*
* \context This function is \threadsafe.
*
* \return A ControlInfoMap listing the controls supported by the camera
*/
const ControlInfoMap &Camera::controls() const
{
const Private *const d = LIBCAMERA_D_PTR();
return d->pipe_->controls(this);
}
/**
* \brief Retrieve the list of properties of the camera
*
* Camera properties are static information that describe the capabilities of
* the camera. They remain constant through the lifetime of the Camera object.
*
* \return A ControlList of properties supported by the camera
*/
const ControlList &Camera::properties() const
{
const Private *const d = LIBCAMERA_D_PTR();
return d->pipe_->properties(this);
}
/**
* \brief Retrieve all the camera's stream information
*
* Retrieve all of the camera's static stream information. The static
* information describes among other things how many streams the camera
* supports and the capabilities of each stream.
*
* \context This function is \threadsafe.
*
* \return An array of all the camera's streams
*/
const std::set<Stream *> &Camera::streams() const
{
const Private *const d = LIBCAMERA_D_PTR();
return d->streams_;
}
/**
* \brief Generate a default camera configuration according to stream roles
* \param[in] roles A list of stream roles
*
* Generate a camera configuration for a set of desired stream roles. The caller
* specifies a list of stream roles and the camera returns a configuration
* containing suitable streams and their suggested default configurations. An
* empty list of roles is valid, and will generate an empty configuration that
* can be filled by the caller.
*
* \context This function is \threadsafe.
*
* \return A CameraConfiguration if the requested roles can be satisfied, or a
* null pointer otherwise. The ownership of the returned configuration is
* passed to the caller.
*/
std::unique_ptr<CameraConfiguration> Camera::generateConfiguration(const StreamRoles &roles)
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraAvailable,
Private::CameraRunning);
if (ret < 0)
return nullptr;
if (roles.size() > streams().size())
return nullptr;
CameraConfiguration *config = d->pipe_->generateConfiguration(this, roles);
if (!config) {
LOG(Camera, Debug)
<< "Pipeline handler failed to generate configuration";
return nullptr;
}
std::ostringstream msg("streams configuration:", std::ios_base::ate);
if (config->empty())
msg << " empty";
for (unsigned int index = 0; index < config->size(); ++index)
msg << " (" << index << ") " << config->at(index).toString();
LOG(Camera, Debug) << msg.str();
return std::unique_ptr<CameraConfiguration>(config);
}
/**
* \brief Configure the camera prior to capture
* \param[in] config The camera configurations to setup
*
* Prior to starting capture, the camera must be configured to select a
* group of streams to be involved in the capture and their configuration.
* The caller specifies which streams are to be involved and their configuration
* by populating \a config.
*
* The configuration is created by generateConfiguration(), and adjusted by the
* caller with CameraConfiguration::validate(). This method only accepts fully
* valid configurations and returns an error if \a config is not valid.
*
* Exclusive access to the camera shall be ensured by a call to acquire() prior
* to calling this function, otherwise an -EACCES error will be returned.
*
* \context This function may only be called when the camera is in the Acquired
* or Configured state as defined in \ref camera_operation, and shall be
* synchronized by the caller with other functions that affect the camera
* state.
*
* Upon return the StreamConfiguration entries in \a config are associated with
* Stream instances which can be retrieved with StreamConfiguration::stream().
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not in a state where it can be configured
* \retval -EINVAL The configuration is not valid
*/
int Camera::configure(CameraConfiguration *config)
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraAcquired,
Private::CameraConfigured);
if (ret < 0)
return ret;
for (auto it : *config)
it.setStream(nullptr);
if (config->validate() != CameraConfiguration::Valid) {
LOG(Camera, Error)
<< "Can't configure camera with invalid configuration";
return -EINVAL;
}
std::ostringstream msg("configuring streams:", std::ios_base::ate);
for (unsigned int index = 0; index < config->size(); ++index) {
StreamConfiguration &cfg = config->at(index);
msg << " (" << index << ") " << cfg.toString();
}
LOG(Camera, Info) << msg.str();
ret = d->pipe_->invokeMethod(&PipelineHandler::configure,
ConnectionTypeBlocking, this, config);
if (ret)
return ret;
d->activeStreams_.clear();
for (const StreamConfiguration &cfg : *config) {
Stream *stream = cfg.stream();
if (!stream) {
LOG(Camera, Fatal)
<< "Pipeline handler failed to update stream configuration";
d->activeStreams_.clear();
return -EINVAL;
}
stream->configuration_ = cfg;
d->activeStreams_.insert(stream);
}
d->setState(Private::CameraConfigured);
return 0;
}
/**
* \brief Create a request object for the camera
* \param[in] cookie Opaque cookie for application use
*
* This method creates an empty request for the application to fill with
* buffers and parameters, and queue for capture.
*
* The \a cookie is stored in the request and is accessible through the
* Request::cookie() method at any time. It is typically used by applications
* to map the request to an external resource in the request completion
* handler, and is completely opaque to libcamera.
*
* The ownership of the returned request is passed to the caller, which is
* responsible for deleting it. The request may be deleted in the completion
* handler, or reused after resetting its state with Request::reuse().
*
* \context This function is \threadsafe. It may only be called when the camera
* is in the Configured or Running state as defined in \ref camera_operation.
*
* \return A pointer to the newly created request, or nullptr on error
*/
std::unique_ptr<Request> Camera::createRequest(uint64_t cookie)
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraConfigured,
Private::CameraRunning);
if (ret < 0)
return nullptr;
return std::make_unique<Request>(this, cookie);
}
/**
* \brief Queue a request to the camera
* \param[in] request The request to queue to the camera
*
* This method queues a \a request to the camera for capture.
*
* After allocating the request with createRequest(), the application shall
* fill it with at least one capture buffer before queuing it. Requests that
* contain no buffers are invalid and are rejected without being queued.
*
* Once the request has been queued, the camera will notify its completion
* through the \ref requestCompleted signal.
*
* \context This function is \threadsafe. It may only be called when the camera
* is in the Running state as defined in \ref camera_operation.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV The camera has been disconnected from the system
* \retval -EACCES The camera is not running so requests can't be queued
* \retval -EINVAL The request is invalid
* \retval -ENOMEM No buffer memory was available to handle the request
*/
int Camera::queueRequest(Request *request)
{
Private *const d = LIBCAMERA_D_PTR();
int ret = d->isAccessAllowed(Private::CameraRunning);
if (ret < 0)
return ret;
/*
* The camera state may change until the end of the function. No locking
* is however needed as PipelineHandler::queueRequest() will handle
* this.
*/
if (request->buffers().empty()) {
LOG(Camera, Error) << "Request contains no buffers";
return -EINVAL;
}
for (auto const &it : request->buffers()) {
const Stream *stream = it.first;