# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi (Trading) Limited
#
# ctt_awb.py - camera tuning tool for AWB

from ctt_image_load import *
import matplotlib.pyplot as plt
from bisect import bisect_left
from scipy.optimize import fmin


"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot):
    imgs = Cam.imgs
    """
    condense alsc calibration tables into one dictionary
    """
    if cal_cr_list is None:
        colour_cals = None
    else:
        colour_cals = {}
        for cr, cb in zip(cal_cr_list, cal_cb_list):
            cr_tab = cr['table']
            cb_tab = cb['table']
            """
            normalise tables so min value is 1
            """
            cr_tab = cr_tab/np.min(cr_tab)
            cb_tab = cb_tab/np.min(cb_tab)
            colour_cals[cr['ct']] = [cr_tab, cb_tab]
    """
    obtain data from greyscale macbeth patches
    """
    rb_raw = []
    rbs_hat = []
    for Img in imgs:
        Cam.log += '\nProcessing '+Img.name
        """
        get greyscale patches with alsc applied if alsc enabled.
        Note: if alsc is disabled then colour_cals will be set to None and
        the function will just return the greyscale patches
        """
        r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
        """
        calculate ratio of r, b to g
        """
        r_g = np.mean(r_patchs/g_patchs)
        b_g = np.mean(b_patchs/g_patchs)
        Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
        """
        The curve tends to be better behaved in so-called hatspace.
        R, B, G represent the individual channels. The colour curve is
        plotted in r, b space, where:
            r = R/G
            b = B/G
        This will be referred to as dehatspace... (sorry)
        Hatspace is defined as:
            r_hat = R/(R+B+G)
            b_hat = B/(R+B+G)
        To convert from dehatspace to hatspace (hat operation):
            r_hat = r/(1+r+b)
            b_hat = b/(1+r+b)
        To convert from hatspace to dehatspace (dehat operation):
            r = r_hat/(1-r_hat-b_hat)
            b = b_hat/(1-r_hat-b_hat)
        Proof is left as an exercise for the reader...
        Throughout the code, r and b are sometimes referred to as r_g and
        b_g as a reminder that they are ratios
        """
        r_g_hat = r_g/(1+r_g+b_g)
        b_g_hat = b_g/(1+r_g+b_g)
        Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
        rbs_hat.append((r_g_hat, b_g_hat, Img.col))
        rb_raw.append((r_g, b_g))
        Cam.log += '\n'

    Cam.log += '\nFinished processing images'
    """
    sort all lists simultaneously by r_hat
    """
    rbs_zip = list(zip(rbs_hat, rb_raw))
    rbs_zip.sort(key=lambda x: x[0][0])
    rbs_hat, rb_raw = list(zip(*rbs_zip))
    """
    unzip tuples ready for processing
    """
    rbs_hat = list(zip(*rbs_hat))
    rb_raw = list(zip(*rb_raw))
    """
    fit a quadratic to r_g_hat and b_g_hat
    """
    a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
    Cam.log += '\nFit quadratic curve in hatspace'
    """
    the algorithm now approximates the shortest distance from each point to
    the curve in dehatspace. Since the fit is done in hatspace, it is easier
    to find the actual shortest distance in hatspace and use the projection
    back into dehatspace as an overestimate.
    The distance will be used for two things:
        1) In the case that colour temperature does not strictly decrease
        with increasing r/g, the closest point to the line will be chosen
        out of an increasing pair of colours.

        2) To calculate transverse negative and positive, the maximum
        positive and negative distance from the line are chosen. This
        benefits from the overestimate as the transverse pos/neg are upper
        bound values.
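
    As a quick worked example of the hat/dehat round trip used throughout:
    for R = 2, G = 1, B = 1 we have r = 2 and b = 1, so
    r_hat = 2/(1+2+1) = 0.5 and b_hat = 1/4 = 0.25; dehatting gives
    r = 0.5/(1-0.5-0.25) = 2 and b = 0.25/0.25 = 1, recovering the ratios.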
""" """ define fit function """ def f(x): return a*x**2 + b*x + c """ iterate over points (R, B are x and y coordinates of points) and calculate distance to line in dehatspace """ dists = [] for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])): """ define function to minimise as square distance between datapoint and point on curve. Squaring is monotonic so minimising radius squared is equivalent to minimising radius """ def f_min(x): y = f(x) return((x-R)**2+(y-B)**2) """ perform optimisation with scipy.optmisie.fmin """ x_hat = fmin(f_min, R, disp=0)[0] y_hat = f(x_hat) """ dehat """ x = x_hat/(1-x_hat-y_hat) y = y_hat/(1-x_hat-y_hat) rr = R/(1-R-B) bb = B/(1-R-B) """ calculate euclidean distance in dehatspace """ dist = ((x-rr)**2+(y-bb)**2)**0.5 """ return negative if point is below the fit curve """ if (x+y) > (rr+bb): dist *= -1 dists.append(dist) Cam.log += '\nFound closest point on fit line to each point in dehatspace' """ calculate wiggle factors in awb. 10% added since this is an upper bound """ transverse_neg = - np.min(dists) * 1.1 transverse_pos = np.max(dists) * 1.1 Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos) Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg) """ set minimum transverse wiggles to 0.1 . Wiggle factors dictate how far off of the curve the algorithm searches. 0.1 is a suitable minimum that gives better results for lighting conditions not within calibration dataset. Anything less will generalise poorly. """ if transverse_pos < 0.01: transverse_pos = 0.01 Cam.log += '\nForced transverse pos to 0.01' if transverse_neg < 0.01: transverse_neg = 0.01 Cam.log += '\nForced transverse neg to 0.01' """ generate new b_hat values at each r_hat according to fit """ r_hat_fit = np.array(rbs_hat[0]) b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c """ transform from hatspace to dehatspace """ r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit) b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit) c_fit = np.round(rbs_hat[2], 0) """ round to 4dp """ r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit) r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit) b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit) b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit) r_fit = np.round(r_fit, 4) b_fit = np.round(b_fit, 4) """ The following code ensures that colour temperature decreases with increasing r/g """ """ iterate backwards over list for easier indexing """ i = len(c_fit) - 1 while i > 0: if c_fit[i] > c_fit[i-1]: Cam.log += '\nColour temperature increase found\n' Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1]) Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i]) """ if colour temperature increases then discard point furthest from the transformed fit (dehatspace) """ error_1 = abs(dists[i-1]) error_2 = abs(dists[i]) Cam.log += '\nDistances from fit:\n' Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1) Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2) """ find bad index note that in python false = 0 and true = 1 """ bad = i - (error_1 < error_2) Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad]) Cam.log += 'it is furthest from fit' """ delete bad point """ r_fit = np.delete(r_fit, bad) b_fit = np.delete(b_fit, bad) c_fit = np.delete(c_fit, bad).astype(np.uint16) """ note that if a point has been discarded then the length has decreased by one, meaning that decreasing the index by one will reassess the kept point against the next point. 
        It is therefore possible, in theory, for two adjacent points to be
        discarded, although probably rare
        """
        i -= 1
    """
    return formatted ct curve, ordered by increasing colour temperature
    """
    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
    Cam.log += '\nFinal CT curve:'
    for i in range(len(ct_curve)//3):
        j = 3*i
        Cam.log += '\n  ct: {} '.format(ct_curve[j])
        Cam.log += ' r: {} '.format(ct_curve[j+1])
        Cam.log += ' b: {} '.format(ct_curve[j+2])
    """
    plotting code for debug
    """
    if plot:
        x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
        y = a*x**2 + b*x + c
        plt.subplot(2, 1, 1)
        plt.title('hatspace')
        plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
        plt.plot(x, y, color='green', ls='-')
        plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
        for i, ct in enumerate(rbs_hat[2]):
            plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
        plt.xlabel('$\\hat{r}$')
        plt.ylabel('$\\hat{b}$')
        """
        optionally set axes equal to shortest distance so the line really
        does look perpendicular and everybody is happy
        """
        # ax = plt.gca()
        # ax.set_aspect('equal')
        plt.grid()
        plt.subplot(2, 1, 2)
        plt.title('dehatspace - indoors?')
        plt.plot(r_fit, b_fit, color='blue')
        plt.scatter(rb_raw[0], rb_raw[1], color='green')
        plt.scatter(r_fit, b_fit, color='red')
        for i, ct in enumerate(c_fit):
            plt.annotate(str(ct), (r_fit[i], b_fit[i]))
        plt.xlabel('$r$')
        plt.ylabel('$b$')
        """
        optionally set axes equal to shortest distance so the line really
        does look perpendicular and everybody is happy
        """
        # ax = plt.gca()
        # ax.set_aspect('equal')
        plt.subplots_adjust(hspace=0.5)
        plt.grid()
        plt.show()
    """
    end of plotting code
    """
    return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))


"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True):
    """
    get patch centre coordinates, image colour and the actual
    patches for each channel, remembering to subtract blacklevel
    If grey then only greyscale patches considered
    """
    if grey:
        cen_coords = Img.cen_coords[3::4]
        col = Img.col
        patches = [np.array(Img.patches[i]) for i in Img.order]
        r_patchs = patches[0][3::4] - Img.blacklevel_16
        b_patchs = patches[3][3::4] - Img.blacklevel_16
        """
        note the two green channels are averaged
        """
        g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
    else:
        cen_coords = Img.cen_coords
        col = Img.col
        patches = [np.array(Img.patches[i]) for i in Img.order]
        r_patchs = patches[0] - Img.blacklevel_16
        b_patchs = patches[3] - Img.blacklevel_16
        g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16

    if colour_cals is None:
        return r_patchs, b_patchs, g_patchs
    """
    find where image colour fits in alsc colour calibration tables
    """
    cts = list(colour_cals.keys())
    pos = bisect_left(cts, col)
    """
    if img colour is below minimum or above maximum alsc calibration
    colour, simply pick the extreme closest to img colour
    """
    if pos % len(cts) == 0:
        """
        this works because -0 = 0 = first and -1 = last index
        """
        col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
    else:
        """
        perform linear interpolation between existing alsc colour
        calibration tables
        """
        bef = cts[pos-1]
        aft = cts[pos]
        da = col-bef
        db = aft-col
        bef_tabs = np.array(colour_cals[bef])
        aft_tabs = np.array(colour_cals[aft])
        col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
    col_tabs = np.reshape(col_tabs, (2, 12, 16))
    """
    calculate dx, dy used to calculate alsc table
    """
    w, h = Img.w/2, Img.h/2
    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
    """
    make list of pairs of gains for each patch by selecting the correct
    value in the alsc colour
    calibration table
    """
    patch_gains = []
    for cen in cen_coords:
        x, y = cen[0]//dx, cen[1]//dy
        # We could probably do with some better spatial interpolation here?
        col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
        patch_gains.append(col_gains)

    """
    multiply the r and b channels in each patch by the respective gain,
    finally performing the alsc colour correction
    """
    for i, gains in enumerate(patch_gains):
        r_patchs[i] = r_patchs[i] * gains[0]
        b_patchs[i] = b_patchs[i] * gains[1]
    """
    return greyscale patches, g channel and correct r, b channels
    """
    return r_patchs, b_patchs, g_patchs
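

"""
A minimal, self-contained sketch (not part of the tuning tool) of the
nearest-point search used in awb() above, run on made-up synthetic data:
fit a quadratic in hatspace, then find each point's closest point on the
curve with scipy's fmin. Only runs when this file is executed directly.
"""
if __name__ == '__main__':
    demo_r_hat = np.array([0.28, 0.31, 0.35, 0.40])
    demo_b_hat = np.array([0.33, 0.30, 0.27, 0.22])
    qa, qb, qc = np.polyfit(demo_r_hat, demo_b_hat, 2)
    for R, B in zip(demo_r_hat, demo_b_hat):
        # minimise the squared distance from (R, B) to the curve, as awb() does
        x = fmin(lambda x: (x-R)**2 + (qa*x**2+qb*x+qc-B)**2, R, disp=0)[0]
        print('({:.2f}, {:.2f}) -> closest curve point ({:.4f}, {:.4f})'.format(
            R, B, x, qa*x**2+qb*x+qc))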
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
import json
import pyexiv2 as pyexif
import rawpy as raw
"""
The Image class loads an image from raw data and extracts metadata.
Once the image is extracted from the data, it finds 24 16x16 patches for
each channel, centred on the macbeth chart squares
"""
class Image:
def __init__(self, buf):
self.buf = buf
self.patches = None
self.saturated = False
'''
obtain metadata from buffer
'''
def get_meta(self):
self.ver = ba_to_b(self.buf[4:5])
self.w = ba_to_b(self.buf[0xd0:0xd2])
self.h = ba_to_b(self.buf[0xd2:0xd4])
self.pad = ba_to_b(self.buf[0xd4:0xd6])
self.fmt = self.buf[0xf5]
self.sigbits = 2*self.fmt + 4
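        # e.g. fmt = 3 gives 10 significant bits, fmt = 4 gives 12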
self.pattern = self.buf[0xf4]
self.exposure = ba_to_b(self.buf[0x90:0x94])
self.againQ8 = ba_to_b(self.buf[0x94:0x96])
self.againQ8_norm = self.againQ8/256
camName = self.buf[0x10:0x10+128]
camName_end = camName.find(0x00)
self.camName = self.buf[0x10:0x10+128][:camName_end].decode()
"""
Channel order depending on bayer pattern
"""
bayer_case = {
0: (0, 1, 2, 3), # red
1: (2, 0, 3, 1), # green next to red
2: (3, 2, 1, 0), # green next to blue
3: (1, 0, 3, 2), # blue
            128: (0, 1, 2, 3)  # arbitrary order for greyscale case
}
self.order = bayer_case[self.pattern]
'''
manual blacklevel - not robust
'''
if 'ov5647' in self.camName:
self.blacklevel = 16
else:
self.blacklevel = 64
self.blacklevel_16 = self.blacklevel << (6)
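        # e.g. 64 << 6 = 4096 on the 16-bit scale; the fixed shift assumes
        # the black level is quoted on a 10-bit scale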
return 1
'''
print metadata for debug
'''
def print_meta(self):
print('\nData:')
print(' ver = {}'.format(self.ver))
print(' w = {}'.format(self.w))
print(' h = {}'.format(self.h))
print(' pad = {}'.format(self.pad))
print(' fmt = {}'.format(self.fmt))
print(' sigbits = {}'.format(self.sigbits))
print(' pattern = {}'.format(self.pattern))
print(' exposure = {}'.format(self.exposure))
print(' againQ8 = {}'.format(self.againQ8))
print(' againQ8_norm = {}'.format(self.againQ8_norm))
print(' camName = {}'.format(self.camName))
print(' blacklevel = {}'.format(self.blacklevel))
print(' blacklevel_16 = {}'.format(self.blacklevel_16))
return 1
"""
get image from raw scanline data
"""
def get_image(self, raw):
self.dptr = []
"""
check if data is 10 or 12 bits
"""
if self.sigbits == 10:
"""
calc length of scanline
"""
lin_len = ((((((self.w+self.pad+3)>>2)) * 5)+31)>>5) * 32
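            # e.g. for w + pad = 1920: ceil(1920/4) * 5 = 2400 bytes, already
            # a multiple of 32, so lin_len = 2400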
"""
stack scan lines into matrix
"""
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
"""
            separate the 5 bytes in each packed group of 4 pixels, stopping when w is satisfied
"""
ba0 = raw[..., 0:5*((self.w+3)>>2):5]
ba1 = raw[..., 1:5*((self.w+3)>>2):5]
ba2 = raw[..., 2:5*((self.w+3)>>2):5]
ba3 = raw[..., 3:5*((self.w+3)>>2):5]
ba4 = raw[..., 4:5*((self.w+3)>>2):5]
"""
assemble 10 bit numbers
"""
ch0 = np.left_shift((np.left_shift(ba0, 2) + (ba4 % 4)), 6)
ch1 = np.left_shift((np.left_shift(ba1, 2) + (np.right_shift(ba4, 2) % 4)), 6)
ch2 = np.left_shift((np.left_shift(ba2, 2) + (np.right_shift(ba4, 4) % 4)), 6)
ch3 = np.left_shift((np.left_shift(ba3, 2) + (np.right_shift(ba4, 6) % 4)), 6)
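            # e.g. a raw value of 0x3ff arrives as ba0 = 0xff plus low bits
            # 0b11 in ba4: (0xff << 2) + 3 = 1023, scaled to 16 bits by << 6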
"""
            interleave the four unpacked pixel columns back into image order
"""
mat = np.empty((self.h, self.w), dtype=ch0.dtype)
mat[..., 0::4] = ch0
mat[..., 1::4] = ch1
mat[..., 2::4] = ch2
mat[..., 3::4] = ch3
"""
            There is some memory leaking somewhere in the code. The cleanup
            here seemed to make things good enough that the tool would run
            for reasonable numbers of images; however, this is technically
            just a workaround. (sorry)
"""
ba0, ba1, ba2, ba3, ba4 = None, None, None, None, None
del ba0, ba1, ba2, ba3, ba4
ch0, ch1, ch2, ch3 = None, None, None, None
del ch0, ch1, ch2, ch3
"""
same as before but 12 bit case
"""
elif self.sigbits == 12:
lin_len = ((((((self.w+self.pad+1)>>1)) * 3)+31)>>5) * 32
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
ba0 = raw[..., 0:3*((self.w+1)>>1):3]
ba1 = raw[..., 1:3*((self.w+1)>>1):3]
ba2 = raw[..., 2:3*((self.w+1)>>1):3]
ch0 = np.left_shift((np.left_shift(ba0, 4) + ba2 % 16), 4)
ch1 = np.left_shift((np.left_shift(ba1, 4) + (np.right_shift(ba2, 4)) % 16), 4)
mat = np.empty((self.h, self.w), dtype=ch0.dtype)
mat[..., 0::2] = ch0
mat[..., 1::2] = ch1
else:
"""
            data is neither 10 nor 12 bit, or the data is incorrect
"""
print('ERROR: wrong bit format, only 10 or 12 bit supported')
return 0
"""
separate bayer channels
"""
c0 = mat[0::2, 0::2]
c1 = mat[0::2, 1::2]
c2 = mat[1::2, 0::2]
c3 = mat[1::2, 1::2]
self.channels = [c0, c1, c2, c3]
return 1
"""
obtain 16x16 patch centred at macbeth square centre for each channel
"""
def get_patches(self, cen_coords, size=16):
"""
obtain channel widths and heights
"""
ch_w, ch_h = self.w, self.h
cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
self.cen_coords = cen_coords
"""
squares are ordered by stacking macbeth chart columns from
left to right. Some useful patch indices:
white = 3
black = 23
'reds' = 9, 10
'blues' = 2, 5, 8, 20, 22
'greens' = 6, 12, 17
greyscale = 3, 7, 11, 15, 19, 23
"""
all_patches = []
for ch in self.channels:
ch_patches = []
for cen in cen_coords:
'''
macbeth centre is placed at top left of central 2x2 patch
                to account for rounding.
Patch pixels are sorted by pixel brightness so spatial
information is lost.
'''
patch = ch[cen[1]-7:cen[1]+9, cen[0]-7:cen[0]+9].flatten()
patch.sort()
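                # saturation check below: e.g. for 10-bit data the maximum
                # value is 1023 * 2**6 = 65472, and the flag only trips if
                # at least five pixels in the sorted patch have reached it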
if patch[-5] == (2**self.sigbits-1)*2**(16-self.sigbits):
self.saturated = True
ch_patches.append(patch)
# print('\nNew Patch\n')
all_patches.append(ch_patches)
# print('\n\nNew Channel\n\n')
self.patches = all_patches
return 1
def brcm_load_image(Cam, im_str):
"""
Load image where raw data and metadata is in the BRCM format
"""
try:
"""
create byte array
"""
with open(im_str, 'rb') as image:
f = image.read()
b = bytearray(f)
"""
return error if incorrect image address
"""
except FileNotFoundError:
print('\nERROR:\nInvalid image address')
Cam.log += '\nWARNING: Invalid image address'
return 0
"""
return error if problem reading file
"""
if f is None:
print('\nERROR:\nProblem reading file')
        Cam.log += '\nWARNING: Problem reading file'
return 0
# print('\nLooking for EOI and BRCM header')
"""
find end of image followed by BRCM header by turning
bytearray into hex string and string matching with regexp
"""
start = -1
match = bytearray(b'\xff\xd9@BRCM')
match_str = binascii.hexlify(match)
b_str = binascii.hexlify(b)
"""
note index is divided by two to go from string to hex
"""
indices = [m.start()//2 for m in re.finditer(match_str, b_str)]
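    # e.g. hexlify doubles the length, so a match at hex-string offset 10
    # corresponds to byte offset 5 in the original buffer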
# print(indices)
try:
start = indices[0] + 3
except IndexError:
print('\nERROR:\nNo Broadcom header found')
Cam.log += '\nWARNING: No Broadcom header found!'
return 0
"""
extract data after header
"""
# print('\nExtracting data after header')
buf = b[start:start+32768]
Img = Image(buf)
Img.str = im_str
# print('Data found successfully')
"""
obtain metadata
"""
# print('\nReading metadata')
Img.get_meta()
Cam.log += '\nExposure : {} us'.format(Img.exposure)
Cam.log += '\nNormalised gain : {}'.format(Img.againQ8_norm)
# print('Metadata read successfully')
"""
obtain raw image data
"""
# print('\nObtaining raw image data')
raw = b[start+32768:]
Img.get_image(raw)
"""
delete raw to stop memory errors
"""
raw = None
del raw
# print('Raw image data obtained successfully')
return Img
def dng_load_image(Cam, im_str):
try:
Img = Image(None)
# RawPy doesn't load all the image tags that we need, so we use py3exiv2
metadata = pyexif.ImageMetadata(im_str)
metadata.read()
        Img.ver = 100  # arbitrary value
"""
The DNG and TIFF/EP specifications use different IFDs to store the raw
image data and the Exif tags. DNG stores them in a SubIFD and in an Exif
IFD respectively (named "SubImage1" and "Photo" by pyexiv2), while
TIFF/EP stores them both in IFD0 (name "Image"). Both are used in "DNG"
files, with libcamera-apps following the DNG recommendation and
applications based on picamera2 following TIFF/EP.
This code detects which tags are being used, and therefore extracts the
correct values.
"""
try:
Img.w = metadata['Exif.SubImage1.ImageWidth'].value
subimage = "SubImage1"
photo = "Photo"
except KeyError:
Img.w = metadata['Exif.Image.ImageWidth'].value
subimage = "Image"
photo = "Image"
Img.pad = 0
Img.h = metadata[f'Exif.{subimage}.ImageLength'].value
white = metadata[f'Exif.{subimage}.WhiteLevel'].value
Img.sigbits = int(white).bit_length()
Img.fmt = (Img.sigbits - 4) // 2
Img.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
Img.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
Img.againQ8_norm = Img.againQ8 / 256
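        # e.g. ISO 400 maps to againQ8 = 1024, i.e. a normalised gain of 4.0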
Img.camName = metadata['Exif.Image.Model'].value
Img.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
Img.blacklevel_16 = Img.blacklevel << (16 - Img.sigbits)
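        # e.g. with sigbits = 12, a black level of 256 becomes
        # 256 << 4 = 4096 on the 16-bit scale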
bayer_case = {
'0 1 1 2': (0, (0, 1, 2, 3)),
'1 2 0 1': (1, (2, 0, 3, 1)),
'2 1 1 0': (2, (3, 2, 1, 0)),
'1 0 2 1': (3, (1, 0, 3, 2))
}
cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
Img.pattern = bayer_case[cfa_pattern][0]
Img.order = bayer_case[cfa_pattern][1]
        # Now use RawPy to get the raw Bayer pixels
raw_im = raw.imread(im_str)
raw_data = raw_im.raw_image
shift = 16 - Img.sigbits
c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]
Img.rgb = raw_im.postprocess()
except Exception:
print("\nERROR: failed to load DNG file", im_str)
print("Either file does not exist or is incompatible")
Cam.log += '\nERROR: DNG file does not exist or is incompatible'
raise
return Img
'''
load image from file location and perform calibration
check correct filetype
the mac boolean is true if the image is expected to contain a macbeth chart
and false if not (alsc images don't have macbeth charts)
'''
def load_image(Cam, im_str, mac_config=None, show=False, mac=True, show_meta=False):
"""
check image is correct filetype
"""
if '.jpg' in im_str or '.jpeg' in im_str or '.brcm' in im_str or '.dng' in im_str:
if '.dng' in im_str:
Img = dng_load_image(Cam, im_str)
else:
Img = brcm_load_image(Cam, im_str)
"""
handle errors smoothly if loading image failed
"""
if Img == 0:
return 0
if show_meta:
Img.print_meta()
if mac:
"""
find macbeth centres, discarding images that are too dark or light
"""
av_chan = (np.mean(np.array(Img.channels), axis=0)/(2**16))
av_val = np.mean(av_chan)
# print(av_val)
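            # e.g. with blacklevel_16 = 4096 the threshold below is
            # 4096/65536 + 1/64 = 0.078125 of full scale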
if av_val < Img.blacklevel_16/(2**16)+1/64:
macbeth = None
print('\nError: Image too dark!')
Cam.log += '\nWARNING: Image too dark!'
else:
macbeth = find_macbeth(Cam, av_chan, mac_config)
"""
if no macbeth found return error
"""
if macbeth is None:
print('\nERROR: No macbeth chart found')
return 0
mac_cen_coords = macbeth[1]
# print('\nMacbeth centres located successfully')
"""
obtain image patches
"""
# print('\nObtaining image patches')
Img.get_patches(mac_cen_coords)
if Img.saturated:
print('\nERROR: Macbeth patches have saturated')
Cam.log += '\nWARNING: Macbeth patches have saturated!'
return 0
"""
clear memory
"""
Img.buf = None
del Img.buf
# print('Image patches obtained successfully')
"""
optional debug
"""
if show and __name__ == '__main__':
copy = sum(Img.channels)/2**18
copy = np.reshape(copy, (Img.h//2, Img.w//2)).astype(np.float64)
copy, _ = reshape(copy, 800)
represent(copy)
return Img
"""
return error if incorrect filetype
"""
else:
# print('\nERROR:\nInvalid file extension')
return 0
"""
convert a bytearray slice to a number, little endian
"""
def ba_to_b(b):
total = 0
for i in range(len(b)):
total += 256**i * b[i]
return total
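# e.g. ba_to_b(bytearray([0x34, 0x12])) returns 0x1234 (4660), reading the
# bytes little endian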