summaryrefslogtreecommitdiff
path: root/utils/raspberrypi/ctt/ctt_alsc.py
blob: 89e86469a78e9c9fac7479f56be0d8adcbf16d47 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi (Trading) Limited
#
# ctt_alsc.py - camera tuning tool for ALSC (auto lens shading correction)

from ctt_image_load import *
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D


"""
preform alsc calibration on a set of images
"""
def alsc_all(Cam, do_alsc_colour, plot):
    """
    Perform ALSC calibration over every image in Cam.imgs_alsc.

    Cam: camera object carrying the image list and an accumulating text
        log (Cam.log)
    do_alsc_colour: when true, also build per-colour-temperature red and
        blue shading tables; otherwise only the luminance table is made
    plot: forwarded to alsc() to show debug surface plots per image

    Returns (cal_cr_list, cal_cb_list, lum_lut, av_corn):
        cal_cr_list / cal_cb_list: lists of {'ct': int, 'table': [...]}
            dicts, one per distinct colour temperature (None when
            do_alsc_colour is false)
        lum_lut: luminance gain table, a flattened 16x12 grid rounded
            to 3 decimal places
        av_corn: mean of the four corner gains of lum_lut, used later
            for the lsc gain calculation

    NOTE(review): assumes Cam.imgs_alsc is non-empty — 'size' below is
    only bound inside the loop, so an empty list raises NameError.
    """
    imgs_alsc = Cam.imgs_alsc
    """
    create list of colour temperatures and associated calibration tables
    """
    list_col = []
    list_cr = []
    list_cb = []
    list_cg = []
    for Img in imgs_alsc:
        col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
        list_col.append(col)
        list_cr.append(cr)
        list_cb.append(cb)
        list_cg.append(cg)
        Cam.log += '\n'
    Cam.log += '\nFinished processing images'
    # 'size' comes from the last image processed; all images are assumed
    # to share the same dimensions.
    w, h, dx, dy = size
    Cam.log += '\nChannel dimensions: w = {}  h = {}'.format(int(w), int(h))
    Cam.log += '\n16x12 grid rectangle size: w = {} h = {}'.format(dx, dy)

    """
    convert to numpy array for data manipulation
    """
    list_col = np.array(list_col)
    list_cr = np.array(list_cr)
    list_cb = np.array(list_cb)
    list_cg = np.array(list_cg)

    cal_cr_list = []
    cal_cb_list = []

    """
    only do colour calculations if required
    """
    if do_alsc_colour:
        Cam.log += '\nALSC colour tables'
        for ct in sorted(set(list_col)):
            Cam.log += '\nColour temperature: {} K'.format(ct)
            """
            average tables for the same colour temperature
            """
            indices = np.where(list_col == ct)
            ct = int(ct)
            t_r = np.mean(list_cr[indices], axis=0)
            t_b = np.mean(list_cb[indices], axis=0)
            """
            force numbers to be stored to 3dp.... :(
            """
            # Values sitting exactly on a rounding boundary (x.xxx5) are
            # nudged off it by 0.001 before rounding, so the stored 3dp
            # value is stable.
            t_r = np.where((100*t_r) % 1 <= 0.05, t_r+0.001, t_r)
            t_b = np.where((100*t_b) % 1 <= 0.05, t_b+0.001, t_b)
            t_r = np.where((100*t_r) % 1 >= 0.95, t_r-0.001, t_r)
            t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
            t_r = np.round(t_r, 3)
            t_b = np.round(t_b, 3)
            # corners of the flattened 16x12 grid: top-left, top-right,
            # bottom-right, bottom-left
            r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
            b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
            # centre value: mean of the four central patches of the grid
            r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
            r_cen = round(r_cen/4, 3)
            b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
            b_cen = round(b_cen/4, 3)
            Cam.log += '\nRed table corners: {}'.format(r_corners)
            Cam.log += '\nRed table centre: {}'.format(r_cen)
            Cam.log += '\nBlue table corners: {}'.format(b_corners)
            Cam.log += '\nBlue table centre: {}'.format(b_cen)
            cr_dict = {
                'ct': ct,
                'table': list(t_r)
            }
            cb_dict = {
                'ct': ct,
                'table': list(t_b)
            }
            cal_cr_list.append(cr_dict)
            cal_cb_list.append(cb_dict)
            Cam.log += '\n'
    else:
        cal_cr_list, cal_cb_list = None, None

    """
    average all values for luminance shading and return one table for all temperatures
    """
    lum_lut = np.mean(list_cg, axis=0)
    # same rounding-boundary nudge as for the colour tables above
    lum_lut = np.where((100*lum_lut) % 1 <= 0.05, lum_lut+0.001, lum_lut)
    lum_lut = np.where((100*lum_lut) % 1 >= 0.95, lum_lut-0.001, lum_lut)
    lum_lut = list(np.round(lum_lut, 3))

    """
    calculate average corner for lsc gain calculation further on
    """
    corners = (lum_lut[0], lum_lut[15], lum_lut[-1], lum_lut[-16])
    Cam.log += '\nLuminance table corners: {}'.format(corners)
    l_cen = lum_lut[5*16+7]+lum_lut[5*16+8]+lum_lut[6*16+7]+lum_lut[6*16+8]
    l_cen = round(l_cen/4, 3)
    Cam.log += '\nLuminance table centre: {}'.format(l_cen)
    av_corn = np.sum(corners)/4

    return cal_cr_list, cal_cb_list, lum_lut, av_corn


"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
def alsc(Cam, Img, do_alsc_colour, plot=False):
    """
    Calculate lens shading tables for a single image.

    For each patch of a 16x12 grid, computes the g/r and g/b ratios
    (colour shading tables) and the inverse green intensity (luminance
    shading table), after subtracting the black level.  Tables are
    median-blurred to suppress single-patch peaks.

    Cam: camera object with an accumulating text log (Cam.log)
    Img: image object providing channels, order, dimensions, name,
        colour temperature and black level
    do_alsc_colour: when false, only the luminance table is computed
    plot: show a debug 3D surface plot of the table(s)

    Returns (colour_temperature, cr, cb, cg, (w, h, dx, dy)); cr and cb
    are None when do_alsc_colour is false.  cr/cb/cg are flattened 16x12
    grids; w, h are the per-channel dimensions and dx, dy the patch size.
    """
    Cam.log += '\nProcessing image: ' + Img.name
    """
    get channel in correct order
    """
    channels = [Img.channels[i] for i in Img.order]
    """
    calculate size of single rectangle.
    -(-(w-1)//16) is a ceiling division. w-1 is to deal robustly with the
    case where w is a multiple of 16 (and h of 12): the right and bottom
    borders will not necessarily hold full-size rectangles.
    """
    w, h = Img.w/2, Img.h/2
    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
    """
    average the two green channels into one
    """
    # Fixed: channels[1:2] previously selected only one of the two green
    # channels, so nothing was actually averaged.
    av_ch_g = np.mean((channels[1:3]), axis=0)
    if do_alsc_colour:
        """
        obtain 16x12 grid of intensities for each channel and subtract black level
        """
        g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
        r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
        b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
        """
        calculate ratios as 32 bit in order to be supported by medianBlur function
        """
        cr = np.reshape(g/r, (12, 16)).astype('float32')
        cb = np.reshape(g/b, (12, 16)).astype('float32')
        cg = np.reshape(1/g, (12, 16)).astype('float32')
        """
        median blur to remove peaks and save as float 64
        """
        cr = cv2.medianBlur(cr, 3).astype('float64')
        cb = cv2.medianBlur(cb, 3).astype('float64')
        cg = cv2.medianBlur(cg, 3).astype('float64')
        # normalise luminance table so its minimum gain is 1
        cg = cg/np.min(cg)

        """
        debugging code showing 2D surface plot of vignetting. Quite useful for
        for sanity check
        """
        if plot:
            hf = plt.figure(figsize=(8, 8))
            ha = hf.add_subplot(311, projection='3d')
            """
            note Y is plotted as -Y so plot has same axes as image
            """
            X, Y = np.meshgrid(range(16), range(12))
            ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
            ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
            hb = hf.add_subplot(312, projection='3d')
            hb.plot_surface(X, -Y, cb, cmap=cm.coolwarm, linewidth=0)
            hb.set_title('cb')
            hc = hf.add_subplot(313, projection='3d')
            hc.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
            hc.set_title('g')
            # print(Img.str)
            plt.show()

        return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)

    else:
        """
        only perform calculations for luminance shading
        """
        g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
        cg = np.reshape(1/g, (12, 16)).astype('float32')
        cg = cv2.medianBlur(cg, 3).astype('float64')
        cg = cg/np.min(cg)

        if plot:
            # Fixed: 'figssize' -> 'figsize', 'np.meashgrid' -> 'np.meshgrid',
            # and .format() was applied to the return value of set_title()
            # instead of to the title string itself.
            hf = plt.figure(figsize=(8, 8))
            ha = hf.add_subplot(1, 1, 1, projection='3d')
            X, Y = np.meshgrid(range(16), range(12))
            ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
            ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg'.format(Img.str))
            plt.show()

        return Img.col, None, None, cg.flatten(), (w, h, dx, dy)


"""
Compresses channel down to a 16x12 grid
"""
def get_16x12_grid(chan, dx, dy):
    """
    Average the channel *chan* down to a 16x12 grid of patch means.

    Interior patches are dx x dy pixels; the rightmost column and the
    bottom row of patches absorb whatever remains of the image, since
    the borders need not hold full-size rectangles.  Returns the grid
    as a flat np.array, ready for further manipulation.
    """
    height, width = chan.shape
    # patch boundaries: the final edge snaps to the image border
    y_edges = [dy * row for row in range(12)] + [height]
    x_edges = [dx * col for col in range(16)] + [width]
    means = [
        np.mean(chan[y_edges[row]:y_edges[row + 1],
                     x_edges[col]:x_edges[col + 1]])
        for row in range(12)
        for col in range(16)
    ]
    return np.array(means)


"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
def get_sigma(Cam, cal_cr_list, cal_cb_list):
    """
    Obtain sigmas for red and blue, effectively a measure of the 'error'.

    Provided the colour alsc tables were generated for at least two
    different colour temperatures, a sigma is computed for every pair of
    calibrations adjacent in colour temperature, and the worst (largest)
    red and blue values are returned.  With fewer than two tables a
    default of 0.005 is used for both.
    """
    Cam.log += '\nCalculating sigmas'
    # colour temperatures of the calibration tables, in list order
    cts = [cal['ct'] for cal in cal_cr_list]
    # print(cts)
    sigma_rs = []
    sigma_bs = []
    # walk adjacent pairs of colour temperatures
    for idx, (ct_lo, ct_hi) in enumerate(zip(cts, cts[1:])):
        sig_r = calc_sigma(cal_cr_list[idx]['table'], cal_cr_list[idx + 1]['table'])
        sig_b = calc_sigma(cal_cb_list[idx]['table'], cal_cb_list[idx + 1]['table'])
        sigma_rs.append(sig_r)
        sigma_bs.append(sig_b)
        Cam.log += '\nColour temperature interval {} - {} K'.format(ct_lo, ct_hi)
        Cam.log += '\nSigma red: {}'.format(sig_r)
        Cam.log += '\nSigma blue: {}'.format(sig_b)

    # worst sigmas, not necessarily from the same temperature interval;
    # fall back to 0.005 when no interval exists
    sigma_r = max(sigma_rs, default=0.005)
    sigma_b = max(sigma_bs, default=0.005)
    Cam.log += '\nMaximum sigmas: Red = {} Blue = {}'.format(sigma_r, sigma_b)

    # print(sigma_rs, sigma_bs)
    # print(sigma_r, sigma_b)
    return sigma_r, sigma_b


"""
calculate sigma from two adjacent gain tables
"""
def calc_sigma(g1, g2):
    """
    Calculate sigma from two adjacent gain tables.

    Both tables are flattened 16x12 grids.  Their element-wise ratio is
    normalised so its mean is at least 1, then each interior patch is
    compared against its four direct neighbours (border patches are not
    counted).  Returns the mean of these per-patch average differences,
    rounded to 5 decimal places.
    """
    table_a = np.reshape(g1, (12, 16))
    table_b = np.reshape(g2, (12, 16))
    # ratio of the two tables; flip it so the mean gain is >= 1
    ratio = table_a / table_b
    if np.mean(ratio) < 1:
        ratio = 1 / ratio
    # average absolute difference between each interior patch and its
    # up/down/left/right neighbours
    diffs = [
        (np.abs(ratio[y][x] - ratio[y - 1][x])
         + np.abs(ratio[y][x] - ratio[y + 1][x])
         + np.abs(ratio[y][x] - ratio[y][x - 1])
         + np.abs(ratio[y][x] - ratio[y][x + 1])) / 4
        for y in range(1, 11)
        for x in range(1, 15)
    ]
    return np.round(np.mean(diffs), 5)
GC has converged. // Insist AGC is steady for MAX_LOCK_COUNT // frames before we say we are "locked". // (The hard-coded constants may need to // become customisable.) if (status_.target_exposure_value) { #define MAX_LOCK_COUNT 3 double err = 0.10 * status_.target_exposure_value + 200; if (actual_exposure < status_.target_exposure_value + err && actual_exposure > status_.target_exposure_value - err) lock_count_ = std::min(lock_count + 1, MAX_LOCK_COUNT); else if (actual_exposure < status_.target_exposure_value + 1.5 * err && actual_exposure > status_.target_exposure_value - 1.5 * err) lock_count_ = lock_count; LOG(RPiAgc, Debug) << "Lock count: " << lock_count_; } } } else LOG(RPiAgc, Debug) << Name() << ": no device metadata"; status_.locked = lock_count_ >= MAX_LOCK_COUNT; image_metadata->Set("agc.status", status_); } } void Agc::Process(StatisticsPtr &stats, Metadata *image_metadata) { frame_count_++; // First a little bit of housekeeping, fetching up-to-date settings and // configuration, that kind of thing. housekeepConfig(); // Get the current exposure values for the frame that's just arrived. fetchCurrentExposure(image_metadata); // Compute the total gain we require relative to the current exposure. double gain, target_Y; computeGain(stats.get(), image_metadata, gain, target_Y); // Now compute the target (final) exposure which we think we want. computeTargetExposure(gain); // Some of the exposure has to be applied as digital gain, so work out // what that is. This function also tells us whether it's decided to // "desaturate" the image more quickly. bool desaturate = applyDigitalGain(gain, target_Y); // The results have to be filtered so as not to change too rapidly. filterExposure(desaturate); // The last thing is to divide up the exposure value into a shutter time // and analogue_gain, according to the current exposure mode. divideUpExposure(); // Finally advertise what we've done. 
writeAndFinish(image_metadata, desaturate); } static void copy_string(std::string const &s, char *d, size_t size) { size_t length = s.copy(d, size - 1); d[length] = '\0'; } void Agc::housekeepConfig() { // First fetch all the up-to-date settings, so no one else has to do it. status_.ev = ev_; status_.fixed_shutter = fixed_shutter_; status_.fixed_analogue_gain = fixed_analogue_gain_; status_.flicker_period = flicker_period_; LOG(RPiAgc, Debug) << "ev " << status_.ev << " fixed_shutter " << status_.fixed_shutter << " fixed_analogue_gain " << status_.fixed_analogue_gain; // Make sure the "mode" pointers point to the up-to-date things, if // they've changed. if (strcmp(metering_mode_name_.c_str(), status_.metering_mode)) { auto it = config_.metering_modes.find(metering_mode_name_); if (it == config_.metering_modes.end()) throw std::runtime_error("Agc: no metering mode " + metering_mode_name_); metering_mode_ = &it->second; copy_string(metering_mode_name_, status_.metering_mode, sizeof(status_.metering_mode)); } if (strcmp(exposure_mode_name_.c_str(), status_.exposure_mode)) { auto it = config_.exposure_modes.find(exposure_mode_name_); if (it == config_.exposure_modes.end()) throw std::runtime_error("Agc: no exposure profile " + exposure_mode_name_); exposure_mode_ = &it->second; copy_string(exposure_mode_name_, status_.exposure_mode, sizeof(status_.exposure_mode)); } if (strcmp(constraint_mode_name_.c_str(), status_.constraint_mode)) { auto it = config_.constraint_modes.find(constraint_mode_name_); if (it == config_.constraint_modes.end()) throw std::runtime_error("Agc: no constraint list " + constraint_mode_name_); constraint_mode_ = &it->second; copy_string(constraint_mode_name_, status_.constraint_mode, sizeof(status_.constraint_mode)); } LOG(RPiAgc, Debug) << "exposure_mode " << exposure_mode_name_ << " constraint_mode " << constraint_mode_name_ << " metering_mode " << metering_mode_name_; } void Agc::fetchCurrentExposure(Metadata *image_metadata) { 
std::unique_lock<Metadata> lock(*image_metadata); DeviceStatus *device_status = image_metadata->GetLocked<DeviceStatus>("device.status"); if (!device_status) throw std::runtime_error("Agc: no device metadata"); current_.shutter = device_status->shutter_speed; current_.analogue_gain = device_status->analogue_gain; AgcStatus *agc_status = image_metadata->GetLocked<AgcStatus>("agc.status"); current_.total_exposure = agc_status ? agc_status->total_exposure_value : 0; current_.total_exposure_no_dg = current_.shutter * current_.analogue_gain; } void Agc::fetchAwbStatus(Metadata *image_metadata) { awb_.gain_r = 1.0; // in case not found in metadata awb_.gain_g = 1.0; awb_.gain_b = 1.0; if (image_metadata->Get("awb.status", awb_) != 0) LOG(RPiAgc, Warning) << "Agc: no AWB status found"; } static double compute_initial_Y(bcm2835_isp_stats *stats, AwbStatus const &awb, double weights[], double gain) { bcm2835_isp_stats_region *regions = stats->agc_stats; // Note how the calculation below means that equal weights give you // "average" metering (i.e. all pixels equally important). double R_sum = 0, G_sum = 0, B_sum = 0, pixel_sum = 0; for (int i = 0; i < AGC_STATS_SIZE; i++) { double counted = regions[i].counted; double r_sum = std::min(regions[i].r_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted); double g_sum = std::min(regions[i].g_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted); double b_sum = std::min(regions[i].b_sum * gain, ((1 << PIPELINE_BITS) - 1) * counted); R_sum += r_sum * weights[i]; G_sum += g_sum * weights[i]; B_sum += b_sum * weights[i]; pixel_sum += counted * weights[i]; } if (pixel_sum == 0.0) { LOG(RPiAgc, Warning) << "compute_initial_Y: pixel_sum is zero"; return 0; } double Y_sum = R_sum * awb.gain_r * .299 + G_sum * awb.gain_g * .587 + B_sum * awb.gain_b * .114; return Y_sum / pixel_sum / (1 << PIPELINE_BITS); } // We handle extra gain through EV by adjusting our Y targets. 
However, you // simply can't monitor histograms once they get very close to (or beyond!) // saturation, so we clamp the Y targets to this value. It does mean that EV // increases don't necessarily do quite what you might expect in certain // (contrived) cases. #define EV_GAIN_Y_TARGET_LIMIT 0.9 static double constraint_compute_gain(AgcConstraint &c, Histogram &h, double lux, double ev_gain, double &target_Y) { target_Y = c.Y_target.Eval(c.Y_target.Domain().Clip(lux)); target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain); double iqm = h.InterQuantileMean(c.q_lo, c.q_hi); return (target_Y * NUM_HISTOGRAM_BINS) / iqm; } void Agc::computeGain(bcm2835_isp_stats *statistics, Metadata *image_metadata, double &gain, double &target_Y) { struct LuxStatus lux = {}; lux.lux = 400; // default lux level to 400 in case no metadata found if (image_metadata->Get("lux.status", lux) != 0) LOG(RPiAgc, Warning) << "Agc: no lux level found"; Histogram h(statistics->hist[0].g_hist, NUM_HISTOGRAM_BINS); double ev_gain = status_.ev * config_.base_ev; // The initial gain and target_Y come from some of the regions. After // that we consider the histogram constraints. target_Y = config_.Y_target.Eval(config_.Y_target.Domain().Clip(lux.lux)); target_Y = std::min(EV_GAIN_Y_TARGET_LIMIT, target_Y * ev_gain); // Do this calculation a few times as brightness increase can be // non-linear when there are saturated regions. 
gain = 1.0; for (int i = 0; i < 8; i++) { double initial_Y = compute_initial_Y(statistics, awb_, metering_mode_->weights, gain); double extra_gain = std::min(10.0, target_Y / (initial_Y + .001)); gain *= extra_gain; LOG(RPiAgc, Debug) << "Initial Y " << initial_Y << " target " << target_Y << " gives gain " << gain; if (extra_gain < 1.01) // close enough break; } for (auto &c : *constraint_mode_) { double new_target_Y; double new_gain = constraint_compute_gain(c, h, lux.lux, ev_gain, new_target_Y); LOG(RPiAgc, Debug) << "Constraint has target_Y " << new_target_Y << " giving gain " << new_gain; if (c.bound == AgcConstraint::Bound::LOWER && new_gain > gain) { LOG(RPiAgc, Debug) << "Lower bound constraint adopted"; gain = new_gain, target_Y = new_target_Y; } else if (c.bound == AgcConstraint::Bound::UPPER && new_gain < gain) { LOG(RPiAgc, Debug) << "Upper bound constraint adopted"; gain = new_gain, target_Y = new_target_Y; } } LOG(RPiAgc, Debug) << "Final gain " << gain << " (target_Y " << target_Y << " ev " << status_.ev << " base_ev " << config_.base_ev << ")"; } void Agc::computeTargetExposure(double gain) { if (status_.fixed_shutter != 0.0 && status_.fixed_analogue_gain != 0.0) { // When ag and shutter are both fixed, we need to drive the // total exposure so that we end up with a digital gain of at least // 1/min_colour_gain. Otherwise we'd desaturate channels causing // white to go cyan or magenta. double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 }); ASSERT(min_colour_gain != 0.0); target_.total_exposure = status_.fixed_shutter * status_.fixed_analogue_gain / min_colour_gain; } else { // The statistics reflect the image without digital gain, so the final // total exposure we're aiming for is: target_.total_exposure = current_.total_exposure_no_dg * gain; // The final target exposure is also limited to what the exposure // mode allows. double max_total_exposure = (status_.fixed_shutter != 0.0 ? 
status_.fixed_shutter : exposure_mode_->shutter.back()) * (status_.fixed_analogue_gain != 0.0 ? status_.fixed_analogue_gain : exposure_mode_->gain.back()); target_.total_exposure = std::min(target_.total_exposure, max_total_exposure); } LOG(RPiAgc, Debug) << "Target total_exposure " << target_.total_exposure; } bool Agc::applyDigitalGain(double gain, double target_Y) { double min_colour_gain = std::min({ awb_.gain_r, awb_.gain_g, awb_.gain_b, 1.0 }); ASSERT(min_colour_gain != 0.0); double dg = 1.0 / min_colour_gain; // I think this pipeline subtracts black level and rescales before we // get the stats, so no need to worry about it. LOG(RPiAgc, Debug) << "after AWB, target dg " << dg << " gain " << gain << " target_Y " << target_Y; // Finally, if we're trying to reduce exposure but the target_Y is // "close" to 1.0, then the gain computed for that constraint will be // only slightly less than one, because the measured Y can never be // larger than 1.0. When this happens, demand a large digital gain so // that the exposure can be reduced, de-saturating the image much more // quickly (and we then approach the correct value more quickly from // below). bool desaturate = target_Y > config_.fast_reduce_threshold && gain < sqrt(target_Y); if (desaturate) dg /= config_.fast_reduce_threshold; LOG(RPiAgc, Debug) << "Digital gain " << dg << " desaturate? " << desaturate; target_.total_exposure_no_dg = target_.total_exposure / dg; LOG(RPiAgc, Debug) << "Target total_exposure_no_dg " << target_.total_exposure_no_dg; return desaturate; } void Agc::filterExposure(bool desaturate) { double speed = frame_count_ <= config_.startup_frames ? 1.0 : config_.speed; if (filtered_.total_exposure == 0.0) { filtered_.total_exposure = target_.total_exposure; filtered_.total_exposure_no_dg = target_.total_exposure_no_dg; } else { // If close to the result go faster, to save making so many // micro-adjustments on the way. (Make this customisable?) 
if (filtered_.total_exposure < 1.2 * target_.total_exposure && filtered_.total_exposure > 0.8 * target_.total_exposure) speed = sqrt(speed); filtered_.total_exposure = speed * target_.total_exposure + filtered_.total_exposure * (1.0 - speed); // When desaturing, take a big jump down in exposure_no_dg, // which we'll hide with digital gain. if (desaturate) filtered_.total_exposure_no_dg = target_.total_exposure_no_dg; else filtered_.total_exposure_no_dg = speed * target_.total_exposure_no_dg + filtered_.total_exposure_no_dg * (1.0 - speed); } // We can't let the no_dg exposure deviate too far below the // total exposure, as there might not be enough digital gain available // in the ISP to hide it (which will cause nasty oscillation). if (filtered_.total_exposure_no_dg < filtered_.total_exposure * config_.fast_reduce_threshold) filtered_.total_exposure_no_dg = filtered_.total_exposure * config_.fast_reduce_threshold; LOG(RPiAgc, Debug) << "After filtering, total_exposure " << filtered_.total_exposure << " no dg " << filtered_.total_exposure_no_dg; } void Agc::divideUpExposure() { // Sending the fixed shutter/gain cases through the same code may seem // unnecessary, but it will make more sense when extend this to cover // variable aperture. double exposure_value = filtered_.total_exposure_no_dg; double shutter_time, analogue_gain; shutter_time = status_.fixed_shutter != 0.0 ? status_.fixed_shutter : exposure_mode_->shutter[0]; analogue_gain = status_.fixed_analogue_gain != 0.0 ? 
status_.fixed_analogue_gain : exposure_mode_->gain[0]; if (shutter_time * analogue_gain < exposure_value) { for (unsigned int stage = 1; stage < exposure_mode_->gain.size(); stage++) { if (status_.fixed_shutter == 0.0) { if (exposure_mode_->shutter[stage] * analogue_gain >= exposure_value) { shutter_time = exposure_value / analogue_gain; break; } shutter_time = exposure_mode_->shutter[stage]; } if (status_.fixed_analogue_gain == 0.0) { if (exposure_mode_->gain[stage] * shutter_time >= exposure_value) { analogue_gain = exposure_value / shutter_time; break; } analogue_gain = exposure_mode_->gain[stage]; } } } LOG(RPiAgc, Debug) << "Divided up shutter and gain are " << shutter_time << " and " << analogue_gain; // Finally adjust shutter time for flicker avoidance (require both // shutter and gain not to be fixed). if (status_.fixed_shutter == 0.0 && status_.fixed_analogue_gain == 0.0 && status_.flicker_period != 0.0) { int flicker_periods = shutter_time / status_.flicker_period; if (flicker_periods > 0) { double new_shutter_time = flicker_periods * status_.flicker_period; analogue_gain *= shutter_time / new_shutter_time; // We should still not allow the ag to go over the // largest value in the exposure mode. Note that this // may force more of the total exposure into the digital // gain as a side-effect. analogue_gain = std::min(analogue_gain, exposure_mode_->gain.back()); shutter_time = new_shutter_time; } LOG(RPiAgc, Debug) << "After flicker avoidance, shutter " << shutter_time << " gain " << analogue_gain; } filtered_.shutter = shutter_time; filtered_.analogue_gain = analogue_gain; } void Agc::writeAndFinish(Metadata *image_metadata, bool desaturate) { status_.total_exposure_value = filtered_.total_exposure; status_.target_exposure_value = desaturate ? 0 : target_.total_exposure_no_dg; status_.shutter_time = filtered_.shutter; status_.analogue_gain = filtered_.analogue_gain; // Write to metadata as well, in case anyone wants to update the camera // immediately. 
image_metadata->Set("agc.status", status_); LOG(RPiAgc, Debug) << "Output written, total exposure requested is " << filtered_.total_exposure; LOG(RPiAgc, Debug) << "Camera exposure update: shutter time " << filtered_.shutter << " analogue gain " << filtered_.analogue_gain; } // Register algorithm with the system. static Algorithm *Create(Controller *controller) { return (Algorithm *)new Agc(controller); } static RegisterAlgorithm reg(NAME, &Create);