# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for AWB

from ctt_image_load import *
import matplotlib.pyplot as plt
from bisect import bisect_left
from scipy.optimize import fmin


"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
    imgs = Cam.imgs
    """
    condense alsc calibration tables into one dictionary
    """
    if cal_cr_list is None:
        colour_cals = None
    else:
        colour_cals = {}
        for cr, cb in zip(cal_cr_list, cal_cb_list):
            cr_tab = cr['table']
            cb_tab = cb['table']
            """
            normalise tables so min value is 1
            """
            cr_tab = cr_tab/np.min(cr_tab)
            cb_tab = cb_tab/np.min(cb_tab)
            colour_cals[cr['ct']] = [cr_tab, cb_tab]
    """
    obtain data from greyscale macbeth patches
    """
    rb_raw = []
    rbs_hat = []
    for Img in imgs:
        Cam.log += '\nProcessing '+Img.name
        """
        get greyscale patches with alsc applied if alsc enabled.
        Note: if alsc is disabled then colour_cals will be set to None and
        the function will just return the greyscale patches
        """
        r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
        """
        calculate ratio of r, b to g
        """
        r_g = np.mean(r_patchs/g_patchs)
        b_g = np.mean(b_patchs/g_patchs)
        Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
        """
        The curve tends to be better behaved in so-called hatspace.
        R, B, G represent the individual channels. The colour curve is
        plotted in r, b space, where:
            r = R/G
            b = B/G
        This will be referred to as dehatspace... (sorry)
        Hatspace is defined as:
            r_hat = R/(R+B+G)
            b_hat = B/(R+B+G)
        To convert from dehatspace to hatspace (hat operation):
            r_hat = r/(1+r+b)
            b_hat = b/(1+r+b)
        To convert from hatspace to dehatspace (dehat operation):
            r = r_hat/(1-r_hat-b_hat)
            b = b_hat/(1-r_hat-b_hat)
        Proof is left as an exercise to the reader...
        Throughout the code, r and b are sometimes referred to as r_g and b_g
        as a reminder that they are ratios
        """
        r_g_hat = r_g/(1+r_g+b_g)
        b_g_hat = b_g/(1+r_g+b_g)
        Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
        rbs_hat.append((r_g_hat, b_g_hat, Img.col))
        rb_raw.append((r_g, b_g))
        Cam.log += '\n'
    Cam.log += '\nFinished processing images'
    """
    sort all lists simultaneously by r_hat
    """
    rbs_zip = list(zip(rbs_hat, rb_raw))
    rbs_zip.sort(key=lambda x: x[0][0])
    rbs_hat, rb_raw = list(zip(*rbs_zip))
    """
    unzip tuples ready for processing
    """
    rbs_hat = list(zip(*rbs_hat))
    rb_raw = list(zip(*rb_raw))
    """
    fit quadratic to r_g_hat and b_g_hat
    """
    a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
    Cam.log += '\nFit quadratic curve in hatspace'
    """
    the algorithm now approximates the shortest distance from each point to
    the curve in dehatspace. Since the fit is done in hatspace, it is easier
    to find the actual shortest distance in hatspace and use the projection
    back into dehatspace as an overestimate.
    The distance will be used for two things:
        1) In the case that colour temperature does not strictly decrease
        with increasing r/g, the closest point to the line will be chosen
        out of an increasing pair of colours.

        2) To calculate transverse negative and positive, the maximum
        positive and negative distances from the line are chosen. This
        benefits from the overestimate as the transverse pos/neg are upper
        bound values.
    """
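    """
    worked example of the hat/dehat round trip (values are illustrative
    only): r = 2.0, b = 0.5 gives r_hat = 2.0/3.5 = 0.5714 and
    b_hat = 0.5/3.5 = 0.1429; dehatting then returns
    r = 0.5714/(1 - 0.5714 - 0.1429) = 2.0
    """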
""" """ define fit function """ def f(x): return a*x**2 + b*x + c """ iterate over points (R, B are x and y coordinates of points) and calculate distance to line in dehatspace """ dists = [] for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])): """ define function to minimise as square distance between datapoint and point on curve. Squaring is monotonic so minimising radius squared is equivalent to minimising radius """ def f_min(x): y = f(x) return((x-R)**2+(y-B)**2) """ perform optimisation with scipy.optmisie.fmin """ x_hat = fmin(f_min, R, disp=0)[0] y_hat = f(x_hat) """ dehat """ x = x_hat/(1-x_hat-y_hat) y = y_hat/(1-x_hat-y_hat) rr = R/(1-R-B) bb = B/(1-R-B) """ calculate euclidean distance in dehatspace """ dist = ((x-rr)**2+(y-bb)**2)**0.5 """ return negative if point is below the fit curve """ if (x+y) > (rr+bb): dist *= -1 dists.append(dist) Cam.log += '\nFound closest point on fit line to each point in dehatspace' """ calculate wiggle factors in awb. 10% added since this is an upper bound """ transverse_neg = - np.min(dists) * 1.1 transverse_pos = np.max(dists) * 1.1 Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos) Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg) """ set minimum transverse wiggles to 0.1 . Wiggle factors dictate how far off of the curve the algorithm searches. 0.1 is a suitable minimum that gives better results for lighting conditions not within calibration dataset. Anything less will generalise poorly. """ if transverse_pos < 0.01: transverse_pos = 0.01 Cam.log += '\nForced transverse pos to 0.01' if transverse_neg < 0.01: transverse_neg = 0.01 Cam.log += '\nForced transverse neg to 0.01' """ generate new b_hat values at each r_hat according to fit """ r_hat_fit = np.array(rbs_hat[0]) b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c """ transform from hatspace to dehatspace """ r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit) b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit) c_fit = np.round(rbs_hat[2], 0) """ round to 4dp """ r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit) r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit) b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit) b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit) r_fit = np.round(r_fit, 4) b_fit = np.round(b_fit, 4) """ The following code ensures that colour temperature decreases with increasing r/g """ """ iterate backwards over list for easier indexing """ i = len(c_fit) - 1 while i > 0: if c_fit[i] > c_fit[i-1]: Cam.log += '\nColour temperature increase found\n' Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1]) Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i]) """ if colour temperature increases then discard point furthest from the transformed fit (dehatspace) """ error_1 = abs(dists[i-1]) error_2 = abs(dists[i]) Cam.log += '\nDistances from fit:\n' Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1) Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2) """ find bad index note that in python false = 0 and true = 1 """ bad = i - (error_1 < error_2) Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad]) Cam.log += 'it is furthest from fit' """ delete bad point """ r_fit = np.delete(r_fit, bad) b_fit = np.delete(b_fit, bad) c_fit = np.delete(c_fit, bad).astype(np.uint16) """ note that if a point has been discarded then the length has decreased by one, meaning that decreasing the index by one will reassess the kept point against the next point. 
    """
    return formatted ct curve, ordered by increasing colour temperature
    """
    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
    Cam.log += '\nFinal CT curve:'
    for i in range(len(ct_curve)//3):
        j = 3*i
        Cam.log += '\n  ct: {} '.format(ct_curve[j])
        Cam.log += ' r: {} '.format(ct_curve[j+1])
        Cam.log += ' b: {} '.format(ct_curve[j+2])
    """
    plotting code for debug
    """
    if plot:
        x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
        y = a*x**2 + b*x + c
        plt.subplot(2, 1, 1)
        plt.title('hatspace')
        plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
        plt.plot(x, y, color='green', ls='-')
        plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
        for i, ct in enumerate(rbs_hat[2]):
            plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
        plt.xlabel('$\\hat{r}$')
        plt.ylabel('$\\hat{b}$')
        """
        optionally set axes equal to shortest distance so the line really
        does look perpendicular and everybody is happy
        """
        # ax = plt.gca()
        # ax.set_aspect('equal')
        plt.grid()
        plt.subplot(2, 1, 2)
        plt.title('dehatspace - indoors?')
        plt.plot(r_fit, b_fit, color='blue')
        plt.scatter(rb_raw[0], rb_raw[1], color='green')
        plt.scatter(r_fit, b_fit, color='red')
        for i, ct in enumerate(c_fit):
            plt.annotate(str(ct), (r_fit[i], b_fit[i]))
        plt.xlabel('$r$')
        plt.ylabel('$b$')
        """
        optionally set axes equal to shortest distance so the line really
        does look perpendicular and everybody is happy
        """
        # ax = plt.gca()
        # ax.set_aspect('equal')
        plt.subplots_adjust(hspace=0.5)
        plt.grid()
        plt.show()
    """
    end of plotting code
    """
    return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))


"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
    """
    get patch centre coordinates, image colour and the actual
    patches for each channel, remembering to subtract the black level.
    If grey then only greyscale patches are considered
    """
    grid_w, grid_h = grid_size
    if grey:
        cen_coords = Img.cen_coords[3::4]
        col = Img.col
        patches = [np.array(Img.patches[i]) for i in Img.order]
        r_patchs = patches[0][3::4] - Img.blacklevel_16
        b_patchs = patches[3][3::4] - Img.blacklevel_16
        """
        note the two green channels are averaged
        """
        g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
    else:
        cen_coords = Img.cen_coords
        col = Img.col
        patches = [np.array(Img.patches[i]) for i in Img.order]
        r_patchs = patches[0] - Img.blacklevel_16
        b_patchs = patches[3] - Img.blacklevel_16
        g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
    if colour_cals is None:
        return r_patchs, b_patchs, g_patchs
    """
    find where image colour fits in alsc colour calibration tables
    """
    cts = list(colour_cals.keys())
    pos = bisect_left(cts, col)
    """
    if img colour is below the minimum or above the maximum alsc calibration
    colour, simply pick the extreme closest to the img colour
    """
    if pos % len(cts) == 0:
        """
        this works because -0 = 0 = first and -1 = last index
        """
        col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
        """
        else, perform linear interpolation between existing alsc colour
        calibration tables
        """
    else:
        bef = cts[pos-1]
        aft = cts[pos]
        da = col-bef
        db = aft-col
        bef_tabs = np.array(colour_cals[bef])
        aft_tabs = np.array(colour_cals[aft])
        col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
    col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
    """
    calculate dx, dy used to index into the alsc table
    """
    w, h = Img.w/2, Img.h/2
    dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
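    """
    the double negation above is ceiling division: -(-(w-1)//grid_w) equals
    ceil((w-1)/grid_w), e.g. for an illustrative w = 1640 and grid_w = 16,
    -(-1639//16) = -(-103) = 103
    """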
    """
    make list of pairs of gains for each patch by selecting the correct
    value in the alsc colour calibration table
    """
    patch_gains = []
    for cen in cen_coords:
        x, y = cen[0]//dx, cen[1]//dy
        # We could probably do with some better spatial interpolation here?
        col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
        patch_gains.append(col_gains)
    """
    multiply the r and b channels in each patch by the respective gain,
    finally performing the alsc colour correction
    """
    for i, gains in enumerate(patch_gains):
        r_patchs[i] = r_patchs[i] * gains[0]
        b_patchs[i] = b_patchs[i] * gains[1]
    """
    return greyscale patches, g channel and corrected r, b channels
    """
    return r_patchs, b_patchs, g_patchs
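
"""
minimal standalone sketch of the hatspace quadratic fit and dehat transform
used in awb() above, run on synthetic, purely illustrative data (no Cam or
Img objects involved)
"""
if __name__ == '__main__':
    import numpy as np
    # synthetic (r_hat, b_hat) samples along a plausible colour curve
    r_hat = np.array([0.30, 0.40, 0.50, 0.57])
    b_hat = np.array([0.35, 0.28, 0.20, 0.14])
    # fit b_hat = a*r_hat^2 + b*r_hat + c in hatspace
    a, b, c = np.polyfit(r_hat, b_hat, 2)
    b_hat_fit = a*r_hat**2 + b*r_hat + c
    # dehat: r = r_hat/(1-r_hat-b_hat), b = b_hat/(1-r_hat-b_hat)
    r_fit = r_hat/(1 - r_hat - b_hat_fit)
    b_fit = b_hat_fit/(1 - r_hat - b_hat_fit)
    print('r:', np.round(r_fit, 4), 'b:', np.round(b_fit, 4))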
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* ipu3.cpp - Pipeline handler for Intel IPU3
*/
#include <algorithm>
#include <iomanip>
#include <memory>
#include <queue>
#include <vector>
#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/log.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/utils.h"
#include "libcamera/internal/v4l2_controls.h"
#include "cio2.h"
#include "imgu.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPU3)
class IPU3CameraData : public CameraData
{
public:
IPU3CameraData(PipelineHandler *pipe)
: CameraData(pipe)
{
}
void imguOutputBufferReady(FrameBuffer *buffer);
void cio2BufferReady(FrameBuffer *buffer);
CIO2Device cio2_;
ImgUDevice *imgu_;
Stream outStream_;
Stream vfStream_;
Stream rawStream_;
};
class IPU3CameraConfiguration : public CameraConfiguration
{
public:
IPU3CameraConfiguration(Camera *camera, IPU3CameraData *data);
Status validate() override;
const StreamConfiguration &cio2Format() const { return cio2Configuration_; }
const std::vector<const Stream *> &streams() { return streams_; }
private:
static constexpr unsigned int IPU3_BUFFER_COUNT = 4;
static constexpr unsigned int IPU3_MAX_STREAMS = 3;
void assignStreams();
void adjustStream(StreamConfiguration &cfg, bool scale);
/*
* The IPU3CameraData instance is guaranteed to be valid as long as the
* corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
std::shared_ptr<Camera> camera_;
const IPU3CameraData *data_;
StreamConfiguration cio2Configuration_;
std::vector<const Stream *> streams_;
};
class PipelineHandlerIPU3 : public PipelineHandler
{
public:
static constexpr unsigned int V4L2_CID_IPU3_PIPE_MODE = 0x009819c1;
enum IPU3PipeModes {
IPU3PipeModeVideo = 0,
IPU3PipeModeStillCapture = 1,
};
PipelineHandlerIPU3(CameraManager *manager);
CameraConfiguration *generateConfiguration(Camera *camera,
const StreamRoles &roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera) override;
void stop(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
IPU3CameraData *cameraData(const Camera *camera)
{
return static_cast<IPU3CameraData *>(
PipelineHandler::cameraData(camera));
}
int registerCameras();
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
ImgUDevice imgu0_;
ImgUDevice imgu1_;
MediaDevice *cio2MediaDev_;
MediaDevice *imguMediaDev_;
};
IPU3CameraConfiguration::IPU3CameraConfiguration(Camera *camera,
IPU3CameraData *data)
: CameraConfiguration()
{
camera_ = camera->shared_from_this();
data_ = data;
}
void IPU3CameraConfiguration::assignStreams()
{
/*
* Verify and update all configuration entries, and assign a stream to
* each of them. The viewfinder stream can scale, while the output
* stream can crop only, so select the output stream when the requested
* resolution is equal to the sensor resolution, and the viewfinder
* stream otherwise.
*/
std::set<const Stream *> availableStreams = {
&data_->outStream_,
&data_->vfStream_,
&data_->rawStream_,
};
/*
* The caller is responsible for limiting the number of requested streams
* to a number supported by the pipeline before calling this function.
*/
ASSERT(availableStreams.size() >= config_.size());
streams_.clear();
streams_.reserve(config_.size());
for (const StreamConfiguration &cfg : config_) {
const PixelFormatInfo &info =
PixelFormatInfo::info(cfg.pixelFormat);
const Stream *stream;
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
stream = &data_->rawStream_;
else if (cfg.size == cio2Configuration_.size)
stream = &data_->outStream_;
else
stream = &data_->vfStream_;
if (availableStreams.find(stream) == availableStreams.end())
stream = *availableStreams.begin();
streams_.push_back(stream);
availableStreams.erase(stream);
}
}
void IPU3CameraConfiguration::adjustStream(StreamConfiguration &cfg, bool scale)
{
/* The only pixel format the driver supports is NV12. */
cfg.pixelFormat = formats::NV12;
if (scale) {
/*
* Provide a suitable default that matches the sensor aspect
* ratio.
*/
if (!cfg.size.width || !cfg.size.height) {
cfg.size.width = 1280;
cfg.size.height = 1280 * cio2Configuration_.size.height
/ cio2Configuration_.size.width;
}
/*
* \todo Clamp the size to the hardware bounds once we have
* figured them out.
*
* \todo Handle the scaler (BDS) restrictions. The BDS can
* only scale with the same factor in both directions, and the
* scaling factor is limited to a multiple of 1/32. At the
* moment the ImgU driver hides these constraints by applying
* additional cropping, this should be fixed on the driver
* side, and cropping should be exposed to us.
*/
} else {
/*
* \todo Properly support cropping once the ImgU driver
* interface is cleaned up.
*/
cfg.size = cio2Configuration_.size;
}
/*
* Clamp the size to match the ImgU alignment constraints. The width
* shall be a multiple of 8 pixels and the height a multiple of 4
* pixels.
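* Masking out the low-order bits rounds down: for example, a
* width of 1927 becomes 1927 & ~7 = 1920.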
*/
if (cfg.size.width % 8 || cfg.size.height % 4) {
cfg.size.width &= ~7;
cfg.size.height &= ~3;
}
}
CameraConfiguration::Status IPU3CameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
/* Cap the number of entries to the available streams. */
if (config_.size() > IPU3_MAX_STREAMS) {
config_.resize(IPU3_MAX_STREAMS);
status = Adjusted;
}
/*
* Select the sensor format by collecting the maximum width and height
* and picking the closest larger match, as the IPU3 can downscale
* only. If no resolution is requested for any stream, or if no sensor
* resolution is large enough, pick the largest one.
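* For example, entries of 1920x1080 and 1280x720 produce a
* combined bound of 1920x1080, for which the closest larger
* sensor resolution is then selected.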
*/
Size size = {};
for (const StreamConfiguration &cfg : config_) {
if (cfg.size.width > size.width)
size.width = cfg.size.width;
if (cfg.size.height > size.height)
size.height = cfg.size.height;
}
/* Generate raw configuration from CIO2. */
cio2Configuration_ = data_->cio2_.generateConfiguration(size);
if (!cio2Configuration_.pixelFormat.isValid())
return Invalid;
/* Assign streams to each configuration entry. */
assignStreams();
/* Verify and adjust configuration if needed. */
for (unsigned int i = 0; i < config_.size(); ++i) {
StreamConfiguration &cfg = config_[i];
const StreamConfiguration oldCfg = cfg;
const Stream *stream = streams_[i];
if (stream == &data_->rawStream_) {
cfg = cio2Configuration_;
} else {
bool scale = stream == &data_->vfStream_;
adjustStream(config_[i], scale);
cfg.bufferCount = IPU3_BUFFER_COUNT;
}
if (cfg.pixelFormat != oldCfg.pixelFormat ||
cfg.size != oldCfg.size) {
LOG(IPU3, Debug)
<< "Stream " << i << " configuration adjusted to "
<< cfg.toString();
status = Adjusted;
}
}
return status;
}
PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
: PipelineHandler(manager), cio2MediaDev_(nullptr), imguMediaDev_(nullptr)
{
}
CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
const StreamRoles &roles)
{
IPU3CameraData *data = cameraData(camera);
IPU3CameraConfiguration *config = new IPU3CameraConfiguration(camera, data);
std::set<Stream *> streams = {
&data->outStream_,
&data->vfStream_,
&data->rawStream_,
};
if (roles.empty())
return config;
for (const StreamRole role : roles) {
StreamConfiguration cfg = {};
Stream *stream = nullptr;
cfg.pixelFormat = formats::NV12;
switch (role) {
case StreamRole::StillCapture:
/*
* Pick the output stream by default as the Viewfinder
* and VideoRecording roles are not allowed on
* the output stream.
*/
if (streams.find(&data->outStream_) != streams.end()) {
stream = &data->outStream_;
} else if (streams.find(&data->vfStream_) != streams.end()) {
stream = &data->vfStream_;
} else {
LOG(IPU3, Error)
<< "No stream available for requested role "
<< role;
break;
}
/*
* FIXME: Soraka: the maximum resolutions reported by
* both sensors (2592x1944 for ov5670 and 4224x3136 for
* ov13858) are returned as default configurations but
* they're not correctly processed by the ImgU.
* Resolutions up to 2560x1920 have been validated.
*
* \todo Clarify ImgU alignment requirements.
*/
cfg.size = { 2560, 1920 };
break;
case StreamRole::StillCaptureRaw: {
if (streams.find(&data->rawStream_) == streams.end()) {
LOG(IPU3, Error)
<< "Multiple raw streams are not supported";
break;
}
stream = &data->rawStream_;
cfg.size = data->cio2_.sensor()->resolution();
cfg = data->cio2_.generateConfiguration(cfg.size);
break;
}
case StreamRole::Viewfinder:
case StreamRole::VideoRecording: {
/*
* We can't use the 'output' stream for viewfinder or
* video capture roles.
*
* \todo This is an artificial limitation until we
* figure out the exact capabilities of the hardware.
*/
if (streams.find(&data->vfStream_) == streams.end()) {
LOG(IPU3, Error)
<< "No stream available for requested role "
<< role;
break;
}
stream = &data->vfStream_;
/*
* Align the default viewfinder size to the maximum
* available sensor resolution and to the IPU3
* alignment constraints.
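* For example, with the ov5670's 2592x1944 sensor this yields
* a 1280x720 default, already a multiple of 8x4.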
*/
const Size &res = data->cio2_.sensor()->resolution();
unsigned int width = std::min(1280U, res.width);
unsigned int height = std::min(720U, res.height);
cfg.size = { width & ~7, height & ~3 };
break;
}
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
break;
}
if (!stream) {
delete config;
return nullptr;
}
streams.erase(stream);
config->addConfiguration(cfg);
}
config->validate();
return config;
}
int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
{
IPU3CameraConfiguration *config =
static_cast<IPU3CameraConfiguration *>(c);
IPU3CameraData *data = cameraData(camera);
Stream *outStream = &data->outStream_;
Stream *vfStream = &data->vfStream_;
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
V4L2DeviceFormat outputFormat;
int ret;
/*
* FIXME: enabled links in one ImgU pipe interfere with capture
* operations on the other one. This can be easily triggered by
* capturing from one camera and then trying to capture from the other
* one right after, without disabling media links on the first used
* pipe.
*
* The tricky part here is where to disable links on the ImgU instance
* which is currently not in use:
* 1) Link enable/disable cannot be done at start()/stop() time, as video
* devices need to be linked before formats can be configured on
* them.
* 2) As links have to be enabled in configure() at the latest,
* before formats are configured, the only place to disable links
* would be stop(), but the Camera class state machine allows
* start()<->stop() sequences without any configure() in between.
*
* As of now, disable all links in the ImgU media graph before
* configuring the device, to allow alternating between the two
* ImgU pipes.
*
* As a consequence, a Camera using an ImgU shall be configured before
* any start()/stop() sequence. An application that wants to
* pre-configure all the cameras and then start/stop them alternately
* without going through any re-configuration (a sequence that is
* allowed by the Camera state machine) would now fail on the IPU3.
*/
ret = imguMediaDev_->disableLinks();
if (ret)
return ret;
/*
* \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
* ever be queued to the ImgU.
*/
ret = data->imgu_->enableLinks(true);
if (ret)
return ret;
/*