Diffstat (limited to 'include')
-rw-r--r--  include/android/system/core/include/system/graphics-base-v1.0.h | 1
-rw-r--r--  include/android/system/core/include/system/graphics-base-v1.1.h | 1
-rw-r--r--  include/android/system/core/include/system/graphics-base.h | 1
-rw-r--r--  include/android/system/core/include/system/graphics-sw.h | 1
-rw-r--r--  include/libcamera/base/backtrace.h | 2
-rw-r--r--  include/libcamera/base/bound_method.h | 4
-rw-r--r--  include/libcamera/base/class.h | 2
-rw-r--r--  include/libcamera/base/compiler.h | 2
-rw-r--r--  include/libcamera/base/event_dispatcher.h | 2
-rw-r--r--  include/libcamera/base/event_dispatcher_poll.h | 2
-rw-r--r--  include/libcamera/base/event_notifier.h | 2
-rw-r--r--  include/libcamera/base/file.h | 2
-rw-r--r--  include/libcamera/base/flags.h | 10
-rw-r--r--  include/libcamera/base/log.h | 12
-rw-r--r--  include/libcamera/base/meson.build | 28
-rw-r--r--  include/libcamera/base/message.h | 4
-rw-r--r--  include/libcamera/base/mutex.h | 4
-rw-r--r--  include/libcamera/base/object.h | 6
-rw-r--r--  include/libcamera/base/private.h | 2
-rw-r--r--  include/libcamera/base/semaphore.h | 12
-rw-r--r--  include/libcamera/base/shared_fd.h | 2
-rw-r--r--  include/libcamera/base/signal.h | 21
-rw-r--r--  include/libcamera/base/span.h | 2
-rw-r--r--  include/libcamera/base/thread.h | 2
-rw-r--r--  include/libcamera/base/thread_annotations.h | 4
-rw-r--r--  include/libcamera/base/timer.h | 2
-rw-r--r--  include/libcamera/base/unique_fd.h | 2
-rw-r--r--  include/libcamera/base/utils.h | 16
-rw-r--r--  include/libcamera/camera.h | 45
-rw-r--r--  include/libcamera/camera_manager.h | 9
-rw-r--r--  include/libcamera/color_space.h | 12
-rw-r--r--  include/libcamera/control_ids.h.in | 10
-rw-r--r--  include/libcamera/controls.h | 74
-rw-r--r--  include/libcamera/fence.h | 2
-rw-r--r--  include/libcamera/formats.h.in | 2
-rw-r--r--  include/libcamera/framebuffer.h | 24
-rw-r--r--  include/libcamera/framebuffer_allocator.h | 2
-rw-r--r--  include/libcamera/geometry.h | 2
-rw-r--r--  include/libcamera/internal/bayer_format.h | 6
-rw-r--r--  include/libcamera/internal/byte_stream_buffer.h | 2
-rw-r--r--  include/libcamera/internal/camera.h | 3
-rw-r--r--  include/libcamera/internal/camera_controls.h | 2
-rw-r--r--  include/libcamera/internal/camera_lens.h | 2
-rw-r--r--  include/libcamera/internal/camera_manager.h | 69
-rw-r--r--  include/libcamera/internal/camera_sensor.h | 48
-rw-r--r--  include/libcamera/internal/camera_sensor_properties.h | 2
-rw-r--r--  include/libcamera/internal/control_serializer.h | 6
-rw-r--r--  include/libcamera/internal/control_validator.h | 2
-rw-r--r--  include/libcamera/internal/converter.h | 108
-rw-r--r--  include/libcamera/internal/converter/converter_v4l2_m2m.h | 98
-rw-r--r--  include/libcamera/internal/converter/meson.build | 5
-rw-r--r--  include/libcamera/internal/delayed_controls.h | 9
-rw-r--r--  include/libcamera/internal/device_enumerator.h | 2
-rw-r--r--  include/libcamera/internal/device_enumerator_sysfs.h | 2
-rw-r--r--  include/libcamera/internal/device_enumerator_udev.h | 2
-rw-r--r--  include/libcamera/internal/dma_heaps.h | 38
-rw-r--r--  include/libcamera/internal/formats.h | 7
-rw-r--r--  include/libcamera/internal/framebuffer.h | 12
-rw-r--r--  include/libcamera/internal/ipa_data_serializer.h | 62
-rw-r--r--  include/libcamera/internal/ipa_manager.h | 9
-rw-r--r--  include/libcamera/internal/ipa_module.h | 2
-rw-r--r--  include/libcamera/internal/ipa_proxy.h | 2
-rw-r--r--  include/libcamera/internal/ipc_pipe.h | 2
-rw-r--r--  include/libcamera/internal/ipc_pipe_unixsocket.h | 2
-rw-r--r--  include/libcamera/internal/ipc_unixsocket.h | 2
-rw-r--r--  include/libcamera/internal/mapped_framebuffer.h | 2
-rw-r--r--  include/libcamera/internal/media_device.h | 2
-rw-r--r--  include/libcamera/internal/media_object.h | 2
-rw-r--r--  include/libcamera/internal/meson.build | 13
-rw-r--r--  include/libcamera/internal/pipeline_handler.h | 69
-rw-r--r--  include/libcamera/internal/process.h | 2
-rw-r--r--  include/libcamera/internal/pub_key.h | 10
-rw-r--r--  include/libcamera/internal/request.h | 10
-rw-r--r--  include/libcamera/internal/shared_mem_object.h | 127
-rw-r--r--  include/libcamera/internal/software_isp/debayer_params.h | 29
-rw-r--r--  include/libcamera/internal/software_isp/meson.build | 7
-rw-r--r--  include/libcamera/internal/software_isp/software_isp.h | 99
-rw-r--r--  include/libcamera/internal/software_isp/swisp_stats.h | 49
-rw-r--r--  include/libcamera/internal/source_paths.h | 2
-rw-r--r--  include/libcamera/internal/sysfs.h | 2
-rw-r--r--  include/libcamera/internal/tracepoints.h.in | 2
-rw-r--r--  include/libcamera/internal/tracepoints/request.tp | 4
-rw-r--r--  include/libcamera/internal/v4l2_device.h | 11
-rw-r--r--  include/libcamera/internal/v4l2_pixelformat.h | 24
-rw-r--r--  include/libcamera/internal/v4l2_subdevice.h | 132
-rw-r--r--  include/libcamera/internal/v4l2_videodevice.h | 11
-rw-r--r--  include/libcamera/internal/yaml_parser.h | 70
-rw-r--r--  include/libcamera/ipa/core.mojom | 49
-rw-r--r--  include/libcamera/ipa/ipa_controls.h | 2
-rw-r--r--  include/libcamera/ipa/ipa_interface.h | 7
-rw-r--r--  include/libcamera/ipa/ipa_module_info.h | 2
-rw-r--r--  include/libcamera/ipa/meson.build | 45
-rw-r--r--  include/libcamera/ipa/raspberrypi.mojom | 267
-rw-r--r--  include/libcamera/ipa/rkisp1.mojom | 18
-rw-r--r--  include/libcamera/ipa/soft.mojom | 28
-rw-r--r--  include/libcamera/ipa/vimc.mojom | 14
-rw-r--r--  include/libcamera/logging.h | 2
-rw-r--r--  include/libcamera/meson.build | 57
-rw-r--r--  include/libcamera/orientation.h | 30
-rw-r--r--  include/libcamera/pixel_format.h | 2
-rw-r--r--  include/libcamera/property_ids.h.in | 12
-rw-r--r--  include/libcamera/request.h | 2
-rw-r--r--  include/libcamera/stream.h | 7
-rw-r--r--  include/libcamera/transform.h | 9
-rw-r--r--  include/libcamera/version.h.in | 2
-rw-r--r--  include/linux/README | 2
-rw-r--r--  include/linux/bcm2835-isp.h | 2
-rw-r--r--  include/linux/dma-buf.h | 88
-rw-r--r--  include/linux/drm_fourcc.h | 218
-rw-r--r--  include/linux/intel-ipu3.h | 42
-rw-r--r--  include/linux/media-bus-format.h | 13
-rw-r--r--  include/linux/media.h | 29
-rw-r--r--  include/linux/rkisp1-config.h | 85
-rw-r--r--  include/linux/v4l2-common.h | 39
-rw-r--r--  include/linux/v4l2-controls.h | 1551
-rw-r--r--  include/linux/v4l2-mediabus.h | 4
-rw-r--r--  include/linux/v4l2-subdev.h | 108
-rw-r--r--  include/linux/videodev2.h | 107
-rw-r--r--  include/meson.build | 2
119 files changed, 3802 insertions, 561 deletions
diff --git a/include/android/system/core/include/system/graphics-base-v1.0.h b/include/android/system/core/include/system/graphics-base-v1.0.h
index 44913cc6..7548d879 100644
--- a/include/android/system/core/include/system/graphics-base-v1.0.h
+++ b/include/android/system/core/include/system/graphics-base-v1.0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: Apache-2.0 */
// This file is autogenerated by hidl-gen. Do not edit manually.
// Source: android.hardware.graphics.common@1.0
// Location: hardware/interfaces/graphics/common/1.0/
diff --git a/include/android/system/core/include/system/graphics-base-v1.1.h b/include/android/system/core/include/system/graphics-base-v1.1.h
index f95b9ba2..35130724 100644
--- a/include/android/system/core/include/system/graphics-base-v1.1.h
+++ b/include/android/system/core/include/system/graphics-base-v1.1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: Apache-2.0 */
// This file is autogenerated by hidl-gen. Do not edit manually.
// Source: android.hardware.graphics.common@1.1
// Location: hardware/interfaces/graphics/common/1.1/
diff --git a/include/android/system/core/include/system/graphics-base.h b/include/android/system/core/include/system/graphics-base.h
index ea920071..d01e9874 100644
--- a/include/android/system/core/include/system/graphics-base.h
+++ b/include/android/system/core/include/system/graphics-base.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: Apache-2.0 */
#ifndef SYSTEM_CORE_GRAPHICS_BASE_H_
#define SYSTEM_CORE_GRAPHICS_BASE_H_
diff --git a/include/android/system/core/include/system/graphics-sw.h b/include/android/system/core/include/system/graphics-sw.h
index 9e1a88e9..4a1cf829 100644
--- a/include/android/system/core/include/system/graphics-sw.h
+++ b/include/android/system/core/include/system/graphics-sw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: Apache-2.0 */
#ifndef SYSTEM_CORE_GRAPHICS_SW_H_
#define SYSTEM_CORE_GRAPHICS_SW_H_
diff --git a/include/libcamera/base/backtrace.h b/include/libcamera/base/backtrace.h
index 752034d1..699ddd9e 100644
--- a/include/libcamera/base/backtrace.h
+++ b/include/libcamera/base/backtrace.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
- * backtrace.h - Call stack backtraces
+ * Call stack backtraces
*/
#pragma once
diff --git a/include/libcamera/base/bound_method.h b/include/libcamera/base/bound_method.h
index e73a4d98..dd3488ee 100644
--- a/include/libcamera/base/bound_method.h
+++ b/include/libcamera/base/bound_method.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * bound_method.h - Method bind and invocation
+ * Method bind and invocation
*/
#pragma once
@@ -72,7 +72,7 @@ public:
}
virtual ~BoundMethodBase() = default;
- template<typename T, typename std::enable_if_t<!std::is_same<Object, T>::value> * = nullptr>
+ template<typename T, std::enable_if_t<!std::is_same<Object, T>::value> * = nullptr>
bool match(T *obj) { return obj == obj_; }
bool match(Object *object) { return object == object_; }
diff --git a/include/libcamera/base/class.h b/include/libcamera/base/class.h
index 571eecf4..a808422e 100644
--- a/include/libcamera/base/class.h
+++ b/include/libcamera/base/class.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * class.h - Utilities and helpers for classes
+ * Utilities and helpers for classes
*/
#pragma once
diff --git a/include/libcamera/base/compiler.h b/include/libcamera/base/compiler.h
index 02564f2f..fda8fdfd 100644
--- a/include/libcamera/base/compiler.h
+++ b/include/libcamera/base/compiler.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * compiler.h - Compiler support
+ * Compiler support
*/
#pragma once
diff --git a/include/libcamera/base/event_dispatcher.h b/include/libcamera/base/event_dispatcher.h
index 184f1b12..e9a09c6e 100644
--- a/include/libcamera/base/event_dispatcher.h
+++ b/include/libcamera/base/event_dispatcher.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher.h - Event dispatcher
+ * Event dispatcher
*/
#pragma once
diff --git a/include/libcamera/base/event_dispatcher_poll.h b/include/libcamera/base/event_dispatcher_poll.h
index b7840309..1f7e05cf 100644
--- a/include/libcamera/base/event_dispatcher_poll.h
+++ b/include/libcamera/base/event_dispatcher_poll.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_dispatcher_poll.h - Poll-based event dispatcher
+ * Poll-based event dispatcher
*/
#pragma once
diff --git a/include/libcamera/base/event_notifier.h b/include/libcamera/base/event_notifier.h
index e5c0594d..158f2d44 100644
--- a/include/libcamera/base/event_notifier.h
+++ b/include/libcamera/base/event_notifier.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * event_notifier.h - File descriptor event notifier
+ * File descriptor event notifier
*/
#pragma once
diff --git a/include/libcamera/base/file.h b/include/libcamera/base/file.h
index 0cdc2ed0..5637934c 100644
--- a/include/libcamera/base/file.h
+++ b/include/libcamera/base/file.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * file.h - File I/O operations
+ * File I/O operations
*/
#pragma once
diff --git a/include/libcamera/base/flags.h b/include/libcamera/base/flags.h
index bff3b93c..af4f6e35 100644
--- a/include/libcamera/base/flags.h
+++ b/include/libcamera/base/flags.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * flags.h - Type-safe enum-based bitfields
+ * Type-safe enum-based bitfields
*/
#pragma once
@@ -147,7 +147,7 @@ struct flags_enable_operators {
};
template<typename E>
-typename std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
+std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
operator|(E lhs, E rhs)
{
using type = std::underlying_type_t<E>;
@@ -155,7 +155,7 @@ operator|(E lhs, E rhs)
}
template<typename E>
-typename std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
+std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
operator&(E lhs, E rhs)
{
using type = std::underlying_type_t<E>;
@@ -163,7 +163,7 @@ operator&(E lhs, E rhs)
}
template<typename E>
-typename std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
+std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
operator^(E lhs, E rhs)
{
using type = std::underlying_type_t<E>;
@@ -171,7 +171,7 @@ operator^(E lhs, E rhs)
}
template<typename E>
-typename std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
+std::enable_if_t<flags_enable_operators<E>::enable, Flags<E>>
operator~(E rhs)
{
using type = std::underlying_type_t<E>;
diff --git a/include/libcamera/base/log.h b/include/libcamera/base/log.h
index 3fc5ced3..b5775e49 100644
--- a/include/libcamera/base/log.h
+++ b/include/libcamera/base/log.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * log.h - Logging infrastructure
+ * Logging infrastructure
*/
#pragma once
@@ -29,16 +29,18 @@ enum LogSeverity {
class LogCategory
{
public:
- explicit LogCategory(const char *name);
+ static LogCategory *create(const char *name);
- const char *name() const { return name_; }
+ const std::string &name() const { return name_; }
LogSeverity severity() const { return severity_; }
void setSeverity(LogSeverity severity);
static const LogCategory &defaultCategory();
private:
- const char *name_;
+ explicit LogCategory(const char *name);
+
+ const std::string name_;
LogSeverity severity_;
};
@@ -49,7 +51,7 @@ extern const LogCategory &_LOG_CATEGORY(name)();
const LogCategory &_LOG_CATEGORY(name)() \
{ \
/* The instance will be deleted by the Logger destructor. */ \
- static LogCategory *category = new LogCategory(#name); \
+ static LogCategory *category = LogCategory::create(#name); \
return *category; \
}
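
With this change, LogCategory instances are obtained through the create() factory rather than constructed directly, and LOG_DEFINE_CATEGORY() wraps that call. A minimal sketch of how a category is typically defined and used (the category name MyPipeline is illustrative):

#include <libcamera/base/log.h>

namespace libcamera {

LOG_DEFINE_CATEGORY(MyPipeline)

void doWork()
{
        LOG(MyPipeline, Debug) << "starting work";
}

} /* namespace libcamera */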
diff --git a/include/libcamera/base/meson.build b/include/libcamera/base/meson.build
index 4410aba8..bace25d5 100644
--- a/include/libcamera/base/meson.build
+++ b/include/libcamera/base/meson.build
@@ -2,31 +2,39 @@
libcamera_base_include_dir = libcamera_include_dir / 'base'
-libcamera_base_headers = files([
- 'backtrace.h',
+libcamera_base_public_headers = files([
'bound_method.h',
'class.h',
'compiler.h',
+ 'flags.h',
+ 'object.h',
+ 'shared_fd.h',
+ 'signal.h',
+ 'span.h',
+ 'unique_fd.h',
+])
+
+libcamera_base_private_headers = files([
+ 'backtrace.h',
'event_dispatcher.h',
'event_dispatcher_poll.h',
'event_notifier.h',
'file.h',
- 'flags.h',
'log.h',
'message.h',
'mutex.h',
- 'object.h',
'private.h',
'semaphore.h',
- 'shared_fd.h',
- 'signal.h',
- 'span.h',
'thread.h',
'thread_annotations.h',
'timer.h',
- 'unique_fd.h',
'utils.h',
])
-install_headers(libcamera_base_headers,
- subdir: libcamera_base_include_dir)
+libcamera_base_headers = [
+ libcamera_base_public_headers,
+ libcamera_base_private_headers,
+]
+
+install_headers(libcamera_base_public_headers,
+ subdir : libcamera_base_include_dir)
diff --git a/include/libcamera/base/message.h b/include/libcamera/base/message.h
index 65572c74..4b232031 100644
--- a/include/libcamera/base/message.h
+++ b/include/libcamera/base/message.h
@@ -2,13 +2,15 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * message.h - Message queue support
+ * Message queue support
*/
#pragma once
#include <atomic>
+#include <libcamera/base/private.h>
+
#include <libcamera/base/bound_method.h>
namespace libcamera {
diff --git a/include/libcamera/base/mutex.h b/include/libcamera/base/mutex.h
index 2d23e49e..fa9a8d0d 100644
--- a/include/libcamera/base/mutex.h
+++ b/include/libcamera/base/mutex.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mutex.h - Mutex classes with clang thread safety annotation
+ * Mutex classes with clang thread safety annotation
*/
#pragma once
@@ -10,6 +10,8 @@
#include <condition_variable>
#include <mutex>
+#include <libcamera/base/private.h>
+
#include <libcamera/base/thread_annotations.h>
namespace libcamera {
diff --git a/include/libcamera/base/object.h b/include/libcamera/base/object.h
index eef1a2c9..508773cd 100644
--- a/include/libcamera/base/object.h
+++ b/include/libcamera/base/object.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * object.h - Base object
+ * Base object
*/
#pragma once
@@ -32,7 +32,7 @@ public:
void postMessage(std::unique_ptr<Message> msg);
template<typename T, typename R, typename... FuncArgs, typename... Args,
- typename std::enable_if_t<std::is_base_of<Object, T>::value> * = nullptr>
+ std::enable_if_t<std::is_base_of<Object, T>::value> * = nullptr>
R invokeMethod(R (T::*func)(FuncArgs...), ConnectionType type,
Args&&... args)
{
@@ -49,6 +49,8 @@ public:
protected:
virtual void message(Message *msg);
+ bool assertThreadBound(const char *message);
+
private:
friend class SignalBase;
friend class Thread;
diff --git a/include/libcamera/base/private.h b/include/libcamera/base/private.h
index 163012bf..8670c40b 100644
--- a/include/libcamera/base/private.h
+++ b/include/libcamera/base/private.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * private.h - Private Header Validation
+ * Private Header Validation
*
* A selection of internal libcamera headers are installed as part
* of the libcamera package to allow sharing of a select subset of
diff --git a/include/libcamera/base/semaphore.h b/include/libcamera/base/semaphore.h
index c11e8dd1..59d4aa44 100644
--- a/include/libcamera/base/semaphore.h
+++ b/include/libcamera/base/semaphore.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * semaphore.h - General-purpose counting semaphore
+ * General-purpose counting semaphore
*/
#pragma once
@@ -18,15 +18,15 @@ class Semaphore
public:
Semaphore(unsigned int n = 0);
- unsigned int available();
- void acquire(unsigned int n = 1);
- bool tryAcquire(unsigned int n = 1);
- void release(unsigned int n = 1);
+ unsigned int available() LIBCAMERA_TSA_EXCLUDES(mutex_);
+ void acquire(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_);
+ bool tryAcquire(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_);
+ void release(unsigned int n = 1) LIBCAMERA_TSA_EXCLUDES(mutex_);
private:
Mutex mutex_;
ConditionVariable cv_;
- unsigned int available_;
+ unsigned int available_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
};
} /* namespace libcamera */
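
The same annotation pattern generalises to any class whose state is protected by a Mutex; a minimal sketch, assuming clang thread safety analysis is enabled (the Counter class is illustrative):

#include <libcamera/base/mutex.h>
#include <libcamera/base/thread_annotations.h>

namespace libcamera {

class Counter
{
public:
        void increment() LIBCAMERA_TSA_EXCLUDES(mutex_)
        {
                MutexLocker locker(mutex_);
                value_++;
        }

private:
        Mutex mutex_;
        unsigned int value_ LIBCAMERA_TSA_GUARDED_BY(mutex_) = 0;
};

} /* namespace libcamera */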
diff --git a/include/libcamera/base/shared_fd.h b/include/libcamera/base/shared_fd.h
index e53a8b88..61fe11c1 100644
--- a/include/libcamera/base/shared_fd.h
+++ b/include/libcamera/base/shared_fd.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * shared_fd.h - File descriptor wrapper with shared ownership
+ * File descriptor wrapper with shared ownership
*/
#pragma once
diff --git a/include/libcamera/base/signal.h b/include/libcamera/base/signal.h
index 91000d0d..849fbbda 100644
--- a/include/libcamera/base/signal.h
+++ b/include/libcamera/base/signal.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * signal.h - Signal & slot implementation
+ * Signal & slot implementation
*/
#pragma once
@@ -13,10 +13,11 @@
#include <vector>
#include <libcamera/base/bound_method.h>
-#include <libcamera/base/object.h>
namespace libcamera {
+class Object;
+
class SignalBase
{
public:
@@ -44,7 +45,7 @@ public:
}
#ifndef __DOXYGEN__
- template<typename T, typename R, typename std::enable_if_t<std::is_base_of<Object, T>::value> * = nullptr>
+ template<typename T, typename R, std::enable_if_t<std::is_base_of<Object, T>::value> * = nullptr>
void connect(T *obj, R (T::*func)(Args...),
ConnectionType type = ConnectionTypeAuto)
{
@@ -52,7 +53,7 @@ public:
SignalBase::connect(new BoundMethodMember<T, R, Args...>(obj, object, func, type));
}
- template<typename T, typename R, typename std::enable_if_t<!std::is_base_of<Object, T>::value> * = nullptr>
+ template<typename T, typename R, std::enable_if_t<!std::is_base_of<Object, T>::value> * = nullptr>
#else
template<typename T, typename R>
#endif
@@ -63,7 +64,11 @@ public:
#ifndef __DOXYGEN__
template<typename T, typename Func,
- typename std::enable_if_t<std::is_base_of<Object, T>::value> * = nullptr>
+ std::enable_if_t<std::is_base_of<Object, T>::value
+#if __cplusplus >= 201703L
+ && std::is_invocable_v<Func, Args...>
+#endif
+ > * = nullptr>
void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto)
{
Object *object = static_cast<Object *>(obj);
@@ -71,7 +76,11 @@ public:
}
template<typename T, typename Func,
- typename std::enable_if_t<!std::is_base_of<Object, T>::value> * = nullptr>
+ std::enable_if_t<!std::is_base_of<Object, T>::value
+#if __cplusplus >= 201703L
+ && std::is_invocable_v<Func, Args...>
+#endif
+ > * = nullptr>
#else
template<typename T, typename Func>
#endif
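
The added std::is_invocable_v check means that, from C++17 on, connecting a functor whose signature does not match the signal's arguments is rejected at overload resolution rather than deep inside the bound-method machinery. A short sketch of both connection styles (class and function names are illustrative):

#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>

using namespace libcamera;

class Receiver : public Object
{
public:
        void onFrame(int id) { (void)id; }
};

void example()
{
        Signal<int> frameCompleted;
        Receiver receiver;

        /* Member function slot. */
        frameCompleted.connect(&receiver, &Receiver::onFrame);

        /* Functor slot; must be callable with (int) to match the signal. */
        frameCompleted.connect(&receiver, [](int id) { (void)id; });

        frameCompleted.emit(0);
}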
diff --git a/include/libcamera/base/span.h b/include/libcamera/base/span.h
index 88d2e3de..c3e63f69 100644
--- a/include/libcamera/base/span.h
+++ b/include/libcamera/base/span.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * span.h - C++20 std::span<> implementation for C++11
+ * C++20 std::span<> implementation for C++11
*/
#pragma once
diff --git a/include/libcamera/base/thread.h b/include/libcamera/base/thread.h
index 9d00f102..4f33de63 100644
--- a/include/libcamera/base/thread.h
+++ b/include/libcamera/base/thread.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * thread.h - Thread support
+ * Thread support
*/
#pragma once
diff --git a/include/libcamera/base/thread_annotations.h b/include/libcamera/base/thread_annotations.h
index e81929f6..81930f08 100644
--- a/include/libcamera/base/thread_annotations.h
+++ b/include/libcamera/base/thread_annotations.h
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * thread_annotation.h - Macro of Clang thread safety analysis
+ * Macro of Clang thread safety analysis
*/
#pragma once
+#include <libcamera/base/private.h>
+
/*
* Enable thread safety attributes only with clang.
* The attributes can be safely erased when compiling with other compilers.
diff --git a/include/libcamera/base/timer.h b/include/libcamera/base/timer.h
index 759b68ad..5ef45959 100644
--- a/include/libcamera/base/timer.h
+++ b/include/libcamera/base/timer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * timer.h - Generic timer
+ * Generic timer
*/
#pragma once
diff --git a/include/libcamera/base/unique_fd.h b/include/libcamera/base/unique_fd.h
index ae4d96b7..c9a3b5d0 100644
--- a/include/libcamera/base/unique_fd.h
+++ b/include/libcamera/base/unique_fd.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * unique_fd.h - File descriptor wrapper that owns a file descriptor.
+ * File descriptor wrapper that owns a file descriptor.
*/
#pragma once
diff --git a/include/libcamera/base/utils.h b/include/libcamera/base/utils.h
index cfff0583..4ae02dc9 100644
--- a/include/libcamera/base/utils.h
+++ b/include/libcamera/base/utils.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * utils.h - Miscellaneous utility functions
+ * Miscellaneous utility functions
*/
#pragma once
@@ -170,6 +170,12 @@ public:
class iterator
{
public:
+ using difference_type = std::size_t;
+ using value_type = std::string;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::input_iterator_tag;
+
iterator(const StringSplitter *ss, std::string::size_type pos);
iterator &operator++();
@@ -361,6 +367,14 @@ decltype(auto) abs_diff(const T &a, const T &b)
return a - b;
}
+double strtod(const char *__restrict nptr, char **__restrict endptr);
+
+template<class Enum>
+constexpr std::underlying_type_t<Enum> to_underlying(Enum e) noexcept
+{
+ return static_cast<std::underlying_type_t<Enum>>(e);
+}
+
} /* namespace utils */
#ifndef __DOXYGEN__
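
utils::to_underlying() converts a scoped enum to its underlying integer type without spelling that type out at the call site; a minimal sketch (the StreamType enum is illustrative):

#include <stdint.h>

#include <libcamera/base/utils.h>

enum class StreamType : uint8_t {
        Raw = 1,
        Processed = 2,
};

static_assert(libcamera::utils::to_underlying(StreamType::Processed) == 2,
              "to_underlying() yields the underlying integer value");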
diff --git a/include/libcamera/camera.h b/include/libcamera/camera.h
index 5bb06584..94cee7bd 100644
--- a/include/libcamera/camera.h
+++ b/include/libcamera/camera.h
@@ -2,12 +2,14 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera.h - Camera object interface
+ * Camera object interface
*/
#pragma once
+#include <initializer_list>
#include <memory>
+#include <optional>
#include <set>
#include <stdint.h>
#include <string>
@@ -18,9 +20,10 @@
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
-#include <libcamera/transform.h>
namespace libcamera {
@@ -29,6 +32,30 @@ class FrameBufferAllocator;
class PipelineHandler;
class Request;
+class SensorConfiguration
+{
+public:
+ unsigned int bitDepth = 0;
+
+ Rectangle analogCrop;
+
+ struct {
+ unsigned int binX = 1;
+ unsigned int binY = 1;
+ } binning;
+
+ struct {
+ unsigned int xOddInc = 1;
+ unsigned int xEvenInc = 1;
+ unsigned int yOddInc = 1;
+ unsigned int yEvenInc = 1;
+ } skipping;
+
+ Size outputSize;
+
+ bool isValid() const;
+};
+
class CameraConfiguration
{
public:
@@ -65,7 +92,8 @@ public:
bool empty() const;
std::size_t size() const;
- Transform transform;
+ std::optional<SensorConfiguration> sensorConfig;
+ Orientation orientation;
protected:
CameraConfiguration();
@@ -105,7 +133,16 @@ public:
const ControlList &properties() const;
const std::set<Stream *> &streams() const;
- std::unique_ptr<CameraConfiguration> generateConfiguration(const StreamRoles &roles = {});
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(Span<const StreamRole> roles = {});
+
+ std::unique_ptr<CameraConfiguration>
+ generateConfiguration(std::initializer_list<StreamRole> roles)
+ {
+ return generateConfiguration(Span(roles.begin(), roles.end()));
+ }
+
int configure(CameraConfiguration *config);
std::unique_ptr<Request> createRequest(uint64_t cookie = 0);
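
Callers now pass StreamRole values as a span or brace-enclosed list, and express rotation through CameraConfiguration::orientation instead of the removed Transform field. A minimal sketch of the updated call sequence (error handling trimmed, camera acquisition assumed to have happened already):

#include <errno.h>

#include <libcamera/camera.h>

using namespace libcamera;

int configureViewfinder(Camera *camera)
{
        std::unique_ptr<CameraConfiguration> config =
                camera->generateConfiguration({ StreamRole::Viewfinder });
        if (!config)
                return -EINVAL;

        /* Orientation replaces the previous CameraConfiguration::transform. */
        config->orientation = Orientation::Rotate180;

        if (config->validate() == CameraConfiguration::Invalid)
                return -EINVAL;

        return camera->configure(config.get());
}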
diff --git a/include/libcamera/camera_manager.h b/include/libcamera/camera_manager.h
index 7647c2a1..b50df782 100644
--- a/include/libcamera/camera_manager.h
+++ b/include/libcamera/camera_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * camera_manager.h - Camera management
+ * Camera management
*/
#pragma once
@@ -31,12 +31,7 @@ public:
void stop();
std::vector<std::shared_ptr<Camera>> cameras() const;
- std::shared_ptr<Camera> get(const std::string &name);
- std::shared_ptr<Camera> get(dev_t devnum);
-
- void addCamera(std::shared_ptr<Camera> camera,
- const std::vector<dev_t> &devnums);
- void removeCamera(std::shared_ptr<Camera> camera);
+ std::shared_ptr<Camera> get(const std::string &id);
static const std::string &version() { return version_; }
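
With get(dev_t) and the addCamera()/removeCamera() registration helpers removed from the public interface, applications identify cameras solely by their ID string. A minimal sketch (the ID string is illustrative):

#include <errno.h>
#include <iostream>

#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>

using namespace libcamera;

int listAndPick()
{
        CameraManager cm;
        int ret = cm.start();
        if (ret)
                return ret;

        for (const std::shared_ptr<Camera> &camera : cm.cameras())
                std::cout << camera->id() << std::endl;

        std::shared_ptr<Camera> camera = cm.get("/base/soc/i2c0mux/i2c@1/imx219@10");

        cm.stop();
        return camera ? 0 : -ENODEV;
}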
diff --git a/include/libcamera/color_space.h b/include/libcamera/color_space.h
index 086c56c1..7b483cd1 100644
--- a/include/libcamera/color_space.h
+++ b/include/libcamera/color_space.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2021, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2021, Raspberry Pi Ltd
*
- * color_space.h - color space definitions
+ * color space definitions
*/
#pragma once
@@ -12,6 +12,8 @@
namespace libcamera {
+class PixelFormat;
+
class ColorSpace
{
public:
@@ -46,8 +48,8 @@ public:
}
static const ColorSpace Raw;
- static const ColorSpace Jpeg;
static const ColorSpace Srgb;
+ static const ColorSpace Sycc;
static const ColorSpace Smpte170m;
static const ColorSpace Rec709;
static const ColorSpace Rec2020;
@@ -59,6 +61,10 @@ public:
std::string toString() const;
static std::string toString(const std::optional<ColorSpace> &colorSpace);
+
+ static std::optional<ColorSpace> fromString(const std::string &str);
+
+ bool adjust(PixelFormat format);
};
bool operator==(const ColorSpace &lhs, const ColorSpace &rhs);
diff --git a/include/libcamera/control_ids.h.in b/include/libcamera/control_ids.h.in
index 0718a888..293ba966 100644
--- a/include/libcamera/control_ids.h.in
+++ b/include/libcamera/control_ids.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_ids.h - Control ID list
+ * Control ID list
*
* This file is auto-generated. Do not edit.
*/
@@ -10,7 +10,9 @@
#pragma once
#include <array>
+#include <map>
#include <stdint.h>
+#include <string>
#include <libcamera/controls.h>
@@ -26,11 +28,7 @@ ${controls}
extern const ControlIdMap controls;
-namespace draft {
-
-${draft_controls}
-
-} /* namespace draft */
+${vendor_controls}
} /* namespace controls */
diff --git a/include/libcamera/controls.h b/include/libcamera/controls.h
index 665bcac1..7c2bb287 100644
--- a/include/libcamera/controls.h
+++ b/include/libcamera/controls.h
@@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * controls.h - Control handling
+ * Control handling
*/
#pragma once
#include <assert.h>
+#include <optional>
#include <set>
#include <stdint.h>
#include <string>
@@ -98,10 +99,10 @@ public:
ControlValue();
#ifndef __DOXYGEN__
- template<typename T, typename std::enable_if_t<!details::is_span<T>::value &&
- details::control_type<T>::value &&
- !std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<!details::is_span<T>::value &&
+ details::control_type<T>::value &&
+ !std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
ControlValue(const T &value)
: type_(ControlTypeNone), numElements_(0)
{
@@ -109,9 +110,9 @@ public:
&value, 1, sizeof(T));
}
- template<typename T, typename std::enable_if_t<details::is_span<T>::value ||
- std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<details::is_span<T>::value ||
+ std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
@@ -143,9 +144,9 @@ public:
}
#ifndef __DOXYGEN__
- template<typename T, typename std::enable_if_t<!details::is_span<T>::value &&
- !std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<!details::is_span<T>::value &&
+ !std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
T get() const
{
assert(type_ == details::control_type<std::remove_cv_t<T>>::value);
@@ -154,9 +155,9 @@ public:
return *reinterpret_cast<const T *>(data().data());
}
- template<typename T, typename std::enable_if_t<details::is_span<T>::value ||
- std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<details::is_span<T>::value ||
+ std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
@@ -167,22 +168,22 @@ public:
using V = typename T::value_type;
const V *value = reinterpret_cast<const V *>(data().data());
- return { value, numElements_ };
+ return T{ value, numElements_ };
}
#ifndef __DOXYGEN__
- template<typename T, typename std::enable_if_t<!details::is_span<T>::value &&
- !std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<!details::is_span<T>::value &&
+ !std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
void set(const T &value)
{
set(details::control_type<std::remove_cv_t<T>>::value, false,
reinterpret_cast<const void *>(&value), 1, sizeof(T));
}
- template<typename T, typename std::enable_if_t<details::is_span<T>::value ||
- std::is_same<std::string, std::remove_cv_t<T>>::value,
- std::nullptr_t> = nullptr>
+ template<typename T, std::enable_if_t<details::is_span<T>::value ||
+ std::is_same<std::string, std::remove_cv_t<T>>::value,
+ std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
@@ -267,9 +268,9 @@ private:
class ControlInfo
{
public:
- explicit ControlInfo(const ControlValue &min = 0,
- const ControlValue &max = 0,
- const ControlValue &def = 0);
+ explicit ControlInfo(const ControlValue &min = {},
+ const ControlValue &max = {},
+ const ControlValue &def = {});
explicit ControlInfo(Span<const ControlValue> values,
const ControlValue &def = {});
explicit ControlInfo(std::set<bool> values, bool def);
@@ -351,6 +352,11 @@ private:
using ControlListMap = std::unordered_map<unsigned int, ControlValue>;
public:
+ enum class MergePolicy {
+ KeepExisting = 0,
+ OverwriteExisting,
+ };
+
ControlList();
ControlList(const ControlIdMap &idmap, const ControlValidator *validator = nullptr);
ControlList(const ControlInfoMap &infoMap, const ControlValidator *validator = nullptr);
@@ -367,19 +373,19 @@ public:
std::size_t size() const { return controls_.size(); }
void clear() { controls_.clear(); }
- void merge(const ControlList &source);
+ void merge(const ControlList &source, MergePolicy policy = MergePolicy::KeepExisting);
- bool contains(const ControlId &id) const;
bool contains(unsigned int id) const;
template<typename T>
- T get(const Control<T> &ctrl) const
+ std::optional<T> get(const Control<T> &ctrl) const
{
- const ControlValue *val = find(ctrl.id());
- if (!val)
- return T{};
+ const auto entry = controls_.find(ctrl.id());
+ if (entry == controls_.end())
+ return std::nullopt;
- return val->get<T>();
+ const ControlValue &val = entry->second;
+ return val.get<T>();
}
template<typename T, typename V>
@@ -392,14 +398,14 @@ public:
val->set<T>(value);
}
- template<typename T, typename V>
- void set(const Control<T> &ctrl, const std::initializer_list<V> &value)
+ template<typename T, typename V, size_t Size>
+ void set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
{
ControlValue *val = find(ctrl.id());
if (!val)
return;
- val->set<T>(Span<const typename std::remove_cv_t<V>>{ value.begin(), value.size() });
+ val->set(Span<const typename std::remove_cv_t<V>, Size>{ value.begin(), value.size() });
}
const ControlValue &get(unsigned int id) const;
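
ControlList::get() now distinguishes "control absent" from "control present with a default-constructed value" by returning std::optional<T>. A minimal sketch of the updated caller pattern:

#include <iostream>

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

void processMetadata(const ControlList &metadata)
{
        const auto exposure = metadata.get(controls::ExposureTime);
        if (exposure)
                std::cout << "ExposureTime: " << *exposure << std::endl;
        else
                std::cout << "ExposureTime not reported" << std::endl;
}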
diff --git a/include/libcamera/fence.h b/include/libcamera/fence.h
index c0c916c2..598336cb 100644
--- a/include/libcamera/fence.h
+++ b/include/libcamera/fence.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * internal/fence.h - Synchronization fence
+ * Synchronization fence
*/
#pragma once
diff --git a/include/libcamera/formats.h.in b/include/libcamera/formats.h.in
index ead5287d..6ae7634f 100644
--- a/include/libcamera/formats.h.in
+++ b/include/libcamera/formats.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * formats.h - Formats
+ * Formats
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/libcamera/framebuffer.h b/include/libcamera/framebuffer.h
index 3b1118d1..5ae2270b 100644
--- a/include/libcamera/framebuffer.h
+++ b/include/libcamera/framebuffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer.h - Frame buffer handling
+ * Frame buffer handling
*/
#pragma once
@@ -46,7 +46,7 @@ private:
std::vector<Plane> planes_;
};
-class FrameBuffer final : public Extensible
+class FrameBuffer : public Extensible
{
LIBCAMERA_DECLARE_PRIVATE()
@@ -59,28 +59,20 @@ public:
};
FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie = 0);
- FrameBuffer(std::unique_ptr<Private> d,
- const std::vector<Plane> &planes, unsigned int cookie = 0);
+ FrameBuffer(std::unique_ptr<Private> d);
+ virtual ~FrameBuffer() {}
- const std::vector<Plane> &planes() const { return planes_; }
+ const std::vector<Plane> &planes() const;
Request *request() const;
- const FrameMetadata &metadata() const { return metadata_; }
+ const FrameMetadata &metadata() const;
- unsigned int cookie() const { return cookie_; }
- void setCookie(unsigned int cookie) { cookie_ = cookie; }
+ uint64_t cookie() const;
+ void setCookie(uint64_t cookie);
std::unique_ptr<Fence> releaseFence();
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(FrameBuffer)
-
- friend class V4L2VideoDevice; /* Needed to update metadata_. */
-
- std::vector<Plane> planes_;
-
- FrameMetadata metadata_;
-
- unsigned int cookie_;
};
} /* namespace libcamera */
diff --git a/include/libcamera/framebuffer_allocator.h b/include/libcamera/framebuffer_allocator.h
index 45ff232b..f3896bf2 100644
--- a/include/libcamera/framebuffer_allocator.h
+++ b/include/libcamera/framebuffer_allocator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * framebuffer_allocator.h - FrameBuffer allocator
+ * FrameBuffer allocator
*/
#pragma once
diff --git a/include/libcamera/geometry.h b/include/libcamera/geometry.h
index d7fdbe70..3e6f0f5d 100644
--- a/include/libcamera/geometry.h
+++ b/include/libcamera/geometry.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * geometry.h - Geometry-related classes
+ * Geometry-related classes
*/
#pragma once
diff --git a/include/libcamera/internal/bayer_format.h b/include/libcamera/internal/bayer_format.h
index 7d3e37c6..5c14bb5f 100644
--- a/include/libcamera/internal/bayer_format.h
+++ b/include/libcamera/internal/bayer_format.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * bayer_format.h - Bayer Pixel Format
+ * Bayer Pixel Format
*/
#pragma once
@@ -34,6 +34,8 @@ public:
None = 0,
CSI2 = 1,
IPU3 = 2,
+ PISP1 = 3,
+ PISP2 = 4,
};
constexpr BayerFormat()
diff --git a/include/libcamera/internal/byte_stream_buffer.h b/include/libcamera/internal/byte_stream_buffer.h
index 0f4fce6f..5b1c10ab 100644
--- a/include/libcamera/internal/byte_stream_buffer.h
+++ b/include/libcamera/internal/byte_stream_buffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * byte_stream_buffer.h - Byte stream buffer
+ * Byte stream buffer
*/
#pragma once
diff --git a/include/libcamera/internal/camera.h b/include/libcamera/internal/camera.h
index 597426a6..0add0428 100644
--- a/include/libcamera/internal/camera.h
+++ b/include/libcamera/internal/camera.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera.h - Camera private data
+ * Camera private data
*/
#pragma once
@@ -50,6 +50,7 @@ private:
CameraRunning,
};
+ bool isAcquired() const;
bool isRunning() const;
int isAccessAllowed(State state, bool allowDisconnected = false,
const char *from = __builtin_FUNCTION()) const;
diff --git a/include/libcamera/internal/camera_controls.h b/include/libcamera/internal/camera_controls.h
index ee6d382f..4a5a3ebc 100644
--- a/include/libcamera/internal/camera_controls.h
+++ b/include/libcamera/internal/camera_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_controls.h - Camera controls
+ * Camera controls
*/
#pragma once
diff --git a/include/libcamera/internal/camera_lens.h b/include/libcamera/internal/camera_lens.h
index 277417da..5a4b993b 100644
--- a/include/libcamera/internal/camera_lens.h
+++ b/include/libcamera/internal/camera_lens.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_lens.h - A camera lens controller
+ * A camera lens controller
*/
#pragma once
diff --git a/include/libcamera/internal/camera_manager.h b/include/libcamera/internal/camera_manager.h
new file mode 100644
index 00000000..af9ed60a
--- /dev/null
+++ b/include/libcamera/internal/camera_manager.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas on Board Oy.
+ *
+ * Camera manager private data
+ */
+
+#pragma once
+
+#include <libcamera/camera_manager.h>
+
+#include <map>
+#include <memory>
+#include <sys/types.h>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/mutex.h>
+#include <libcamera/base/thread.h>
+#include <libcamera/base/thread_annotations.h>
+
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/process.h"
+
+namespace libcamera {
+
+class Camera;
+class DeviceEnumerator;
+
+class CameraManager::Private : public Extensible::Private, public Thread
+{
+ LIBCAMERA_DECLARE_PUBLIC(CameraManager)
+
+public:
+ Private();
+
+ int start();
+ void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
+ void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
+
+protected:
+ void run() override;
+
+private:
+ int init();
+ void createPipelineHandlers();
+ void pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory);
+ void cleanup() LIBCAMERA_TSA_EXCLUDES(mutex_);
+
+ /*
+ * This mutex protects
+ *
+ * - initialized_ and status_ during initialization
+ * - cameras_ after initialization
+ */
+ mutable Mutex mutex_;
+ std::vector<std::shared_ptr<Camera>> cameras_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+
+ ConditionVariable cv_;
+ bool initialized_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+ int status_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
+
+ std::unique_ptr<DeviceEnumerator> enumerator_;
+
+ IPAManager ipaManager_;
+ ProcessManager processManager_;
+};
+
+} /* namespace libcamera */
diff --git a/include/libcamera/internal/camera_sensor.h b/include/libcamera/internal/camera_sensor.h
index b9f4d786..fc44ab98 100644
--- a/include/libcamera/internal/camera_sensor.h
+++ b/include/libcamera/internal/camera_sensor.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * camera_sensor.h - A camera sensor
+ * A camera sensor
*/
#pragma once
@@ -17,20 +17,25 @@
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/transform.h>
#include <libcamera/ipa/core_ipa_interface.h>
+#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
-class BayerFormat;
class CameraLens;
class MediaEntity;
+class SensorConfiguration;
struct CameraSensorProperties;
+enum class Orientation;
+
class CameraSensor : protected Loggable
{
public:
@@ -41,32 +46,40 @@ public:
const std::string &model() const { return model_; }
const std::string &id() const { return id_; }
+
const MediaEntity *entity() const { return entity_; }
+ V4L2Subdevice *device() { return subdev_.get(); }
+
+ CameraLens *focusLens() { return focusLens_.get(); }
+
const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
std::vector<Size> sizes(unsigned int mbusCode) const;
Size resolution() const;
- const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
- {
- return testPatternModes_;
- }
- int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const;
- int setFormat(V4L2SubdeviceFormat *format);
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity);
+ int tryFormat(V4L2SubdeviceFormat *format) const;
- const ControlInfoMap &controls() const;
- ControlList getControls(const std::vector<uint32_t> &ids);
- int setControls(ControlList *ctrls);
-
- V4L2Subdevice *device() { return subdev_.get(); }
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr);
const ControlList &properties() const { return properties_; }
int sensorInfo(IPACameraSensorInfo *info) const;
+ Transform computeTransform(Orientation *orientation) const;
+ BayerFormat::Order bayerOrder(Transform t) const;
- void updateControlInfo();
+ const ControlInfoMap &controls() const;
+ ControlList getControls(const std::vector<uint32_t> &ids);
+ int setControls(ControlList *ctrls);
- CameraLens *focusLens() { return focusLens_.get(); }
+ const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
+ {
+ return testPatternModes_;
+ }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
protected:
std::string logPrefix() const override;
@@ -80,8 +93,8 @@ private:
void initStaticProperties();
void initTestPatternModes();
int initProperties();
- int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
int discoverAncillaryDevices();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
const MediaEntity *entity_;
std::unique_ptr<V4L2Subdevice> subdev_;
@@ -101,6 +114,9 @@ private:
Size pixelArraySize_;
Rectangle activeArea_;
const BayerFormat *bayerFormat_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
ControlList properties_;
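
A pipeline handler is expected to translate the configuration's Orientation into a Transform via computeTransform() and pass it to setFormat(), which can then program flips on the sensor where supported. A rough sketch (the helper name and the 1080p size are illustrative):

#include <libcamera/camera.h>
#include <libcamera/geometry.h>

#include "libcamera/internal/camera_sensor.h"

using namespace libcamera;

int configureSensor(CameraSensor *sensor, CameraConfiguration *config)
{
        /* Adjusts config->orientation to what the sensor can actually produce. */
        Transform transform = sensor->computeTransform(&config->orientation);

        V4L2SubdeviceFormat format = sensor->getFormat(sensor->mbusCodes(),
                                                       Size(1920, 1080));
        return sensor->setFormat(&format, transform);
}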
diff --git a/include/libcamera/internal/camera_sensor_properties.h b/include/libcamera/internal/camera_sensor_properties.h
index 1ee3cb99..480ac121 100644
--- a/include/libcamera/internal/camera_sensor_properties.h
+++ b/include/libcamera/internal/camera_sensor_properties.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * camera_sensor_properties.h - Database of camera sensor properties
+ * Database of camera sensor properties
*/
#pragma once
diff --git a/include/libcamera/internal/control_serializer.h b/include/libcamera/internal/control_serializer.h
index 99e57fee..8a63ae44 100644
--- a/include/libcamera/internal/control_serializer.h
+++ b/include/libcamera/internal/control_serializer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_serializer.h - Control (de)serializer
+ * Control (de)serializer
*/
#pragma once
@@ -47,9 +47,9 @@ private:
static void store(const ControlValue &value, ByteStreamBuffer &buffer);
static void store(const ControlInfo &info, ByteStreamBuffer &buffer);
- ControlValue loadControlValue(ControlType type, ByteStreamBuffer &buffer,
+ ControlValue loadControlValue(ByteStreamBuffer &buffer,
bool isArray = false, unsigned int count = 1);
- ControlInfo loadControlInfo(ControlType type, ByteStreamBuffer &buffer);
+ ControlInfo loadControlInfo(ByteStreamBuffer &buffer);
unsigned int serial_;
unsigned int serialSeed_;
diff --git a/include/libcamera/internal/control_validator.h b/include/libcamera/internal/control_validator.h
index 26412d8b..260602f2 100644
--- a/include/libcamera/internal/control_validator.h
+++ b/include/libcamera/internal/control_validator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * control_validator.h - Control validator
+ * Control validator
*/
#pragma once
diff --git a/include/libcamera/internal/converter.h b/include/libcamera/internal/converter.h
new file mode 100644
index 00000000..5d74db6b
--- /dev/null
+++ b/include/libcamera/internal/converter.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
+ *
+ * Generic format converter interface
+ */
+
+#pragma once
+
+#include <functional>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/geometry.h>
+
+namespace libcamera {
+
+class FrameBuffer;
+class MediaDevice;
+class PixelFormat;
+struct StreamConfiguration;
+
+class Converter
+{
+public:
+ Converter(MediaDevice *media);
+ virtual ~Converter();
+
+ virtual int loadConfiguration(const std::string &filename) = 0;
+
+ virtual bool isValid() const = 0;
+
+ virtual std::vector<PixelFormat> formats(PixelFormat input) = 0;
+ virtual SizeRange sizes(const Size &input) = 0;
+
+ virtual std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0;
+
+ virtual int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
+ virtual int exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
+
+ virtual int start() = 0;
+ virtual void stop() = 0;
+
+ virtual int queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+
+ const std::string &deviceNode() const { return deviceNode_; }
+
+private:
+ std::string deviceNode_;
+};
+
+class ConverterFactoryBase
+{
+public:
+ ConverterFactoryBase(const std::string name, std::initializer_list<std::string> compatibles);
+ virtual ~ConverterFactoryBase() = default;
+
+ const std::vector<std::string> &compatibles() const { return compatibles_; }
+
+ static std::unique_ptr<Converter> create(MediaDevice *media);
+ static std::vector<ConverterFactoryBase *> &factories();
+ static std::vector<std::string> names();
+
+private:
+ LIBCAMERA_DISABLE_COPY_AND_MOVE(ConverterFactoryBase)
+
+ static void registerType(ConverterFactoryBase *factory);
+
+ virtual std::unique_ptr<Converter> createInstance(MediaDevice *media) const = 0;
+
+ std::string name_;
+ std::vector<std::string> compatibles_;
+};
+
+template<typename _Converter>
+class ConverterFactory : public ConverterFactoryBase
+{
+public:
+ ConverterFactory(const char *name, std::initializer_list<std::string> compatibles)
+ : ConverterFactoryBase(name, compatibles)
+ {
+ }
+
+ std::unique_ptr<Converter> createInstance(MediaDevice *media) const override
+ {
+ return std::make_unique<_Converter>(media);
+ }
+};
+
+#define REGISTER_CONVERTER(name, converter, compatibles) \
+ static ConverterFactory<converter> global_##converter##Factory(name, compatibles);
+
+} /* namespace libcamera */
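
Converter backends register themselves with REGISTER_CONVERTER(), and a pipeline handler obtains an instance by matching a media device against the registered compatible strings. A minimal sketch of the lookup side (the helper function is illustrative):

#include <memory>

#include <libcamera/formats.h>

#include "libcamera/internal/converter.h"
#include "libcamera/internal/media_device.h"

using namespace libcamera;

std::unique_ptr<Converter> makeConverter(MediaDevice *media)
{
        /* Returns nullptr if no registered factory matches the media device. */
        std::unique_ptr<Converter> converter = ConverterFactoryBase::create(media);
        if (!converter || !converter->isValid())
                return nullptr;

        /* For example, require at least one output format for YUYV input. */
        if (converter->formats(formats::YUYV).empty())
                return nullptr;

        return converter;
}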
diff --git a/include/libcamera/internal/converter/converter_v4l2_m2m.h b/include/libcamera/internal/converter/converter_v4l2_m2m.h
new file mode 100644
index 00000000..1126050c
--- /dev/null
+++ b/include/libcamera/internal/converter/converter_v4l2_m2m.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Laurent Pinchart
+ * Copyright 2022 NXP
+ *
+ * V4l2 M2M Format converter interface
+ */
+
+#pragma once
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+
+#include <libcamera/pixel_format.h>
+
+#include "libcamera/internal/converter.h"
+
+namespace libcamera {
+
+class FrameBuffer;
+class MediaDevice;
+class Size;
+class SizeRange;
+struct StreamConfiguration;
+class V4L2M2MDevice;
+
+class V4L2M2MConverter : public Converter
+{
+public:
+ V4L2M2MConverter(MediaDevice *media);
+
+ int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
+ bool isValid() const { return m2m_ != nullptr; }
+
+ std::vector<PixelFormat> formats(PixelFormat input);
+ SizeRange sizes(const Size &input);
+
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
+ int exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ int start();
+ void stop();
+
+ int queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs);
+
+private:
+ class Stream : protected Loggable
+ {
+ public:
+ Stream(V4L2M2MConverter *converter, unsigned int index);
+
+ bool isValid() const { return m2m_ != nullptr; }
+
+ int configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg);
+ int exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ int start();
+ void stop();
+
+ int queueBuffers(FrameBuffer *input, FrameBuffer *output);
+
+ protected:
+ std::string logPrefix() const override;
+
+ private:
+ void captureBufferReady(FrameBuffer *buffer);
+ void outputBufferReady(FrameBuffer *buffer);
+
+ V4L2M2MConverter *converter_;
+ unsigned int index_;
+ std::unique_ptr<V4L2M2MDevice> m2m_;
+
+ unsigned int inputBufferCount_;
+ unsigned int outputBufferCount_;
+ };
+
+ std::unique_ptr<V4L2M2MDevice> m2m_;
+
+ std::vector<Stream> streams_;
+ std::map<FrameBuffer *, unsigned int> queue_;
+};
+
+} /* namespace libcamera */
diff --git a/include/libcamera/internal/converter/meson.build b/include/libcamera/internal/converter/meson.build
new file mode 100644
index 00000000..891e79e7
--- /dev/null
+++ b/include/libcamera/internal/converter/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_headers += files([
+ 'converter_v4l2_m2m.h',
+])
diff --git a/include/libcamera/internal/delayed_controls.h b/include/libcamera/internal/delayed_controls.h
index 703fdb66..e8d3014d 100644
--- a/include/libcamera/internal/delayed_controls.h
+++ b/include/libcamera/internal/delayed_controls.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * delayed_controls.h - Helper to deal with controls that take effect with a delay
+ * Helper to deal with controls that take effect with a delay
*/
#pragma once
@@ -51,7 +51,7 @@ private:
bool updated;
};
- /* \todo: Make the listSize configurable at instance creation time. */
+ /* \todo Make the listSize configurable at instance creation time. */
static constexpr int listSize = 16;
class ControlRingBuffer : public std::array<Info, listSize>
{
@@ -72,9 +72,6 @@ private:
std::unordered_map<const ControlId *, ControlParams> controlParams_;
unsigned int maxDelay_;
- bool running_;
- uint32_t firstSequence_;
-
uint32_t queueCount_;
uint32_t writeCount_;
/* \todo Evaluate if we should index on ControlId * or unsigned int */
diff --git a/include/libcamera/internal/device_enumerator.h b/include/libcamera/internal/device_enumerator.h
index 72ec9a60..db3532a9 100644
--- a/include/libcamera/internal/device_enumerator.h
+++ b/include/libcamera/internal/device_enumerator.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * device_enumerator.h - API to enumerate and find media devices
+ * API to enumerate and find media devices
*/
#pragma once
diff --git a/include/libcamera/internal/device_enumerator_sysfs.h b/include/libcamera/internal/device_enumerator_sysfs.h
index 3e84b83f..a5bfc711 100644
--- a/include/libcamera/internal/device_enumerator_sysfs.h
+++ b/include/libcamera/internal/device_enumerator_sysfs.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * device_enumerator_sysfs.h - sysfs-based device enumerator
+ * sysfs-based device enumerator
*/
#pragma once
diff --git a/include/libcamera/internal/device_enumerator_udev.h b/include/libcamera/internal/device_enumerator_udev.h
index 1b3360df..1378c190 100644
--- a/include/libcamera/internal/device_enumerator_udev.h
+++ b/include/libcamera/internal/device_enumerator_udev.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018-2019, Google Inc.
*
- * device_enumerator_udev.h - udev-based device enumerator
+ * udev-based device enumerator
*/
#pragma once
diff --git a/include/libcamera/internal/dma_heaps.h b/include/libcamera/internal/dma_heaps.h
new file mode 100644
index 00000000..f0a8aa5d
--- /dev/null
+++ b/include/libcamera/internal/dma_heaps.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2020, Raspberry Pi Ltd
+ *
+ * Helper class for dma-heap allocations.
+ */
+
+#pragma once
+
+#include <stddef.h>
+
+#include <libcamera/base/flags.h>
+#include <libcamera/base/unique_fd.h>
+
+namespace libcamera {
+
+class DmaHeap
+{
+public:
+ enum class DmaHeapFlag {
+ Cma = 1 << 0,
+ System = 1 << 1,
+ };
+
+ using DmaHeapFlags = Flags<DmaHeapFlag>;
+
+ DmaHeap(DmaHeapFlags flags = DmaHeapFlag::Cma);
+ ~DmaHeap();
+ bool isValid() const { return dmaHeapHandle_.isValid(); }
+ UniqueFD alloc(const char *name, std::size_t size);
+
+private:
+ UniqueFD dmaHeapHandle_;
+};
+
+LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaHeap::DmaHeapFlag)
+
+} /* namespace libcamera */
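
A minimal sketch of the allocator's intended use, allocating a CMA-backed buffer and handing the resulting dma-buf file descriptor to the caller (the buffer name and size are illustrative):

#include <libcamera/base/unique_fd.h>

#include "libcamera/internal/dma_heaps.h"

using namespace libcamera;

UniqueFD allocateFrameBuffer()
{
        DmaHeap heap(DmaHeap::DmaHeapFlag::Cma);
        if (!heap.isValid())
                return {};

        /* The returned UniqueFD owns the dma-buf fd; an invalid FD signals failure. */
        return heap.alloc("viewfinder-buffer", 4 * 1024 * 1024);
}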
diff --git a/include/libcamera/internal/formats.h b/include/libcamera/internal/formats.h
index ee599765..71895cd8 100644
--- a/include/libcamera/internal/formats.h
+++ b/include/libcamera/internal/formats.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * formats.h - libcamera image formats
+ * libcamera image formats
*/
#pragma once
@@ -53,10 +53,7 @@ public:
/* \todo Add support for non-contiguous memory planes */
const char *name;
PixelFormat format;
- struct {
- V4L2PixelFormat single;
- V4L2PixelFormat multi;
- } v4l2Formats;
+ std::vector<V4L2PixelFormat> v4l2Formats;
unsigned int bitsPerPixel;
enum ColourEncoding colourEncoding;
bool packed;
diff --git a/include/libcamera/internal/framebuffer.h b/include/libcamera/internal/framebuffer.h
index 8a9cc98e..e6698a45 100644
--- a/include/libcamera/internal/framebuffer.h
+++ b/include/libcamera/internal/framebuffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * framebuffer.h - Internal frame buffer handling
+ * Internal frame buffer handling
*/
#pragma once
@@ -22,7 +22,7 @@ class FrameBuffer::Private : public Extensible::Private
LIBCAMERA_DECLARE_PUBLIC(FrameBuffer)
public:
- Private();
+ Private(const std::vector<Plane> &planes, uint64_t cookie = 0);
virtual ~Private();
void setRequest(Request *request) { request_ = request; }
@@ -31,9 +31,15 @@ public:
Fence *fence() const { return fence_.get(); }
void setFence(std::unique_ptr<Fence> fence) { fence_ = std::move(fence); }
- void cancel() { LIBCAMERA_O_PTR()->metadata_.status = FrameMetadata::FrameCancelled; }
+ void cancel() { metadata_.status = FrameMetadata::FrameCancelled; }
+
+ FrameMetadata &metadata() { return metadata_; }
private:
+ std::vector<Plane> planes_;
+ FrameMetadata metadata_;
+ uint64_t cookie_;
+
std::unique_ptr<Fence> fence_;
Request *request_;
bool isContiguous_;
diff --git a/include/libcamera/internal/ipa_data_serializer.h b/include/libcamera/internal/ipa_data_serializer.h
index a87449c9..337c948c 100644
--- a/include/libcamera/internal/ipa_data_serializer.h
+++ b/include/libcamera/internal/ipa_data_serializer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipa_data_serializer.h - Image Processing Algorithm data serializer
+ * Image Processing Algorithm data serializer
*/
#pragma once
@@ -14,6 +14,7 @@
#include <type_traits>
#include <vector>
+#include <libcamera/base/flags.h>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
@@ -32,7 +33,7 @@ LOG_DECLARE_CATEGORY(IPADataSerializer)
namespace {
template<typename T,
- typename std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
+ std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
void appendPOD(std::vector<uint8_t> &vec, T val)
{
constexpr size_t byteWidth = sizeof(val);
@@ -134,7 +135,7 @@ public:
static std::vector<V> deserialize(std::vector<uint8_t> &data, ControlSerializer *cs = nullptr)
{
- return deserialize(data.cbegin(), data.end(), cs);
+ return deserialize(data.cbegin(), data.cend(), cs);
}
static std::vector<V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
@@ -142,13 +143,13 @@ public:
ControlSerializer *cs = nullptr)
{
std::vector<SharedFD> fds;
- return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.end(), cs);
+ return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.cend(), cs);
}
static std::vector<V> deserialize(std::vector<uint8_t> &data, std::vector<SharedFD> &fds,
ControlSerializer *cs = nullptr)
{
- return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end(), cs);
+ return deserialize(data.cbegin(), data.cend(), fds.cbegin(), fds.cend(), cs);
}
static std::vector<V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
@@ -240,7 +241,7 @@ public:
static std::map<K, V> deserialize(std::vector<uint8_t> &data, ControlSerializer *cs = nullptr)
{
- return deserialize(data.cbegin(), data.end(), cs);
+ return deserialize(data.cbegin(), data.cend(), cs);
}
static std::map<K, V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
@@ -248,13 +249,13 @@ public:
ControlSerializer *cs = nullptr)
{
std::vector<SharedFD> fds;
- return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.end(), cs);
+ return deserialize(dataBegin, dataEnd, fds.cbegin(), fds.cend(), cs);
}
static std::map<K, V> deserialize(std::vector<uint8_t> &data, std::vector<SharedFD> &fds,
ControlSerializer *cs = nullptr)
{
- return deserialize(data.cbegin(), data.end(), fds.cbegin(), fds.end(), cs);
+ return deserialize(data.cbegin(), data.cend(), fds.cbegin(), fds.cend(), cs);
}
static std::map<K, V> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
@@ -301,6 +302,51 @@ public:
}
};
+/* Serialization format for Flags is the same as for PODs */
+template<typename E>
+class IPADataSerializer<Flags<E>>
+{
+public:
+ static std::tuple<std::vector<uint8_t>, std::vector<SharedFD>>
+ serialize(const Flags<E> &data, [[maybe_unused]] ControlSerializer *cs = nullptr)
+ {
+ std::vector<uint8_t> dataVec;
+ dataVec.reserve(sizeof(Flags<E>));
+ appendPOD<uint32_t>(dataVec, static_cast<typename Flags<E>::Type>(data));
+
+ return { dataVec, {} };
+ }
+
+ static Flags<E> deserialize(std::vector<uint8_t> &data,
+ [[maybe_unused]] ControlSerializer *cs = nullptr)
+ {
+ return deserialize(data.cbegin(), data.cend());
+ }
+
+ static Flags<E> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] ControlSerializer *cs = nullptr)
+ {
+ return Flags<E>{ static_cast<E>(readPOD<uint32_t>(dataBegin, 0, dataEnd)) };
+ }
+
+ static Flags<E> deserialize(std::vector<uint8_t> &data,
+ [[maybe_unused]] std::vector<SharedFD> &fds,
+ [[maybe_unused]] ControlSerializer *cs = nullptr)
+ {
+ return deserialize(data.cbegin(), data.cend());
+ }
+
+ static Flags<E> deserialize(std::vector<uint8_t>::const_iterator dataBegin,
+ std::vector<uint8_t>::const_iterator dataEnd,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsBegin,
+ [[maybe_unused]] std::vector<SharedFD>::const_iterator fdsEnd,
+ [[maybe_unused]] ControlSerializer *cs = nullptr)
+ {
+ return deserialize(dataBegin, dataEnd);
+ }
+};
+
#endif /* __DOXYGEN__ */
} /* namespace libcamera */
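
A round-trip sketch for the new Flags<E> specialization (the enum is hypothetical; the value is serialized as a single little-endian uint32_t with no file descriptors):

enum class ErrorFlag {
	Overflow = (1 << 0),
	Timeout = (1 << 1),
};

using ErrorFlags = libcamera::Flags<ErrorFlag>;

void roundTrip()
{
	ErrorFlags in = ErrorFlags(ErrorFlag::Overflow) | ErrorFlags(ErrorFlag::Timeout);

	auto [data, fds] = libcamera::IPADataSerializer<ErrorFlags>::serialize(in);
	/* fds is always empty for Flags. */

	ErrorFlags out = libcamera::IPADataSerializer<ErrorFlags>::deserialize(data);
	/* out now holds the same flag bits as in. */
}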
diff --git a/include/libcamera/internal/ipa_manager.h b/include/libcamera/internal/ipa_manager.h
index 7f36e58e..c6f74e11 100644
--- a/include/libcamera/internal/ipa_manager.h
+++ b/include/libcamera/internal/ipa_manager.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_manager.h - Image Processing Algorithm module manager
+ * Image Processing Algorithm module manager
*/
#pragma once
@@ -47,6 +47,13 @@ public:
return proxy;
}
+#if HAVE_IPA_PUBKEY
+ static const PubKey &pubKey()
+ {
+ return pubKey_;
+ }
+#endif
+
private:
static IPAManager *self_;
diff --git a/include/libcamera/internal/ipa_module.h b/include/libcamera/internal/ipa_module.h
index 8038bdee..7c49d3f3 100644
--- a/include/libcamera/internal/ipa_module.h
+++ b/include/libcamera/internal/ipa_module.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module.h - Image Processing Algorithm module
+ * Image Processing Algorithm module
*/
#pragma once
diff --git a/include/libcamera/internal/ipa_proxy.h b/include/libcamera/internal/ipa_proxy.h
index 781c8b62..ed6a5bcf 100644
--- a/include/libcamera/internal/ipa_proxy.h
+++ b/include/libcamera/internal/ipa_proxy.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_proxy.h - Image Processing Algorithm proxy
+ * Image Processing Algorithm proxy
*/
#pragma once
diff --git a/include/libcamera/internal/ipc_pipe.h b/include/libcamera/internal/ipc_pipe.h
index ab5dd67c..a4560752 100644
--- a/include/libcamera/internal/ipc_pipe.h
+++ b/include/libcamera/internal/ipc_pipe.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe.h - Image Processing Algorithm IPC module for IPA proxies
+ * Image Processing Algorithm IPC module for IPA proxies
*/
#pragma once
diff --git a/include/libcamera/internal/ipc_pipe_unixsocket.h b/include/libcamera/internal/ipc_pipe_unixsocket.h
index 004d9539..4a0f6d57 100644
--- a/include/libcamera/internal/ipc_pipe_unixsocket.h
+++ b/include/libcamera/internal/ipc_pipe_unixsocket.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * ipc_pipe_unixsocket.h - Image Processing Algorithm IPC module using unix socket
+ * Image Processing Algorithm IPC module using unix socket
*/
#pragma once
diff --git a/include/libcamera/internal/ipc_unixsocket.h b/include/libcamera/internal/ipc_unixsocket.h
index 3963d182..48bb7a94 100644
--- a/include/libcamera/internal/ipc_unixsocket.h
+++ b/include/libcamera/internal/ipc_unixsocket.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipc_unixsocket.h - IPC mechanism based on Unix sockets
+ * IPC mechanism based on Unix sockets
*/
#pragma once
diff --git a/include/libcamera/internal/mapped_framebuffer.h b/include/libcamera/internal/mapped_framebuffer.h
index fb39adbf..6aaabf50 100644
--- a/include/libcamera/internal/mapped_framebuffer.h
+++ b/include/libcamera/internal/mapped_framebuffer.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * mapped_framebuffer.h - Frame buffer memory mapping support
+ * Frame buffer memory mapping support
*/
#pragma once
diff --git a/include/libcamera/internal/media_device.h b/include/libcamera/internal/media_device.h
index eb8cfde4..bf2e475d 100644
--- a/include/libcamera/internal/media_device.h
+++ b/include/libcamera/internal/media_device.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_device.h - Media device handler
+ * Media device handler
*/
#pragma once
diff --git a/include/libcamera/internal/media_object.h b/include/libcamera/internal/media_object.h
index b1572968..c9d77511 100644
--- a/include/libcamera/internal/media_object.h
+++ b/include/libcamera/internal/media_object.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * media_object.h - Media Device objects: entities, pads and links.
+ * Media Device objects: entities, pads and links.
*/
#pragma once
diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build
index 7a780d48..160fdc37 100644
--- a/include/libcamera/internal/meson.build
+++ b/include/libcamera/internal/meson.build
@@ -4,9 +4,9 @@ subdir('tracepoints')
libcamera_tracepoint_header = custom_target(
'tp_header',
- input: ['tracepoints.h.in', tracepoint_files],
- output: 'tracepoints.h',
- command: [gen_tracepoints_header, '@OUTPUT@', '@INPUT@'],
+ input : ['tracepoints.h.in', tracepoint_files],
+ output : 'tracepoints.h',
+ command : [gen_tracepoints_header, include_build_dir, '@OUTPUT@', '@INPUT@'],
)
libcamera_internal_headers = files([
@@ -15,14 +15,17 @@ libcamera_internal_headers = files([
'camera.h',
'camera_controls.h',
'camera_lens.h',
+ 'camera_manager.h',
'camera_sensor.h',
'camera_sensor_properties.h',
'control_serializer.h',
'control_validator.h',
+ 'converter.h',
'delayed_controls.h',
'device_enumerator.h',
'device_enumerator_sysfs.h',
'device_enumerator_udev.h',
+ 'dma_heaps.h',
'formats.h',
'framebuffer.h',
'ipa_manager.h',
@@ -36,6 +39,7 @@ libcamera_internal_headers = files([
'process.h',
'pub_key.h',
'request.h',
+ 'shared_mem_object.h',
'source_paths.h',
'sysfs.h',
'v4l2_device.h',
@@ -44,3 +48,6 @@ libcamera_internal_headers = files([
'v4l2_videodevice.h',
'yaml_parser.h',
])
+
+subdir('converter')
+subdir('software_isp')
diff --git a/include/libcamera/internal/pipeline_handler.h b/include/libcamera/internal/pipeline_handler.h
index c3e4c258..746a34f8 100644
--- a/include/libcamera/internal/pipeline_handler.h
+++ b/include/libcamera/internal/pipeline_handler.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2018, Google Inc.
*
- * pipeline_handler.h - Pipeline handler infrastructure
+ * Pipeline handler infrastructure
*/
#pragma once
@@ -45,11 +45,11 @@ public:
MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
const DeviceMatch &dm);
- bool lock();
- void unlock();
+ bool acquire();
+ void release(Camera *camera);
- virtual CameraConfiguration *generateConfiguration(Camera *camera,
- const StreamRoles &roles) = 0;
+ virtual std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
+ Span<const StreamRole> roles) = 0;
virtual int configure(Camera *camera, CameraConfiguration *config) = 0;
virtual int exportFrameBuffers(Camera *camera, Stream *stream,
@@ -65,6 +65,9 @@ public:
bool completeBuffer(Request *request, FrameBuffer *buffer);
void completeRequest(Request *request);
+ std::string configurationFile(const std::string &subdir,
+ const std::string &name) const;
+
const char *name() const { return name_; }
protected:
@@ -74,9 +77,13 @@ protected:
virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
virtual void stopDevice(Camera *camera) = 0;
+ virtual void releaseDevice(Camera *camera);
+
CameraManager *manager_;
private:
+ void unlockMediaDevices();
+
void mediaDeviceDisconnected(MediaDevice *media);
virtual void disconnect();
@@ -91,42 +98,50 @@ private:
const char *name_;
Mutex lock_;
- bool lockOwner_ LIBCAMERA_TSA_GUARDED_BY(lock_); /* *Not* ownership of lock_ */
+ unsigned int useCount_ LIBCAMERA_TSA_GUARDED_BY(lock_);
- friend class PipelineHandlerFactory;
+ friend class PipelineHandlerFactoryBase;
};
-class PipelineHandlerFactory
+class PipelineHandlerFactoryBase
{
public:
- PipelineHandlerFactory(const char *name);
- virtual ~PipelineHandlerFactory() = default;
+ PipelineHandlerFactoryBase(const char *name);
+ virtual ~PipelineHandlerFactoryBase() = default;
- std::shared_ptr<PipelineHandler> create(CameraManager *manager);
+ std::shared_ptr<PipelineHandler> create(CameraManager *manager) const;
const std::string &name() const { return name_; }
- static void registerType(PipelineHandlerFactory *factory);
- static std::vector<PipelineHandlerFactory *> &factories();
+ static std::vector<PipelineHandlerFactoryBase *> &factories();
+ static const PipelineHandlerFactoryBase *getFactoryByName(const std::string &name);
private:
- virtual PipelineHandler *createInstance(CameraManager *manager) = 0;
+ static void registerType(PipelineHandlerFactoryBase *factory);
+
+ virtual std::unique_ptr<PipelineHandler>
+ createInstance(CameraManager *manager) const = 0;
std::string name_;
};
-#define REGISTER_PIPELINE_HANDLER(handler) \
-class handler##Factory final : public PipelineHandlerFactory \
-{ \
-public: \
- handler##Factory() : PipelineHandlerFactory(#handler) {} \
- \
-private: \
- PipelineHandler *createInstance(CameraManager *manager) \
- { \
- return new handler(manager); \
- } \
-}; \
-static handler##Factory global_##handler##Factory;
+template<typename _PipelineHandler>
+class PipelineHandlerFactory final : public PipelineHandlerFactoryBase
+{
+public:
+ PipelineHandlerFactory(const char *name)
+ : PipelineHandlerFactoryBase(name)
+ {
+ }
+
+ std::unique_ptr<PipelineHandler>
+ createInstance(CameraManager *manager) const override
+ {
+ return std::make_unique<_PipelineHandler>(manager);
+ }
+};
+
+#define REGISTER_PIPELINE_HANDLER(handler, name) \
+ static PipelineHandlerFactory<handler> global_##handler##Factory(name);
} /* namespace libcamera */
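
With the factory turned into a class template, registration now takes both the handler type and a display name. For a hypothetical handler class the registration line becomes:

REGISTER_PIPELINE_HANDLER(PipelineHandlerExample, "example")

which expands to a static PipelineHandlerFactory<PipelineHandlerExample> instance named global_PipelineHandlerExampleFactory constructed with "example", so the handler is registered at load time without any per-handler factory boilerplate.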
diff --git a/include/libcamera/internal/process.h b/include/libcamera/internal/process.h
index 95e67e10..b1d07a5a 100644
--- a/include/libcamera/internal/process.h
+++ b/include/libcamera/internal/process.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * process.h - Process object
+ * Process object
*/
#pragma once
diff --git a/include/libcamera/internal/pub_key.h b/include/libcamera/internal/pub_key.h
index a22ba037..c8cc04cb 100644
--- a/include/libcamera/internal/pub_key.h
+++ b/include/libcamera/internal/pub_key.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * pub_key.h - Public key signature verification
+ * Public key signature verification
*/
#pragma once
@@ -11,7 +11,9 @@
#include <libcamera/base/span.h>
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+struct evp_pkey_st;
+#elif HAVE_GNUTLS
struct gnutls_pubkey_st;
#endif
@@ -28,7 +30,9 @@ public:
private:
bool valid_;
-#if HAVE_GNUTLS
+#if HAVE_CRYPTO
+ struct evp_pkey_st *pubkey_;
+#elif HAVE_GNUTLS
struct gnutls_pubkey_st *pubkey_;
#endif
};
diff --git a/include/libcamera/internal/request.h b/include/libcamera/internal/request.h
index 9dadd6c6..f5d98069 100644
--- a/include/libcamera/internal/request.h
+++ b/include/libcamera/internal/request.h
@@ -2,10 +2,10 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.h - Request class private data
+ * Request class private data
*/
-#ifndef __LIBCAMERA_INTERNAL_REQUEST_H__
-#define __LIBCAMERA_INTERNAL_REQUEST_H__
+
+#pragma once
#include <chrono>
#include <map>
@@ -37,7 +37,7 @@ public:
bool completeBuffer(FrameBuffer *buffer);
void complete();
void cancel();
- void reuse();
+ void reset();
void prepare(std::chrono::milliseconds timeout = 0ms);
Signal<> prepared;
@@ -62,5 +62,3 @@ private:
};
} /* namespace libcamera */
-
-#endif /* __LIBCAMERA_INTERNAL_REQUEST_H__ */
diff --git a/include/libcamera/internal/shared_mem_object.h b/include/libcamera/internal/shared_mem_object.h
new file mode 100644
index 00000000..2ab0189f
--- /dev/null
+++ b/include/libcamera/internal/shared_mem_object.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd
+ * Copyright (C) 2024 Andrei Konovalov
+ * Copyright (C) 2024 Dennis Bonke
+ *
+ * Helpers for shared memory allocations
+ */
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+#include <sys/mman.h>
+#include <type_traits>
+#include <utility>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/shared_fd.h>
+#include <libcamera/base/span.h>
+
+namespace libcamera {
+
+class SharedMem
+{
+public:
+ SharedMem();
+
+ SharedMem(const std::string &name, std::size_t size);
+ SharedMem(SharedMem &&rhs);
+
+ virtual ~SharedMem();
+
+ SharedMem &operator=(SharedMem &&rhs);
+
+ const SharedFD &fd() const
+ {
+ return fd_;
+ }
+
+ Span<uint8_t> mem() const
+ {
+ return mem_;
+ }
+
+ explicit operator bool() const
+ {
+ return !mem_.empty();
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMem)
+
+ SharedFD fd_;
+
+ Span<uint8_t> mem_;
+};
+
+template<class T, typename = std::enable_if_t<std::is_standard_layout<T>::value>>
+class SharedMemObject : public SharedMem
+{
+public:
+ static constexpr std::size_t kSize = sizeof(T);
+
+ SharedMemObject()
+ : SharedMem(), obj_(nullptr)
+ {
+ }
+
+ template<class... Args>
+ SharedMemObject(const std::string &name, Args &&...args)
+ : SharedMem(name, kSize), obj_(nullptr)
+ {
+ if (mem().empty())
+ return;
+
+ obj_ = new (mem().data()) T(std::forward<Args>(args)...);
+ }
+
+ SharedMemObject(SharedMemObject<T> &&rhs)
+ : SharedMem(std::move(rhs))
+ {
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ }
+
+ ~SharedMemObject()
+ {
+ if (obj_)
+ obj_->~T();
+ }
+
+ SharedMemObject<T> &operator=(SharedMemObject<T> &&rhs)
+ {
+ SharedMem::operator=(std::move(rhs));
+ this->obj_ = rhs.obj_;
+ rhs.obj_ = nullptr;
+ return *this;
+ }
+
+ T *operator->()
+ {
+ return obj_;
+ }
+
+ const T *operator->() const
+ {
+ return obj_;
+ }
+
+ T &operator*()
+ {
+ return *obj_;
+ }
+
+ const T &operator*() const
+ {
+ return *obj_;
+ }
+
+private:
+ LIBCAMERA_DISABLE_COPY(SharedMemObject)
+
+ T *obj_;
+};
+
+} /* namespace libcamera */
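
A minimal sketch of SharedMemObject, which placement-constructs a standard-layout object in shared memory so it can be shared over the fd (the struct and name are hypothetical):

struct Params {
	unsigned int gain;
	unsigned int offset;
};

void shareParams()
{
	/* Allocates sizeof(Params) bytes of shared memory and
	 * value-initializes a Params object inside it. */
	libcamera::SharedMemObject<Params> params("example-params");
	if (!params)
		return;

	params->gain = 2;
	params->offset = 16;

	/* The fd can be passed to another process, which maps the
	 * same memory and sees the values written above. */
	const libcamera::SharedFD &fd = params.fd();
	(void)fd;
}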
diff --git a/include/libcamera/internal/software_isp/debayer_params.h b/include/libcamera/internal/software_isp/debayer_params.h
new file mode 100644
index 00000000..ce1b5945
--- /dev/null
+++ b/include/libcamera/internal/software_isp/debayer_params.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Red Hat Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ *
+ * DebayerParams header
+ */
+
+#pragma once
+
+namespace libcamera {
+
+struct DebayerParams {
+ static constexpr unsigned int kGain10 = 256;
+
+ unsigned int gainR;
+ unsigned int gainG;
+ unsigned int gainB;
+
+ float gamma;
+ /**
+ * \brief Level of the black point, 0..255, 0 is no correction.
+ */
+ unsigned int blackLevel;
+};
+
+} /* namespace libcamera */
diff --git a/include/libcamera/internal/software_isp/meson.build b/include/libcamera/internal/software_isp/meson.build
new file mode 100644
index 00000000..508ddddc
--- /dev/null
+++ b/include/libcamera/internal/software_isp/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: CC0-1.0
+
+libcamera_internal_headers += files([
+ 'debayer_params.h',
+ 'software_isp.h',
+ 'swisp_stats.h',
+])
diff --git a/include/libcamera/internal/software_isp/software_isp.h b/include/libcamera/internal/software_isp/software_isp.h
new file mode 100644
index 00000000..7e9fae6a
--- /dev/null
+++ b/include/libcamera/internal/software_isp/software_isp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Simple software ISP implementation
+ */
+
+#pragma once
+
+#include <functional>
+#include <initializer_list>
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/signal.h>
+#include <libcamera/base/thread.h>
+
+#include <libcamera/geometry.h>
+#include <libcamera/pixel_format.h>
+
+#include <libcamera/ipa/soft_ipa_interface.h>
+#include <libcamera/ipa/soft_ipa_proxy.h>
+
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/dma_heaps.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/shared_mem_object.h"
+#include "libcamera/internal/software_isp/debayer_params.h"
+
+namespace libcamera {
+
+class DebayerCpu;
+class FrameBuffer;
+class PixelFormat;
+struct StreamConfiguration;
+
+LOG_DECLARE_CATEGORY(SoftwareIsp)
+
+class SoftwareIsp
+{
+public:
+ SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor);
+ ~SoftwareIsp();
+
+ int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
+
+ bool isValid() const;
+
+ std::vector<PixelFormat> formats(PixelFormat input);
+
+ SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
+
+ std::tuple<unsigned int, unsigned int>
+ strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
+
+ int configure(const StreamConfiguration &inputCfg,
+ const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
+ const ControlInfoMap &sensorControls);
+
+ int exportBuffers(unsigned int output, unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers);
+
+ void processStats(const ControlList &sensorControls);
+
+ int start();
+ void stop();
+
+ int queueBuffers(FrameBuffer *input,
+ const std::map<unsigned int, FrameBuffer *> &outputs);
+
+ void process(FrameBuffer *input, FrameBuffer *output);
+
+ Signal<FrameBuffer *> inputBufferReady;
+ Signal<FrameBuffer *> outputBufferReady;
+ Signal<> ispStatsReady;
+ Signal<const ControlList &> setSensorControls;
+
+private:
+ void saveIspParams();
+ void setSensorCtrls(const ControlList &sensorControls);
+ void statsReady();
+ void inputReady(FrameBuffer *input);
+ void outputReady(FrameBuffer *output);
+
+ std::unique_ptr<DebayerCpu> debayer_;
+ Thread ispWorkerThread_;
+ SharedMemObject<DebayerParams> sharedParams_;
+ DebayerParams debayerParams_;
+ DmaHeap dmaHeap_;
+
+ std::unique_ptr<ipa::soft::IPAProxySoft> ipa_;
+};
+
+} /* namespace libcamera */
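
A fragment showing how a pipeline handler might create the software ISP and connect its signals (a sketch; the owning class and slot names are hypothetical):

/* In a hypothetical pipeline handler, `this` derives from PipelineHandler. */
swIsp_ = std::make_unique<SoftwareIsp>(this, sensor_.get());
if (!swIsp_->isValid())
	return false;

swIsp_->inputBufferReady.connect(this, &SimplePipelineHandler::bayerBufferReturned);
swIsp_->outputBufferReady.connect(this, &SimplePipelineHandler::ispOutputReady);
swIsp_->ispStatsReady.connect(this, &SimplePipelineHandler::ispStatsReady);
swIsp_->setSensorControls.connect(this, &SimplePipelineHandler::applySensorControls);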
diff --git a/include/libcamera/internal/software_isp/swisp_stats.h b/include/libcamera/internal/software_isp/swisp_stats.h
new file mode 100644
index 00000000..ae11f112
--- /dev/null
+++ b/include/libcamera/internal/software_isp/swisp_stats.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Linaro Ltd
+ *
+ * Statistics data format used by the software ISP and software IPA
+ */
+
+#pragma once
+
+#include <array>
+#include <stdint.h>
+
+namespace libcamera {
+
+/**
+ * \brief Struct that holds the statistics for the Software ISP
+ *
+ * The struct value types are large enough to not overflow.
+ * Should they still overflow for some reason, no check is performed and they
+ * wrap around.
+ */
+struct SwIspStats {
+ /**
+ * \brief Holds the sum of all sampled red pixels
+ */
+ uint64_t sumR_;
+ /**
+ * \brief Holds the sum of all sampled green pixels
+ */
+ uint64_t sumG_;
+ /**
+ * \brief Holds the sum of all sampled blue pixels
+ */
+ uint64_t sumB_;
+ /**
+ * \brief Number of bins in the yHistogram
+ */
+ static constexpr unsigned int kYHistogramSize = 64;
+ /**
+ * \brief Type of the histogram.
+ */
+ using Histogram = std::array<uint32_t, kYHistogramSize>;
+ /**
+ * \brief A histogram of luminance values
+ */
+ Histogram yHistogram;
+};
+
+} /* namespace libcamera */
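
As an illustration of how these statistics might be consumed, simple grey-world white-balance gains can be derived from the channel sums (a rough sketch; the software IPA's actual algorithm may differ):

#include <algorithm>

#include "libcamera/internal/software_isp/swisp_stats.h"

/* Grey-world estimate: scale red and blue so their sums match green. */
static void greyWorldGains(const libcamera::SwIspStats &stats,
			   double *gainR, double *gainB)
{
	uint64_t sumR = std::max<uint64_t>(stats.sumR_, 1);
	uint64_t sumB = std::max<uint64_t>(stats.sumB_, 1);

	*gainR = static_cast<double>(stats.sumG_) / sumR;
	*gainB = static_cast<double>(stats.sumG_) / sumB;
}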
diff --git a/include/libcamera/internal/source_paths.h b/include/libcamera/internal/source_paths.h
index be6f153b..14e64717 100644
--- a/include/libcamera/internal/source_paths.h
+++ b/include/libcamera/internal/source_paths.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
- * source_paths.h - Identify libcamera source and build paths
+ * Identify libcamera source and build paths
*/
#pragma once
diff --git a/include/libcamera/internal/sysfs.h b/include/libcamera/internal/sysfs.h
index 917457be..aca60fb6 100644
--- a/include/libcamera/internal/sysfs.h
+++ b/include/libcamera/internal/sysfs.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
- * sysfs.h - Miscellaneous utility functions to access sysfs
+ * Miscellaneous utility functions to access sysfs
*/
#pragma once
diff --git a/include/libcamera/internal/tracepoints.h.in b/include/libcamera/internal/tracepoints.h.in
index d0fc1365..f0962091 100644
--- a/include/libcamera/internal/tracepoints.h.in
+++ b/include/libcamera/internal/tracepoints.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) {{year}}, Google Inc.
*
- * tracepoints.h - Tracepoints with lttng
+ * Tracepoints with lttng
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/libcamera/internal/tracepoints/request.tp b/include/libcamera/internal/tracepoints/request.tp
index f1e54497..4f367e91 100644
--- a/include/libcamera/internal/tracepoints/request.tp
+++ b/include/libcamera/internal/tracepoints/request.tp
@@ -5,10 +5,10 @@
* request.tp - Tracepoints for the request object
*/
-#include <libcamera/internal/request.h>
-
#include <libcamera/framebuffer.h>
+#include "libcamera/internal/request.h"
+
TRACEPOINT_EVENT_CLASS(
libcamera,
request,
diff --git a/include/libcamera/internal/v4l2_device.h b/include/libcamera/internal/v4l2_device.h
index a52a5f2c..f5aa5024 100644
--- a/include/libcamera/internal/v4l2_device.h
+++ b/include/libcamera/internal/v4l2_device.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_device.h - Common base for V4L2 video devices and subdevices
+ * Common base for V4L2 video devices and subdevices
*/
#pragma once
@@ -22,6 +22,8 @@
#include <libcamera/color_space.h>
#include <libcamera/controls.h>
+#include "libcamera/internal/formats.h"
+
namespace libcamera {
class EventNotifier;
@@ -59,7 +61,8 @@ protected:
int fd() const { return fd_.get(); }
template<typename T>
- static std::optional<ColorSpace> toColorSpace(const T &v4l2Format);
+ static std::optional<ColorSpace> toColorSpace(const T &v4l2Format,
+ PixelFormatInfo::ColourEncoding colourEncoding);
template<typename T>
static int fromColorSpace(const std::optional<ColorSpace> &colorSpace, T &v4l2Format);
@@ -67,8 +70,8 @@ protected:
private:
static ControlType v4l2CtrlType(uint32_t ctrlType);
static std::unique_ptr<ControlId> v4l2ControlId(const v4l2_query_ext_ctrl &ctrl);
- ControlInfo v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl);
- ControlInfo v4l2MenuControlInfo(const v4l2_query_ext_ctrl &ctrl);
+ std::optional<ControlInfo> v4l2ControlInfo(const v4l2_query_ext_ctrl &ctrl);
+ std::optional<ControlInfo> v4l2MenuControlInfo(const v4l2_query_ext_ctrl &ctrl);
void listControls();
void updateControls(ControlList *ctrls,
diff --git a/include/libcamera/internal/v4l2_pixelformat.h b/include/libcamera/internal/v4l2_pixelformat.h
index fb2d5d0b..c836346b 100644
--- a/include/libcamera/internal/v4l2_pixelformat.h
+++ b/include/libcamera/internal/v4l2_pixelformat.h
@@ -1,16 +1,18 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
- * Copyright (C) 2020, Raspberry Pi (Trading) Ltd.
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * v4l2_pixelformat.h - V4L2 Pixel Format
+ * V4L2 Pixel Format
*/
#pragma once
+#include <functional>
#include <ostream>
#include <stdint.h>
#include <string>
+#include <vector>
#include <linux/videodev2.h>
@@ -43,9 +45,9 @@ public:
std::string toString() const;
const char *description() const;
- PixelFormat toPixelFormat() const;
- static V4L2PixelFormat fromPixelFormat(const PixelFormat &pixelFormat,
- bool multiplanar = false);
+ PixelFormat toPixelFormat(bool warn = true) const;
+ static const std::vector<V4L2PixelFormat> &
+ fromPixelFormat(const PixelFormat &pixelFormat);
private:
uint32_t fourcc_;
@@ -54,3 +56,15 @@ private:
std::ostream &operator<<(std::ostream &out, const V4L2PixelFormat &f);
} /* namespace libcamera */
+
+namespace std {
+
+template<>
+struct hash<libcamera::V4L2PixelFormat> {
+ size_t operator()(libcamera::V4L2PixelFormat const &format) const noexcept
+ {
+ return format.fourcc();
+ }
+};
+
+} /* namespace std */
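
The std::hash specialization allows V4L2PixelFormat to be used as a key in unordered containers, matching the new pixelFormats_ cache in V4L2VideoDevice. A small sketch:

#include <unordered_set>

#include "libcamera/internal/v4l2_pixelformat.h"

bool isCached(const std::unordered_set<libcamera::V4L2PixelFormat> &cache)
{
	/* Hashing uses the raw fourcc value; equality compares fourccs. */
	return cache.count(libcamera::V4L2PixelFormat(V4L2_PIX_FMT_YUYV)) != 0;
}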
diff --git a/include/libcamera/internal/v4l2_subdevice.h b/include/libcamera/internal/v4l2_subdevice.h
index 6fda52ad..a1de0fb0 100644
--- a/include/libcamera/internal/v4l2_subdevice.h
+++ b/include/libcamera/internal/v4l2_subdevice.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_subdevice.h - V4L2 Subdevice
+ * V4L2 Subdevice
*/
#pragma once
@@ -13,6 +13,8 @@
#include <string>
#include <vector>
+#include <linux/v4l2-subdev.h>
+
#include <libcamera/base/class.h>
#include <libcamera/base/log.h>
@@ -27,13 +29,43 @@ namespace libcamera {
class MediaDevice;
+class MediaBusFormatInfo
+{
+public:
+ enum class Type {
+ Image,
+ Metadata,
+ EmbeddedData,
+ };
+
+ bool isValid() const { return code != 0; }
+
+ static const MediaBusFormatInfo &info(uint32_t code);
+
+ const char *name;
+ uint32_t code;
+ Type type;
+ unsigned int bitsPerPixel;
+ PixelFormatInfo::ColourEncoding colourEncoding;
+};
+
+struct V4L2SubdeviceCapability final : v4l2_subdev_capability {
+ bool isReadOnly() const
+ {
+ return capabilities & V4L2_SUBDEV_CAP_RO_SUBDEV;
+ }
+ bool hasStreams() const
+ {
+ return capabilities & V4L2_SUBDEV_CAP_STREAMS;
+ }
+};
+
struct V4L2SubdeviceFormat {
- uint32_t mbus_code;
+ uint32_t code;
Size size;
std::optional<ColorSpace> colorSpace;
const std::string toString() const;
- uint8_t bitsPerPixel() const;
};
std::ostream &operator<<(std::ostream &out, const V4L2SubdeviceFormat &f);
@@ -44,10 +76,43 @@ public:
using Formats = std::map<unsigned int, std::vector<SizeRange>>;
enum Whence {
- ActiveFormat,
- TryFormat,
+ TryFormat = V4L2_SUBDEV_FORMAT_TRY,
+ ActiveFormat = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ struct Stream {
+ Stream()
+ : pad(0), stream(0)
+ {
+ }
+
+ Stream(unsigned int p, unsigned int s)
+ : pad(p), stream(s)
+ {
+ }
+
+ unsigned int pad;
+ unsigned int stream;
};
+ struct Route {
+ Route()
+ : flags(0)
+ {
+ }
+
+ Route(const Stream &snk, const Stream &src, uint32_t f)
+ : sink(snk), source(src), flags(f)
+ {
+ }
+
+ Stream sink;
+ Stream source;
+ uint32_t flags;
+ };
+
+ using Routing = std::vector<Route>;
+
explicit V4L2Subdevice(const MediaEntity *entity);
~V4L2Subdevice();
@@ -55,19 +120,45 @@ public:
const MediaEntity *entity() const { return entity_; }
- int getSelection(unsigned int pad, unsigned int target,
+ int getSelection(const Stream &stream, unsigned int target,
Rectangle *rect);
- int setSelection(unsigned int pad, unsigned int target,
+ int getSelection(unsigned int pad, unsigned int target, Rectangle *rect)
+ {
+ return getSelection({ pad, 0 }, target, rect);
+ }
+ int setSelection(const Stream &stream, unsigned int target,
Rectangle *rect);
-
- Formats formats(unsigned int pad);
-
+ int setSelection(unsigned int pad, unsigned int target, Rectangle *rect)
+ {
+ return setSelection({ pad, 0 }, target, rect);
+ }
+
+ Formats formats(const Stream &stream);
+ Formats formats(unsigned int pad)
+ {
+ return formats({ pad, 0 });
+ }
+
+ int getFormat(const Stream &stream, V4L2SubdeviceFormat *format,
+ Whence whence = ActiveFormat);
int getFormat(unsigned int pad, V4L2SubdeviceFormat *format,
+ Whence whence = ActiveFormat)
+ {
+ return getFormat({ pad, 0 }, format, whence);
+ }
+ int setFormat(const Stream &stream, V4L2SubdeviceFormat *format,
Whence whence = ActiveFormat);
int setFormat(unsigned int pad, V4L2SubdeviceFormat *format,
- Whence whence = ActiveFormat);
+ Whence whence = ActiveFormat)
+ {
+ return setFormat({ pad, 0 }, format, whence);
+ }
+
+ int getRouting(Routing *routing, Whence whence = ActiveFormat);
+ int setRouting(Routing *routing, Whence whence = ActiveFormat);
const std::string &model();
+ const V4L2SubdeviceCapability &caps() const { return caps_; }
static std::unique_ptr<V4L2Subdevice>
fromEntityName(const MediaDevice *media, const std::string &entity);
@@ -78,13 +169,28 @@ protected:
private:
LIBCAMERA_DISABLE_COPY(V4L2Subdevice)
- std::vector<unsigned int> enumPadCodes(unsigned int pad);
- std::vector<SizeRange> enumPadSizes(unsigned int pad,
+ std::optional<ColorSpace>
+ toColorSpace(const v4l2_mbus_framefmt &format) const;
+
+ std::vector<unsigned int> enumPadCodes(const Stream &stream);
+ std::vector<SizeRange> enumPadSizes(const Stream &stream,
unsigned int code);
const MediaEntity *entity_;
std::string model_;
+ struct V4L2SubdeviceCapability caps_;
};
+bool operator==(const V4L2Subdevice::Stream &lhs, const V4L2Subdevice::Stream &rhs);
+static inline bool operator!=(const V4L2Subdevice::Stream &lhs,
+ const V4L2Subdevice::Stream &rhs)
+{
+ return !(lhs == rhs);
+}
+
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Stream &stream);
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Route &route);
+std::ostream &operator<<(std::ostream &out, const V4L2Subdevice::Routing &routing);
+
} /* namespace libcamera */
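
A sketch of the new stream-aware API, setting a single route and then addressing formats per { pad, stream } (pad and stream numbers are illustrative; the route flag comes from the kernel streams API):

using namespace libcamera;

int configureRouting(V4L2Subdevice *subdev)
{
	/* Route stream 0 of sink pad 0 to stream 0 of source pad 1. */
	V4L2Subdevice::Routing routing = {
		{ { 0, 0 }, { 1, 0 }, V4L2_SUBDEV_ROUTE_FL_ACTIVE },
	};

	int ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
	if (ret < 0)
		return ret;

	/* Formats are now addressed per { pad, stream }. */
	V4L2SubdeviceFormat format{};
	return subdev->getFormat({ 1, 0 }, &format);
}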
diff --git a/include/libcamera/internal/v4l2_videodevice.h b/include/libcamera/internal/v4l2_videodevice.h
index 8525acbc..9057be08 100644
--- a/include/libcamera/internal/v4l2_videodevice.h
+++ b/include/libcamera/internal/v4l2_videodevice.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * v4l2_videodevice.h - V4L2 Video Device
+ * V4L2 Video Device
*/
#pragma once
@@ -14,6 +14,7 @@
#include <ostream>
#include <stdint.h>
#include <string>
+#include <unordered_set>
#include <vector>
#include <linux/videodev2.h>
@@ -228,6 +229,8 @@ public:
static std::unique_ptr<V4L2VideoDevice>
fromEntityName(const MediaDevice *media, const std::string &entity);
+ V4L2PixelFormat toV4L2PixelFormat(const PixelFormat &pixelFormat) const;
+
protected:
std::string logPrefix() const override;
@@ -240,6 +243,8 @@ private:
Stopped,
};
+ int initFormats();
+
int getFormatMeta(V4L2DeviceFormat *format);
int trySetFormatMeta(V4L2DeviceFormat *format, bool set);
@@ -263,9 +268,13 @@ private:
void watchdogExpired();
+ template<typename T>
+ static std::optional<ColorSpace> toColorSpace(const T &v4l2Format);
+
V4L2Capability caps_;
V4L2DeviceFormat format_;
const PixelFormatInfo *formatInfo_;
+ std::unordered_set<V4L2PixelFormat> pixelFormats_;
enum v4l2_buf_type bufferType_;
enum v4l2_memory memoryType_;
diff --git a/include/libcamera/internal/yaml_parser.h b/include/libcamera/internal/yaml_parser.h
index 064cf443..b6979d73 100644
--- a/include/libcamera/internal/yaml_parser.h
+++ b/include/libcamera/internal/yaml_parser.h
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2022, Google Inc.
*
- * yaml_parser.h - libcamera YAML parsing helper
+ * libcamera YAML parsing helper
*/
#pragma once
#include <iterator>
#include <map>
+#include <optional>
#include <string>
#include <vector>
@@ -24,12 +25,21 @@ class YamlParserContext;
class YamlObject
{
private:
- using DictContainer = std::map<std::string, std::unique_ptr<YamlObject>>;
+ struct Value {
+ Value(std::string &&k, std::unique_ptr<YamlObject> &&v)
+ : key(std::move(k)), value(std::move(v))
+ {
+ }
+ std::string key;
+ std::unique_ptr<YamlObject> value;
+ };
+
+ using Container = std::vector<Value>;
using ListContainer = std::vector<std::unique_ptr<YamlObject>>;
public:
#ifndef __DOXYGEN__
- template<typename Container, typename Derived>
+ template<typename Derived>
class Iterator
{
public:
@@ -65,10 +75,10 @@ public:
}
protected:
- typename Container::const_iterator it_;
+ Container::const_iterator it_;
};
- template<typename Container, typename Iterator>
+ template<typename Iterator>
class Adapter
{
public:
@@ -91,7 +101,7 @@ public:
const Container &container_;
};
- class ListIterator : public Iterator<ListContainer, ListIterator>
+ class ListIterator : public Iterator<ListIterator>
{
public:
using value_type = const YamlObject &;
@@ -100,16 +110,16 @@ public:
value_type operator*() const
{
- return *it_->get();
+ return *it_->value.get();
}
pointer operator->() const
{
- return it_->get();
+ return it_->value.get();
}
};
- class DictIterator : public Iterator<DictContainer, DictIterator>
+ class DictIterator : public Iterator<DictIterator>
{
public:
using value_type = std::pair<const std::string &, const YamlObject &>;
@@ -118,17 +128,17 @@ public:
value_type operator*() const
{
- return { it_->first, *it_->second.get() };
+ return { it_->key, *it_->value.get() };
}
};
- class DictAdapter : public Adapter<DictContainer, DictIterator>
+ class DictAdapter : public Adapter<DictIterator>
{
public:
using key_type = std::string;
};
- class ListAdapter : public Adapter<ListContainer, ListIterator>
+ class ListAdapter : public Adapter<ListIterator>
{
};
#endif /* __DOXYGEN__ */
@@ -153,9 +163,35 @@ public:
#ifndef __DOXYGEN__
template<typename T,
- typename std::enable_if_t<
+ std::enable_if_t<
+ std::is_same_v<bool, T> ||
+ std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
+ std::is_same_v<int16_t, T> ||
+ std::is_same_v<uint16_t, T> ||
+ std::is_same_v<int32_t, T> ||
+ std::is_same_v<uint32_t, T> ||
+ std::is_same_v<std::string, T> ||
+ std::is_same_v<Size, T>> * = nullptr>
+#else
+ template<typename T>
+#endif
+ std::optional<T> get() const;
+
+ template<typename T>
+ T get(const T &defaultValue) const
+ {
+ return get<T>().value_or(defaultValue);
+ }
+
+#ifndef __DOXYGEN__
+ template<typename T,
+ std::enable_if_t<
std::is_same_v<bool, T> ||
std::is_same_v<double, T> ||
+ std::is_same_v<int8_t, T> ||
+ std::is_same_v<uint8_t, T> ||
std::is_same_v<int16_t, T> ||
std::is_same_v<uint16_t, T> ||
std::is_same_v<int32_t, T> ||
@@ -165,9 +201,9 @@ public:
#else
template<typename T>
#endif
- T get(const T &defaultValue, bool *ok = nullptr) const;
+ std::optional<std::vector<T>> getList() const;
- DictAdapter asDict() const { return DictAdapter{ dictionary_ }; }
+ DictAdapter asDict() const { return DictAdapter{ list_ }; }
ListAdapter asList() const { return ListAdapter{ list_ }; }
const YamlObject &operator[](std::size_t index) const;
@@ -189,8 +225,8 @@ private:
Type type_;
std::string value_;
- ListContainer list_;
- DictContainer dictionary_;
+ Container list_;
+ std::map<std::string, YamlObject *> dictionary_;
};
class YamlParser final
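
A sketch of the reworked accessor API, which now returns std::optional instead of taking an ok out-parameter (the keys are hypothetical):

void readConfig(const libcamera::YamlObject &root)
{
	/* get<T>() returns std::nullopt if the key is missing or malformed. */
	std::optional<uint32_t> width = root["width"].get<uint32_t>();

	/* get(defaultValue) falls back to the default instead. */
	uint32_t height = root["height"].get<uint32_t>(480);

	/* getList<T>() parses a whole list, or returns std::nullopt. */
	std::optional<std::vector<std::string>> names =
		root["names"].getList<std::string>();
}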
diff --git a/include/libcamera/ipa/core.mojom b/include/libcamera/ipa/core.mojom
index 74f3339e..bce79724 100644
--- a/include/libcamera/ipa/core.mojom
+++ b/include/libcamera/ipa/core.mojom
@@ -14,8 +14,8 @@ module libcamera;
* - structs
*
* Attributes:
- * - skipHeader - structs only, and only in core.mojom
- * - Do not generate a C++ definition for the structure
+ * - skipHeader - allowed only for structs and enums in core.mojom
+ * - Do not generate a C++ definition for the structure or enum
* - Any type used in a mojom interface definition must have a corresponding
* definition in a mojom file for the code generator to accept it, except
* for types solely used as map/array members for which a definition is not
@@ -33,6 +33,15 @@ module libcamera;
* available for the type and there's no need to generate one
* - hasFd - struct fields or empty structs only
* - Designate that this field or empty struct contains a SharedFD
+ * - scopedEnum - enum definitions
+ * - Designate that this enum should be an enum class, as opposed to a pure
+ * enum
+ * - flags - struct fields or function parameters that are enums
+ * - Designate that this enum type E should be Flags<E> in the generated C++
+ * code
+ * - For example, if a struct field is defined as `[flags] ErrorFlag f;`
+ * (where ErrorFlag is defined as an enum elsewhere in mojom), then the
+ * generated code for this field will be `Flags<ErrorFlag> f`
*
* Rules:
* - If the type is defined in a libcamera C++ header *and* a (de)serializer is
@@ -43,6 +52,8 @@ module libcamera;
* then the type definition in the core.mojom file should have the
* [skipHeader] attribute only
* - A (de)serializer will be generated for the type
+ * - enums that are defined in a libcamera C++ header also fall in this
+ * category
* - If a type definition has [skipHeader], then the header where the type is
* defined must be included in ipa_interface.h
* - Types that are solely used as array/map members do not require a mojom
@@ -130,6 +141,18 @@ module libcamera;
*/
/**
+ * \var IPACameraSensorInfo::cfaPattern
+ * \brief The arrangement of colour filters on the image sensor
+ *
+ * This takes a value defined by properties::draft::ColorFilterArrangementEnum.
+ * For non-Bayer colour sensors, the cfaPattern will be set to
+ * properties::draft::ColorFilterArrangementEnum::RGB.
+ *
+ * \todo Make this variable optional once mojom supports it, instead of using
+ * RGB for sensors that don't have a CFA.
+ */
+
+/**
* \var IPACameraSensorInfo::activeAreaSize
* \brief The size of the pixel array active area of the sensor
*/
@@ -172,10 +195,17 @@ module libcamera;
*/
/**
- * \var IPACameraSensorInfo::lineLength
- * \brief Total line length in pixels
+ * \var IPACameraSensorInfo::minLineLength
+ * \brief The minimum line length in pixels
*
- * The total line length in pixel clock periods, including blanking.
+ * The minimum allowable line length in pixel clock periods, including blanking.
+ */
+
+/**
+ * \var IPACameraSensorInfo::maxLineLength
+ * \brief The maximum line length in pixels
+ *
+ * The maximum allowable line length in pixel clock periods, including blanking.
*/
/**
@@ -189,7 +219,7 @@ module libcamera;
* To obtain the minimum frame duration:
*
* \verbatim
- frameDuration(s) = minFrameLength(lines) * lineLength(pixels) / pixelRate(pixels per second)
+ frameDuration(s) = minFrameLength(lines) * minLineLength(pixels) / pixelRate(pixels per second)
\endverbatim
*/
@@ -204,20 +234,23 @@ module libcamera;
* To obtain the maximum frame duration:
*
* \verbatim
- frameDuration(s) = maxFrameLength(lines) * lineLength(pixels) / pixelRate(pixels per second)
+ frameDuration(s) = maxFrameLength(lines) * maxLineLength(pixels) / pixelRate(pixels per second)
\endverbatim
*/
struct IPACameraSensorInfo {
string model;
uint32 bitsPerPixel;
+ uint32 cfaPattern;
Size activeAreaSize;
Rectangle analogCrop;
Size outputSize;
uint64 pixelRate;
- uint32 lineLength;
+
+ uint32 minLineLength;
+ uint32 maxLineLength;
uint32 minFrameLength;
uint32 maxFrameLength;
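
As a worked example with purely hypothetical sensor values: for pixelRate = 450,000,000 pixels per second, minLineLength = 3000 pixels and minFrameLength = 2250 lines, the minimum frame duration is 2250 * 3000 / 450,000,000 = 0.015 s (15 ms), i.e. roughly 66.7 frames per second. The maximum frame duration follows the same formula using maxFrameLength and maxLineLength.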
diff --git a/include/libcamera/ipa/ipa_controls.h b/include/libcamera/ipa/ipa_controls.h
index e5da1946..5fd13394 100644
--- a/include/libcamera/ipa/ipa_controls.h
+++ b/include/libcamera/ipa/ipa_controls.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_controls.h - IPA Control handling
+ * IPA Control handling
*/
#pragma once
diff --git a/include/libcamera/ipa/ipa_interface.h b/include/libcamera/ipa/ipa_interface.h
index 50ca0e7b..b93f1a15 100644
--- a/include/libcamera/ipa/ipa_interface.h
+++ b/include/libcamera/ipa/ipa_interface.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_interface.h - Image Processing Algorithm interface
+ * Image Processing Algorithm interface
*/
#pragma once
@@ -13,6 +13,7 @@
#include <map>
#include <vector>
+#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
@@ -22,8 +23,8 @@
namespace libcamera {
/*
- * Structs that are defined in core.mojom and have the skipHeader tag must be
- * #included here.
+ * Structs and enums that are defined in core.mojom and have the skipHeader
+ * tag must be #included here.
*/
class IPAInterface
diff --git a/include/libcamera/ipa/ipa_module_info.h b/include/libcamera/ipa/ipa_module_info.h
index b19b00f7..3507a6d7 100644
--- a/include/libcamera/ipa/ipa_module_info.h
+++ b/include/libcamera/ipa/ipa_module_info.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * ipa_module_info.h - Image Processing Algorithm module information
+ * Image Processing Algorithm module information
*/
#pragma once
diff --git a/include/libcamera/ipa/meson.build b/include/libcamera/ipa/meson.build
index 442ca3dd..3352d08f 100644
--- a/include/libcamera/ipa/meson.build
+++ b/include/libcamera/ipa/meson.build
@@ -9,7 +9,7 @@ libcamera_ipa_headers = files([
])
install_headers(libcamera_ipa_headers,
- subdir: libcamera_ipa_include_dir)
+ subdir : libcamera_ipa_include_dir)
libcamera_generated_ipa_headers = []
@@ -60,14 +60,14 @@ libcamera_generated_ipa_headers += custom_target('core_ipa_serializer_h',
'./' +'@INPUT@'
])
-ipa_mojom_files = [
- 'ipu3.mojom',
- 'raspberrypi.mojom',
- 'rkisp1.mojom',
- 'vimc.mojom',
-]
-
-ipa_mojoms = []
+# Mapping from pipeline handler name to mojom file
+pipeline_ipa_mojom_mapping = {
+ 'ipu3': 'ipu3.mojom',
+ 'rkisp1': 'rkisp1.mojom',
+ 'rpi/vc4': 'raspberrypi.mojom',
+ 'simple': 'soft.mojom',
+ 'vimc': 'vimc.mojom',
+}
#
# Generate headers from templates.
@@ -75,14 +75,23 @@ ipa_mojoms = []
# TODO Define per-pipeline ControlInfoMap with yaml?
-foreach file : ipa_mojom_files
+ipa_mojoms = []
+mojoms_built = []
+foreach pipeline, file : pipeline_ipa_mojom_mapping
name = file.split('.')[0]
- if name not in pipelines
+ # Avoid building duplicate mojom interfaces with the same interface file
+ if name in mojoms_built
continue
endif
- # {pipeline}.mojom-module
+ if pipeline not in pipelines
+ continue
+ endif
+
+ mojoms_built += name
+
+ # {interface}.mojom-module
mojom = custom_target(name + '_mojom_module',
input : file,
output : file + '-module',
@@ -94,7 +103,7 @@ foreach file : ipa_mojom_files
'--mojoms', '@INPUT@'
])
- # {pipeline}_ipa_interface.h
+ # {interface}_ipa_interface.h
header = custom_target(name + '_ipa_interface_h',
input : mojom,
output : name + '_ipa_interface.h',
@@ -110,7 +119,7 @@ foreach file : ipa_mojom_files
'./' +'@INPUT@'
])
- # {pipeline}_ipa_serializer.h
+ # {interface}_ipa_serializer.h
serializer = custom_target(name + '_ipa_serializer_h',
input : mojom,
output : name + '_ipa_serializer.h',
@@ -124,7 +133,7 @@ foreach file : ipa_mojom_files
'./' +'@INPUT@'
])
- # {pipeline}_ipa_proxy.h
+ # {interface}_ipa_proxy.h
proxy_header = custom_target(name + '_proxy_h',
input : mojom,
output : name + '_ipa_proxy.h',
@@ -146,6 +155,12 @@ foreach file : ipa_mojom_files
libcamera_generated_ipa_headers += [header, serializer, proxy_header]
endforeach
+ipa_mojom_files = []
+foreach pipeline, file : pipeline_ipa_mojom_mapping
+ if file not in ipa_mojom_files
+ ipa_mojom_files += file
+ endif
+endforeach
ipa_mojom_files = files(ipa_mojom_files)
# Pass this to the documentation generator in src/libcamera/ipa
diff --git a/include/libcamera/ipa/raspberrypi.mojom b/include/libcamera/ipa/raspberrypi.mojom
index c0de435b..5986c436 100644
--- a/include/libcamera/ipa/raspberrypi.mojom
+++ b/include/libcamera/ipa/raspberrypi.mojom
@@ -8,82 +8,139 @@ module ipa.RPi;
import "include/libcamera/ipa/core.mojom";
-enum BufferMask {
- MaskID = 0x00ffff,
- MaskStats = 0x010000,
- MaskEmbeddedData = 0x020000,
- MaskBayerData = 0x040000,
- MaskExternalBuffer = 0x100000,
-};
-
-/* Size of the LS grid allocation. */
+/* Size of the LS grid allocation on VC4. */
const uint32 MaxLsGridSize = 0x8000;
struct SensorConfig {
uint32 gainDelay;
uint32 exposureDelay;
uint32 vblankDelay;
+ uint32 hblankDelay;
uint32 sensorMetadata;
};
-struct IPAInitResult {
+struct InitParams {
+ bool lensPresent;
+ libcamera.IPACameraSensorInfo sensorInfo;
+ /* PISP specific */
+ libcamera.SharedFD fe;
+ libcamera.SharedFD be;
+};
+
+struct InitResult {
SensorConfig sensorConfig;
libcamera.ControlInfoMap controlInfo;
};
-struct ISPConfig {
- uint32 embeddedBufferId;
- uint32 bayerBufferId;
- bool embeddedBufferPresent;
- libcamera.ControlList controls;
+struct BufferIds {
+ uint32 bayer;
+ uint32 embedded;
+ uint32 stats;
};
-struct IPAConfig {
+struct ConfigParams {
uint32 transform;
+ libcamera.ControlInfoMap sensorControls;
+ libcamera.ControlInfoMap ispControls;
+ libcamera.ControlInfoMap lensControls;
+ /* VC4 specific */
libcamera.SharedFD lsTableHandle;
};
-struct IPAConfigResult {
- float modeSensitivity;
- libcamera.ControlInfoMap controlInfo;
+struct ConfigResult {
+ float modeSensitivity;
+ libcamera.ControlInfoMap controlInfo;
+ libcamera.ControlList sensorControls;
+ libcamera.ControlList lensControls;
};
-struct StartConfig {
+struct StartResult {
libcamera.ControlList controls;
int32 dropFrameCount;
- uint32 maxSensorFrameLengthMs;
+};
+
+struct PrepareParams {
+ BufferIds buffers;
+ libcamera.ControlList sensorControls;
+ libcamera.ControlList requestControls;
+ uint32 ipaContext;
+ uint32 delayContext;
+};
+
+struct ProcessParams {
+ BufferIds buffers;
+ uint32 ipaContext;
};
interface IPARPiInterface {
- init(libcamera.IPASettings settings)
- => (int32 ret, IPAInitResult result);
- start(libcamera.ControlList controls) => (StartConfig startConfig);
+ /**
+ * \fn init()
+ * \brief Initialise the IPA
+ * \param[in] settings Camera sensor information and configuration file
+ * \param[in] params Platform specific initialisation parameters
+ * \param[out] ret 0 on success or a negative error code otherwise
+ * \param[out] result Static sensor configuration and controls available
+ *
+ * This function initialises the IPA for a particular sensor from the
+ * pipeline handler.
+ *
+ * The \a settings conveys information about the camera sensor and
+ * configuration file requested by the pipeline handler.
+ *
+ * The \a result parameter returns the sensor delay for the given camera
+ * as well as a ControlInfoMap of available controls that can be handled
+ * by the IPA.
+ */
+ init(libcamera.IPASettings settings, InitParams params)
+ => (int32 ret, InitResult result);
+
+ /**
+ * \fn start()
+ * \brief Start the IPA
+ * \param[in] controls List of controls to handle
+ * \param[out] result Controls to apply and number of dropped frames
+ *
+ * This function sets the IPA to a started state.
+ *
+ * The \a controls provide a list of controls to handle immediately. The
+ * actual controls to apply on the sensor and ISP in the pipeline
+ * handler are returned in \a result.
+ *
+ * The number of convergence frames to be dropped is also returned in
+ * \a result.
+ */
+ start(libcamera.ControlList controls) => (StartResult result);
+
+ /**
+ * \fn stop()
+ * \brief Stop the IPA
+ *
+ * This function sets the IPA to a stopped state.
+ */
stop();
/**
* \fn configure()
- * \brief Configure the IPA stream and sensor settings
- * \param[in] sensorInfo Camera sensor information
- * \param[in] streamConfig Configuration of all active streams
- * \param[in] entityControls Controls provided by the pipeline entities
- * \param[in] ipaConfig Pipeline-handler-specific configuration data
- * \param[out] controls Controls to apply by the pipeline entity
- * \param[out] result Other results that the pipeline handler may require
- *
- * This function shall be called when the camera is configured to inform
- * the IPA of the camera's streams and the sensor settings.
- *
- * The \a sensorInfo conveys information about the camera sensor settings that
- * the pipeline handler has selected for the configuration.
- *
- * The \a ipaConfig and \a controls parameters carry data passed by the
- * pipeline handler to the IPA and back.
+ * \brief Configure the IPA
+ * \param[in] sensorInfo Sensor mode configuration
+ * \param[in] params Platform configuration parameters
+ * \param[out] ret 0 on success or a negative error code otherwise
+ * \param[out] result Results of the configuration operation
+ *
+ * This function configures the IPA for a particular camera
+ * configuration.
+ *
+ * The \a params parameter provides a list of available controls for the
+ * ISP, sensor and lens devices, and the user requested transform
+ * operation. It can also provide platform specific configuration
+ * parameters, e.g. the lens shading table memory handle for VC4.
+ *
+ * The \a result parameter returns the available controls for the given
+ * camera mode, a list of controls to apply to the sensor device, and
+ * the requested mode's sensitivity characteristics.
*/
- configure(libcamera.IPACameraSensorInfo sensorInfo,
- map<uint32, libcamera.IPAStream> streamConfig,
- map<uint32, libcamera.ControlInfoMap> entityControls,
- IPAConfig ipaConfig)
- => (int32 ret, libcamera.ControlList controls, IPAConfigResult result);
+ configure(libcamera.IPACameraSensorInfo sensorInfo, ConfigParams params)
+ => (int32 ret, ConfigResult result);
/**
* \fn mapBuffers()
@@ -106,7 +163,7 @@ interface IPARPiInterface {
* depending on the IPA protocol. Regardless of the protocol, all
* buffers mapped at a given time shall have unique numerical IDs.
*
- * The numerical IDs have no meaning defined by the IPA interface, and
+ * The numerical IDs have no meaning defined by the IPA interface, and
* should be treated as opaque handles by IPAs, with the only exception
* that ID zero is invalid.
*
@@ -126,15 +183,119 @@ interface IPARPiInterface {
*/
unmapBuffers(array<uint32> ids);
- [async] signalStatReady(uint32 bufferId);
- [async] signalQueueRequest(libcamera.ControlList controls);
- [async] signalIspPrepare(ISPConfig data);
+ /**
+ * \fn prepareIsp()
+ * \brief Prepare the ISP configuration for a frame
+ * \param[in] params Parameter set for the frame to process
+ *
+ * This function calls into all the algorithms in preparation for the
+ * frame to be processed by the ISP.
+ *
+ * The \a params parameter lists the buffer IDs for the Bayer and
+ * embedded data buffers, a ControlList of sensor frame params, and
+ * a ControlList of request controls for the current frame.
+ *
+ * Additionally, \a params contains the IPA context (ipaContext) to
+ * use as an index location to store control algorithm results, and a
+ * historical IPA context (delayContext) that was active when the sensor
+ * settings were requested by the IPA.
+ */
+ [async] prepareIsp(PrepareParams params);
+
+ /**
+ * \fn processStats()
+ * \brief Process the statistics provided by the ISP
+ * \param[in] params Parameter set for the statistics to process
+ *
+ * This function calls into all the algorithms to provide the statistics
+ * generated by the ISP for the processed frame.
+ *
+ * The \a params parameter lists the buffer ID for the statistics buffer
+ * and an IPA context (ipaContext) to use as an index location to store
+ * algorithm results.
+ */
+ [async] processStats(ProcessParams params);
};
interface IPARPiEventInterface {
- statsMetadataComplete(uint32 bufferId, libcamera.ControlList controls);
- runIsp(uint32 bufferId);
- embeddedComplete(uint32 bufferId);
+ /**
+ * \fn prepareIspComplete()
+ * \brief Signal completion of \a prepareIsp
+ * \param[in] buffers Bayer and embedded buffers actioned.
+ * \param[in] stitchSwapBuffers Whether the stitch block buffers need to be swapped.
+ *
+ * This asynchronous event is signalled to the pipeline handler once
+ * the \a prepareIsp signal has completed, and the ISP is ready to start
+ * processing the frame. The embedded data buffer may be recycled after
+ * this event.
+ */
+ prepareIspComplete(BufferIds buffers, bool stitchSwapBuffers);
+
+ /**
+ * \fn processStatsComplete()
+ * \brief Signal completion of \a processStats
+ * \param[in] buffers Statistics buffers actioned.
+ *
+ * This asynchronous event is signalled to the pipeline handler once
+ * the \a processStats signal has completed. The statistics buffer may
+ * be recycled after this event.
+ */
+ processStatsComplete(BufferIds buffers);
+
+ /**
+ * \fn metadataReady()
+ * \brief Signal request metadata is to be merged
+ * \param[in] metadata Control list of metadata to be merged
+ *
+ * This asynchronous event is signalled to the pipeline handler once
+ * all the frame metadata has been gathered. The pipeline handler will
+ * copy or merge this metadata into the \a Request returned back to the
+ * application.
+ */
+ metadataReady(libcamera.ControlList metadata);
+
+ /**
+ * \fn setIspControls()
+ * \brief Signal ISP controls to be applied.
+ * \param[in] controls List of controls to be applied.
+ *
+ * This asynchronous event is signalled to the pipeline handler during
+ * the \a prepareIsp signal after all algorithms have been run and the
+ * IPA requires ISP controls to be applied for the frame.
+ */
setIspControls(libcamera.ControlList controls);
- setDelayedControls(libcamera.ControlList controls);
+
+ /**
+ * \fn setDelayedControls()
+ * \brief Signal Sensor controls to be applied.
+ * \param[in] controls List of controls to be applied.
+ * \param[in] delayContext IPA context index used for this request
+ *
+ * This asynchronous event is signalled to the pipeline handler when
+ * the IPA requires sensor specific controls (e.g. shutter speed, gain,
+ * blanking) to be applied.
+ */
+ setDelayedControls(libcamera.ControlList controls, uint32 delayContext);
+
+ /**
+ * \fn setLensControls()
+ * \brief Signal lens controls to be applied.
+ * \param[in] controls List of controls to be applied.
+ *
+ * This asynchronous event is signalled to the pipeline handler when
+ * the IPA requires a lens movement control to be applied.
+ */
+ setLensControls(libcamera.ControlList controls);
+
+ /**
+ * \fn setCameraTimeout()
+ * \brief Request a watchdog timeout value to use
+ * \param[in] maxFrameLengthMs Timeout value in ms
+ *
+ * This asynchronous event is used by the IPA to inform the pipeline
+ * handler of an acceptable watchdog timer value to use for the sensor
+ * stream. This value is based on the history of frame lengths requested
+ * by the IPA.
+ */
+ setCameraTimeout(uint32 maxFrameLengthMs);
};
diff --git a/include/libcamera/ipa/rkisp1.mojom b/include/libcamera/ipa/rkisp1.mojom
index e3537385..1009e970 100644
--- a/include/libcamera/ipa/rkisp1.mojom
+++ b/include/libcamera/ipa/rkisp1.mojom
@@ -8,17 +8,23 @@ module ipa.rkisp1;
import "include/libcamera/ipa/core.mojom";
+struct IPAConfigInfo {
+ libcamera.IPACameraSensorInfo sensorInfo;
+ libcamera.ControlInfoMap sensorControls;
+};
+
interface IPARkISP1Interface {
init(libcamera.IPASettings settings,
- uint32 hwRevision)
- => (int32 ret);
+ uint32 hwRevision,
+ libcamera.IPACameraSensorInfo sensorInfo,
+ libcamera.ControlInfoMap sensorControls)
+ => (int32 ret, libcamera.ControlInfoMap ipaControls);
start() => (int32 ret);
stop();
- configure(libcamera.IPACameraSensorInfo sensorInfo,
- map<uint32, libcamera.IPAStream> streamConfig,
- map<uint32, libcamera.ControlInfoMap> entityControls)
- => (int32 ret);
+ configure(IPAConfigInfo configInfo,
+ map<uint32, libcamera.IPAStream> streamConfig)
+ => (int32 ret, libcamera.ControlInfoMap ipaControls);
mapBuffers(array<libcamera.IPABuffer> buffers);
unmapBuffers(array<uint32> ids);
diff --git a/include/libcamera/ipa/soft.mojom b/include/libcamera/ipa/soft.mojom
new file mode 100644
index 00000000..3aa2066e
--- /dev/null
+++ b/include/libcamera/ipa/soft.mojom
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+/*
+ * \todo Document the interface and remove the related EXCLUDE_PATTERNS entry.
+ */
+
+module ipa.soft;
+
+import "include/libcamera/ipa/core.mojom";
+
+interface IPASoftInterface {
+ init(libcamera.IPASettings settings,
+ libcamera.SharedFD fdStats,
+ libcamera.SharedFD fdParams,
+ libcamera.ControlInfoMap sensorCtrlInfoMap)
+ => (int32 ret);
+ start() => (int32 ret);
+ stop();
+ configure(libcamera.ControlInfoMap sensorCtrlInfoMap)
+ => (int32 ret);
+
+ [async] processStats(libcamera.ControlList sensorControls);
+};
+
+interface IPASoftEventInterface {
+ setSensorControls(libcamera.ControlList sensorControls);
+ setIspParams();
+};
diff --git a/include/libcamera/ipa/vimc.mojom b/include/libcamera/ipa/vimc.mojom
index 718b9674..dd991f7e 100644
--- a/include/libcamera/ipa/vimc.mojom
+++ b/include/libcamera/ipa/vimc.mojom
@@ -17,8 +17,18 @@ enum IPAOperationCode {
IPAOperationStop,
};
+[scopedEnum] enum TestFlag {
+ Flag1 = 0x1,
+ Flag2 = 0x2,
+ Flag3 = 0x4,
+ Flag4 = 0x8,
+};
+
interface IPAVimcInterface {
- init(libcamera.IPASettings settings) => (int32 ret);
+ init(libcamera.IPASettings settings,
+ IPAOperationCode code,
+ [flags] TestFlag inFlags)
+ => (int32 ret, [flags] TestFlag outFlags);
configure(libcamera.IPACameraSensorInfo sensorInfo,
map<uint32, libcamera.IPAStream> streamConfig,
@@ -41,5 +51,5 @@ interface IPAVimcInterface {
};
interface IPAVimcEventInterface {
- paramsBufferReady(uint32 bufferId);
+ paramsBufferReady(uint32 bufferId, [flags] TestFlag flags);
};
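A brief sketch of what the [flags] annotation maps to on the C++ side, assuming the generated scoped enum is manipulated through libcamera::Flags<> as elsewhere in the code base; the local TestFlag definition merely mirrors the mojom enum for illustration.

#include <libcamera/base/flags.h>

/* Local mirror of the mojom TestFlag enum, for illustration only. */
enum class TestFlag {
	Flag1 = 0x1,
	Flag2 = 0x2,
	Flag3 = 0x4,
	Flag4 = 0x8,
};

int main()
{
	libcamera::Flags<TestFlag> inFlags;

	/* Combine individual flags in a type-safe container. */
	inFlags |= TestFlag::Flag1;
	inFlags |= TestFlag::Flag3;

	/* Test individual bits. */
	if (inFlags & TestFlag::Flag1)
		return 0;

	return 1;
}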
diff --git a/include/libcamera/logging.h b/include/libcamera/logging.h
index cd842f67..e334d87b 100644
--- a/include/libcamera/logging.h
+++ b/include/libcamera/logging.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * logging.h - Logging infrastructure
+ * Logging infrastructure
*/
#pragma once
diff --git a/include/libcamera/meson.build b/include/libcamera/meson.build
index 408b7acf..84c6c4cb 100644
--- a/include/libcamera/meson.build
+++ b/include/libcamera/meson.build
@@ -12,6 +12,7 @@ libcamera_public_headers = files([
'framebuffer_allocator.h',
'geometry.h',
'logging.h',
+ 'orientation.h',
'pixel_format.h',
'request.h',
'stream.h',
@@ -31,20 +32,58 @@ install_headers(libcamera_public_headers,
libcamera_headers_install_dir = get_option('includedir') / libcamera_include_dir
-# control_ids.h and property_ids.h
-control_source_files = [
- 'control_ids',
- 'property_ids',
-]
+controls_map = {
+ 'controls': {
+ 'draft': 'control_ids_draft.yaml',
+ 'core': 'control_ids_core.yaml',
+ 'rpi/vc4': 'control_ids_rpi.yaml',
+ },
+
+ 'properties': {
+ 'draft': 'property_ids_draft.yaml',
+ 'core': 'property_ids_core.yaml',
+ }
+}
control_headers = []
+controls_files = []
+properties_files = []
+
+foreach mode, entry : controls_map
+ files_list = []
+ input_files = []
+ foreach vendor, header : entry
+ if vendor != 'core' and vendor != 'draft'
+ if vendor not in pipelines
+ continue
+ endif
+ endif
+
+ if header in files_list
+ continue
+ endif
+
+ files_list += header
+ input_files += files('../../src/libcamera/' + header)
+ endforeach
+
+ outfile = ''
+ if mode == 'controls'
+ outfile = 'control_ids.h'
+ controls_files += files_list
+ else
+ outfile = 'property_ids.h'
+ properties_files += files_list
+ endif
-foreach header : control_source_files
- input_files = files('../../src/libcamera/' + header +'.yaml', header + '.h.in')
+ template_file = files(outfile + '.in')
+ ranges_file = files('../../src/libcamera/control_ranges.yaml')
control_headers += custom_target(header + '_h',
input : input_files,
- output : header + '.h',
- command : [gen_controls, '-o', '@OUTPUT@', '@INPUT@'],
+ output : outfile,
+ command : [gen_controls, '-o', '@OUTPUT@',
+ '--mode', mode, '-t', template_file,
+ '-r', ranges_file, '@INPUT@'],
install : true,
install_dir : libcamera_headers_install_dir)
endforeach
diff --git a/include/libcamera/orientation.h b/include/libcamera/orientation.h
new file mode 100644
index 00000000..a3b40e63
--- /dev/null
+++ b/include/libcamera/orientation.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Ideas On Board Oy
+ *
+ * Image orientation
+ */
+
+#pragma once
+
+#include <iostream>
+
+namespace libcamera {
+
+enum class Orientation {
+ /* EXIF tag 274 starts from '1' */
+ Rotate0 = 1,
+ Rotate0Mirror,
+ Rotate180,
+ Rotate180Mirror,
+ Rotate90Mirror,
+ Rotate270,
+ Rotate270Mirror,
+ Rotate90,
+};
+
+Orientation orientationFromRotation(int angle, bool *success = nullptr);
+
+std::ostream &operator<<(std::ostream &out, const Orientation &orientation);
+
+} /* namespace libcamera */
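A short usage sketch for the declarations above; the rotation angle is an arbitrary example value.

#include <iostream>

#include <libcamera/orientation.h>

int main()
{
	bool ok = false;

	/* Map a rotation angle in degrees to an Orientation value, if valid. */
	libcamera::Orientation o = libcamera::orientationFromRotation(180, &ok);
	if (!ok)
		return 1;

	/* Stream-insert the orientation using the operator declared above. */
	std::cout << o << std::endl;
	return 0;
}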
diff --git a/include/libcamera/pixel_format.h b/include/libcamera/pixel_format.h
index d49c5f78..ea60fe72 100644
--- a/include/libcamera/pixel_format.h
+++ b/include/libcamera/pixel_format.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * pixel_format.h - libcamera Pixel Format
+ * libcamera Pixel Format
*/
#pragma once
diff --git a/include/libcamera/property_ids.h.in b/include/libcamera/property_ids.h.in
index ff019408..e6edabca 100644
--- a/include/libcamera/property_ids.h.in
+++ b/include/libcamera/property_ids.h.in
@@ -2,14 +2,16 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * property_ids.h - Property ID list
+ * Property ID list
*
* This file is auto-generated. Do not edit.
*/
#pragma once
+#include <map>
#include <stdint.h>
+#include <string>
#include <libcamera/controls.h>
@@ -23,14 +25,10 @@ ${ids}
${controls}
-namespace draft {
-
-${draft_controls}
-
-} /* namespace draft */
-
extern const ControlIdMap properties;
+${vendor_controls}
+
} /* namespace properties */
} /* namespace libcamera */
diff --git a/include/libcamera/request.h b/include/libcamera/request.h
index dffde153..2c78d9bb 100644
--- a/include/libcamera/request.h
+++ b/include/libcamera/request.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * request.h - Capture request handling
+ * Capture request handling
*/
#pragma once
diff --git a/include/libcamera/stream.h b/include/libcamera/stream.h
index f0ae7e62..d510238a 100644
--- a/include/libcamera/stream.h
+++ b/include/libcamera/stream.h
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * stream.h - Video stream for a Camera
+ * Video stream for a Camera
*/
#pragma once
#include <map>
#include <memory>
+#include <ostream>
#include <string>
#include <vector>
@@ -61,14 +62,14 @@ private:
StreamFormats formats_;
};
-enum StreamRole {
+enum class StreamRole {
Raw,
StillCapture,
VideoRecording,
Viewfinder,
};
-using StreamRoles = std::vector<StreamRole>;
+std::ostream &operator<<(std::ostream &out, StreamRole role);
class Stream
{
diff --git a/include/libcamera/transform.h b/include/libcamera/transform.h
index 2e76b940..a88f809e 100644
--- a/include/libcamera/transform.h
+++ b/include/libcamera/transform.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * Copyright (C) 2020, Raspberry Pi (Trading) Limited
+ * Copyright (C) 2020, Raspberry Pi Ltd
*
- * transform.h - 2D plane transforms
+ * 2D plane transforms
*/
#pragma once
@@ -11,6 +11,8 @@
namespace libcamera {
+enum class Orientation;
+
enum class Transform : int {
Identity = 0,
Rot0 = Identity,
@@ -70,6 +72,9 @@ constexpr Transform operator~(Transform t)
Transform transformFromRotation(int angle, bool *success = nullptr);
+Transform operator/(const Orientation &o1, const Orientation &o2);
+Orientation operator*(const Orientation &o, const Transform &t);
+
const char *transformToString(Transform t);
} /* namespace libcamera */
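A sketch exercising the two new operators declared above, under the assumption that o1 / o2 yields the Transform which, applied to o2, produces o1; only the declared signatures are relied on here.

#include <libcamera/orientation.h>
#include <libcamera/transform.h>

int main()
{
	using namespace libcamera;

	Orientation requested = Orientation::Rotate90;
	Orientation mounting = Orientation::Rotate180;

	/* Transform needed to turn the mounting orientation into the request. */
	Transform t = requested / mounting;

	/* Applying it back should recover the requested orientation. */
	Orientation achieved = mounting * t;

	return achieved == requested ? 0 : 1;
}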
diff --git a/include/libcamera/version.h.in b/include/libcamera/version.h.in
index 6e24d0a8..50bf1001 100644
--- a/include/libcamera/version.h.in
+++ b/include/libcamera/version.h.in
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
- * version.h - Library version information
+ * Library version information
*
* This file is auto-generated. Do not edit.
*/
diff --git a/include/linux/README b/include/linux/README
index 4e314b98..101e4997 100644
--- a/include/linux/README
+++ b/include/linux/README
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: CC0-1.0
-Files in this directory are imported from v5.16-rc7 of the Linux kernel. Do not
+Files in this directory are imported from v6.7 of the Linux kernel. Do not
modify them manually.
diff --git a/include/linux/bcm2835-isp.h b/include/linux/bcm2835-isp.h
index 94c3af94..5f0f78e3 100644
--- a/include/linux/bcm2835-isp.h
+++ b/include/linux/bcm2835-isp.h
@@ -4,7 +4,7 @@
*
* BCM2835 ISP driver - user space header file.
*
- * Copyright © 2019-2020 Raspberry Pi (Trading) Ltd.
+ * Copyright © 2019-2020 Raspberry Pi Ltd
*
* Author: Naushir Patuck (naush@raspberrypi.com)
*
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8e4a2ca0..5a6fda66 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -85,6 +85,88 @@ struct dma_buf_sync {
#define DMA_BUF_NAME_LEN 32
+/**
+ * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
+ * current set of fences on a dma-buf file descriptor as a sync_file. CPU
+ * waits via poll() or other driver-specific mechanisms typically wait on
+ * whatever fences are on the dma-buf at the time the wait begins. This
+ * is similar except that it takes a snapshot of the current fences on the
+ * dma-buf for waiting later instead of waiting immediately. This is
+ * useful for modern graphics APIs such as Vulkan which assume an explicit
+ * synchronization model but still need to inter-operate with dma-buf.
+ *
+ * The intended usage pattern is the following:
+ *
+ * 1. Export a sync_file with flags corresponding to the expected GPU usage
+ * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
+ *
+ * 2. Submit rendering work which uses the dma-buf. The work should wait on
+ * the exported sync file before rendering and produce another sync_file
+ * when complete.
+ *
+ * 3. Import the rendering-complete sync_file into the dma-buf with flags
+ * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
+ *
+ * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
+ * the above is not a single atomic operation. If userspace wants to ensure
+ * ordering via these fences, it is the responsibility of userspace to use
+ * locks or other mechanisms to ensure that no other context adds fences or
+ * submits work between steps 1 and 3 above.
+ */
+struct dma_buf_export_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * the returned sync file waits on any writers of the dma-buf to
+ * complete. Waiting on the returned sync file is equivalent to
+ * poll() with POLLIN.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
+ * any users of the dma-buf (read or write) to complete. Waiting
+ * on the returned sync file is equivalent to poll() with POLLOUT.
+ * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
+ * is equivalent to just DMA_BUF_SYNC_WRITE.
+ */
+ __u32 flags;
+ /** @fd: Returned sync file descriptor */
+ __s32 fd;
+};
+
+/**
+ * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
+ * sync_file into a dma-buf for the purposes of implicit synchronization
+ * with other dma-buf consumers. This allows clients using explicitly
+ * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
+ * which expect implicit synchronization such as OpenGL or most media
+ * drivers/video.
+ */
+struct dma_buf_import_sync_file {
+ /**
+ * @flags: Read/write flags
+ *
+ * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+ *
+ * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+ * this inserts the sync_file as a read-only fence. Any subsequent
+ * implicitly synchronized writes to this dma-buf will wait on this
+ * fence but reads will not.
+ *
+ * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
+ * write fence. All subsequent implicitly synchronized access to
+ * this dma-buf will wait on this fence.
+ */
+ __u32 flags;
+ /** @fd: Sync file descriptor */
+ __s32 fd;
+};
+
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
@@ -92,7 +174,9 @@ struct dma_buf_sync {
* between them in actual uapi, they're just different numbers.
*/
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
-#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32)
-#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64)
+#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
+#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
+#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
+#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
#endif
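The two ioctls above can be sketched as follows; dmabufFd is assumed to be a valid dma-buf file descriptor obtained elsewhere (for instance from a dma-heap allocation), and error handling is minimal.

#include <string.h>
#include <sys/ioctl.h>

#include <linux/dma-buf.h>

/* Snapshot the current writers of the dma-buf as a sync_file fd. */
static int exportReadFence(int dmabufFd)
{
	struct dma_buf_export_sync_file exp;

	memset(&exp, 0, sizeof(exp));
	exp.flags = DMA_BUF_SYNC_READ;

	if (ioctl(dmabufFd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0)
		return -1;

	return exp.fd;
}

/* Insert a rendering-complete fence so later implicit users wait on it. */
static int importWriteFence(int dmabufFd, int syncFileFd)
{
	struct dma_buf_import_sync_file imp;

	memset(&imp, 0, sizeof(imp));
	imp.flags = DMA_BUF_SYNC_WRITE;
	imp.fd = syncFileFd;

	return ioctl(dmabufFd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
}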
diff --git a/include/linux/drm_fourcc.h b/include/linux/drm_fourcc.h
index ea11dcb4..b4e1a092 100644
--- a/include/linux/drm_fourcc.h
+++ b/include/linux/drm_fourcc.h
@@ -88,6 +88,18 @@ extern "C" {
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant driver
+ * stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
@@ -99,18 +111,42 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 10 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
-/* 12 bpp Red */
+/* 12 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
-/* 16 bpp Red */
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -174,6 +210,10 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 48 bpp RGB */
+#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
+#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
+
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@@ -205,7 +245,9 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
@@ -285,6 +327,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@@ -314,6 +358,13 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 2 plane YCbCr420.
+ * 3 10 bit components and 2 padding bits packed into 4 bytes.
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
+ */
+#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
+
/* 3 plane non-subsampled (444) YCbCr
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
@@ -439,6 +490,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MIPI 0x0b
+#define DRM_FORMAT_MOD_VENDOR_RPI 0x0c
/* add more to the end as needed */
@@ -617,7 +669,7 @@ extern "C" {
*
* The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
- * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
@@ -631,6 +683,96 @@ extern "C" {
#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
/*
+ * Intel Tile 4 layout
+ *
+ * This is a tiled layout using 4KB tiles in a row-major layout. It has the same
+ * shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
+ * only differs from Tile Y at the 256B granularity in between. At this
+ * granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
+ * of 64B x 8 rows.
+ */
+#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
+
+/*
+ * Intel color control surfaces (CCS) for DG2 media compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. For semi-planar formats
+ * like NV12, the Y and UV planes are Tile 4 and are located at plane indices
+ * 0 and 1, respectively. The CCS for all planes are stored outside of the
+ * GEM object in a reserved memory area dedicated for the storage of the
+ * CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
+ * pitch is required to be a multiple of four Tile 4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
+ *
+ * The main surface is Tile 4 and at plane index 0. The CCS data is stored
+ * outside of the GEM object in a reserved memory area dedicated for the
+ * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
+ * main surface pitch is required to be a multiple of four Tile 4 widths. The
+ * clear color is stored at plane index 1 and the pitch should be 64 bytes
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * for details.
+ */
+#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
+/*
* IPU3 Bayer packing layout
*
* The IPU3 raw Bayer formats use a custom packing layout where there are no
@@ -638,7 +780,7 @@ extern "C" {
* the 6 most significant bits in the last byte unused. The format is little
* endian.
*/
-#define IPU3_FORMAT_MOD_PACKED fourcc_mod_code(INTEL, 9)
+#define IPU3_FORMAT_MOD_PACKED fourcc_mod_code(INTEL, 13)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
@@ -677,6 +819,28 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+/*
+ * Qualcomm Tiled Format
+ *
+ * Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned with 4k(bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
+
+/*
+ * Qualcomm Alternate Tiled Format
+ *
+ * Alternate tiled format typically only used within GMEM.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
+
+
/* Vivante framebuffer modifiers */
/*
@@ -717,6 +881,35 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+/*
+ * Vivante TS (tile-status) buffer modifiers. They can be combined with all of
+ * the color buffer tiling modifiers defined above. When TS is present it's a
+ * separate buffer containing the clear/compression status of each tile. The
+ * modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
+ * tile size in bytes covered by one entry in the status buffer and s is the
+ * number of status bits per entry.
+ * We reserve the top 8 bits of the Vivante modifier space for tile status
+ * clear/compression modifiers, as future cores might add some more TS layout
+ * variations.
+ */
+#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
+#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
+#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
+#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
+#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
+
+/*
+ * Vivante compression modifiers. Those depend on a TS modifier being present
+ * as the TS bits get reinterpreted as compression tags instead of simple
+ * clear markers when compression is enabled.
+ */
+#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
+#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
+
+/* Masking out the extension bits will yield the base modifier. */
+#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
+ VIVANTE_MOD_COMP_MASK)
+
/* NVIDIA frame buffer modifiers */
/*
@@ -929,6 +1122,10 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
+ *
+ * The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
+ * supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
+ * wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
@@ -1358,6 +1555,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_VER_GFX9 1
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+#define AMD_FMT_MOD_TILE_VER_GFX11 4
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
@@ -1373,6 +1571,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
@@ -1439,11 +1638,11 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define AMD_FMT_MOD_PIPE_MASK 0x7
#define AMD_FMT_MOD_SET(field, value) \
- ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+ ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
#define AMD_FMT_MOD_GET(field, value) \
(((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
#define AMD_FMT_MOD_CLEAR(field) \
- (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+ (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
/* Mobile Industry Processor Interface (MIPI) modifiers */
@@ -1472,6 +1671,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define MIPI_FORMAT_MOD_CSI2_PACKED fourcc_mod_code(MIPI, 1)
+#define PISP_FORMAT_MOD_COMPRESS_MODE1 fourcc_mod_code(RPI, 1)
+#define PISP_FORMAT_MOD_COMPRESS_MODE2 fourcc_mod_code(RPI, 2)
+
#if defined(__cplusplus)
}
#endif
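For illustration, a small sketch decomposing one of the formats and one of the modifiers added above, showing the fourcc byte layout and the 8-bit vendor field used by fourcc_mod_code().

#include <cstdint>
#include <cstdio>

#include <linux/drm_fourcc.h>

int main()
{
	uint32_t fmt = DRM_FORMAT_P030;
	uint64_t mod = PISP_FORMAT_MOD_COMPRESS_MODE1;

	/* The four ASCII characters are packed little endian. */
	printf("fourcc: %c%c%c%c\n",
	       (int)(fmt & 0xff), (int)((fmt >> 8) & 0xff),
	       (int)((fmt >> 16) & 0xff), (int)((fmt >> 24) & 0xff));

	/* The vendor id occupies the top 8 bits of the 64-bit modifier. */
	printf("vendor: 0x%02x code: %llu\n",
	       (unsigned int)(mod >> 56),
	       (unsigned long long)(mod & 0x00ffffffffffffffULL));

	return 0;
}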
diff --git a/include/linux/intel-ipu3.h b/include/linux/intel-ipu3.h
index f30dce43..bd771f1b 100644
--- a/include/linux/intel-ipu3.h
+++ b/include/linux/intel-ipu3.h
@@ -34,11 +34,17 @@
* struct ipu3_uapi_grid_config - Grid plane config
*
* @width: Grid horizontal dimensions, in number of grid blocks(cells).
+ * For AWB, the range is (16, 80).
+ * For AF/AE, the range is (16, 32).
* @height: Grid vertical dimensions, in number of grid cells.
+ * For AWB, the range is (16, 60).
+ * For AF/AE, the range is (16, 24).
* @block_width_log2: Log2 of the width of each cell in pixels.
- * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
* @block_height_log2: Log2 of the height of each cell in pixels.
- * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * For AWB, the range is [3, 6].
+ * For AF/AE, the range is [3, 7].
* @height_per_slice: The number of blocks in vertical axis per slice.
* Default 2.
* @x_start: X value of top left corner of Region of Interest(ROI).
@@ -68,21 +74,21 @@ struct ipu3_uapi_grid_config {
* @R_avg: Red average in the cell.
* @B_avg: Blue average in the cell.
* @Gb_avg: Green average for blue lines in the cell.
- * @sat_ratio: Percentage of pixels over a given threshold set in
+ * @sat_ratio: Percentage of pixels over the thresholds specified in
* ipu3_uapi_awb_config_s, coded from 0 to 255.
- * @padding0: Unused byte for padding.
- * @padding1: Unused byte for padding.
- * @padding2: Unused byte for padding.
+ * @padding0: Unused byte for padding.
+ * @padding1: Unused byte for padding.
+ * @padding2: Unused byte for padding.
*/
struct ipu3_uapi_awb_set_item {
- unsigned char Gr_avg;
- unsigned char R_avg;
- unsigned char B_avg;
- unsigned char Gb_avg;
- unsigned char sat_ratio;
- unsigned char padding0;
- unsigned char padding1;
- unsigned char padding2;
+ __u8 Gr_avg;
+ __u8 R_avg;
+ __u8 B_avg;
+ __u8 Gb_avg;
+ __u8 sat_ratio;
+ __u8 padding0;
+ __u8 padding1;
+ __u8 padding2;
} __attribute__((packed));
/*
@@ -98,7 +104,6 @@ struct ipu3_uapi_awb_set_item {
(IPU3_UAPI_AWB_MAX_SETS * \
(IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
-
/**
* struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
*
@@ -621,8 +626,11 @@ struct ipu3_uapi_stats_3a {
* @b: white balance gain for B channel.
* @gb: white balance gain for Gb channel.
*
- * Precision u3.13, range [0, 8). White balance correction is done by applying
- * a multiplicative gain to each color channels prior to BNR.
+ * For BNR parameters, WB gain factors for the four channels [Ggr, Ggb, Gb, Gr].
+ * Their precision is U3.13 and the range is (0, 8); the actual gain is
+ * Gx + 1, typically Gx = 1.
+ *
+ * Pout = {Pin * (1 + Gx)}.
*/
struct ipu3_uapi_bnr_static_config_wb_gains_config {
__u16 gr;
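A short worked example of the gain encoding described in the comment above, assuming U3.13 means 3 integer and 13 fractional bits, i.e. a scale of 1/8192; the register value is arbitrary.

#include <cstdint>
#include <cstdio>

int main()
{
	uint16_t gr = 0x1000;           /* Gx = 0x1000 / 8192 = 0.5 */
	double gx = gr / 8192.0;
	double applied = 1.0 + gx;      /* Pout = Pin * (1 + Gx), i.e. 1.5x here */

	printf("Gx = %.3f, applied gain = %.3f\n", gx, applied);
	return 0;
}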
diff --git a/include/linux/media-bus-format.h b/include/linux/media-bus-format.h
index 0dfc11ee..f05f747e 100644
--- a/include/linux/media-bus-format.h
+++ b/include/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101e */
+/* RGB - next is 0x1026 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,8 +46,12 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025
+#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024
+#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
@@ -59,13 +63,17 @@
#define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
+#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e
+#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
+#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020
+#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202e */
+/* YUV (including grey) - next is 0x202f */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -88,6 +96,7 @@
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_Y14_1X14 0x202d
+#define MEDIA_BUS_FMT_Y16_1X16 0x202e
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
diff --git a/include/linux/media.h b/include/linux/media.h
index e3123d1a..b5a77bbf 100644
--- a/include/linux/media.h
+++ b/include/linux/media.h
@@ -20,7 +20,6 @@
#ifndef __LINUX_MEDIA_H
#define __LINUX_MEDIA_H
-#include <stdint.h>
#include <linux/ioctl.h>
#include <linux/types.h>
@@ -141,8 +140,8 @@ struct media_device_info {
#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
/* Entity flags */
-#define MEDIA_ENT_FL_DEFAULT (1 << 0)
-#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+#define MEDIA_ENT_FL_DEFAULT (1U << 0)
+#define MEDIA_ENT_FL_CONNECTOR (1U << 1)
/* OR with the entity id value to find the next entity */
#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31)
@@ -204,9 +203,9 @@ struct media_entity_desc {
};
};
-#define MEDIA_PAD_FL_SINK (1 << 0)
-#define MEDIA_PAD_FL_SOURCE (1 << 1)
-#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
+#define MEDIA_PAD_FL_SINK (1U << 0)
+#define MEDIA_PAD_FL_SOURCE (1U << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
@@ -215,14 +214,14 @@ struct media_pad_desc {
__u32 reserved[2];
};
-#define MEDIA_LNK_FL_ENABLED (1 << 0)
-#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
-#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
+#define MEDIA_LNK_FL_ENABLED (1U << 0)
+#define MEDIA_LNK_FL_IMMUTABLE (1U << 1)
+#define MEDIA_LNK_FL_DYNAMIC (1U << 2)
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
-# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
-# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
-# define MEDIA_LNK_FL_ANCILLARY_LINK (2 << 28)
+# define MEDIA_LNK_FL_DATA_LINK (0U << 28)
+# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28)
+# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28)
struct media_link_desc {
struct media_pad_desc source;
@@ -277,7 +276,7 @@ struct media_links_enum {
* struct media_device_info.
*/
#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_entity {
__u32 id;
@@ -312,7 +311,7 @@ struct media_v2_interface {
* struct media_device_info.
*/
#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_pad {
__u32 id;
@@ -415,7 +414,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
+#define MEDIA_API_VERSION ((0U << 16) | (1U << 8) | 0U)
#endif /* __LINUX_MEDIA_H */
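A sketch of the version-check helpers touched above: read the media device info and test whether entities expose flags; the device node path is an example only.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/media.h>

int main()
{
	int fd = open("/dev/media0", O_RDWR);
	if (fd < 0)
		return 1;

	struct media_device_info info = {};
	if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0) {
		close(fd);
		return 1;
	}

	/* True when the reported media API version is 4.19 or newer. */
	bool hasEntityFlags = MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version);

	close(fd);
	return hasEntityFlags ? 0 : 2;
}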
diff --git a/include/linux/rkisp1-config.h b/include/linux/rkisp1-config.h
index 012293e3..2d1c448a 100644
--- a/include/linux/rkisp1-config.h
+++ b/include/linux/rkisp1-config.h
@@ -4,8 +4,8 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
-#ifndef _RKISP1_CONFIG_H
-#define _RKISP1_CONFIG_H
+#ifndef _UAPI_RKISP1_CONFIG_H
+#define _UAPI_RKISP1_CONFIG_H
#include <linux/types.h>
@@ -117,7 +117,46 @@
/*
* Defect Pixel Cluster Correction
*/
-#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
+
+#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2)
+
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3)
+
+/* 0-2 for sets 1-3 */
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0)
+#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3)
+
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12)
+
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8)
+
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2))
+
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4))
+#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2))
/*
* Denoising pre filter
@@ -140,12 +179,14 @@
* @RKISP1_V11: declared in the original vendor code, but not used
* @RKISP1_V12: used at least in rk3326 and px30
* @RKISP1_V13: used at least in rk1808
+ * @RKISP1_V_IMX8MP: used in at least imx8mp
*/
enum rkisp1_cif_isp_version {
RKISP1_V10 = 10,
RKISP1_V11,
RKISP1_V12,
RKISP1_V13,
+ RKISP1_V_IMX8MP,
};
enum rkisp1_cif_isp_histogram_mode {
@@ -249,16 +290,20 @@ struct rkisp1_cif_isp_bls_config {
};
/**
- * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
+ * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
*
- * Methods Configuration used by Defect Pixel Cluster Correction
+ * This structure stores the configuration of one set of methods for the DPCC
+ * algorithm. Multiple methods can be selected in each set (independently for
+ * the Green and Red/Blue components) through the @method field, the result is
+ * the logical AND of all enabled methods. The remaining fields set thresholds
+ * and factors for each method.
*
- * @method: Method enable bits
- * @line_thresh: Line threshold
- * @line_mad_fac: Line MAD factor
- * @pg_fac: Peak gradient factor
- * @rnd_thresh: Rank Neighbor Difference threshold
- * @rg_fac: Rank gradient factor
+ * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
+ * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
+ * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
+ * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
+ * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
+ * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
*/
struct rkisp1_cif_isp_dpcc_methods_config {
__u32 method;
@@ -272,14 +317,16 @@ struct rkisp1_cif_isp_dpcc_methods_config {
/**
* struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
*
- * Configuration used by Defect Pixel Cluster Correction
+ * Configuration used by Defect Pixel Cluster Correction. Three sets of methods
+ * can be configured and selected through the @set_use field. The result is the
+ * logical OR of all enabled sets.
*
- * @mode: dpcc output mode
- * @output_mode: whether use hard coded methods
- * @set_use: stage1 methods set
- * @methods: methods config
- * @ro_limits: rank order limits
- * @rnd_offs: differential rank offsets for rank neighbor difference
+ * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
+ * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
+ * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
+ * @methods: Methods sets configuration
+ * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
+ * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
*/
struct rkisp1_cif_isp_dpcc_config {
__u32 mode;
@@ -947,4 +994,4 @@ struct rkisp1_stat_buffer {
struct rkisp1_cif_isp_stat params;
};
-#endif /* _RKISP1_CONFIG_H */
+#endif /* _UAPI_RKISP1_CONFIG_H */
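A sketch of how the new DPCC helper macros combine when filling one methods set; the numeric thresholds and factors are arbitrary example values, not tuned parameters.

#include <cstring>

#include <linux/rkisp1-config.h>

static void fillDpccMethodsSet(struct rkisp1_cif_isp_dpcc_methods_config *set)
{
	memset(set, 0, sizeof(*set));

	/* Enable the peak gradient and rank order methods for green pixels. */
	set->method = RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE
		    | RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE;

	/* Each register packs a green and a red/blue component. */
	set->line_thresh = RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(8)
			 | RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(8);
	set->pg_fac = RKISP1_CIF_ISP_DPCC_PG_FAC_G(6)
		    | RKISP1_CIF_ISP_DPCC_PG_FAC_RB(6);
}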
diff --git a/include/linux/v4l2-common.h b/include/linux/v4l2-common.h
index 14de1731..c3ca11e3 100644
--- a/include/linux/v4l2-common.h
+++ b/include/linux/v4l2-common.h
@@ -10,45 +10,6 @@
*
* Copyright (C) 2012 Nokia Corporation
* Contact: Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __V4L2_COMMON__
diff --git a/include/linux/v4l2-controls.h b/include/linux/v4l2-controls.h
index a055d257..b9f64810 100644
--- a/include/linux/v4l2-controls.h
+++ b/include/linux/v4l2-controls.h
@@ -4,44 +4,6 @@
*
* Copyright (C) 1999-2012 the contributors
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* The contents of this header was split off from videodev2.h. All control
* definitions should be added to this header, which is included by
* videodev2.h.
@@ -128,6 +90,7 @@ enum v4l2_colorfx {
V4L2_COLORFX_SOLARIZATION = 13,
V4L2_COLORFX_ANTIQUE = 14,
V4L2_COLORFX_SET_CBCR = 15,
+ V4L2_COLORFX_SET_RGB = 16,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -145,14 +108,17 @@ enum v4l2_colorfx {
#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
+#define V4L2_CID_COLORFX_RGB (V4L2_CID_BASE+43)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+43)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+44)
/* USER-class private control IDs */
-/* The base for the meye driver controls. See linux/meye.h for the list
- * of controls. We reserve 16 controls for this driver. */
+/*
+ * The base for the meye driver controls. This driver was removed, but
+ * we keep this define in case any software still uses it.
+ */
#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
/* The base for the bttv driver controls.
@@ -221,6 +187,30 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170)
+/*
+ * The base for the isl7998x driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
+
+/*
+ * The base for DW100 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190)
+
+/*
+ * The base for Aspeed driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
+
+/*
+ * The base for Nuvoton NPCM driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -443,6 +433,11 @@ enum v4l2_mpeg_video_multi_slice_mode {
#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234)
#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235)
#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236)
+#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237)
+enum v4l2_mpeg_video_intra_refresh_period_type {
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0,
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1,
+};
/* CIDs for the MPEG-2 Part 2 (H.262) codec */
#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270)
@@ -815,6 +810,88 @@ enum v4l2_mpeg_video_frame_skip_mode {
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
+#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
+/**
+ * enum v4l2_mpeg_video_av1_profile - AV1 profiles
+ *
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
+ * streams with seq_profile equal to 0.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
+ * streams with seq_profile less than or equal to 1.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
+ * decode streams with seq_profile less than or equal to 2.
+ *
+ * Conveys the highest profile a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_profile {
+ V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
+};
+
+#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
+/**
+ * enum v4l2_mpeg_video_av1_level - AV1 levels
+ *
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
+ *
+ * Conveys the highest level a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_level {
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
+};
+
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
@@ -1002,6 +1079,8 @@ enum v4l2_auto_focus_range {
#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35)
+#define V4L2_CID_HDR_SENSOR_MODE (V4L2_CID_CAMERA_CLASS_BASE+36)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
@@ -1563,6 +1642,8 @@ struct v4l2_h264_dpb_entry {
#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02
#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04
+#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08
+#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10
#define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7)
/**
@@ -1717,7 +1798,7 @@ struct v4l2_vp8_segment {
* @sharpness_level: matches sharpness_level syntax element.
* @level: matches loop_filter_level syntax element.
* @padding: padding field. Should be zeroed by applications.
- * @flags: see V4L2_VP8_LF_FLAG_{}.
+ * @flags: see V4L2_VP8_LF_{}.
*
* This structure contains loop filter related parameters.
* See the 'mb_lf_adjustments()' part of the frame header syntax,
@@ -1984,6 +2065,469 @@ struct v4l2_ctrl_mpeg2_quantisation {
__u8 chroma_non_intra_quantiser_matrix[64];
};
+#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400)
+#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401)
+#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402)
+#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403)
+#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404)
+#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
+#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
+#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
+
+enum v4l2_stateless_hevc_decode_mode {
+ V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_stateless_hevc_start_code {
+ V4L2_STATELESS_HEVC_START_CODE_NONE,
+ V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/**
+ * struct v4l2_ctrl_hevc_sps - ITU-T Rec. H.265: Sequence parameter set
+ *
+ * @video_parameter_set_id: specifies the value of the
+ * vps_video_parameter_set_id of the active VPS
+ * @seq_parameter_set_id: provides an identifier for the SPS for
+ * reference by other syntax elements
+ * @pic_width_in_luma_samples: specifies the width of each decoded picture
+ * in units of luma samples
+ * @pic_height_in_luma_samples: specifies the height of each decoded picture
+ * in units of luma samples
+ * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the luma array
+ * @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the
+ * samples of the chroma arrays
+ * @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of
+ * the variable MaxPicOrderCntLsb
+ * @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum
+ * required size of the decoded picture
+ *					buffer for the coded video sequence
+ * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures
+ * @sps_max_latency_increase_plus1: when not equal to 0, is used to compute the
+ *				value of the SpsMaxLatencyPictures array
+ * @log2_min_luma_coding_block_size_minus3: plus 3 specifies the minimum
+ * luma coding block size
+ * @log2_diff_max_min_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * coding block size
+ * @log2_min_luma_transform_block_size_minus2: plus 2 specifies the minimum luma
+ * transform block size
+ * @log2_diff_max_min_luma_transform_block_size: specifies the difference between
+ * the maximum and minimum luma
+ * transform block size
+ * @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in inter
+ * prediction mode
+ * @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy
+ * depth for transform units of
+ * coding units coded in intra
+ * prediction mode
+ * @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of
+ * bits used to represent each of PCM sample
+ * values of the luma component
+ * @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number
+ * of bits used to represent each of PCM
+ * sample values of the chroma components
+ * @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the
+ * minimum size of coding blocks
+ * @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between
+ * the maximum and minimum size of
+ * coding blocks
+ * @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set()
+ * syntax structures included in the SPS
+ * @num_long_term_ref_pics_sps: specifies the number of candidate long-term
+ * reference pictures that are specified in the SPS
+ * @chroma_format_idc: specifies the chroma sampling
+ * @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number
+ * of temporal sub-layers
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_sps {
+ __u8 video_parameter_set_id;
+ __u8 seq_parameter_set_id;
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+ __u8 sps_max_sub_layers_minus1;
+
+ __u8 reserved[6];
+ __u64 flags;
+};
+
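
For orientation, the following sketch (not part of the header or of this diff) shows how userspace might hand one of these compound HEVC controls to a stateless decoder. It assumes an already opened and configured decoder fd; set_hevc_sps() is a hypothetical helper name, and real decoders would normally attach the control to a media request instead of using V4L2_CTRL_WHICH_CUR_VAL.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical helper: push a filled SPS as a compound control. */
static int set_hevc_sps(int fd, const struct v4l2_ctrl_hevc_sps *sps)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_STATELESS_HEVC_SPS;
	ctrl.size = sizeof(*sps);
	ctrl.ptr = (void *)sps;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
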
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19)
+#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20)
+
+/**
+ * struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set
+ *
+ * @pic_parameter_set_id: identifies the PPS for reference by other
+ * syntax elements
+ * @num_extra_slice_header_bits: specifies the number of extra slice header
+ * bits that are present in the slice header RBSP
+ * for coded pictures referring to the PPS.
+ * @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l0_active_minus1
+ * @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the
+ * inferred value of num_ref_idx_l1_active_minus1
+ * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for
+ * each slice referring to the PPS
+ * @diff_cu_qp_delta_depth: specifies the difference between the luma coding
+ * tree block size and the minimum luma coding block
+ * size of coding units that convey cu_qp_delta_abs
+ * and cu_qp_delta_sign_flag
+ * @pps_cb_qp_offset: specifies the offset to the luma quantization parameter for Cb
+ * @pps_cr_qp_offset: specifies the offset to the luma quantization parameter for Cr
+ * @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns
+ * partitioning the picture
+ * @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning
+ * the picture
+ * @column_width_minus1: this value plus 1 specifies the width of each tile column in
+ *			units of coding tree blocks
+ * @row_height_minus1: this value plus 1 specifies the height of each tile row in
+ *			units of coding tree blocks
+ * @pps_beta_offset_div2: specifies the default deblocking parameter offset for
+ *			beta, divided by 2
+ * @pps_tc_offset_div2: specifies the default deblocking parameter offset for tC,
+ *			divided by 2
+ * @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of
+ * the variable Log2ParMrgLevel
+ * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_PPS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_pps {
+ __u8 pic_parameter_set_id;
+ __u8 num_extra_slice_header_bits;
+ __u8 num_ref_idx_l0_default_active_minus1;
+ __u8 num_ref_idx_l1_default_active_minus1;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+ __u8 reserved;
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01
+
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7
+#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10
+#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11
+#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+/**
+ * struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry
+ *
+ * @timestamp: timestamp of the V4L2 capture buffer to use as reference.
+ * @flags: long term flag for the reference frame
+ * @field_pic: whether the reference is a field picture or a frame.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @pic_order_cnt_val: the picture order count of the current picture.
+ */
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 flags;
+ __u8 field_pic;
+ __u16 reserved;
+ __s32 pic_order_cnt_val;
+};
+
+/**
+ * struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters
+ *
+ * @delta_luma_weight_l0: the difference of the weighting factor applied
+ * to the luma prediction value for list 0
+ * @luma_offset_l0: the additive offset applied to the luma prediction value
+ * for list 0
+ * @delta_chroma_weight_l0: the difference of the weighting factor applied
+ * to the chroma prediction values for list 0
+ * @chroma_offset_l0: the difference of the additive offset applied to
+ * the chroma prediction values for list 0
+ * @delta_luma_weight_l1: the difference of the weighting factor applied
+ * to the luma prediction value for list 1
+ * @luma_offset_l1: the additive offset applied to the luma prediction value
+ * for list 1
+ * @delta_chroma_weight_l1: the difference of the weighting factor applied
+ * to the chroma prediction values for list 1
+ * @chroma_offset_l1: the difference of the additive offset applied to
+ * the chroma prediction values for list 1
+ * @luma_log2_weight_denom: the base 2 logarithm of the denominator for
+ * all luma weighting factors
+ * @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm
+ * of the denominator for all chroma
+ * weighting factors
+ */
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
+
+/**
+ * struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters
+ *
+ * This control is a dynamically sized 1-dimensional array; the
+ * V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it.
+ *
+ * @bit_size: size (in bits) of the current slice data
+ * @data_byte_offset: offset (in bytes) to the video data in the current slice data
+ * @num_entry_point_offsets: specifies the number of entry point offset syntax
+ * elements in the slice header.
+ * @nal_unit_type: specifies the type of the NAL unit that carries the slice
+ * @nuh_temporal_id_plus1: this value minus 1 specifies a temporal identifier for the NAL unit
+ * @slice_type: see V4L2_HEVC_SLICE_TYPE_{}
+ * @colour_plane_id: specifies the colour plane associated with the current slice
+ * @slice_pic_order_cnt: specifies the picture order count
+ * @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 0
+ * that may be used to decode the slice
+ * @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum
+ * reference index for reference picture list 1
+ * that may be used to decode the slice
+ * @collocated_ref_idx: specifies the reference index of the collocated picture used
+ * for temporal motion vector prediction
+ * @five_minus_max_num_merge_cand: specifies the maximum number of merging
+ * motion vector prediction candidates supported in
+ * the slice subtracted from 5
+ * @slice_qp_delta: specifies the initial value of QpY to be used for the coding
+ * blocks in the slice
+ * @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset
+ * @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset
+ * @slice_act_y_qp_offset: screen content extension parameters
+ * @slice_act_cb_qp_offset: screen content extension parameters
+ * @slice_act_cr_qp_offset: screen content extension parameters
+ * @slice_beta_offset_div2: specifies the deblocking parameter offset for beta, divided by 2
+ * @slice_tc_offset_div2: specifies the deblocking parameter offset for tC, divided by 2
+ * @pic_struct: indicates whether a picture should be displayed as a frame or as one or
+ * more fields
+ * @reserved0: padding field. Should be zeroed by applications.
+ * @slice_segment_addr: specifies the address of the first coding tree block in
+ * the slice segment
+ * @ref_idx_l0: the list of L0 reference elements as indices in the DPB
+ * @ref_idx_l1: the list of L1 reference elements as indices in the DPB
+ * @short_term_ref_pic_set_size: specifies the size of the short-term reference
+ *				picture set included in the SPS
+ * @long_term_ref_pic_set_size: specifies the size of the long-term reference
+ *				picture set included in the SPS
+ * @pred_weight_table: the prediction weight coefficients for inter-picture
+ * prediction
+ * @reserved1: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_byte_offset;
+ __u32 num_entry_point_offsets;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __s32 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ __u8 reserved0[3];
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u32 slice_segment_addr;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u8 reserved1[2];
+ __u64 flags;
+};
+
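
Because the slice parameters control is a dynamically sized array, the control's size field must cover every element being submitted. A minimal sketch, with a hypothetical helper name:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Hypothetical helper: submit num_slices entries in one call. */
static int set_hevc_slices(int fd, struct v4l2_ctrl_hevc_slice_params *slices,
			   unsigned int num_slices)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
		.size = num_slices * sizeof(slices[0]),
		.ptr = slices,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
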
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1
+#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2
+#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4
+
+/**
+ * struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters
+ *
+ * @pic_order_cnt_val: picture order count
+ * @short_term_ref_pic_set_size: specifies the size of the short-term reference
+ *				picture set included in the SPS of the first slice
+ * @long_term_ref_pic_set_size: specifies the size of the long-term reference
+ *				picture set included in the SPS of the first slice
+ * @num_active_dpb_entries: the number of entries in dpb
+ * @num_poc_st_curr_before: the number of reference pictures in the short-term
+ * set that come before the current frame
+ * @num_poc_st_curr_after: the number of reference pictures in the short-term
+ * set that come after the current frame
+ * @num_poc_lt_curr: the number of reference pictures in the long-term set
+ * @poc_st_curr_before: provides the indices of the short-term before references
+ *			in the DPB array
+ * @poc_st_curr_after: provides the indices of the short-term after references
+ *			in the DPB array
+ * @poc_lt_curr: provides the indices of the long-term references in the DPB array
+ * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
+ * can be used to parse the RPS data in slice headers
+ * instead of skipping it with @short_term_ref_pic_set_size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @dpb: the decoded picture buffer, for meta-data about reference frames
+ * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
+ */
+struct v4l2_ctrl_hevc_decode_params {
+ __s32 pic_order_cnt_val;
+ __u16 short_term_ref_pic_set_size;
+ __u16 long_term_ref_pic_set_size;
+ __u8 num_active_dpb_entries;
+ __u8 num_poc_st_curr_before;
+ __u8 num_poc_st_curr_after;
+ __u8 num_poc_lt_curr;
+ __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 num_delta_pocs_of_ref_rps_idx;
+ __u8 reserved[3];
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u64 flags;
+};
+
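
Each DPB entry refers to a previously decoded CAPTURE buffer through its timestamp. The sketch below (hypothetical helpers, not part of the header) shows the nanosecond conversion the kerneldoc elsewhere refers to as v4l2_timeval_to_ns(), and how an entry might be appended:

#include <linux/videodev2.h>

/* Convert a buffer's struct timeval timestamp to the 64-bit tag value. */
static __u64 buffer_timestamp_ns(const struct v4l2_buffer *buf)
{
	return (__u64)buf->timestamp.tv_sec * 1000000000ULL +
	       (__u64)buf->timestamp.tv_usec * 1000ULL;
}

/* Hypothetical helper: append a reference frame to the DPB. */
static void add_dpb_entry(struct v4l2_ctrl_hevc_decode_params *dec,
			  const struct v4l2_buffer *ref, __s32 poc,
			  int long_term)
{
	struct v4l2_hevc_dpb_entry *e =
		&dec->dpb[dec->num_active_dpb_entries++];

	e->timestamp = buffer_timestamp_ns(ref);
	e->pic_order_cnt_val = poc;
	e->flags = long_term ? V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE : 0;
}
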
+/**
+ * struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters
+ *
+ * @scaling_list_4x4: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_8x8: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_16x16: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_32x32: scaling list is used for the scaling process for
+ * transform coefficients. The values on each scaling
+ * list are expected in raster scan order
+ * @scaling_list_dc_coef_16x16: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ * @scaling_list_dc_coef_32x32: scaling list is used for the scaling process
+ * for transform coefficients. The values on each
+ * scaling list are expected in raster scan order.
+ */
+struct v4l2_ctrl_hevc_scaling_matrix {
+ __u8 scaling_list_4x4[6][16];
+ __u8 scaling_list_8x8[6][64];
+ __u8 scaling_list_16x16[6][64];
+ __u8 scaling_list_32x32[2][64];
+ __u8 scaling_list_dc_coef_16x16[6];
+ __u8 scaling_list_dc_coef_32x32[2];
+};
+
#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
@@ -2018,6 +2562,929 @@ struct v4l2_ctrl_hdr10_mastering_display {
__u32 min_display_mastering_luminance;
};
+/* Stateless VP9 controls */
+
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+
+/**
+ * struct v4l2_vp9_loop_filter - VP9 loop filter parameters
+ *
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present in the bitstream,
+ * users should pass its last value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on the
+ * chosen mode. If this syntax element is not present in the bitstream, users should
+ * pass its last value.
+ * @level: indicates the loop filter strength.
+ * @sharpness: indicates the sharpness level.
+ * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * This structure contains all loop filter related parameters. See section
+ * '7.2.8 Loop filter semantics' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_loop_filter {
+ __s8 ref_deltas[4];
+ __s8 mode_deltas[2];
+ __u8 level;
+ __u8 sharpness;
+ __u8 flags;
+ __u8 reserved[7];
+};
+
+/**
+ * struct v4l2_vp9_quantization - VP9 quantization parameters
+ *
+ * @base_q_idx: indicates the base frame qindex.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx.
+ * @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the quantization parameters. See section '7.2.9 Quantization params
+ * syntax' of the VP9 specification for more details.
+ */
+struct v4l2_vp9_quantization {
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_uv_dc;
+ __s8 delta_q_uv_ac;
+ __u8 reserved[4];
+};
+
+#define V4L2_VP9_SEGMENTATION_FLAG_ENABLED 0x01
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP 0x02
+#define V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x04
+#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA 0x08
+#define V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE 0x10
+
+#define V4L2_VP9_SEG_LVL_ALT_Q 0
+#define V4L2_VP9_SEG_LVL_ALT_L 1
+#define V4L2_VP9_SEG_LVL_REF_FRAME 2
+#define V4L2_VP9_SEG_LVL_SKIP 3
+#define V4L2_VP9_SEG_LVL_MAX 4
+
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf
+
+/**
+ * struct v4l2_vp9_segmentation - VP9 segmentation parameters
+ *
+ * @feature_data: data attached to each feature. Data entry is only valid if
+ * the feature is enabled. The array shall be indexed with segment number as
+ * the first dimension (0..7) and one of V4L2_VP9_SEG_{} as the second dimension.
+ * @feature_enabled: bitmask defining which features are enabled in each segment.
+ * The value for each segment is a combination of V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)
+ * values where id is one of V4L2_VP9_SEG_LVL_{}.
+ * @tree_probs: specifies the probability values to be used when decoding a
+ * Segment-ID. See '5.15. Segmentation map' section of the VP9 specification
+ * for more details.
+ * @pred_probs: specifies the probability values to be used when decoding a
+ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' section of the
+ * VP9 specification for more details.
+ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_{} flags.
+ * @reserved: padding field. Should be zeroed by applications.
+ *
+ * Encodes the segmentation parameters. See section '7.2.10 Segmentation params syntax' of
+ * the VP9 specification for more details.
+ */
+struct v4l2_vp9_segmentation {
+ __s16 feature_data[8][4];
+ __u8 feature_enabled[8];
+ __u8 tree_probs[7];
+ __u8 pred_probs[3];
+ __u8 flags;
+ __u8 reserved[5];
+};
+
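
As a small illustration of how the feature_enabled bitmask and feature_data array fit together (hypothetical helper, not part of the header):

#include <linux/videodev2.h>

/* Return the feature value for a segment, or -1 if it is not enabled. */
static int vp9_get_feature(const struct v4l2_vp9_segmentation *seg,
			   unsigned int segment, unsigned int feature,
			   __s16 *value)
{
	if (!(seg->feature_enabled[segment] &
	      V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature)))
		return -1;

	*value = seg->feature_data[segment][feature];
	return 0;
}

/* Example: __s16 q; vp9_get_feature(&seg, 0, V4L2_VP9_SEG_LVL_ALT_Q, &q); */
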
+#define V4L2_VP9_FRAME_FLAG_KEY_FRAME 0x001
+#define V4L2_VP9_FRAME_FLAG_SHOW_FRAME 0x002
+#define V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT 0x004
+#define V4L2_VP9_FRAME_FLAG_INTRA_ONLY 0x008
+#define V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV 0x010
+#define V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX 0x020
+#define V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE 0x040
+#define V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING 0x080
+#define V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING 0x100
+#define V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING 0x200
+
+#define V4L2_VP9_SIGN_BIAS_LAST 0x1
+#define V4L2_VP9_SIGN_BIAS_GOLDEN 0x2
+#define V4L2_VP9_SIGN_BIAS_ALT 0x4
+
+#define V4L2_VP9_RESET_FRAME_CTX_NONE 0
+#define V4L2_VP9_RESET_FRAME_CTX_SPEC 1
+#define V4L2_VP9_RESET_FRAME_CTX_ALL 2
+
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP 0
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH 1
+#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP 2
+#define V4L2_VP9_INTERP_FILTER_BILINEAR 3
+#define V4L2_VP9_INTERP_FILTER_SWITCHABLE 4
+
+#define V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE 0
+#define V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE 1
+#define V4L2_VP9_REFERENCE_MODE_SELECT 2
+
+#define V4L2_VP9_PROFILE_MAX 3
+
+#define V4L2_CID_STATELESS_VP9_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 300)
+/**
+ * struct v4l2_ctrl_vp9_frame - VP9 frame decoding control
+ *
+ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details.
+ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details.
+ * @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details.
+ * @flags: combination of V4L2_VP9_FRAME_FLAG_{} flags.
+ * @compressed_header_size: compressed header size in bytes.
+ * @uncompressed_header_size: uncompressed header size in bytes.
+ * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed in pixels.
+ * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed in pixels.
+ * @render_width_minus_1: add 1 to it and you'll get the expected render width expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @render_height_minus_1: add 1 to it and you'll get the expected render height expressed in
+ * pixels. This is not used during the decoding process but might be used by HW scalers
+ * to prepare a frame that's ready for scanout.
+ * @last_frame_ts: "last" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @golden_frame_ts: "golden" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @alt_frame_ts: "alt" reference buffer timestamp.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer.
+ * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @ref_frame_sign_bias: a bitfield specifying whether the sign bias is set for a given
+ * reference frame. Either of V4L2_VP9_SIGN_BIAS_{}.
+ * @reset_frame_context: specifies whether the frame context should be reset to default values.
+ * Either of V4L2_VP9_RESET_FRAME_CTX_{}.
+ * @frame_context_idx: frame context that should be used/updated.
+ * @profile: VP9 profile. Can be 0, 1, 2 or 3.
+ * @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all profiles support
+ * 10 and/or 12 bits depths.
+ * @interpolation_filter: specifies the filter selection used for performing inter prediction.
+ * Set to one of V4L2_VP9_INTERP_FILTER_{}.
+ * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile (where the width
+ * is measured in units of 8x8 blocks). Shall be less than or equal to 6.
+ * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile (where the height
+ * is measured in units of 8x8 blocks).
+ * @reference_mode: specifies the type of inter prediction to be used.
+ * Set to one of V4L2_VP9_REFERENCE_MODE_{}.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_vp9_frame {
+ struct v4l2_vp9_loop_filter lf;
+ struct v4l2_vp9_quantization quant;
+ struct v4l2_vp9_segmentation seg;
+ __u32 flags;
+ __u16 compressed_header_size;
+ __u16 uncompressed_header_size;
+ __u16 frame_width_minus_1;
+ __u16 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+ __u64 last_frame_ts;
+ __u64 golden_frame_ts;
+ __u64 alt_frame_ts;
+ __u8 ref_frame_sign_bias;
+ __u8 reset_frame_context;
+ __u8 frame_context_idx;
+ __u8 profile;
+ __u8 bit_depth;
+ __u8 interpolation_filter;
+ __u8 tile_cols_log2;
+ __u8 tile_rows_log2;
+ __u8 reference_mode;
+ __u8 reserved[7];
+};
+
+#define V4L2_VP9_NUM_FRAME_CTX 4
+
+/**
+ * struct v4l2_vp9_mv_probs - VP9 Motion vector probability updates
+ * @joint: motion vector joint probability updates.
+ * @sign: motion vector sign probability updates.
+ * @classes: motion vector class probability updates.
+ * @class0_bit: motion vector class0 bit probability updates.
+ * @bits: motion vector bits probability updates.
+ * @class0_fr: motion vector class0 fractional bit probability updates.
+ * @fr: motion vector fractional bit probability updates.
+ * @class0_hp: motion vector class0 high precision fractional bit probability updates.
+ * @hp: motion vector high precision fractional bit probability updates.
+ *
+ * This structure contains new values of motion vector probabilities.
+ * A value of zero in an array element means there is no update of the relevant probability.
+ * See `struct v4l2_vp9_prob_updates` for details.
+ */
+struct v4l2_vp9_mv_probs {
+ __u8 joint[3];
+ __u8 sign[2];
+ __u8 classes[2][10];
+ __u8 class0_bit[2];
+ __u8 bits[2][10];
+ __u8 class0_fr[2][2][3];
+ __u8 fr[2][3];
+ __u8 class0_hp[2];
+ __u8 hp[2];
+};
+
+#define V4L2_CID_STATELESS_VP9_COMPRESSED_HDR (V4L2_CID_CODEC_STATELESS_BASE + 301)
+
+#define V4L2_VP9_TX_MODE_ONLY_4X4 0
+#define V4L2_VP9_TX_MODE_ALLOW_8X8 1
+#define V4L2_VP9_TX_MODE_ALLOW_16X16 2
+#define V4L2_VP9_TX_MODE_ALLOW_32X32 3
+#define V4L2_VP9_TX_MODE_SELECT 4
+
+/**
+ * struct v4l2_ctrl_vp9_compressed_hdr - VP9 probability updates control
+ * @tx_mode: specifies the TX mode. Set to one of V4L2_VP9_TX_MODE_{}.
+ * @tx8: TX 8x8 probability updates.
+ * @tx16: TX 16x16 probability updates.
+ * @tx32: TX 32x32 probability updates.
+ * @coef: coefficient probability updates.
+ * @skip: skip probability updates.
+ * @inter_mode: inter mode probability updates.
+ * @interp_filter: interpolation filter probability updates.
+ * @is_inter: is inter-block probability updates.
+ * @comp_mode: compound prediction mode probability updates.
+ * @single_ref: single ref probability updates.
+ * @comp_ref: compound ref probability updates.
+ * @y_mode: Y prediction mode probability updates.
+ * @uv_mode: UV prediction mode probability updates.
+ * @partition: partition probability updates.
+ * @mv: motion vector probability updates.
+ *
+ * This structure holds the probabilities update as parsed in the compressed
+ * header (Spec 6.3). These values represent the value of probability update after
+ * being translated with inv_map_table[] (see 6.3.5). A value of zero in an array element
+ * means that there is no update of the relevant probability.
+ *
+ * This control is optional; it is needed when the hardware cannot parse the
+ * compressed header itself. Only drivers which need it will implement it.
+ */
+struct v4l2_ctrl_vp9_compressed_hdr {
+ __u8 tx_mode;
+ __u8 tx8[2][1];
+ __u8 tx16[2][2];
+ __u8 tx32[2][3];
+ __u8 coef[4][2][2][6][6][3];
+ __u8 skip[3];
+ __u8 inter_mode[7][3];
+ __u8 interp_filter[4][2];
+ __u8 is_inter[4];
+ __u8 comp_mode[5];
+ __u8 single_ref[5][2];
+ __u8 comp_ref[5];
+ __u8 y_mode[4][9];
+ __u8 uv_mode[10][9];
+ __u8 partition[16][3];
+
+ struct v4l2_vp9_mv_probs mv;
+};
+
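
Since the compressed header control is optional, an application can probe for it before filling it. A minimal sketch (hypothetical helper name):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns non-zero if the driver implements the compressed header control. */
static int vp9_needs_compressed_hdr(int fd)
{
	struct v4l2_query_ext_ctrl query;

	memset(&query, 0, sizeof(query));
	query.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR;

	return ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &query) == 0;
}
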
+/* Stateless AV1 controls */
+
+#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8
+#define V4L2_AV1_CDEF_MAX 8
+#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */
+#define V4L2_AV1_MAX_SEGMENTS 8
+#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */
+#define V4L2_AV1_REFS_PER_FRAME 7
+#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */
+#define V4L2_AV1_MAX_NUM_PLANES 3
+#define V4L2_AV1_MAX_TILE_COLS 64
+#define V4L2_AV1_MAX_TILE_ROWS 64
+#define V4L2_AV1_MAX_TILE_COUNT 512
+
+#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001
+#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000
+#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000
+#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000
+#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000
+#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500)
+/**
+ * struct v4l2_ctrl_av1_sequence - AV1 Sequence
+ *
+ * Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax"
+ * for more details.
+ *
+ * @flags: See V4L2_AV1_SEQUENCE_FLAG_{}.
+ * @seq_profile: specifies the features that can be used in the coded video
+ * sequence.
+ * @order_hint_bits: specifies the number of bits used for the order_hint field
+ * at each frame.
+ * @bit_depth: the bitdepth to use for the sequence as described in section
+ * 5.5.2 "Color config syntax".
+ * @reserved: padding field. Should be zeroed by applications.
+ * @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the
+ * frames represented by this sequence header.
+ * @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the
+ * frames represented by this sequence header.
+ */
+struct v4l2_ctrl_av1_sequence {
+ __u32 flags;
+ __u8 seq_profile;
+ __u8 order_hint_bits;
+ __u8 bit_depth;
+ __u8 reserved;
+ __u16 max_frame_width_minus_1;
+ __u16 max_frame_height_minus_1;
+};
+
+#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501)
+/**
+ * struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry
+ *
+ * Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
+ * MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
+ * v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
+ * tile_col. See section 6.10.1 "General tile group OBU semantics" for more
+ * details.
+ *
+ * @tile_offset: offset from the OBU data, i.e. where the coded tile data
+ * actually starts.
+ * @tile_size: specifies the size in bytes of the coded tile. Equivalent to
+ * "TileSize" in the AV1 Specification.
+ * @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in
+ * the AV1 Specification.
+ * @tile_col: specifies the column of the current tile. Equivalent to "TileCol" in
+ * the AV1 Specification.
+ */
+struct v4l2_ctrl_av1_tile_group_entry {
+ __u32 tile_offset;
+ __u32 tile_size;
+ __u32 tile_row;
+ __u32 tile_col;
+};
+
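
Assuming the tile group entry control is submitted as a dynamically sized array with one entry per coded tile (the same pattern as the HEVC slice parameters sketch earlier; set_av1_tiles() is a hypothetical helper):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_av1_tiles(int fd, struct v4l2_ctrl_av1_tile_group_entry *tiles,
			 unsigned int num_tiles)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
		.size = num_tiles * sizeof(tiles[0]),
		.ptr = tiles,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
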
+/**
+ * enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3
+ * "Symbols and abbreviated terms" of the AV1 Specification.
+ *
+ * @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform.
+ * @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation.
+ * @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom +
+ * translation.
+ * @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform.
+ */
+enum v4l2_av1_warp_model {
+ V4L2_AV1_WARP_MODEL_IDENTITY = 0,
+ V4L2_AV1_WARP_MODEL_TRANSLATION = 1,
+ V4L2_AV1_WARP_MODEL_ROTZOOM = 2,
+ V4L2_AV1_WARP_MODEL_AFFINE = 3,
+};
+
+/**
+ * enum v4l2_av1_reference_frame - AV1 reference frames
+ *
+ * @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference
+ * @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame
+ * @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame
+ * @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame
+ * @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame
+ * @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame
+ * @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame
+ * @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame
+ */
+enum v4l2_av1_reference_frame {
+ V4L2_AV1_REF_INTRA_FRAME = 0,
+ V4L2_AV1_REF_LAST_FRAME = 1,
+ V4L2_AV1_REF_LAST2_FRAME = 2,
+ V4L2_AV1_REF_LAST3_FRAME = 3,
+ V4L2_AV1_REF_GOLDEN_FRAME = 4,
+ V4L2_AV1_REF_BWDREF_FRAME = 5,
+ V4L2_AV1_REF_ALTREF2_FRAME = 6,
+ V4L2_AV1_REF_ALTREF_FRAME = 7,
+};
+
+#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref))
+
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4
+/**
+ * struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in
+ * section 6.8.17 "Global motion params semantics" of the AV1 specification.
+ *
+ * @flags: A bitfield containing the flags per reference frame. See
+ * V4L2_AV1_GLOBAL_MOTION_FLAG_{}
+ * @type: The type of global motion transform used.
+ * @params: this field has the same meaning as "gm_params" in the AV1
+ * specification.
+ * @invalid: bitfield indicating whether the global motion params are invalid
+ * for a given reference frame. See section 7.11.3.6 Setup shear process and
+ * the variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ * create a suitable mask.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+
+struct v4l2_av1_global_motion {
+ __u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
+ __u8 invalid;
+ __u8 reserved[3];
+};
+
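
A short illustration of the invalid bitmask helper (hypothetical code, not part of the header):

#include <linux/videodev2.h>

/* Flag one reference's global motion parameters as invalid (warpValid == 0). */
static void av1_mark_gm_invalid(struct v4l2_av1_global_motion *gm,
				enum v4l2_av1_reference_frame ref)
{
	gm->invalid |= V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref);
}
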
+/**
+ * enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
+ * @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
+ * @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
+ */
+enum v4l2_av1_frame_restoration_type {
+ V4L2_AV1_FRAME_RESTORE_NONE = 0,
+ V4L2_AV1_FRAME_RESTORE_WIENER = 1,
+ V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
+ V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
+};
+
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR 0x1
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR 0x2
+
+/**
+ * struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
+ * section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
+ *
+ * @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
+ * @lr_unit_shift: specifies if the luma restoration size should be halved.
+ * @lr_uv_shift: specifies if the chroma size should be half the luma size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @frame_restoration_type: specifies the type of restoration used for each
+ * plane. See enum v4l2_av1_frame_restoration_type.
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane.
+ */
+struct v4l2_av1_loop_restoration {
+ __u8 flags;
+ __u8 lr_unit_shift;
+ __u8 lr_uv_shift;
+ __u8 reserved;
+ enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+ __u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
+};
+
+/**
+ * struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
+ * 6.10.14 "CDEF params semantics" of the AV1 specification
+ *
+ * @damping_minus_3: controls the amount of damping in the deringing filter.
+ * @bits: specifies the number of bits needed to specify which CDEF filter to
+ * apply.
+ * @y_pri_strength: specifies the strength of the primary filter.
+ * @y_sec_strength: specifies the strength of the secondary filter.
+ * @uv_pri_strength: specifies the strength of the primary filter.
+ * @uv_sec_strength: specifies the strength of the secondary filter.
+ */
+struct v4l2_av1_cdef {
+ __u8 damping_minus_3;
+ __u8 bits;
+ __u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
+};
+
+#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED 0x1
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP 0x2
+#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x4
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA 0x8
+#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP 0x10
+
+/**
+ * enum v4l2_av1_segment_feature - AV1 segment features as described in section
+ * 3 "Symbols and abbreviated terms" of the AV1 specification.
+ *
+ * @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
+ * @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
+ * feature.
+ * @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature.
+ * @V4L2_AV1_SEG_LVL_MAX: Number of segment features.
+ */
+enum v4l2_av1_segment_feature {
+ V4L2_AV1_SEG_LVL_ALT_Q = 0,
+ V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1,
+ V4L2_AV1_SEG_LVL_REF_FRAME = 5,
+ V4L2_AV1_SEG_LVL_REF_SKIP = 6,
+ V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7,
+ V4L2_AV1_SEG_LVL_MAX = 8
+};
+
+#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+
+/**
+ * struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section
+ * 6.8.13 "Segmentation params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}.
+ * @last_active_seg_id: indicates the highest numbered segment id that has some
+ * enabled feature. This is used when decoding the segment id to only decode
+ * choices corresponding to used segments.
+ * @feature_enabled: bitmask defining which features are enabled in each
+ * segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
+ * @feature_data: data attached to each feature. Data entry is only valid if the
+ * feature is enabled
+ */
+struct v4l2_av1_segmentation {
+ __u8 flags;
+ __u8 last_active_seg_id;
+ __u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS];
+ __s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
+};
+
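
The AV1 segmentation bitmask works like its VP9 counterpart shown earlier; the hypothetical helper below enables one feature for a segment and records its data:

#include <linux/videodev2.h>

static void av1_set_feature(struct v4l2_av1_segmentation *seg,
			    unsigned int segment,
			    enum v4l2_av1_segment_feature feature,
			    __s16 data)
{
	seg->feature_enabled[segment] |=
		V4L2_AV1_SEGMENT_FEATURE_ENABLED(feature);
	seg->feature_data[segment][feature] = data;
	if (segment > seg->last_active_seg_id)
		seg->last_active_seg_id = segment;
}
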
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8
+
+/**
+ * struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section
+ * 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters
+ * semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{}
+ * @level: an array containing loop filter strength values. Different loop
+ * filter strength values from the array are used depending on the image plane
+ * being filtered, and the edge direction (vertical or horizontal) being
+ * filtered.
+ * @sharpness: indicates the sharpness level. The loop_filter_level and
+ * loop_filter_sharpness together determine when a block edge is filtered, and
+ * by how much the filtering can change the sample values. The loop filter
+ * process is described in section 7.14 of the AV1 specification.
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present, it maintains
+ * its previous value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on
+ * the chosen mode. If this syntax element is not present, it maintains its
+ * previous value.
+ * @delta_lf_res: specifies the left shift which should be applied to decoded
+ * loop filter delta values.
+ */
+struct v4l2_av1_loop_filter {
+ __u8 flags;
+ __u8 level[4];
+ __u8 sharpness;
+ __s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 mode_deltas[2];
+ __u8 delta_lf_res;
+};
+
+#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1
+#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2
+#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4
+
+/**
+ * struct v4l2_av1_quantization - AV1 Quantization params as defined in section
+ * 6.8.11 "Quantization params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_QUANTIZATION_FLAG_{}
+ * @base_q_idx: indicates the base frame qindex. This is used for Y AC
+ * coefficients and as the base value for the other quantizers.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx.
+ * @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx.
+ * @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx.
+ * @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx.
+ * @qm_y: specifies the level in the quantizer matrix that should be used for
+ * luma plane decoding.
+ * @qm_u: specifies the level in the quantizer matrix that should be used for
+ * chroma U plane decoding.
+ * @qm_v: specifies the level in the quantizer matrix that should be used for
+ * chroma V plane decoding.
+ * @delta_q_res: specifies the left shift which should be applied to decoded
+ * quantizer index delta values.
+ */
+struct v4l2_av1_quantization {
+ __u8 flags;
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_u_dc;
+ __s8 delta_q_u_ac;
+ __s8 delta_q_v_dc;
+ __s8 delta_q_v_ac;
+ __u8 qm_y;
+ __u8 qm_u;
+ __u8 qm_v;
+ __u8 delta_q_res;
+};
+
+#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1
+
+/**
+ * struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile
+ * info semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_TILE_INFO_FLAG_{}
+ * @context_update_tile_id: specifies which tile to use for the CDF update.
+ * @tile_rows: specifies the number of tiles down the frame.
+ * @tile_cols: specifies the number of tiles across the frame.
+ * @mi_col_starts: an array specifying the start column (in units of 4x4 luma
+ * samples) for each tile across the image.
+ * @mi_row_starts: an array specifying the start row (in units of 4x4 luma
+ * samples) for each tile down the image.
+ * @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of
+ * superblocks.
+ * @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of
+ * superblocks.
+ * @tile_size_bytes: specifies the number of bytes needed to code each tile
+ * size.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_tile_info {
+ __u8 flags;
+ __u8 context_update_tile_id;
+ __u8 tile_cols;
+ __u8 tile_rows;
+ __u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
+ __u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
+ __u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS];
+ __u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS];
+ __u8 tile_size_bytes;
+ __u8 reserved[3];
+};
+
+/**
+ * enum v4l2_av1_frame_type - AV1 Frame Type
+ *
+ * @V4L2_AV1_KEY_FRAME: Key frame
+ * @V4L2_AV1_INTER_FRAME: Inter frame
+ * @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame
+ * @V4L2_AV1_SWITCH_FRAME: Switch frame
+ */
+enum v4l2_av1_frame_type {
+ V4L2_AV1_KEY_FRAME = 0,
+ V4L2_AV1_INTER_FRAME = 1,
+ V4L2_AV1_INTRA_ONLY_FRAME = 2,
+ V4L2_AV1_SWITCH_FRAME = 3
+};
+
+/**
+ * enum v4l2_av1_interpolation_filter - AV1 interpolation filter types
+ *
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at
+ * the block level
+ *
+ * See section 6.8.9 "Interpolation filter semantics" of the AV1 specification
+ * for more details.
+ */
+enum v4l2_av1_interpolation_filter {
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2,
+ V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3,
+ V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
+};
+
+/**
+ * enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode
+ * semantics" of the AV1 specification.
+ * @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4
+ * transforms
+ * @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest
+ * transform size that fits inside the block
+ * @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified
+ * explicitly for each block.
+ */
+enum v4l2_av1_tx_mode {
+ V4L2_AV1_TX_MODE_ONLY_4X4 = 0,
+ V4L2_AV1_TX_MODE_LARGEST = 1,
+ V4L2_AV1_TX_MODE_SELECT = 2
+};
+
+#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001
+#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002
+#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004
+#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008
+#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010
+#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020
+#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040
+#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080
+#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100
+#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200
+#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800
+#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000
+#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000
+#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000
+#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000
+#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000
+#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502)
+/**
+ * struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU.
+ *
+ * @tile_info: tile info
+ * @quantization: quantization params
+ * @segmentation: segmentation params
+ * @superres_denom: the denominator for the upscaling ratio.
+ * @loop_filter: loop filter params
+ * @cdef: cdef params
+ * @skip_mode_frame: specifies the frames to use for compound prediction when
+ * skip_mode is equal to 1.
+ * @primary_ref_frame: specifies which reference frame contains the CDF values
+ * and other state that should be loaded at the start of the frame.
+ * @loop_restoration: loop restoration params
+ * @global_motion: global motion params
+ * @flags: see V4L2_AV1_FRAME_FLAG_{}
+ * @frame_type: specifies the AV1 frame type
+ * @order_hint: specifies OrderHintBits least significant bits of the expected
+ * output order for this frame.
+ * @upscaled_width: the upscaled width.
+ * @interpolation_filter: specifies the filter selection used for performing
+ * inter prediction.
+ * @tx_mode: specifies how the transform size is determined.
+ * @frame_width_minus_1: add 1 to get the frame's width.
+ * @frame_height_minus_1: add 1 to get the frame's height
+ * @render_width_minus_1: add 1 to get the render width of the frame in luma
+ * samples.
+ * @render_height_minus_1: add 1 to get the render height of the frame in luma
+ * samples.
+ * @current_frame_id: specifies the frame id number for the current frame. Frame
+ * id numbers are additional information that do not affect the decoding
+ * process, but provide decoders with a way of detecting missing reference
+ * frames so that appropriate action can be taken.
+ * @buffer_removal_time: specifies the frame removal time in units of DecCT clock
+ * ticks counted from the removal time of the last random access point for
+ * operating point opNum.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @order_hints: specifies the expected output order hint for each reference
+ * frame. This field corresponds to the OrderHints variable from the
+ * specification (section 5.9.2 "Uncompressed header syntax"). As such, this is
+ * only used for non-intra frames and ignored otherwise. order_hints[0] is
+ * always ignored.
+ * @reference_frame_ts: the V4L2 timestamp of the reference frame slots.
+ * @ref_frame_idx: used to index into @reference_frame_ts when decoding
+ * inter-frames. The meaning of this array is the same as in the specification.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer. Use
+ * v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @refresh_frame_flags: contains a bitmask that specifies which reference frame
+ * slots will be updated with the current frame after it is decoded.
+ */
+struct v4l2_ctrl_av1_frame {
+ struct v4l2_av1_tile_info tile_info;
+ struct v4l2_av1_quantization quantization;
+ __u8 superres_denom;
+ struct v4l2_av1_segmentation segmentation;
+ struct v4l2_av1_loop_filter loop_filter;
+ struct v4l2_av1_cdef cdef;
+ __u8 skip_mode_frame[2];
+ __u8 primary_ref_frame;
+ struct v4l2_av1_loop_restoration loop_restoration;
+ struct v4l2_av1_global_motion global_motion;
+ __u32 flags;
+ enum v4l2_av1_frame_type frame_type;
+ __u32 order_hint;
+ __u32 upscaled_width;
+ enum v4l2_av1_interpolation_filter interpolation_filter;
+ enum v4l2_av1_tx_mode tx_mode;
+ __u32 frame_width_minus_1;
+ __u32 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+
+ __u32 current_frame_id;
+ __u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS];
+ __u8 reserved[4];
+ __u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME];
+ __u8 refresh_frame_flags;
+};
+
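
To show how ref_frame_idx and reference_frame_ts relate (hypothetical helper, not part of the header): ref_frame_idx selects one of the eight reference slots for each of the seven active references, and the slot's timestamp identifies the CAPTURE buffer to use.

#include <linux/videodev2.h>

/* Timestamp of the buffer backing active reference 'ref' (0 = LAST ... 6 = ALTREF). */
static __u64 av1_ref_timestamp(const struct v4l2_ctrl_av1_frame *frame,
			       unsigned int ref)
{
	__s8 slot = frame->ref_frame_idx[ref];

	return frame->reference_frame_ts[slot];
}
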
+#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1
+#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2
+#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4
+#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8
+#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10
+
+#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505)
+/**
+ * struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters.
+ *
+ * Film grain parameters as specified by section 6.8.20 of the AV1 Specification.
+ *
+ * @flags: see V4L2_AV1_FILM_GRAIN_{}.
+ * @cr_mult: represents a multiplier for the cr component used in derivation of
+ * the input index to the cr component scaling function.
+ * @grain_seed: specifies the starting value for the pseudo-random numbers used
+ * during film grain synthesis.
+ * @film_grain_params_ref_idx: indicates which reference frame contains the
+ * film grain parameters to be used for this frame.
+ * @num_y_points: specifies the number of points for the piece-wise linear
+ * scaling function of the luma component.
+ * @point_y_value: represents the x (luma value) coordinate for the i-th point
+ * of the piecewise linear scaling function for luma component. The values are
+ * signaled on the scale of 0..255. In case of 10 bit video, these values
+ * correspond to luma values divided by 4. In case of 12 bit video, these values
+ * correspond to luma values divided by 16.
+ * @point_y_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for luma component.
+ * @num_cb_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cb component.
+ * @point_cb_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cb component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cb_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cb component.
+ * @num_cr_points: specifies the number of points for the piece-wise
+ * linear scaling function of the cr component.
+ * @point_cr_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cr component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cr_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cr component.
+ * @grain_scaling_minus_8: represents the shift minus 8 applied to the values of the
+ * chroma component. The grain_scaling_minus_8 can take values of 0..3 and
+ * determines the range and quantization step of the standard deviation of film
+ * grain.
+ * @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
+ * and chroma.
+ * @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
+ * plane.
+ * @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
+ * plane.
+ * @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
+ * plane.
+ * @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
+ * coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
+ * auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
+ * 0.25) respectively.
+ * @grain_scale_shift: specifies how much the Gaussian random numbers should be
+ * scaled down during the grain synthesis process.
+ * @cb_mult: represents a multiplier for the cb component used in derivation of
+ * the input index to the cb component scaling function.
+ * @cb_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cb component scaling function.
+ * @cr_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cr component scaling function.
+ * @cb_offset: represents an offset used in derivation of the input index to the
+ * cb component scaling function.
+ * @cr_offset: represents an offset used in derivation of the input index to the
+ * cr component scaling function.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_av1_film_grain {
+ __u8 flags;
+ __u8 cr_mult;
+ __u16 grain_seed;
+ __u8 film_grain_params_ref_idx;
+ __u8 num_y_points;
+ __u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 num_cb_points;
+ __u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 num_cr_points;
+ __u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 grain_scaling_minus_8;
+ __u8 ar_coeff_lag;
+ __u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeff_shift_minus_6;
+ __u8 grain_scale_shift;
+ __u8 cb_mult;
+ __u8 cb_luma_mult;
+ __u8 cr_luma_mult;
+ __u16 cb_offset;
+ __u16 cr_offset;
+ __u8 reserved[4];
+};
+
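For illustration, a minimal userspace sketch of how a stateless AV1 decoder might attach these film grain parameters to a decode request through the extended-control API; the video_fd and request_fd descriptors and the fg structure parsed from the bitstream are assumptions, not part of this header:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_av1_film_grain(int video_fd, int request_fd,
			      const struct v4l2_ctrl_av1_film_grain *fg)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	/* One compound control carrying the film grain payload. */
	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN;
	ctrl.size = sizeof(*fg);
	ctrl.ptr = (void *)fg;

	/* Attach it to the media request that carries this frame. */
	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
	ctrls.request_fd = request_fd;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}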
/* MPEG-compression definitions kept for backwards compatibility */
#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
#define V4L2_CID_MPEG_CLASS V4L2_CID_CODEC_CLASS
diff --git a/include/linux/v4l2-mediabus.h b/include/linux/v4l2-mediabus.h
index 846dadfb..2c318de1 100644
--- a/include/linux/v4l2-mediabus.h
+++ b/include/linux/v4l2-mediabus.h
@@ -3,10 +3,6 @@
* Media Bus API header
*
* Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_V4L2_MEDIABUS_H
diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h
index 658106f5..b383c2fe 100644
--- a/include/linux/v4l2-subdev.h
+++ b/include/linux/v4l2-subdev.h
@@ -6,24 +6,12 @@
*
* Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_V4L2_SUBDEV_H
#define __LINUX_V4L2_SUBDEV_H
+#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
@@ -44,13 +32,15 @@ enum v4l2_subdev_format_whence {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @format: media bus format (format code and frame size)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_format {
__u32 which;
__u32 pad;
struct v4l2_mbus_framefmt format;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -58,13 +48,15 @@ struct v4l2_subdev_format {
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @rect: pad crop rectangle boundaries
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_crop {
__u32 which;
__u32 pad;
struct v4l2_rect rect;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001
@@ -80,6 +72,7 @@ struct v4l2_subdev_crop {
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @which: format type (from enum v4l2_subdev_format_whence)
* @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_mbus_code_enum {
@@ -88,7 +81,8 @@ struct v4l2_subdev_mbus_code_enum {
__u32 code;
__u32 which;
__u32 flags;
- __u32 reserved[7];
+ __u32 stream;
+ __u32 reserved[6];
};
/**
@@ -101,6 +95,7 @@ struct v4l2_subdev_mbus_code_enum {
* @min_height: minimum frame height, in pixels
* @max_height: maximum frame height, in pixels
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_size_enum {
@@ -112,19 +107,22 @@ struct v4l2_subdev_frame_size_enum {
__u32 min_height;
__u32 max_height;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_interval - Pad-level frame rate
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
- __u32 reserved[9];
+ __u32 stream;
+ __u32 reserved[8];
};
/**
@@ -136,6 +134,7 @@ struct v4l2_subdev_frame_interval {
* @height: frame height in pixels
* @interval: frame interval in seconds
* @which: format type (from enum v4l2_subdev_format_whence)
+ * @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval_enum {
@@ -146,7 +145,8 @@ struct v4l2_subdev_frame_interval_enum {
__u32 height;
struct v4l2_fract interval;
__u32 which;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -158,6 +158,7 @@ struct v4l2_subdev_frame_interval_enum {
* defined in v4l2-common.h; V4L2_SEL_TGT_* .
* @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of the selection window
+ * @stream: stream number, defined in subdev routing
* @reserved: for future use, set to zero for now
*
* Hardware may use multiple helper windows to process a video stream.
@@ -170,7 +171,8 @@ struct v4l2_subdev_selection {
__u32 target;
__u32 flags;
struct v4l2_rect r;
- __u32 reserved[8];
+ __u32 stream;
+ __u32 reserved[7];
};
/**
@@ -188,6 +190,67 @@ struct v4l2_subdev_capability {
/* The v4l2 sub-device video device node is registered in read-only mode. */
#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+/* The v4l2 sub-device supports routing and multiplexed streams. */
+#define V4L2_SUBDEV_CAP_STREAMS 0x00000002
+
+/*
+ * Is the route active? An active route will start when streaming is enabled
+ * on a video node.
+ */
+#define V4L2_SUBDEV_ROUTE_FL_ACTIVE (1U << 0)
+
+/**
+ * struct v4l2_subdev_route - A route inside a subdev
+ *
+ * @sink_pad: the sink pad index
+ * @sink_stream: the sink stream identifier
+ * @source_pad: the source pad index
+ * @source_stream: the source stream identifier
+ * @flags: route flags V4L2_SUBDEV_ROUTE_FL_*
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_route {
+ __u32 sink_pad;
+ __u32 sink_stream;
+ __u32 source_pad;
+ __u32 source_stream;
+ __u32 flags;
+ __u32 reserved[5];
+};
+
+/**
+ * struct v4l2_subdev_routing - Subdev routing information
+ *
+ * @which: configuration type (from enum v4l2_subdev_format_whence)
+ * @num_routes: the total number of routes in the routes array
+ * @routes: pointer to the routes array
+ * @reserved: drivers and applications must zero this array
+ */
+struct v4l2_subdev_routing {
+ __u32 which;
+ __u32 num_routes;
+ __u64 routes;
+ __u32 reserved[6];
+};
+
+/*
+ * The client is aware of streams. Setting this flag enables the use of 'stream'
+ * fields (referring to the stream number) with various ioctls. If this is not
+ * set (which is the default), the 'stream' fields will be forced to 0 by the
+ * kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+/**
+ * struct v4l2_subdev_client_capability - Capabilities of the client accessing
+ * the subdev
+ *
+ * @capabilities: A bitmask of V4L2_SUBDEV_CLIENT_CAP_* flags.
+ */
+struct v4l2_subdev_client_capability {
+ __u64 capabilities;
+};
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -203,6 +266,11 @@ struct v4l2_subdev_capability {
#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_G_CLIENT_CAP _IOR('V', 101, struct v4l2_subdev_client_capability)
+#define VIDIOC_SUBDEV_S_CLIENT_CAP _IOWR('V', 102, struct v4l2_subdev_client_capability)
+
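For illustration, a minimal sketch of how a streams-aware application might drive the new interface: declare the client capability, program one active route, then set a format on that route's source pad and stream. The subdev file descriptor, the pad/stream numbers and the media bus code are assumptions chosen for the example:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int setup_single_route(int subdev_fd)
{
	struct v4l2_subdev_client_capability cap = {
		.capabilities = V4L2_SUBDEV_CLIENT_CAP_STREAMS,
	};
	struct v4l2_subdev_route route = {
		.sink_pad = 0,
		.sink_stream = 0,
		.source_pad = 1,
		.source_stream = 0,
		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
	};
	struct v4l2_subdev_routing routing = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.num_routes = 1,
		.routes = (__u64)(uintptr_t)&route,
	};
	struct v4l2_subdev_format fmt;

	/* Tell the kernel this client understands 'stream' fields. */
	if (ioctl(subdev_fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &cap) < 0)
		return -1;
	/* Activate a single sink pad 0 -> source pad 1 route. */
	if (ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing) < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = route.source_pad;
	fmt.stream = route.source_stream; /* honoured only with CLIENT_CAP_STREAMS */
	fmt.format.width = 1920;
	fmt.format.height = 1080;
	fmt.format.code = MEDIA_BUS_FMT_UYVY8_1X16;

	return ioctl(subdev_fd, VIDIOC_SUBDEV_S_FMT, &fmt);
}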
/* The following ioctls are identical to the ioctls in videodev2.h */
#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index dcc0b01d..0b5482a0 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -243,6 +243,7 @@ enum v4l2_colorspace {
/* DCI-P3 colorspace, used by cinema projectors */
V4L2_COLORSPACE_DCI_P3 = 12,
+
};
/*
@@ -474,7 +475,6 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write system calls */
-#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -549,6 +549,15 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_RGBX1010102 v4l2_fourcc('R', 'X', '3', '0') /* 32 RGBX-10-10-10-2 */
+#define V4L2_PIX_FMT_RGBA1010102 v4l2_fourcc('R', 'A', '3', '0') /* 32 RGBA-10-10-10-2 */
+#define V4L2_PIX_FMT_ARGB2101010 v4l2_fourcc('A', 'R', '3', '0') /* 32 ARGB-2-10-10-10 */
+
+/* RGB formats (6 or 8 bytes per pixel) */
+#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */
+#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('B', 'G', 'R', '6') /* 48 BGR 16-bit per component */
+#define V4L2_PIX_FMT_RGB48 v4l2_fourcc('R', 'G', 'B', '6') /* 48 RGB 16-bit per component */
+#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
@@ -556,6 +565,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_Y012 v4l2_fourcc('Y', '0', '1', '2') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -563,6 +573,7 @@ struct v4l2_pix_format {
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
+#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -586,7 +597,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
+#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */
+#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+#define V4L2_PIX_FMT_YUV48_12 v4l2_fourcc('Y', '3', '1', '2') /* 48 YUV 4:4:4 12-bit per component */
+
+/*
+ * YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs
+ * of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs.
+ */
+#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */
+#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */
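As a small illustration of the packing described above, a hypothetical helper (not defined by this header) that recovers the valid bits of one 16-bit Y210 component; Y212 and Y216 would shift by 4 and 0 respectively:

/* The 10 valid bits sit in the MSBs of each little-endian 16-bit
 * component, so dropping the 6 zero-padded LSBs yields the sample. */
static inline unsigned int y210_component(unsigned short raw)
{
	return raw >> 6;
}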
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -595,12 +617,15 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */
+#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* three planes - Y Cb, Cr */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -622,10 +647,16 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+#define V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */
+#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
+#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
/* Tiled YUV formats, non contiguous planes */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
@@ -697,10 +728,16 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frame */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
+#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
+#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
+#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -734,16 +771,34 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
+#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
+#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
+#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
+#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
+#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
+#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
-/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
+/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
+/* Raspberry Pi PiSP compressed formats. */
+#define V4L2_PIX_FMT_PISP_COMP1_RGGB v4l2_fourcc('P', 'C', '1', 'R') /* PiSP 8-bit mode 1 compressed RGGB bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_GRBG v4l2_fourcc('P', 'C', '1', 'G') /* PiSP 8-bit mode 1 compressed GRBG bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_GBRG v4l2_fourcc('P', 'C', '1', 'g') /* PiSP 8-bit mode 1 compressed GBRG bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_BGGR v4l2_fourcc('P', 'C', '1', 'B') /* PiSP 8-bit mode 1 compressed BGGR bayer */
+#define V4L2_PIX_FMT_PISP_COMP1_MONO v4l2_fourcc('P', 'C', '1', 'M') /* PiSP 8-bit mode 1 compressed monochrome */
+#define V4L2_PIX_FMT_PISP_COMP2_RGGB v4l2_fourcc('P', 'C', '2', 'R') /* PiSP 8-bit mode 2 compressed RGGB bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_GRBG v4l2_fourcc('P', 'C', '2', 'G') /* PiSP 8-bit mode 2 compressed GRBG bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_GBRG v4l2_fourcc('P', 'C', '2', 'g') /* PiSP 8-bit mode 2 compressed GBRG bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */
+#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */
+
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -773,6 +828,15 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
+/* The metadata format identifier for BE configuration buffers. */
+#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C')
+
+/* The metadata format identifier for FE configuration buffers. */
+#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C')
+
+/* The metadata format identifier for FE stats buffers. */
+#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S')
+
/* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1542,7 +1606,8 @@ struct v4l2_bt_timings {
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
- (bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch)
+ ((bt)->interlaced ? \
+ ((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
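For illustration, a sketch of the corrected behaviour, assuming a populated struct v4l2_bt_timings: the il_* porches now contribute to the blanking height only when the timing is interlaced.

#include <linux/videodev2.h>

/* Active height plus vertical blanking; for progressive timings the
 * il_vfrontporch/il_vsync/il_vbackporch fields are ignored. */
static unsigned int bt_total_frame_height(const struct v4l2_bt_timings *bt)
{
	return V4L2_DV_BT_FRAME_HEIGHT(bt);
}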
@@ -1633,7 +1698,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* enum v4l2_tuner_type */
+ __u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
@@ -1720,6 +1785,8 @@ struct v4l2_ext_control {
__u8 *p_u8;
__u16 *p_u16;
__u32 *p_u32;
+ __s32 *p_s32;
+ __s64 *p_s64;
struct v4l2_area *p_area;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
@@ -1732,6 +1799,17 @@ struct v4l2_ext_control {
struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
+ struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_ctrl_hevc_scaling_matrix *p_hevc_scaling_matrix;
+ struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
void *ptr;
};
} __attribute__ ((packed));
@@ -1792,6 +1870,20 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
+
+ V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260,
+ V4L2_CTRL_TYPE_VP9_FRAME = 0x0261,
+
+ V4L2_CTRL_TYPE_HEVC_SPS = 0x0270,
+ V4L2_CTRL_TYPE_HEVC_PPS = 0x0271,
+ V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
+ V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
+ V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
+
+ V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
+ V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
+ V4L2_CTRL_TYPE_AV1_FRAME = 0x282,
+ V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1847,6 +1939,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
+#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -2354,6 +2447,7 @@ struct v4l2_event_vsync {
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
+#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3)
struct v4l2_event_ctrl {
__u32 changes;
@@ -2596,5 +2690,10 @@ struct v4l2_create_buffers {
/* Deprecated definitions kept for backwards compatibility */
#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
+/*
+ * This capability was never implemented; anyone using this cap should drop it
+ * from their code.
+ */
+#define V4L2_CAP_ASYNCIO 0x02000000
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/meson.build b/include/meson.build
index 27ce2f41..19b93a7b 100644
--- a/include/meson.build
+++ b/include/meson.build
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: CC0-1.0
+include_build_dir = meson.current_build_dir()
+
subdir('android')
subdir('libcamera')