Bug 1654112 - Vendor libwebrtc for rel86. r=ng

Differential Revision: https://phabricator.services.mozilla.com/D130052
This commit is contained in:
Dan Minor 2020-10-23 13:29:51 -04:00
Родитель 3efac1da2b
Коммит f5ed42edec
7277 изменённых файлов: 641461 добавлений и 593480 удалений

Просмотреть файл

@ -31,7 +31,6 @@ check_targets = [
"//logging/*",
"//media/*",
"//modules/*",
"//ortc/*",
"//p2p/*",
"//pc/*",
"//rtc_base/*",
@ -41,7 +40,7 @@ check_targets = [
"//system_wrappers/*",
"//test/*",
"//video/*",
"//voice_engine/*",
"//third_party/libyuv/*",
]
# These are the list of GN files that run exec_script. This whitelist exists
@ -60,15 +59,19 @@ default_args = {
# component builds.
is_component_build = false
#mac_sdk_min = "10.12"
mac_sdk_min = "10.12"
# WebRTC does not want to switch to C++14 yet.
#use_cxx11 = true
ios_deployment_target = "10.0"
# WebRTC relies on Chromium's Android test infrastructure.
#use_cxx11_on_android = false
# The SDK API level, in contrast, is set by build/android/AndroidManifest.xml.
android32_ndk_api_level = 16
android64_ndk_api_level = 21
# WebRTC does not provide the gflags dependency. Because libyuv uses it only
# for unittests, it can be disabled (see third_party/libyuv/BUILD.gn)
#libyuv_use_gflags = false
libyuv_use_gflags = false
enable_libaom = true
gtest_enable_absl_printers = true
}

98
third_party/libwebrtc/AUTHORS поставляемый
Просмотреть файл

@ -1,59 +1,147 @@
# Names should be added to this file like so:
# Name or Organization <email address>
# Names should be added to this file with this pattern:
#
# For individuals:
# Name <email address>
#
# For organizations:
# Organization <fnmatch pattern>
#
# See python fnmatch module documentation for more information.
#
# Please keep the list sorted.
# BEGIN individuals section.
Adam Fedor <adam.fedor@gmail.com>
Akshay Shah <meetakshay99@gmail.com>
Alexander Brauckmann <a.brauckmann@gmail.com>
Alexandre Gouaillard <agouaillard@gmail.com>
Alex Henrie <alexhenrie24@gmail.com>
Andrew MacDonald <andrew@webrtc.org>
Andrey Efremov <yoklmnprst@ya.ru>
Andrew Johnson <ajohnson@draster.com>
Anil Kumar <an1kumar@gmail.com>
Ben Strong <bstrong@gmail.com>
Bob Withers <bwit@pobox.com>
Bridger Maxwell <bridgeyman@gmail.com>
Christophe Dumez <ch.dumez@samsung.com>
Chris Tserng <tserng@amazon.com>
Cody Barnes <conceptgenesis@gmail.com>
Colin Plumb
Cyril Lashkevich <notorca@gmail.com>
CZ Theng <cz.theng@gmail.com>
Danail Kirov <dkirovbroadsoft@gmail.com>
David Porter <david@porter.me>
Dax Booysen <dax@younow.com>
Dennis Angelo <dennis.angelo@gmail.com>
Dharmesh Chauhan <dharmesh.r.chauhan@gmail.com>
Dirk-Jan C. Binnema <djcb@djcbsoftware.nl>
Dmitry Lizin <sdkdimon@gmail.com>
Eike Rathke <erathke@redhat.com>
Eric Rescorla, RTFM Inc. <ekr@rtfm.com>
Frederik Riedel, Frogg GmbH <frederik.riedel@frogg.io>
Giji Gangadharan <giji.g@samsung.com>
Graham Yoakum <gyoakum@skobalt.com>
Gustavo Garcia <gustavogb@gmail.com>
Hans Knoechel <hans@hans-knoechel.de>
Hugues Ekra <hekra01@gmail.com>
Jake Hilton <jakehilton@gmail.com>
James H. Brown <jbrown@burgoyne.com>
Jan Grulich <grulja@gmail.com>
Jan Kalab <pitlicek@gmail.com>
Jens Nielsen <jens.nielsen@berotec.se>
Jiawei Ou <jiawei.ou@gmail.com>
Jie Mao <maojie0924@gmail.com>
Jiwon Kim <jwkim0000@gmail.com>
Jose Antonio Olivera Ortega <josea.olivera@gmail.com>
Kiran Thind <kiran.thind@gmail.com>
Korniltsev Anatoly <korniltsev.anatoly@gmail.com>
Lennart Grahl <lennart.grahl@gmail.com>
Luke Weber <luke.weber@gmail.com>
Maksim Khobat <maksimkhobat@gmail.com>
Mallikarjuna Rao V <vm.arjun@samsung.com>
Manish Jethani <manish.jethani@gmail.com>
Martin Storsjo <martin@martin.st>
Matthias Liebig <matthias.gcode@gmail.com>
Maxim Pavlov <pavllovmax@gmail.com>
Maxim Potapov <vopatop.skam@gmail.com>
Michael Iedema <michael@kapsulate.com>
Michel Promonet <michel.promonet.1@gmail.com>
Miguel Paris <mparisdiaz@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Min Wang <mingewang@gmail.com>
Mo Zanaty <mzanaty@cisco.com>
Pali Rohar
Paul Kapustin <pkapustin@gmail.com>
Peng Yu <yupeng323@gmail.com>
Philipp Hancke <philipp.hancke@googlemail.com>
Piasy Xu <xz4215@gmail.com>
Rafael Lopez Diez <rafalopezdiez@gmail.com>
Ralph Giles <giles@ghostscript.com>
Raman Budny <budnyjj@gmail.com>
Ramprakash Jelari <ennajelari@gmail.com>
Riku Voipio <riku.voipio@linaro.org>
Robert Bares <robert@bares.me>
Robert Nagy <robert.nagy@gmail.com>
Ryan Yoakum <ryoakum@skobalt.com>
Sarah Thompson <sarah@telergy.com>
Satender Saroha <ssaroha@yahoo.com>
Saul Kravitz <Saul.Kravitz@celera.com>
Sergio Garcia Murillo <sergio.garcia.murillo@gmail.com>
Silviu Caragea <silviu.cpp@gmail.com>
Stefan Gula <steweg@gmail.com>
Stephan Hartmann <stha09@googlemail.com>
Steve Reid <sreid@sea-to-sky.net>
Tarun Chawla <trnkumarchawla@gmail.com>
Todd Wong <todd.wong.ndq@gmail.com>
Tomas Popela <tomas.popela@gmail.com>
Trevor Hayes <trevor.axiom@gmail.com>
Uladzislau Susha <landby@gmail.com>
Vicken Simonian <vsimon@gmail.com>
Victor Costan <costan@gmail.com>
Vladimir Beloborodov <VladimirTechMan@gmail.com>
Xiaohong Xu <freemine@yeah.net>
Xiaolei Yu <dreifachstein@gmail.com>
Yura Yaroshevich <yura.yaroshevich@gmail.com>
Yuriy Pavlyshak <yuriy@appear.in>
Yusuke Suzuki <utatane.tea@gmail.com>
# END individuals section.
# BEGIN organizations section.
8x8 Inc. <*@8x8.com>
8x8 Inc. <*@sip-communicator.org>
Agora IO <*@agora.io>
ARM Holdings <*@arm.com>
BroadSoft Inc. <*@broadsoft.com>
CoSMo Software Consulting, Pte Ltd <*@cosmosoftware.io>
Facebook Inc. <*@fb.com>
Google Inc. <*@google.com>
Highfive, Inc. <*@highfive.com>
HyperConnect Inc. <*@hpcnt.com>
Intel Corporation <*@intel.com>
Life On Air Inc. <*@lifeonair.com>
Microsoft Corporation <*@microsoft.com>
MIPS Technologies <*@mips.com>
Mozilla Foundation <*@mozilla.com>
NVIDIA Corporation <*@nvidia.com>
Opera Software ASA <*@opera.com>
Optical Tone Ltd <*@opticaltone.com>
Pengutronix e.K. <*@pengutronix.de>
RingCentral, Inc. <*@ringcentral.com>
Signal Messenger, LLC <*@signal.org>
Sinch AB <*@sinch.com>
struktur AG <*@struktur.de>
Telenor Digital AS <*@telenor.com>
Temasys Communications <*@temasys.io>
The Chromium Authors <*@chromium.org>
The WebRTC Authors <*@webrtc.org>
Threema GmbH <*@threema.ch>
Tuple, LLC <*@tuple.app>
Twilio, Inc. <*@twilio.com>
Vewd Software AS <*@vewd.com>
Videona Socialmedia <*@videona.com>
Videxio AS <*@videxio.com>
Vidyo, Inc. <*@vidyo.com>
Vonage Holdings Corp. <*@vonage.com>
Wire Swiss GmbH <*@wire.com>
&yet LLC <*@andyet.com>
# END organizations section.

727
third_party/libwebrtc/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,727 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is the root build file for GN. GN will start processing by loading this
# file, and recursively load all dependencies until all dependencies are either
# resolved or known not to exist (which will cause the build to fail). So if
# you add a new build file, there must be some path of dependencies from this
# file to your new one or GN won't know about it.
import("//build/config/linux/pkg_config.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("webrtc.gni")
if (rtc_enable_protobuf) {
import("//third_party/protobuf/proto_library.gni")
}
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
if (!build_with_chromium) {
# This target should (transitively) cause everything to be built; if you run
# 'ninja default' and then 'ninja all', the second build should do no work.
group("default") {
testonly = true
deps = [ ":webrtc" ]
if (rtc_build_examples) {
deps += [ "examples" ]
}
if (rtc_build_tools) {
deps += [ "rtc_tools" ]
}
if (rtc_include_tests) {
deps += [
":rtc_unittests",
":slow_tests",
":video_engine_tests",
":voip_unittests",
":webrtc_nonparallel_tests",
":webrtc_perf_tests",
"common_audio:common_audio_unittests",
"common_video:common_video_unittests",
"examples:examples_unittests",
"media:rtc_media_unittests",
"modules:modules_tests",
"modules:modules_unittests",
"modules/audio_coding:audio_coding_tests",
"modules/audio_processing:audio_processing_tests",
"modules/remote_bitrate_estimator:rtp_to_text",
"modules/rtp_rtcp:test_packet_masks_metrics",
"modules/video_capture:video_capture_internal_impl",
"pc:peerconnection_unittests",
"pc:rtc_pc_unittests",
"rtc_tools:rtp_generator",
"rtc_tools:video_replay",
"stats:rtc_stats_unittests",
"system_wrappers:system_wrappers_unittests",
"test",
"video:screenshare_loopback",
"video:sv_loopback",
"video:video_loopback",
]
if (!is_asan) {
# Do not build :webrtc_lib_link_test because lld complains on some OS
# (e.g. when target_os = "mac") when is_asan=true. For more details,
# see bugs.webrtc.org/11027#c5.
deps += [ ":webrtc_lib_link_test" ]
}
if (is_android) {
deps += [
"examples:android_examples_junit_tests",
"sdk/android:android_instrumentation_test_apk",
"sdk/android:android_sdk_junit_tests",
]
} else {
deps += [ "modules/video_capture:video_capture_tests" ]
}
if (rtc_enable_protobuf) {
deps += [
"audio:low_bandwidth_audio_test",
"logging:rtc_event_log_rtp_dump",
"tools_webrtc/perf:webrtc_dashboard_upload",
]
}
}
}
}
# Abseil Flags by default doesn't register command line flags on mobile
# platforms, WebRTC tests require them (e.g. on simulators) so this
# config will be applied to testonly targets globally (see webrtc.gni).
config("absl_flags_configs") {
defines = [ "ABSL_FLAGS_STRIP_NAMES=0" ]
}
config("library_impl_config") {
# Build targets that contain WebRTC implementation need this macro to
# be defined in order to correctly export symbols when is_component_build
# is true.
# For more info see: rtc_base/build/rtc_export.h.
defines = [ "WEBRTC_LIBRARY_IMPL" ]
}
# Contains the defines and includes in common.gypi that are duplicated both as
# target_defaults and direct_dependent_settings.
config("common_inherited_config") {
defines = []
cflags = []
ldflags = []
if (rtc_enable_symbol_export || is_component_build) {
defines = [ "WEBRTC_ENABLE_SYMBOL_EXPORT" ]
}
if (build_with_mozilla) {
defines += [ "WEBRTC_MOZILLA_BUILD" ]
}
if (!rtc_builtin_ssl_root_certificates) {
defines += [ "WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS" ]
}
if (rtc_disable_check_msg) {
defines += [ "RTC_DISABLE_CHECK_MSG" ]
}
if (rtc_enable_avx2) {
defines += [ "WEBRTC_ENABLE_AVX2" ]
}
# Some tests need to declare their own trace event handlers. If this define is
# not set, the first time TRACE_EVENT_* is called it will store the return
# value for the current handler in an static variable, so that subsequent
# changes to the handler for that TRACE_EVENT_* will be ignored.
# So when tests are included, we set this define, making it possible to use
# different event handlers in different tests.
if (rtc_include_tests) {
defines += [ "WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS=1" ]
} else {
defines += [ "WEBRTC_NON_STATIC_TRACE_EVENT_HANDLERS=0" ]
}
if (build_with_chromium) {
defines += [ "WEBRTC_CHROMIUM_BUILD" ]
include_dirs = [
# The overrides must be included first as that is the mechanism for
# selecting the override headers in Chromium.
"../webrtc_overrides",
# Allow includes to be prefixed with webrtc/ in case it is not an
# immediate subdirectory of the top-level.
".",
# Just like the root WebRTC directory is added to include path, the
# corresponding directory tree with generated files needs to be added too.
# Note: this path does not change depending on the current target, e.g.
# it is always "//gen/third_party/webrtc" when building with Chromium.
# See also: http://cs.chromium.org/?q=%5C"default_include_dirs
# https://gn.googlesource.com/gn/+/master/docs/reference.md#target_gen_dir
target_gen_dir,
]
}
if (is_posix || is_fuchsia) {
defines += [ "WEBRTC_POSIX" ]
}
if (is_ios) {
defines += [
"WEBRTC_MAC",
"WEBRTC_IOS",
]
}
if (is_linux || is_chromeos) {
defines += [ "WEBRTC_LINUX" ]
}
if (is_mac) {
defines += [ "WEBRTC_MAC" ]
}
if (is_fuchsia) {
defines += [ "WEBRTC_FUCHSIA" ]
}
if (is_win) {
defines += [ "WEBRTC_WIN" ]
}
if (is_android) {
defines += [
"WEBRTC_LINUX",
"WEBRTC_ANDROID",
]
if (build_with_mozilla) {
defines += [ "WEBRTC_ANDROID_OPENSLES" ]
}
}
if (is_chromeos) {
defines += [ "CHROMEOS" ]
}
if (rtc_sanitize_coverage != "") {
assert(is_clang, "sanitizer coverage requires clang")
cflags += [ "-fsanitize-coverage=${rtc_sanitize_coverage}" ]
ldflags += [ "-fsanitize-coverage=${rtc_sanitize_coverage}" ]
}
if (is_ubsan) {
cflags += [ "-fsanitize=float-cast-overflow" ]
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_exit_time_destructors") {
if (is_clang) {
cflags = [ "-Wno-exit-time-destructors" ]
}
}
# TODO(bugs.webrtc.org/9693): Remove the possibility to suppress this warning
# as soon as WebRTC compiles without it.
config("no_global_constructors") {
if (is_clang) {
cflags = [ "-Wno-global-constructors" ]
}
}
config("rtc_prod_config") {
# Ideally, WebRTC production code (but not test code) should have these flags.
if (is_clang) {
cflags = [
"-Wexit-time-destructors",
"-Wglobal-constructors",
]
}
}
config("common_config") {
cflags = []
cflags_c = []
cflags_cc = []
cflags_objc = []
defines = []
if (rtc_enable_protobuf) {
defines += [ "WEBRTC_ENABLE_PROTOBUF=1" ]
} else {
defines += [ "WEBRTC_ENABLE_PROTOBUF=0" ]
}
if (rtc_include_internal_audio_device) {
defines += [ "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE" ]
}
if (rtc_libvpx_build_vp9) {
defines += [ "RTC_ENABLE_VP9" ]
}
if (rtc_enable_sctp) {
defines += [ "HAVE_SCTP" ]
}
if (rtc_enable_external_auth) {
defines += [ "ENABLE_EXTERNAL_AUTH" ]
}
if (rtc_use_h264) {
defines += [ "WEBRTC_USE_H264" ]
}
if (rtc_use_absl_mutex) {
defines += [ "WEBRTC_ABSL_MUTEX" ]
}
if (rtc_disable_logging) {
defines += [ "RTC_DISABLE_LOGGING" ]
}
if (rtc_disable_trace_events) {
defines += [ "RTC_DISABLE_TRACE_EVENTS" ]
}
if (rtc_disable_metrics) {
defines += [ "RTC_DISABLE_METRICS" ]
}
if (rtc_exclude_transient_suppressor) {
defines += [ "WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR" ]
}
if (rtc_exclude_audio_processing_module) {
defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
}
cflags = []
if (build_with_chromium) {
defines += [
# NOTICE: Since common_inherited_config is used in public_configs for our
# targets, there's no point including the defines in that config here.
# TODO(kjellander): Cleanup unused ones and move defines closer to the
# source when webrtc:4256 is completed.
"HAVE_WEBRTC_VIDEO",
"LOGGING_INSIDE_WEBRTC",
]
} else {
if (is_posix || is_fuchsia) {
cflags_c += [
# TODO(bugs.webrtc.org/9029): enable commented compiler flags.
# Some of these flags should also be added to cflags_objc.
# "-Wextra", (used when building C++ but not when building C)
# "-Wmissing-prototypes", (C/Obj-C only)
# "-Wmissing-declarations", (ensure this is always used C/C++, etc..)
"-Wstrict-prototypes",
# "-Wpointer-arith", (ensure this is always used C/C++, etc..)
# "-Wbad-function-cast", (C/Obj-C only)
# "-Wnested-externs", (C/Obj-C only)
]
cflags_objc += [ "-Wstrict-prototypes" ]
cflags_cc = [
"-Wnon-virtual-dtor",
# This is enabled for clang; enable for gcc as well.
"-Woverloaded-virtual",
]
}
if (is_clang) {
cflags += [
"-Wc++11-narrowing",
"-Wimplicit-fallthrough",
"-Wthread-safety",
"-Winconsistent-missing-override",
"-Wundef",
]
# use_xcode_clang only refers to the iOS toolchain, host binaries use
# chromium's clang always.
if (!is_nacl &&
(!use_xcode_clang || current_toolchain == host_toolchain)) {
# Flags NaCl (Clang 3.7) and Xcode 7.3 (Clang clang-703.0.31) do not
# recognize.
cflags += [ "-Wunused-lambda-capture" ]
}
}
if (is_win && !is_clang) {
# MSVC warning suppressions (needed to use Abseil).
# TODO(bugs.webrtc.org/9274): Remove these warnings as soon as MSVC allows
# external headers warning suppression (or fix them upstream).
cflags += [ "/wd4702" ] # unreachable code
# MSVC 2019 warning suppressions for C++17 compiling
cflags +=
[ "/wd5041" ] # out-of-line definition for constexpr static data
# member is not needed and is deprecated in C++17
}
}
if (current_cpu == "arm64") {
defines += [ "WEBRTC_ARCH_ARM64" ]
defines += [ "WEBRTC_HAS_NEON" ]
}
if (current_cpu == "arm") {
defines += [ "WEBRTC_ARCH_ARM" ]
if (arm_version >= 7) {
defines += [ "WEBRTC_ARCH_ARM_V7" ]
if (arm_use_neon) {
defines += [ "WEBRTC_HAS_NEON" ]
}
}
}
if (current_cpu == "mipsel") {
defines += [ "MIPS32_LE" ]
if (mips_float_abi == "hard") {
defines += [ "MIPS_FPU_LE" ]
}
if (mips_arch_variant == "r2") {
defines += [ "MIPS32_R2_LE" ]
}
if (mips_dsp_rev == 1) {
defines += [ "MIPS_DSP_R1_LE" ]
} else if (mips_dsp_rev == 2) {
defines += [
"MIPS_DSP_R1_LE",
"MIPS_DSP_R2_LE",
]
}
}
if (is_android && !is_clang) {
# The Android NDK doesn't provide optimized versions of these
# functions. Ensure they are disabled for all compilers.
cflags += [
"-fno-builtin-cos",
"-fno-builtin-sin",
"-fno-builtin-cosf",
"-fno-builtin-sinf",
]
}
if (use_fuzzing_engine && optimize_for_fuzzing) {
# Used in Chromium's overrides to disable logging
defines += [ "WEBRTC_UNSAFE_FUZZER_MODE" ]
}
if (!build_with_chromium && rtc_win_undef_unicode) {
cflags += [
"/UUNICODE",
"/U_UNICODE",
]
}
}
config("common_objc") {
frameworks = [ "Foundation.framework" ]
if (rtc_use_metal_rendering) {
defines = [ "RTC_SUPPORTS_METAL" ]
}
}
if (!build_with_chromium) {
# Target to build all the WebRTC production code.
rtc_static_library("webrtc") {
# Only the root target and the test should depend on this.
visibility = [
"//:default",
"//:webrtc_lib_link_test",
]
sources = []
complete_static_lib = true
suppressed_configs += [ "//build/config/compiler:thin_archive" ]
defines = []
deps = [
"api:create_peerconnection_factory",
"api:libjingle_peerconnection_api",
"api:rtc_error",
"api:transport_api",
"api/crypto",
"api/rtc_event_log:rtc_event_log_factory",
"api/task_queue",
"api/task_queue:default_task_queue_factory",
"audio",
"call",
"common_audio",
"common_video",
"logging:rtc_event_log_api",
"media",
"modules",
"modules/video_capture:video_capture_internal_impl",
"p2p:rtc_p2p",
"pc:libjingle_peerconnection",
"pc:peerconnection",
"pc:rtc_pc",
"pc:rtc_pc_base",
"rtc_base",
"sdk",
"video",
]
if (rtc_include_builtin_audio_codecs) {
deps += [
"api/audio_codecs:builtin_audio_decoder_factory",
"api/audio_codecs:builtin_audio_encoder_factory",
]
}
if (rtc_include_builtin_video_codecs) {
deps += [
"api/video_codecs:builtin_video_decoder_factory",
"api/video_codecs:builtin_video_encoder_factory",
]
}
if (build_with_mozilla) {
deps += [
"api/video:video_frame",
"api/video:video_rtp_headers",
]
} else {
deps += [
"api",
"logging",
"p2p",
"pc",
"stats",
]
}
if (rtc_enable_protobuf) {
deps += [ "logging:rtc_event_log_proto" ]
}
}
if (rtc_include_tests && !is_asan) {
rtc_executable("webrtc_lib_link_test") {
testonly = true
sources = [ "webrtc_lib_link_test.cc" ]
deps = [
# NOTE: Don't add deps here. If this test fails to link, it means you
# need to add stuff to the webrtc static lib target above.
":webrtc",
]
}
}
}
if (use_libfuzzer || use_afl) {
# This target is only here for gn to discover fuzzer build targets under
# webrtc/test/fuzzers/.
group("webrtc_fuzzers_dummy") {
testonly = true
deps = [ "test/fuzzers:webrtc_fuzzer_main" ]
}
}
if (rtc_include_tests) {
rtc_test("rtc_unittests") {
testonly = true
deps = [
"api:compile_all_headers",
"api:rtc_api_unittests",
"api/audio/test:audio_api_unittests",
"api/audio_codecs/test:audio_codecs_api_unittests",
"api/numerics:numerics_unittests",
"api/transport:stun_unittest",
"api/video/test:rtc_api_video_unittests",
"api/video_codecs/test:video_codecs_api_unittests",
"call:fake_network_pipe_unittests",
"p2p:libstunprober_unittests",
"p2p:rtc_p2p_unittests",
"rtc_base:robo_caller_unittests",
"rtc_base:rtc_base_approved_unittests",
"rtc_base:rtc_base_unittests",
"rtc_base:rtc_json_unittests",
"rtc_base:rtc_numerics_unittests",
"rtc_base:rtc_operations_chain_unittests",
"rtc_base:rtc_task_queue_unittests",
"rtc_base:sigslot_unittest",
"rtc_base:untyped_function_unittest",
"rtc_base:weak_ptr_unittests",
"rtc_base/experiments:experiments_unittests",
"rtc_base/synchronization:sequence_checker_unittests",
"rtc_base/task_utils:pending_task_safety_flag_unittests",
"rtc_base/task_utils:to_queued_task_unittests",
"sdk:sdk_tests",
"test:rtp_test_utils",
"test:test_main",
"test/network:network_emulation_unittests",
]
if (rtc_enable_protobuf) {
deps += [ "logging:rtc_event_log_tests" ]
}
if (is_android) {
# Do not use Chromium's launcher. native_unittests defines its own JNI_OnLoad.
use_default_launcher = false
deps += [
"sdk/android:native_unittests",
"sdk/android:native_unittests_java",
"//testing/android/native_test:native_test_support",
]
shard_timeout = 900
}
if (is_ios || is_mac) {
deps += [ "sdk:rtc_unittests_objc" ]
}
}
rtc_test("benchmarks") {
testonly = true
deps = [
"rtc_base/synchronization:mutex_benchmark",
"test:benchmark_main",
]
}
# This runs tests that must run in real time and therefore can take some
# time to execute. They are in a separate executable to avoid making the
# regular unittest suite too slow to run frequently.
rtc_test("slow_tests") {
testonly = true
deps = [
"rtc_base/task_utils:repeating_task_unittests",
"test:test_main",
]
}
# TODO(pbos): Rename test suite, this is no longer "just" for video targets.
video_engine_tests_resources = [
"resources/foreman_cif_short.yuv",
"resources/voice_engine/audio_long16.pcm",
]
if (is_ios) {
bundle_data("video_engine_tests_bundle_data") {
testonly = true
sources = video_engine_tests_resources
outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
}
}
rtc_test("video_engine_tests") {
testonly = true
deps = [
"audio:audio_tests",
# TODO(eladalon): call_tests aren't actually video-specific, so we
# should move them to a more appropriate test suite.
"call:call_tests",
"call/adaptation:resource_adaptation_tests",
"test:test_common",
"test:test_main",
"test:video_test_common",
"video:video_tests",
"video/adaptation:video_adaptation_tests",
]
data = video_engine_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
shard_timeout = 900
}
if (is_ios) {
deps += [ ":video_engine_tests_bundle_data" ]
}
}
webrtc_perf_tests_resources = [
"resources/ConferenceMotion_1280_720_50.yuv",
"resources/audio_coding/speech_mono_16kHz.pcm",
"resources/audio_coding/speech_mono_32_48kHz.pcm",
"resources/audio_coding/testfile32kHz.pcm",
"resources/difficult_photo_1850_1110.yuv",
"resources/foreman_cif.yuv",
"resources/paris_qcif.yuv",
"resources/photo_1850_1110.yuv",
"resources/presentation_1850_1110.yuv",
"resources/voice_engine/audio_long16.pcm",
"resources/web_screenshot_1850_1110.yuv",
]
if (is_ios) {
bundle_data("webrtc_perf_tests_bundle_data") {
testonly = true
sources = webrtc_perf_tests_resources
outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ]
}
}
rtc_test("webrtc_perf_tests") {
testonly = true
deps = [
"audio:audio_perf_tests",
"call:call_perf_tests",
"modules/audio_coding:audio_coding_perf_tests",
"modules/audio_processing:audio_processing_perf_tests",
"pc:peerconnection_perf_tests",
"test:test_main",
"video:video_full_stack_tests",
"video:video_pc_full_stack_tests",
]
data = webrtc_perf_tests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
shard_timeout = 4500
}
if (is_ios) {
deps += [ ":webrtc_perf_tests_bundle_data" ]
}
}
rtc_test("webrtc_nonparallel_tests") {
testonly = true
deps = [ "rtc_base:rtc_base_nonparallel_tests" ]
if (is_android) {
deps += [ "//testing/android/native_test:native_test_support" ]
shard_timeout = 900
}
}
rtc_test("voip_unittests") {
testonly = true
deps = [
"api/voip:voip_engine_factory_unittests",
"audio/voip/test:audio_channel_unittests",
"audio/voip/test:audio_egress_unittests",
"audio/voip/test:audio_ingress_unittests",
"audio/voip/test:voip_core_unittests",
"test:test_main",
]
}
}
# ---- Poisons ----
#
# Here is one empty dummy target for each poison type (needed because
# "being poisonous with poison type foo" is implemented as "depends on
# //:poison_foo").
#
# The set of poison_* targets needs to be kept in sync with the
# `all_poison_types` list in webrtc.gni.
#
group("poison_audio_codecs") {
}
group("poison_default_task_queue") {
}
group("poison_rtc_json") {
}
group("poison_software_video_codecs") {
}

22
third_party/libwebrtc/OWNERS поставляемый
Просмотреть файл

@ -1,16 +1,22 @@
henrika@webrtc.org
juberti@webrtc.org
kwiberg@webrtc.org
mflodman@webrtc.org
niklas.enbom@webrtc.org
tina.legrand@webrtc.org
stefan@webrtc.org
tommi@webrtc.org
per-file .gitignore=*
per-file .gn=kjellander@webrtc.org
per-file *.gyp=*
per-file .gn=mbonadei@webrtc.org
per-file *.gn=mbonadei@webrtc.org
per-file *.gni=mbonadei@webrtc.org
per-file AUTHORS=*
per-file BUILD.gn=kjellander@webrtc.org
per-file DEPS=*
per-file PRESUBMIT.py=kjellander@webrtc.org
per-file setup_links.py=*
per-file sync_chromium.py=kjellander@webrtc.org
per-file pylintrc=phoglund@webrtc.org
per-file WATCHLISTS=*
per-file abseil-in-webrtc.md=danilchap@webrtc.org
per-file abseil-in-webrtc.md=kwiberg@webrtc.org
per-file abseil-in-webrtc.md=mbonadei@webrtc.org
per-file style-guide.md=danilchap@webrtc.org
per-file style-guide.md=kwiberg@webrtc.org
per-file native-api.md=kwiberg@webrtc.org
# COMPONENT: Internals>WebRTC

24
third_party/libwebrtc/PATENTS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,24 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the WebRTC code package.
Google hereby grants to you a perpetual, worldwide, non-exclusive,
no-charge, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import,
transfer, and otherwise run, modify and propagate the contents of this
implementation of the WebRTC code package, where such license applies
only to those patent claims, both currently owned by Google and
acquired in the future, licensable by Google that are necessarily
infringed by this implementation of the WebRTC code package. This
grant does not include claims that would be infringed only as a
consequence of further modification of this implementation. If you or
your agent or exclusive licensee institute or order or agree to the
institution of patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that this
implementation of the WebRTC code package or any code incorporated
within this implementation of the WebRTC code package constitutes
direct or contributory patent infringement, or inducement of patent
infringement, then any patent rights granted to you under this License
for this implementation of the WebRTC code package shall terminate as
of the date such litigation is filed.

1
third_party/libwebrtc/README.mozilla поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
libwebrtc updated from commit https://github.com/mozilla/libwebrtc/archive/149d693483e9055f574d9d65b01fe75a186b654b.tar.gz on 2020-11-30T15:48:48.472088.

1050
third_party/libwebrtc/api/BUILD.gn поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

310
third_party/libwebrtc/api/DEPS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,310 @@
# This is supposed to be a complete list of top-level directories,
# excepting only api/ itself.
include_rules = [
"-audio",
"-base",
"-build",
"-buildtools",
"-build_overrides",
"-call",
"-common_audio",
"-common_video",
"-data",
"-examples",
"-ios",
"-infra",
"-logging",
"-media",
"-modules",
"-out",
"-p2p",
"-pc",
"-resources",
"-rtc_base",
"-rtc_tools",
"-sdk",
"-stats",
"-style-guide",
"-system_wrappers",
"-test",
"-testing",
"-third_party",
"-tools",
"-tools_webrtc",
"-video",
"-external/webrtc/webrtc", # Android platform build.
"-libyuv",
"-common_types.h",
"-WebRTC",
]
specific_include_rules = {
# Some internal headers are allowed even in API headers:
".*\.h": [
"+rtc_base/checks.h",
"+rtc_base/system/rtc_export.h",
"+rtc_base/system/rtc_export_template.h",
"+rtc_base/units/unit_base.h",
"+rtc_base/deprecation.h",
],
"array_view\.h": [
"+rtc_base/type_traits.h",
],
# Needed because AudioEncoderOpus is in the wrong place for
# backwards compatibility reasons. See
# https://bugs.chromium.org/p/webrtc/issues/detail?id=7847
"audio_encoder_opus\.h": [
"+modules/audio_coding/codecs/opus/audio_encoder_opus.h",
],
"async_resolver_factory\.h": [
"+rtc_base/async_resolver_interface.h",
],
"candidate\.h": [
"+rtc_base/network_constants.h",
"+rtc_base/socket_address.h",
],
"data_channel_interface\.h": [
"+rtc_base/copy_on_write_buffer.h",
"+rtc_base/ref_count.h",
],
"data_channel_transport_interface\.h": [
"+rtc_base/copy_on_write_buffer.h",
],
"dtls_transport_interface\.h": [
"+rtc_base/ref_count.h",
"+rtc_base/ssl_certificate.h",
],
"dtmf_sender_interface\.h": [
"+rtc_base/ref_count.h",
],
"fec_controller\.h": [
"+modules/include/module_fec_types.h",
],
"frame_transformer_interface\.h": [
"+rtc_base/ref_count.h",
],
"ice_transport_interface\.h": [
"+rtc_base/ref_count.h",
],
"jsep\.h": [
"+rtc_base/ref_count.h",
],
"jsep_ice_candidate\.h": [
"+rtc_base/constructor_magic.h",
],
"jsep_session_description\.h": [
"+rtc_base/constructor_magic.h",
],
"media_stream_interface\.h": [
"+modules/audio_processing/include/audio_processing_statistics.h",
"+rtc_base/ref_count.h",
],
"packet_socket_factory\.h": [
"+rtc_base/proxy_info.h",
"+rtc_base/async_packet_socket.h",
],
"peer_connection_factory_proxy\.h": [
"+rtc_base/bind.h",
],
"peer_connection_interface\.h": [
"+media/base/media_config.h",
"+media/base/media_engine.h",
"+p2p/base/port_allocator.h",
"+rtc_base/network_monitor_factory.h",
"+rtc_base/rtc_certificate.h",
"+rtc_base/rtc_certificate_generator.h",
"+rtc_base/socket_address.h",
"+rtc_base/ssl_certificate.h",
"+rtc_base/ssl_stream_adapter.h",
],
"proxy\.h": [
"+rtc_base/event.h",
"+rtc_base/message_handler.h", # Inherits from it.
"+rtc_base/ref_counted_object.h",
"+rtc_base/thread.h",
],
"ref_counted_base\.h": [
"+rtc_base/constructor_magic.h",
"+rtc_base/ref_count.h",
"+rtc_base/ref_counter.h",
],
"rtc_error\.h": [
"+rtc_base/logging.h",
],
"rtc_event_log_output_file.h": [
# For private member and constructor.
"+rtc_base/system/file_wrapper.h",
],
"rtp_receiver_interface\.h": [
"+rtc_base/ref_count.h",
],
"rtp_sender_interface\.h": [
"+rtc_base/ref_count.h",
],
"rtp_transceiver_interface\.h": [
"+rtc_base/ref_count.h",
],
"sctp_transport_interface\.h": [
"+rtc_base/ref_count.h",
],
"set_local_description_observer_interface\.h": [
"+rtc_base/ref_count.h",
],
"set_remote_description_observer_interface\.h": [
"+rtc_base/ref_count.h",
],
"stats_types\.h": [
"+rtc_base/constructor_magic.h",
"+rtc_base/ref_count.h",
"+rtc_base/string_encode.h",
"+rtc_base/thread_checker.h",
],
"uma_metrics\.h": [
"+rtc_base/ref_count.h",
],
"audio_frame\.h": [
"+rtc_base/constructor_magic.h",
],
"audio_mixer\.h": [
"+rtc_base/ref_count.h",
],
"audio_decoder\.h": [
"+rtc_base/buffer.h",
"+rtc_base/constructor_magic.h",
],
"audio_decoder_factory\.h": [
"+rtc_base/ref_count.h",
],
"audio_decoder_factory_template\.h": [
"+rtc_base/ref_counted_object.h",
],
"audio_encoder\.h": [
"+rtc_base/buffer.h",
],
"audio_encoder_factory\.h": [
"+rtc_base/ref_count.h",
],
"audio_encoder_factory_template\.h": [
"+rtc_base/ref_counted_object.h",
],
"frame_decryptor_interface\.h": [
"+rtc_base/ref_count.h",
],
"frame_encryptor_interface\.h": [
"+rtc_base/ref_count.h",
],
"rtc_stats_collector_callback\.h": [
"+rtc_base/ref_count.h",
],
"rtc_stats_report\.h": [
"+rtc_base/ref_count.h",
"+rtc_base/ref_counted_object.h",
],
"audioproc_float\.h": [
"+modules/audio_processing/include/audio_processing.h",
],
"echo_detector_creator\.h": [
"+modules/audio_processing/include/audio_processing.h",
],
"fake_frame_decryptor\.h": [
"+rtc_base/ref_counted_object.h",
],
"fake_frame_encryptor\.h": [
"+rtc_base/ref_counted_object.h",
],
"mock.*\.h": [
"+test/gmock.h",
],
"simulated_network\.h": [
"+rtc_base/random.h",
"+rtc_base/thread_annotations.h",
],
"test_dependency_factory\.h": [
"+rtc_base/thread_checker.h",
],
"time_controller\.h": [
"+rtc_base/thread.h",
],
"videocodec_test_fixture\.h": [
"+modules/video_coding/include/video_codec_interface.h"
],
"video_encoder_config\.h": [
"+rtc_base/ref_count.h",
],
# .cc files in api/ should not be restricted in what they can #include,
# so we re-add all the top-level directories here. (That's because .h
# files leak their #includes to whoever's #including them, but .cc files
# do not since no one #includes them.)
".*\.cc": [
"+audio",
"+call",
"+common_audio",
"+common_video",
"+examples",
"+logging",
"+media",
"+modules",
"+p2p",
"+pc",
"+rtc_base",
"+rtc_tools",
"+sdk",
"+stats",
"+system_wrappers",
"+test",
"+tools",
"+tools_webrtc",
"+video",
"+third_party",
],
}

71
third_party/libwebrtc/api/DESIGN.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,71 @@
# Design considerations
The header files in this directory form the API to the WebRTC library
that is intended for client applications' use.
This API is designed to be used on top of a multithreaded runtime.
The public API functions are designed to be called from a single thread*
(the "client thread"), and can do internal dispatching to the thread
where activity needs to happen. Those threads can be passed in by the
client, typically as arguments to factory constructors, or they can be
created by the library if factory constructors that don't take threads
are used.
Many of the functions are designed to be used in an asynchronous manner,
where a function is called to initiate an activity, and a callback will
be called when the activity is completed, or a handler function will
be called on an observer object when interesting events happen.
Note: Often, even functions that look like simple functions (such as
information query functions) will need to jump between threads to perform
their function - which means that things may happen on other threads
between calls; writing "increment(x); increment(x)" is not a safe
way to increment X by exactly two, since the increment function may have
jumped to another thread that already had a queue of things to handle,
causing large amounts of other activity to have intervened between
the two calls.
(*) The term "thread" is used here to denote any construct that guarantees
sequential execution - other names for such constructs are task runners
and sequenced task queues.
# Client threads and callbacks
At the moment, the API does not give any guarantee on which thread* the
callbacks and events are called on. So it's best to write all callback
and event handlers like this (pseudocode):
<pre>
void ObserverClass::Handler(event) {
if (!called_on_client_thread()) {
dispatch_to_client_thread(bind(handler(event)));
return;
}
// Process event, we're now on the right thread
}
</pre>
In the future, the implementation may change to always call the callbacks
and event handlers on the client thread.
# Implementation considerations
The C++ classes that are part of the public API are also used to derive
classes that form part of the implementation.
This should not directly concern users of the API, but may matter if one
wants to look at how the WebRTC library is implemented, or for legacy code
that directly accesses internal APIs.
Many APIs are defined in terms of a "proxy object", which will do a blocking
dispatch of the function to another thread, and an "implementation object"
which will do the actual
work, but can only be created, invoked and destroyed on its "home thread".
Usually, the classes are named "xxxInterface" (in api/), "xxxProxy" and
"xxx" (not in api/). WebRTC users should only need to depend on the files
in api/. In many cases, the "xxxProxy" and "xxx" classes are subclasses
of "xxxInterface", but this property is an implementation feature only,
and should not be relied upon.
The threading properties of these internal APIs are NOT documented in
this note, and need to be understood by inspecting those classes.

14
third_party/libwebrtc/api/OWNERS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,14 @@
crodbro@webrtc.org
deadbeef@webrtc.org
hta@webrtc.org
juberti@webrtc.org
kwiberg@webrtc.org
magjed@webrtc.org
perkj@webrtc.org
tkchin@webrtc.org
tommi@webrtc.org
per-file peer_connection*=hbos@webrtc.org
per-file DEPS=mbonadei@webrtc.org
per-file DEPS=kwiberg@webrtc.org

24
third_party/libwebrtc/api/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,24 @@
# How to write code in the `api/` directory
Mostly, just follow the regular [style guide](../style-guide.md), but:
* Note that `api/` code is not exempt from the “`.h` and `.cc` files come in
pairs” rule, so if you declare something in `api/path/to/foo.h`, it should be
defined in `api/path/to/foo.cc`.
* Headers in `api/` should, if possible, not `#include` headers outside `api/`.
It's not always possible to avoid this, but be aware that it adds to a small
mountain of technical debt that we're trying to shrink.
* `.cc` files in `api/`, on the other hand, are free to `#include` headers
outside `api/`.
That is, the preferred way for `api/` code to access non-`api/` code is to call
it from a `.cc` file, so that users of our API headers won't transitively
`#include` non-public headers.
For headers in `api/` that need to refer to non-public types, forward
declarations are often a lesser evil than including non-public header files. The
usual [rules](../style-guide.md#forward-declarations) still apply, though.
`.cc` files in `api/` should preferably be kept reasonably small. If a
substantial implementation is needed, consider putting it with our non-public
code, and just call it from the `api/` `.cc` file.

23
third_party/libwebrtc/api/adaptation/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,23 @@
# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
rtc_source_set("resource_adaptation_api") {
visibility = [ "*" ]
sources = [
"resource.cc",
"resource.h",
]
deps = [
"../../api:scoped_refptr",
"../../rtc_base:refcount",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:rtc_export",
]
}

7
third_party/libwebrtc/api/adaptation/DEPS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,7 @@
specific_include_rules = {
"resource\.h": [
# ref_count.h is a public_deps of rtc_base_approved. Necessary because of
# rtc::RefCountInterface.
"+rtc_base/ref_count.h",
],
}

30
third_party/libwebrtc/api/adaptation/resource.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,30 @@
/*
* Copyright 2019 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/adaptation/resource.h"
namespace webrtc {
// Converts a ResourceUsageState enumerator to its string name.
//
// Returns "kOveruse" or "kUnderuse" for the two defined enumerators. The
// original body had no return statement after the switch, so an out-of-range
// value cast into ResourceUsageState would flow off the end of a
// value-returning function (undefined behavior, and a -Wreturn-type warning).
// A deliberately non-exhaustive fallback return is added after the switch so
// the exhaustive-switch warning for newly added enumerators is preserved
// while the UB path is closed.
const char* ResourceUsageStateToString(ResourceUsageState usage_state) {
  switch (usage_state) {
    case ResourceUsageState::kOveruse:
      return "kOveruse";
    case ResourceUsageState::kUnderuse:
      return "kUnderuse";
  }
  // Unreachable for valid enumerators; reached only if an invalid value was
  // cast into the enum class.
  return "<unknown>";
}
// Out-of-line definitions for the special members declared in
// api/adaptation/resource.h. Defaulted definitions are behaviorally
// identical to the original empty bodies.
ResourceListener::~ResourceListener() = default;

Resource::Resource() = default;

Resource::~Resource() = default;
} // namespace webrtc

67
third_party/libwebrtc/api/adaptation/resource.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,67 @@
/*
* Copyright 2019 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_ADAPTATION_RESOURCE_H_
#define API_ADAPTATION_RESOURCE_H_

#include <string>

#include "api/scoped_refptr.h"
#include "rtc_base/ref_count.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {

class Resource;

// The usage level a Resource reports to its listener.
enum class ResourceUsageState {
  // Action is needed to minimize the load on this resource.
  kOveruse,
  // Increasing the load on this resource is desired, if possible.
  kUnderuse,
};

// Returns the name of the enumerator, e.g. "kOveruse".
RTC_EXPORT const char* ResourceUsageStateToString(
    ResourceUsageState usage_state);

// Observer interface through which a Resource reports usage measurements.
class RTC_EXPORT ResourceListener {
 public:
  virtual ~ResourceListener();

  // Called with the |resource| that produced the measurement and its new
  // usage state.
  virtual void OnResourceUsageStateMeasured(
      rtc::scoped_refptr<Resource> resource,
      ResourceUsageState usage_state) = 0;
};

// A Resource monitors an implementation-specific resource. It may report
// kOveruse or kUnderuse when resource usage is high or low enough that we
// should perform some sort of mitigation to fulfil the resource's constraints.
//
// The methods on this interface are invoked on the adaptation task queue.
// Resource usage measurements may be performed on any task queue.
//
// The Resource is reference counted to prevent use-after-free when posting
// between task queues. As such, the implementation MUST NOT make any
// assumptions about which task queue Resource is destructed on.
class RTC_EXPORT Resource : public rtc::RefCountInterface {
 public:
  Resource();
  // Destruction may happen on any task queue.
  ~Resource() override;

  // Human-readable identifier of this resource, used in logging/stats.
  virtual std::string Name() const = 0;
  // The |listener| may be informed of resource usage measurements on any task
  // queue, but not after this method is invoked with the null argument.
  virtual void SetResourceListener(ResourceListener* listener) = 0;
};

}  // namespace webrtc

#endif  // API_ADAPTATION_RESOURCE_H_

Просмотреть файл

@ -12,6 +12,7 @@
#define API_ARRAY_VIEW_H_
#include <algorithm>
#include <array>
#include <type_traits>
#include "rtc_base/checks.h"
@ -169,7 +170,7 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
RTC_DCHECK_EQ(0, size);
}
// Construct an ArrayView from an array.
// Construct an ArrayView from a C-style array.
template <typename U, size_t N>
ArrayView(U (&array)[N]) // NOLINT
: ArrayView(array, N) {
@ -177,6 +178,26 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
"Array size must match ArrayView size");
}
// (Only if size is fixed.) Construct a fixed size ArrayView<T, N> from a
// non-const std::array instance. For an ArrayView with variable size, the
// used ctor is ArrayView(U& u) instead.
template <typename U,
size_t N,
typename std::enable_if<
Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
ArrayView(std::array<U, N>& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
// (Only if size is fixed.) Construct a fixed size ArrayView<T, N> where T is
// const from a const(expr) std::array instance. For an ArrayView with
// variable size, the used ctor is ArrayView(U& u) instead.
template <typename U,
size_t N,
typename std::enable_if<
Size == static_cast<std::ptrdiff_t>(N)>::type* = nullptr>
ArrayView(const std::array<U, N>& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
// (Only if size is fixed.) Construct an ArrayView from any type U that has a
// static constexpr size() method whose return value is equal to Size, and a
// data() method whose return value converts implicitly to T*. In particular,
@ -192,6 +213,14 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
: ArrayView(u.data(), u.size()) {
static_assert(U::size() == Size, "Sizes must match exactly");
}
template <
typename U,
typename std::enable_if<Size != impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {
static_assert(U::size() == Size, "Sizes must match exactly");
}
// (Only if size is variable.) Construct an ArrayView from any type U that
// has a size() method whose return value converts implicitly to size_t, and
@ -210,6 +239,12 @@ class ArrayView final : public impl::ArrayViewBase<T, Size> {
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(U& u) // NOLINT
: ArrayView(u.data(), u.size()) {}
template <
typename U,
typename std::enable_if<Size == impl::kArrayViewVarSize &&
HasDataAndSize<U, T>::value>::type* = nullptr>
ArrayView(const U& u) // NOLINT(runtime/explicit)
: ArrayView(u.data(), u.size()) {}
// Indexing and iteration. These allow mutation even if the ArrayView is
// const, because the ArrayView doesn't own the array. (To prevent mutation,
@ -258,6 +293,23 @@ inline ArrayView<T> MakeArrayView(T* data, size_t size) {
return ArrayView<T>(data, size);
}
// Only for primitive types that have the same size and alignment.
// Allow reinterpret cast of the array view to another primitive type of the
// same size.
// Template arguments order is (U, T, Size) to allow deduction of the template
// arguments in client calls: reinterpret_array_view<target_type>(array_view).
template <typename U, typename T, std::ptrdiff_t Size>
inline ArrayView<U, Size> reinterpret_array_view(ArrayView<T, Size> view) {
static_assert(sizeof(U) == sizeof(T) && alignof(U) == alignof(T),
"ArrayView reinterpret_cast is only supported for casting "
"between views that represent the same chunk of memory.");
static_assert(
std::is_fundamental<T>::value && std::is_fundamental<U>::value,
"ArrayView reinterpret_cast is only supported for casting between "
"fundamental types.");
return ArrayView<U, Size>(reinterpret_cast<U*>(view.data()), view.size());
}
} // namespace rtc
#endif // API_ARRAY_VIEW_H_

578
third_party/libwebrtc/api/array_view_unittest.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,578 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/array_view.h"
#include <algorithm>
#include <array>
#include <string>
#include <utility>
#include <vector>
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/gunit.h"
#include "test/gmock.h"
namespace rtc {
namespace {

using ::testing::ElementsAre;
using ::testing::IsEmpty;

// Takes the argument by value as a variable-size ArrayView<T>, exercising
// whatever implicit conversion the call site provides, and returns the
// resulting size.
template <typename T>
size_t Call(ArrayView<T> av) {
  return av.size();
}

// Like Call(), but forces a conversion to a fixed-size ArrayView<T, N>.
// The body is intentionally empty; the check is that the conversion compiles.
template <typename T, size_t N>
void CallFixed(ArrayView<T, N> av) {}

}  // namespace
TEST(ArrayViewDeathTest, TestConstructFromPtrAndArray) {
char arr[] = "Arrr!";
const char carr[] = "Carrr!";
EXPECT_EQ(6u, Call<const char>(arr));
EXPECT_EQ(7u, Call<const char>(carr));
EXPECT_EQ(6u, Call<char>(arr));
// Call<char>(carr); // Compile error, because can't drop const.
// Call<int>(arr); // Compile error, because incompatible types.
ArrayView<int*> x;
EXPECT_EQ(0u, x.size());
EXPECT_EQ(nullptr, x.data());
ArrayView<char> y = arr;
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
ArrayView<char, 6> yf = arr;
static_assert(yf.size() == 6, "");
EXPECT_EQ(arr, yf.data());
ArrayView<const char> z(arr + 1, 3);
EXPECT_EQ(3u, z.size());
EXPECT_EQ(arr + 1, z.data());
ArrayView<const char, 3> zf(arr + 1, 3);
static_assert(zf.size() == 3, "");
EXPECT_EQ(arr + 1, zf.data());
ArrayView<const char> w(arr, 2);
EXPECT_EQ(2u, w.size());
EXPECT_EQ(arr, w.data());
ArrayView<const char, 2> wf(arr, 2);
static_assert(wf.size() == 2, "");
EXPECT_EQ(arr, wf.data());
ArrayView<char> q(arr, 0);
EXPECT_EQ(0u, q.size());
EXPECT_EQ(nullptr, q.data());
ArrayView<char, 0> qf(arr, 0);
static_assert(qf.size() == 0, "");
EXPECT_EQ(nullptr, qf.data());
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
// DCHECK error (nullptr with nonzero size).
EXPECT_DEATH(ArrayView<int>(static_cast<int*>(nullptr), 5), "");
#endif
// These are compile errors, because incompatible types.
// ArrayView<int> m = arr;
// ArrayView<float> n(arr + 2, 2);
}
TEST(ArrayViewTest, TestCopyConstructorVariableLvalue) {
char arr[] = "Arrr!";
ArrayView<char> x = arr;
EXPECT_EQ(6u, x.size());
EXPECT_EQ(arr, x.data());
ArrayView<char> y = x; // Copy non-const -> non-const.
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
ArrayView<const char> z = x; // Copy non-const -> const.
EXPECT_EQ(6u, z.size());
EXPECT_EQ(arr, z.data());
ArrayView<const char> w = z; // Copy const -> const.
EXPECT_EQ(6u, w.size());
EXPECT_EQ(arr, w.data());
// ArrayView<char> v = z; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyConstructorVariableRvalue) {
char arr[] = "Arrr!";
ArrayView<char> x = arr;
EXPECT_EQ(6u, x.size());
EXPECT_EQ(arr, x.data());
ArrayView<char> y = std::move(x); // Copy non-const -> non-const.
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
ArrayView<const char> z = std::move(x); // Copy non-const -> const.
EXPECT_EQ(6u, z.size());
EXPECT_EQ(arr, z.data());
ArrayView<const char> w = std::move(z); // Copy const -> const.
EXPECT_EQ(6u, w.size());
EXPECT_EQ(arr, w.data());
// ArrayView<char> v = std::move(z); // Error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyConstructorFixedLvalue) {
char arr[] = "Arrr!";
ArrayView<char, 6> x = arr;
static_assert(x.size() == 6, "");
EXPECT_EQ(arr, x.data());
// Copy fixed -> fixed.
ArrayView<char, 6> y = x; // Copy non-const -> non-const.
static_assert(y.size() == 6, "");
EXPECT_EQ(arr, y.data());
ArrayView<const char, 6> z = x; // Copy non-const -> const.
static_assert(z.size() == 6, "");
EXPECT_EQ(arr, z.data());
ArrayView<const char, 6> w = z; // Copy const -> const.
static_assert(w.size() == 6, "");
EXPECT_EQ(arr, w.data());
// ArrayView<char, 6> v = z; // Compile error, because can't drop const.
// Copy fixed -> variable.
ArrayView<char> yv = x; // Copy non-const -> non-const.
EXPECT_EQ(6u, yv.size());
EXPECT_EQ(arr, yv.data());
ArrayView<const char> zv = x; // Copy non-const -> const.
EXPECT_EQ(6u, zv.size());
EXPECT_EQ(arr, zv.data());
ArrayView<const char> wv = z; // Copy const -> const.
EXPECT_EQ(6u, wv.size());
EXPECT_EQ(arr, wv.data());
// ArrayView<char> vv = z; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyConstructorFixedRvalue) {
char arr[] = "Arrr!";
ArrayView<char, 6> x = arr;
static_assert(x.size() == 6, "");
EXPECT_EQ(arr, x.data());
// Copy fixed -> fixed.
ArrayView<char, 6> y = std::move(x); // Copy non-const -> non-const.
static_assert(y.size() == 6, "");
EXPECT_EQ(arr, y.data());
ArrayView<const char, 6> z = std::move(x); // Copy non-const -> const.
static_assert(z.size() == 6, "");
EXPECT_EQ(arr, z.data());
ArrayView<const char, 6> w = std::move(z); // Copy const -> const.
static_assert(w.size() == 6, "");
EXPECT_EQ(arr, w.data());
// ArrayView<char, 6> v = std::move(z); // Error, because can't drop const.
// Copy fixed -> variable.
ArrayView<char> yv = std::move(x); // Copy non-const -> non-const.
EXPECT_EQ(6u, yv.size());
EXPECT_EQ(arr, yv.data());
ArrayView<const char> zv = std::move(x); // Copy non-const -> const.
EXPECT_EQ(6u, zv.size());
EXPECT_EQ(arr, zv.data());
ArrayView<const char> wv = std::move(z); // Copy const -> const.
EXPECT_EQ(6u, wv.size());
EXPECT_EQ(arr, wv.data());
// ArrayView<char> vv = std::move(z); // Error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyAssignmentVariableLvalue) {
char arr[] = "Arrr!";
ArrayView<char> x(arr);
EXPECT_EQ(6u, x.size());
EXPECT_EQ(arr, x.data());
ArrayView<char> y;
y = x; // Copy non-const -> non-const.
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
ArrayView<const char> z;
z = x; // Copy non-const -> const.
EXPECT_EQ(6u, z.size());
EXPECT_EQ(arr, z.data());
ArrayView<const char> w;
w = z; // Copy const -> const.
EXPECT_EQ(6u, w.size());
EXPECT_EQ(arr, w.data());
// ArrayView<char> v;
// v = z; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyAssignmentVariableRvalue) {
char arr[] = "Arrr!";
ArrayView<char> x(arr);
EXPECT_EQ(6u, x.size());
EXPECT_EQ(arr, x.data());
ArrayView<char> y;
y = std::move(x); // Copy non-const -> non-const.
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
ArrayView<const char> z;
z = std::move(x); // Copy non-const -> const.
EXPECT_EQ(6u, z.size());
EXPECT_EQ(arr, z.data());
ArrayView<const char> w;
w = std::move(z); // Copy const -> const.
EXPECT_EQ(6u, w.size());
EXPECT_EQ(arr, w.data());
// ArrayView<char> v;
// v = std::move(z); // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyAssignmentFixedLvalue) {
char arr[] = "Arrr!";
char init[] = "Init!";
ArrayView<char, 6> x(arr);
EXPECT_EQ(arr, x.data());
// Copy fixed -> fixed.
ArrayView<char, 6> y(init);
y = x; // Copy non-const -> non-const.
EXPECT_EQ(arr, y.data());
ArrayView<const char, 6> z(init);
z = x; // Copy non-const -> const.
EXPECT_EQ(arr, z.data());
ArrayView<const char, 6> w(init);
w = z; // Copy const -> const.
EXPECT_EQ(arr, w.data());
// ArrayView<char, 6> v(init);
// v = z; // Compile error, because can't drop const.
// Copy fixed -> variable.
ArrayView<char> yv;
yv = x; // Copy non-const -> non-const.
EXPECT_EQ(6u, yv.size());
EXPECT_EQ(arr, yv.data());
ArrayView<const char> zv;
zv = x; // Copy non-const -> const.
EXPECT_EQ(6u, zv.size());
EXPECT_EQ(arr, zv.data());
ArrayView<const char> wv;
wv = z; // Copy const -> const.
EXPECT_EQ(6u, wv.size());
EXPECT_EQ(arr, wv.data());
// ArrayView<char> v;
// v = z; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestCopyAssignmentFixedRvalue) {
char arr[] = "Arrr!";
char init[] = "Init!";
ArrayView<char, 6> x(arr);
EXPECT_EQ(arr, x.data());
// Copy fixed -> fixed.
ArrayView<char, 6> y(init);
y = std::move(x); // Copy non-const -> non-const.
EXPECT_EQ(arr, y.data());
ArrayView<const char, 6> z(init);
z = std::move(x); // Copy non-const -> const.
EXPECT_EQ(arr, z.data());
ArrayView<const char, 6> w(init);
w = std::move(z); // Copy const -> const.
EXPECT_EQ(arr, w.data());
// ArrayView<char, 6> v(init);
// v = std::move(z); // Compile error, because can't drop const.
// Copy fixed -> variable.
ArrayView<char> yv;
yv = std::move(x); // Copy non-const -> non-const.
EXPECT_EQ(6u, yv.size());
EXPECT_EQ(arr, yv.data());
ArrayView<const char> zv;
zv = std::move(x); // Copy non-const -> const.
EXPECT_EQ(6u, zv.size());
EXPECT_EQ(arr, zv.data());
ArrayView<const char> wv;
wv = std::move(z); // Copy const -> const.
EXPECT_EQ(6u, wv.size());
EXPECT_EQ(arr, wv.data());
// ArrayView<char> v;
// v = std::move(z); // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestStdArray) {
  // Temporary std::array converts implicitly both to a variable-size view
  // (via Call) and to a fixed-size view (via CallFixed).
  EXPECT_EQ(4u, Call<const int>(std::array<int, 4>{1, 2, 3, 4}));
  CallFixed<const int, 3>(std::array<int, 3>{2, 3, 4});

  constexpr size_t kSize = 5;
  std::array<float, kSize> backing{};

  // A fixed-size view shares the array's storage and its compile-time size.
  rtc::ArrayView<float, kSize> fixed_view(backing);
  EXPECT_EQ(backing.data(), fixed_view.data());
  static_assert(kSize == fixed_view.size(), "");

  // A variable-size view shares the storage and reports the size at runtime.
  rtc::ArrayView<float> var_view(backing);
  EXPECT_EQ(backing.data(), var_view.data());
  EXPECT_EQ(kSize, var_view.size());
}
TEST(ArrayViewTest, TestConstStdArray) {
  constexpr size_t kSize = 5;

  // A constexpr std::array converts to a fixed-size view of const elements.
  constexpr std::array<float, kSize> constexpr_backing{};
  rtc::ArrayView<const float, kSize> constexpr_view(constexpr_backing);
  EXPECT_EQ(constexpr_backing.data(), constexpr_view.data());
  static_assert(constexpr_backing.size() == constexpr_view.size(), "");

  // So does a const std::array ...
  const std::array<float, kSize> const_backing{};
  rtc::ArrayView<const float, kSize> const_view(const_backing);
  EXPECT_EQ(const_backing.data(), const_view.data());
  static_assert(const_backing.size() == const_view.size(), "");

  // ... and a mutable one (const may be added, never dropped).
  std::array<float, kSize> mutable_backing{};
  rtc::ArrayView<const float, kSize> mutable_backing_view(mutable_backing);
  EXPECT_EQ(mutable_backing.data(), mutable_backing_view.data());
  static_assert(mutable_backing.size() == mutable_backing_view.size(), "");
}
TEST(ArrayViewTest, TestStdVector) {
EXPECT_EQ(3u, Call<const int>(std::vector<int>{4, 5, 6}));
std::vector<int> v;
v.push_back(3);
v.push_back(11);
EXPECT_EQ(2u, Call<const int>(v));
EXPECT_EQ(2u, Call<int>(v));
// Call<unsigned int>(v); // Compile error, because incompatible types.
ArrayView<int> x = v;
EXPECT_EQ(2u, x.size());
EXPECT_EQ(v.data(), x.data());
ArrayView<const int> y;
y = v;
EXPECT_EQ(2u, y.size());
EXPECT_EQ(v.data(), y.data());
// ArrayView<double> d = v; // Compile error, because incompatible types.
const std::vector<int> cv;
EXPECT_EQ(0u, Call<const int>(cv));
// Call<int>(cv); // Compile error, because can't drop const.
ArrayView<const int> z = cv;
EXPECT_EQ(0u, z.size());
EXPECT_EQ(nullptr, z.data());
// ArrayView<int> w = cv; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestRtcBuffer) {
rtc::Buffer b = "so buffer";
EXPECT_EQ(10u, Call<const uint8_t>(b));
EXPECT_EQ(10u, Call<uint8_t>(b));
// Call<int8_t>(b); // Compile error, because incompatible types.
ArrayView<uint8_t> x = b;
EXPECT_EQ(10u, x.size());
EXPECT_EQ(b.data(), x.data());
ArrayView<const uint8_t> y;
y = b;
EXPECT_EQ(10u, y.size());
EXPECT_EQ(b.data(), y.data());
// ArrayView<char> d = b; // Compile error, because incompatible types.
const rtc::Buffer cb = "very const";
EXPECT_EQ(11u, Call<const uint8_t>(cb));
// Call<uint8_t>(cb); // Compile error, because can't drop const.
ArrayView<const uint8_t> z = cb;
EXPECT_EQ(11u, z.size());
EXPECT_EQ(cb.data(), z.data());
// ArrayView<uint8_t> w = cb; // Compile error, because can't drop const.
}
TEST(ArrayViewTest, TestSwapVariable) {
const char arr[] = "Arrr!";
const char aye[] = "Aye, Cap'n!";
ArrayView<const char> x(arr);
EXPECT_EQ(6u, x.size());
EXPECT_EQ(arr, x.data());
ArrayView<const char> y(aye);
EXPECT_EQ(12u, y.size());
EXPECT_EQ(aye, y.data());
using std::swap;
swap(x, y);
EXPECT_EQ(12u, x.size());
EXPECT_EQ(aye, x.data());
EXPECT_EQ(6u, y.size());
EXPECT_EQ(arr, y.data());
// ArrayView<char> z;
// swap(x, z); // Compile error, because can't drop const.
}
TEST(FixArrayViewTest, TestSwapFixed) {
const char arr[] = "Arr!";
char aye[] = "Aye!";
ArrayView<const char, 5> x(arr);
EXPECT_EQ(arr, x.data());
ArrayView<const char, 5> y(aye);
EXPECT_EQ(aye, y.data());
using std::swap;
swap(x, y);
EXPECT_EQ(aye, x.data());
EXPECT_EQ(arr, y.data());
// ArrayView<char, 5> z(aye);
// swap(x, z); // Compile error, because can't drop const.
// ArrayView<const char, 4> w(aye, 4);
// swap(x, w); // Compile error, because different sizes.
}
TEST(ArrayViewDeathTest, TestIndexing) {
char arr[] = "abcdefg";
ArrayView<char> x(arr);
const ArrayView<char> y(arr);
ArrayView<const char, 8> z(arr);
EXPECT_EQ(8u, x.size());
EXPECT_EQ(8u, y.size());
EXPECT_EQ(8u, z.size());
EXPECT_EQ('b', x[1]);
EXPECT_EQ('c', y[2]);
EXPECT_EQ('d', z[3]);
x[3] = 'X';
y[2] = 'Y';
// z[1] = 'Z'; // Compile error, because z's element type is const char.
EXPECT_EQ('b', x[1]);
EXPECT_EQ('Y', y[2]);
EXPECT_EQ('X', z[3]);
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
EXPECT_DEATH(z[8], ""); // DCHECK error (index out of bounds).
#endif
}
TEST(ArrayViewTest, TestIterationEmpty) {
// Variable-size.
ArrayView<std::vector<std::vector<std::vector<std::string>>>> av;
EXPECT_EQ(av.begin(), av.end());
EXPECT_EQ(av.cbegin(), av.cend());
for (auto& e : av) {
EXPECT_TRUE(false);
EXPECT_EQ(42u, e.size()); // Dummy use of e to prevent unused var warning.
}
// Fixed-size.
ArrayView<std::vector<std::vector<std::vector<std::string>>>, 0> af;
EXPECT_EQ(af.begin(), af.end());
EXPECT_EQ(af.cbegin(), af.cend());
for (auto& e : af) {
EXPECT_TRUE(false);
EXPECT_EQ(42u, e.size()); // Dummy use of e to prevent unused var warning.
}
}
TEST(ArrayViewTest, TestIterationVariable) {
char arr[] = "Arrr!";
ArrayView<char> av(arr);
EXPECT_EQ('A', *av.begin());
EXPECT_EQ('A', *av.cbegin());
EXPECT_EQ('\0', *(av.end() - 1));
EXPECT_EQ('\0', *(av.cend() - 1));
char i = 0;
for (auto& e : av) {
EXPECT_EQ(arr + i, &e);
e = 's' + i;
++i;
}
i = 0;
for (auto& e : ArrayView<const char>(av)) {
EXPECT_EQ(arr + i, &e);
// e = 'q' + i; // Compile error, because e is a const char&.
++i;
}
}
TEST(ArrayViewTest, TestIterationFixed) {
char arr[] = "Arrr!";
ArrayView<char, 6> av(arr);
EXPECT_EQ('A', *av.begin());
EXPECT_EQ('A', *av.cbegin());
EXPECT_EQ('\0', *(av.end() - 1));
EXPECT_EQ('\0', *(av.cend() - 1));
char i = 0;
for (auto& e : av) {
EXPECT_EQ(arr + i, &e);
e = 's' + i;
++i;
}
i = 0;
for (auto& e : ArrayView<const char, 6>(av)) {
EXPECT_EQ(arr + i, &e);
// e = 'q' + i; // Compile error, because e is a const char&.
++i;
}
}
TEST(ArrayViewTest, TestEmpty) {
  // A default-constructed variable-size view is empty; a view over a
  // non-empty array is not.
  EXPECT_TRUE(ArrayView<int>().empty());
  const int elems[] = {1, 2, 3};
  ArrayView<const int> elems_view(elems);
  EXPECT_FALSE(elems_view.empty());

  // For fixed-size views, emptiness is a compile-time property.
  static_assert(ArrayView<int, 0>::empty(), "");
  static_assert(!ArrayView<int, 3>::empty(), "");
}
TEST(ArrayViewTest, TestCompare) {
int a[] = {1, 2, 3};
int b[] = {1, 2, 3};
EXPECT_EQ(ArrayView<int>(a), ArrayView<int>(a));
EXPECT_EQ((ArrayView<int, 3>(a)), (ArrayView<int, 3>(a)));
EXPECT_EQ(ArrayView<int>(a), (ArrayView<int, 3>(a)));
EXPECT_EQ(ArrayView<int>(), ArrayView<int>());
EXPECT_EQ(ArrayView<int>(), ArrayView<int>(a, 0));
EXPECT_EQ(ArrayView<int>(a, 0), ArrayView<int>(b, 0));
EXPECT_EQ((ArrayView<int, 0>(a, 0)), ArrayView<int>());
EXPECT_NE(ArrayView<int>(a), ArrayView<int>(b));
EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 3>(b)));
EXPECT_NE((ArrayView<int, 3>(a)), ArrayView<int>(b));
EXPECT_NE(ArrayView<int>(a), ArrayView<int>());
EXPECT_NE(ArrayView<int>(a), ArrayView<int>(a, 2));
EXPECT_NE((ArrayView<int, 3>(a)), (ArrayView<int, 2>(a, 2)));
}
TEST(ArrayViewTest, TestSubViewVariable) {
  int elems[] = {1, 2, 3};
  ArrayView<int> view(elems);

  // One-argument subview drops |offset| leading elements; an offset at or
  // past the end yields an empty view.
  EXPECT_EQ(view.subview(0), view);
  EXPECT_THAT(view.subview(1), ElementsAre(2, 3));
  EXPECT_THAT(view.subview(2), ElementsAre(3));
  EXPECT_THAT(view.subview(3), IsEmpty());
  EXPECT_THAT(view.subview(4), IsEmpty());

  // Two-argument subview clamps the requested size to what is available.
  EXPECT_THAT(view.subview(1, 0), IsEmpty());
  EXPECT_THAT(view.subview(1, 1), ElementsAre(2));
  EXPECT_THAT(view.subview(1, 2), ElementsAre(2, 3));
  EXPECT_THAT(view.subview(1, 3), ElementsAre(2, 3));
}
TEST(ArrayViewTest, TestSubViewFixed) {
int a[] = {1, 2, 3};
ArrayView<int, 3> av(a);
EXPECT_EQ(av.subview(0), av);
EXPECT_THAT(av.subview(1), ElementsAre(2, 3));
EXPECT_THAT(av.subview(2), ElementsAre(3));
EXPECT_THAT(av.subview(3), IsEmpty());
EXPECT_THAT(av.subview(4), IsEmpty());
EXPECT_THAT(av.subview(1, 0), IsEmpty());
EXPECT_THAT(av.subview(1, 1), ElementsAre(2));
EXPECT_THAT(av.subview(1, 2), ElementsAre(2, 3));
EXPECT_THAT(av.subview(1, 3), ElementsAre(2, 3));
}
TEST(ArrayViewTest, TestReinterpretCastFixedSize) {
uint8_t bytes[] = {1, 2, 3};
ArrayView<uint8_t, 3> uint8_av(bytes);
ArrayView<int8_t, 3> int8_av = reinterpret_array_view<int8_t>(uint8_av);
EXPECT_EQ(int8_av.size(), uint8_av.size());
EXPECT_EQ(int8_av[0], 1);
EXPECT_EQ(int8_av[1], 2);
EXPECT_EQ(int8_av[2], 3);
}
TEST(ArrayViewTest, TestReinterpretCastVariableSize) {
std::vector<int8_t> v = {1, 2, 3};
ArrayView<int8_t> int8_av(v);
ArrayView<uint8_t> uint8_av = reinterpret_array_view<uint8_t>(int8_av);
EXPECT_EQ(int8_av.size(), uint8_av.size());
EXPECT_EQ(uint8_av[0], 1);
EXPECT_EQ(uint8_av[1], 2);
EXPECT_EQ(uint8_av[2], 3);
}
} // namespace rtc

32
third_party/libwebrtc/api/async_resolver_factory.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_ASYNC_RESOLVER_FACTORY_H_
#define API_ASYNC_RESOLVER_FACTORY_H_

#include "rtc_base/async_resolver_interface.h"

namespace webrtc {

// An abstract factory for creating AsyncResolverInterfaces. This allows
// client applications to provide WebRTC with their own mechanism for
// performing DNS resolution.
class AsyncResolverFactory {
 public:
  AsyncResolverFactory() = default;
  virtual ~AsyncResolverFactory() = default;

  // Creates a new resolver instance. The caller should call Destroy on the
  // returned object to delete it.
  virtual rtc::AsyncResolverInterface* Create() = 0;
};

}  // namespace webrtc

#endif  // API_ASYNC_RESOLVER_FACTORY_H_

103
third_party/libwebrtc/api/audio/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,103 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
# AudioFrame container plus the channel-layout helpers it depends on.
rtc_library("audio_frame_api") {
  visibility = [ "*" ]
  sources = [
    "audio_frame.cc",
    "audio_frame.h",
    "channel_layout.cc",
    "channel_layout.h",
  ]
  deps = [
    "..:rtp_packet_info",
    "../../rtc_base:checks",
    "../../rtc_base:rtc_base_approved",
  ]
}

# Header-only audio mixer interface (audio_mixer.h).
rtc_source_set("audio_mixer_api") {
  visibility = [ "*" ]
  sources = [ "audio_mixer.h" ]
  deps = [
    ":audio_frame_api",
    "../../rtc_base:rtc_base_approved",
  ]
}

# Configuration struct for the AEC3 echo canceller.
rtc_library("aec3_config") {
  visibility = [ "*" ]
  sources = [
    "echo_canceller3_config.cc",
    "echo_canceller3_config.h",
  ]
  deps = [
    "../../rtc_base:checks",
    "../../rtc_base:rtc_base_approved",
    "../../rtc_base:safe_minmax",
    "../../rtc_base/system:rtc_export",
  ]
}

# JSON (de)serialization for the AEC3 config. Pulls in rtc_json, which is
# poisoned; consumers must explicitly allow it.
rtc_library("aec3_config_json") {
  visibility = [ "*" ]
  allow_poison = [ "rtc_json" ]
  sources = [
    "echo_canceller3_config_json.cc",
    "echo_canceller3_config_json.h",
  ]
  deps = [
    ":aec3_config",
    "../../rtc_base:checks",
    "../../rtc_base:rtc_base_approved",
    "../../rtc_base:rtc_json",
    "../../rtc_base/system:rtc_export",
  ]
  absl_deps = [ "//third_party/abseil-cpp/absl/strings" ]
}

# Factory that instantiates the AEC3 implementation behind the EchoControl
# interface. Built with the APM debug-dump config applied.
rtc_library("aec3_factory") {
  visibility = [ "*" ]
  configs += [ "../../modules/audio_processing:apm_debug_dump" ]
  sources = [
    "echo_canceller3_factory.cc",
    "echo_canceller3_factory.h",
  ]
  deps = [
    ":aec3_config",
    ":echo_control",
    "../../modules/audio_processing/aec3",
    "../../rtc_base:rtc_base_approved",
    "../../rtc_base/system:rtc_export",
  ]
}

# Header-only echo-control interface (echo_control.h).
rtc_source_set("echo_control") {
  visibility = [ "*" ]
  sources = [ "echo_control.h" ]
  deps = [ "../../rtc_base:checks" ]
}

# Factory function for the default residual-echo detector.
rtc_source_set("echo_detector_creator") {
  visibility = [ "*" ]
  sources = [
    "echo_detector_creator.cc",
    "echo_detector_creator.h",
  ]
  deps = [
    "../../api:scoped_refptr",
    "../../modules/audio_processing:api",
    "../../modules/audio_processing:audio_processing",
    "../../rtc_base:refcount",
  ]
}

2
third_party/libwebrtc/api/audio/OWNERS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,2 @@
gustaf@webrtc.org
peah@webrtc.org

164
third_party/libwebrtc/api/audio/audio_frame.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,164 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/audio_frame.h"
#include <string.h>
#include <algorithm>
#include <utility>
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
// Constructs a muted frame with all metadata in its default state. The
// sample buffer is left uninitialized; data() serves a zeroed static buffer
// until the frame is unmuted.
AudioFrame::AudioFrame() {
  // The assert lives here because Visual Studio doesn't like this in the
  // class definition.
  static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
// Exchanges the complete state of two frames: metadata, packet infos, mute
// flags and the in-use portion of the sample buffers.
void swap(AudioFrame& a, AudioFrame& b) {
  using std::swap;
  swap(a.timestamp_, b.timestamp_);
  swap(a.elapsed_time_ms_, b.elapsed_time_ms_);
  swap(a.ntp_time_ms_, b.ntp_time_ms_);
  swap(a.samples_per_channel_, b.samples_per_channel_);
  swap(a.sample_rate_hz_, b.sample_rate_hz_);
  swap(a.num_channels_, b.num_channels_);
  swap(a.channel_layout_, b.channel_layout_);
  swap(a.speech_type_, b.speech_type_);
  swap(a.vad_activity_, b.vad_activity_);
  swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_);
  swap(a.packet_infos_, b.packet_infos_);
  // Swapping the entire kMaxDataSizeSamples buffer would be wasteful; it is
  // enough to cover the longer of the two frames' in-use sample regions.
  const size_t samples_in_a = a.samples_per_channel_ * a.num_channels_;
  const size_t samples_in_b = b.samples_per_channel_ * b.num_channels_;
  RTC_DCHECK_LE(samples_in_a, AudioFrame::kMaxDataSizeSamples);
  RTC_DCHECK_LE(samples_in_b, AudioFrame::kMaxDataSizeSamples);
  const size_t samples_to_swap = std::max(samples_in_a, samples_in_b);
  std::swap_ranges(a.data_, a.data_ + samples_to_swap, b.data_);
  swap(a.muted_, b.muted_);
  swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_);
}
// Resets all members to their default state and marks the frame muted.
void AudioFrame::Reset() {
  ResetWithoutMuting();
  muted_ = true;
}
// Resets every member except |muted_| to its default state. Callers that are
// about to overwrite the sample buffer can use this to avoid the zero-fill
// that un-muting a muted frame would otherwise trigger in mutable_data().
void AudioFrame::ResetWithoutMuting() {
  // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
  // to an invalid value, or add a new member to indicate invalidity.
  timestamp_ = 0;
  elapsed_time_ms_ = -1;
  ntp_time_ms_ = -1;
  samples_per_channel_ = 0;
  sample_rate_hz_ = 0;
  num_channels_ = 0;
  channel_layout_ = CHANNEL_LAYOUT_NONE;
  speech_type_ = kUndefined;
  vad_activity_ = kVadUnknown;
  profile_timestamp_ms_ = 0;
  packet_infos_ = RtpPacketInfos();
  absolute_capture_timestamp_ms_ = absl::nullopt;
}
// Replaces the frame's samples and metadata in one call. A null |data|
// pointer marks the frame muted instead of copying samples.
void AudioFrame::UpdateFrame(uint32_t timestamp,
                             const int16_t* data,
                             size_t samples_per_channel,
                             int sample_rate_hz,
                             SpeechType speech_type,
                             VADActivity vad_activity,
                             size_t num_channels) {
  timestamp_ = timestamp;
  samples_per_channel_ = samples_per_channel;
  sample_rate_hz_ = sample_rate_hz;
  speech_type_ = speech_type;
  vad_activity_ = vad_activity;
  num_channels_ = num_channels;
  // Derive a layout from the channel count; when the count maps to a known
  // layout, double-check the mapping is self-consistent.
  channel_layout_ = GuessChannelLayout(num_channels);
  if (channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED) {
    RTC_DCHECK_EQ(num_channels, ChannelLayoutToChannelCount(channel_layout_));
  }
  const size_t total_samples = samples_per_channel * num_channels;
  RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
  muted_ = (data == nullptr);
  if (!muted_) {
    memcpy(data_, data, sizeof(int16_t) * total_samples);
  }
}
// Deep-copies |src| into this frame. Self-copy is a no-op. For a muted
// source only the metadata is copied; the payload buffer is left untouched.
void AudioFrame::CopyFrom(const AudioFrame& src) {
  if (this == &src)
    return;
  timestamp_ = src.timestamp_;
  elapsed_time_ms_ = src.elapsed_time_ms_;
  ntp_time_ms_ = src.ntp_time_ms_;
  packet_infos_ = src.packet_infos_;
  muted_ = src.muted();
  samples_per_channel_ = src.samples_per_channel_;
  sample_rate_hz_ = src.sample_rate_hz_;
  speech_type_ = src.speech_type_;
  vad_activity_ = src.vad_activity_;
  num_channels_ = src.num_channels_;
  channel_layout_ = src.channel_layout_;
  absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();
  const size_t total_samples = samples_per_channel_ * num_channels_;
  RTC_CHECK_LE(total_samples, kMaxDataSizeSamples);
  if (src.muted())
    return;
  memcpy(data_, src.data(), sizeof(int16_t) * total_samples);
  muted_ = false;
}
// Records the current wall-clock time; pair with ElapsedProfileTimeMs() to
// measure time spent between two points in the audio pipeline.
void AudioFrame::UpdateProfileTimeStamp() {
  profile_timestamp_ms_ = rtc::TimeMillis();
}
// Returns milliseconds elapsed since UpdateProfileTimeStamp() was last
// called, or -1 if profiling was never activated for this frame.
int64_t AudioFrame::ElapsedProfileTimeMs() const {
  return profile_timestamp_ms_ == 0 ? -1
                                    : rtc::TimeSince(profile_timestamp_ms_);
}
// Returns the sample buffer, or a shared all-zero buffer while the frame is
// muted.
const int16_t* AudioFrame::data() const {
  return muted_ ? empty_data() : data_;
}
// TODO(henrik.lundin) Can we skip zeroing the buffer?
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
int16_t* AudioFrame::mutable_data() {
  // Unmuted frames hand out the buffer as-is.
  if (!muted_)
    return data_;
  // A muted frame's buffer may contain stale samples: zero it before giving
  // out a writable pointer, and flip the frame to unmuted.
  memset(data_, 0, kMaxDataSizeBytes);
  muted_ = false;
  return data_;
}
// Marks the frame muted; data() serves zeros until new samples are written.
// Prefer AudioFrameOperations::Mute (see header comment).
void AudioFrame::Mute() {
  muted_ = true;
}
// True while the frame carries no written samples (the default state).
bool AudioFrame::muted() const {
  return muted_;
}
// static
const int16_t* AudioFrame::empty_data() {
  // Lazily allocated, zero-initialized, and intentionally never freed; this
  // single buffer backs data() for every muted frame in the process.
  static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
  return &null_data[0];
}
} // namespace webrtc

177
third_party/libwebrtc/api/audio/audio_frame.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,177 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_FRAME_H_
#define API_AUDIO_AUDIO_FRAME_H_
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/constructor_magic.h"
namespace webrtc {
/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
 * allows for adding and subtracting frames while keeping track of the resulting
 * states.
 *
 * Notes
 * - This is a de-facto api, not designed for external use. The AudioFrame class
 *   is in need of overhaul or even replacement, and anyone depending on it
 *   should be prepared for that.
 * - The total number of samples is samples_per_channel_ * num_channels_.
 * - Stereo data is interleaved starting with the left channel.
 */
class AudioFrame {
 public:
  // Using constexpr here causes linker errors unless the variable also has an
  // out-of-class definition, which is impractical in this header-only class.
  // (This makes no sense because it compiles as an enum value, which we most
  // certainly cannot take the address of, just fine.) C++17 introduces inline
  // variables which should allow us to switch to constexpr and keep this a
  // header-only class.
  enum : size_t {
    // Stereo, 32 kHz, 120 ms (2 * 32 * 120)
    // Stereo, 192 kHz, 20 ms (2 * 192 * 20)
    kMaxDataSizeSamples = 7680,
    kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t),
  };

  enum VADActivity { kVadActive = 0, kVadPassive = 1, kVadUnknown = 2 };
  enum SpeechType {
    kNormalSpeech = 0,
    kPLC = 1,
    kCNG = 2,
    kPLCCNG = 3,
    kCodecPLC = 5,
    kUndefined = 4
  };

  AudioFrame();

  friend void swap(AudioFrame& a, AudioFrame& b);

  // Resets all members to their default state.
  void Reset();
  // Same as Reset(), but leaves mute state unchanged. Muting a frame requires
  // the buffer to be zeroed on the next call to mutable_data(). Callers
  // intending to write to the buffer immediately after Reset() can instead use
  // ResetWithoutMuting() to skip this wasteful zeroing.
  void ResetWithoutMuting();

  // Replaces samples and metadata in one call; a null |data| pointer marks
  // the frame muted instead of copying.
  void UpdateFrame(uint32_t timestamp,
                   const int16_t* data,
                   size_t samples_per_channel,
                   int sample_rate_hz,
                   SpeechType speech_type,
                   VADActivity vad_activity,
                   size_t num_channels = 1);

  // Deep-copies |src| into this frame; self-copy is a no-op.
  void CopyFrom(const AudioFrame& src);

  // Sets a wall-time clock timestamp in milliseconds to be used for profiling
  // of time between two points in the audio chain.
  // Example:
  //   t0: UpdateProfileTimeStamp()
  //   t1: ElapsedProfileTimeMs() => t1 - t0 [msec]
  void UpdateProfileTimeStamp();
  // Returns the time difference between now and when UpdateProfileTimeStamp()
  // was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been
  // called.
  int64_t ElapsedProfileTimeMs() const;

  // data() returns a zeroed static buffer if the frame is muted.
  // mutable_data() always returns a non-static buffer; the first call to
  // mutable_data() zeros the non-static buffer and marks the frame unmuted.
  const int16_t* data() const;
  int16_t* mutable_data();

  // Prefer to mute frames using AudioFrameOperations::Mute.
  void Mute();
  // Frame is muted by default.
  bool muted() const;

  size_t max_16bit_samples() const { return kMaxDataSizeSamples; }
  size_t samples_per_channel() const { return samples_per_channel_; }
  size_t num_channels() const { return num_channels_; }
  ChannelLayout channel_layout() const { return channel_layout_; }
  int sample_rate_hz() const { return sample_rate_hz_; }

  void set_absolute_capture_timestamp_ms(
      int64_t absolute_capture_time_stamp_ms) {
    absolute_capture_timestamp_ms_ = absolute_capture_time_stamp_ms;
  }

  absl::optional<int64_t> absolute_capture_timestamp_ms() const {
    return absolute_capture_timestamp_ms_;
  }

  // RTP timestamp of the first sample in the AudioFrame.
  uint32_t timestamp_ = 0;
  // Time since the first frame in milliseconds.
  // -1 represents an uninitialized value.
  int64_t elapsed_time_ms_ = -1;
  // NTP time of the estimated capture time in local timebase in milliseconds.
  // -1 represents an uninitialized value.
  int64_t ntp_time_ms_ = -1;
  size_t samples_per_channel_ = 0;
  int sample_rate_hz_ = 0;
  size_t num_channels_ = 0;
  ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
  SpeechType speech_type_ = kUndefined;
  VADActivity vad_activity_ = kVadUnknown;
  // Monotonically increasing timestamp intended for profiling of audio frames.
  // Typically used for measuring elapsed time between two different points in
  // the audio path. No lock is used to save resources and we are thread safe
  // by design.
  // TODO(nisse@webrtc.org): consider using absl::optional.
  int64_t profile_timestamp_ms_ = 0;

  // Information about packets used to assemble this audio frame. This is needed
  // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's
  // MediaStreamTrack, in order to implement getContributingSources(). See:
  // https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
  //
  // TODO(bugs.webrtc.org/10757):
  //   Note that this information might not be fully accurate since we currently
  //   don't have a proper way to track it across the audio sync buffer. The
  //   sync buffer is the small sample-holding buffer located after the audio
  //   decoder and before where samples are assembled into output frames.
  //
  // |RtpPacketInfos| may also be empty if the audio samples did not come from
  // RTP packets. E.g. if the audio were locally generated by packet loss
  // concealment, comfort noise generation, etc.
  RtpPacketInfos packet_infos_;

 private:
  // A permanently zeroed out buffer to represent muted frames. This is a
  // header-only class, so the only way to avoid creating a separate empty
  // buffer per translation unit is to wrap a static in an inline function.
  static const int16_t* empty_data();

  int16_t data_[kMaxDataSizeSamples];
  bool muted_ = true;

  // Absolute capture timestamp when this audio frame was originally captured.
  // This is only valid for audio frames captured on this machine. The absolute
  // capture timestamp of a received frame is found in |packet_infos_|.
  // This timestamp MUST be based on the same clock as rtc::TimeMillis().
  absl::optional<int64_t> absolute_capture_timestamp_ms_;

  RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_FRAME_H_

Просмотреть файл

@ -13,8 +13,8 @@
#include <memory>
#include "modules/include/module_common_types.h"
#include "rtc_base/refcount.h"
#include "api/audio/audio_frame.h"
#include "rtc_base/ref_count.h"
namespace webrtc {

282
third_party/libwebrtc/api/audio/channel_layout.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,282 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/channel_layout.h"
#include <stddef.h>
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
namespace webrtc {
// Channel count for each ChannelLayout value. Indexed directly by the
// ChannelLayout enum, so the order here must match the enum exactly.
static const int kLayoutToChannels[] = {
    0,  // CHANNEL_LAYOUT_NONE
    0,  // CHANNEL_LAYOUT_UNSUPPORTED
    1,  // CHANNEL_LAYOUT_MONO
    2,  // CHANNEL_LAYOUT_STEREO
    3,  // CHANNEL_LAYOUT_2_1
    3,  // CHANNEL_LAYOUT_SURROUND
    4,  // CHANNEL_LAYOUT_4_0
    4,  // CHANNEL_LAYOUT_2_2
    4,  // CHANNEL_LAYOUT_QUAD
    5,  // CHANNEL_LAYOUT_5_0
    6,  // CHANNEL_LAYOUT_5_1
    5,  // CHANNEL_LAYOUT_5_0_BACK
    6,  // CHANNEL_LAYOUT_5_1_BACK
    7,  // CHANNEL_LAYOUT_7_0
    8,  // CHANNEL_LAYOUT_7_1
    8,  // CHANNEL_LAYOUT_7_1_WIDE
    2,  // CHANNEL_LAYOUT_STEREO_DOWNMIX
    3,  // CHANNEL_LAYOUT_2POINT1
    4,  // CHANNEL_LAYOUT_3_1
    5,  // CHANNEL_LAYOUT_4_1
    6,  // CHANNEL_LAYOUT_6_0
    6,  // CHANNEL_LAYOUT_6_0_FRONT
    6,  // CHANNEL_LAYOUT_HEXAGONAL
    7,  // CHANNEL_LAYOUT_6_1
    7,  // CHANNEL_LAYOUT_6_1_BACK
    7,  // CHANNEL_LAYOUT_6_1_FRONT
    7,  // CHANNEL_LAYOUT_7_0_FRONT
    8,  // CHANNEL_LAYOUT_7_1_WIDE_BACK
    8,  // CHANNEL_LAYOUT_OCTAGONAL
    0,  // CHANNEL_LAYOUT_DISCRETE
    3,  // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
    5,  // CHANNEL_LAYOUT_4_1_QUAD_SIDE
    0,  // CHANNEL_LAYOUT_BITSTREAM
};
// The channel orderings for each layout as specified by FFmpeg. Each value
// represents the index of each channel in each layout. Values of -1 mean the
// channel at that index is not used for that layout. For example, the left side
// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because
// the order is L, R, C, LFE, LS, RS), so
//   kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4;
// Rows are indexed by ChannelLayout, columns by the Channels enum.
static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
    // CHANNEL_LAYOUT_NONE
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_UNSUPPORTED
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_MONO
    {-1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_STEREO
    {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_2_1
    {0, 1, -1, -1, -1, -1, -1, -1, 2, -1, -1},
    // CHANNEL_LAYOUT_SURROUND
    {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_4_0
    {0, 1, 2, -1, -1, -1, -1, -1, 3, -1, -1},
    // CHANNEL_LAYOUT_2_2
    {0, 1, -1, -1, -1, -1, -1, -1, -1, 2, 3},
    // CHANNEL_LAYOUT_QUAD
    {0, 1, -1, -1, 2, 3, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_5_0
    {0, 1, 2, -1, -1, -1, -1, -1, -1, 3, 4},
    // CHANNEL_LAYOUT_5_1
    {0, 1, 2, 3, -1, -1, -1, -1, -1, 4, 5},
    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
    // CHANNEL_LAYOUT_5_0_BACK
    {0, 1, 2, -1, 3, 4, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_5_1_BACK
    {0, 1, 2, 3, 4, 5, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_7_0
    {0, 1, 2, -1, 5, 6, -1, -1, -1, 3, 4},
    // CHANNEL_LAYOUT_7_1
    {0, 1, 2, 3, 6, 7, -1, -1, -1, 4, 5},
    // CHANNEL_LAYOUT_7_1_WIDE
    {0, 1, 2, 3, -1, -1, 6, 7, -1, 4, 5},
    // CHANNEL_LAYOUT_STEREO_DOWNMIX
    {0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_2POINT1
    {0, 1, -1, 2, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_3_1
    {0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_4_1
    {0, 1, 2, 4, -1, -1, -1, -1, 3, -1, -1},
    // CHANNEL_LAYOUT_6_0
    {0, 1, 2, -1, -1, -1, -1, -1, 5, 3, 4},
    // CHANNEL_LAYOUT_6_0_FRONT
    {0, 1, -1, -1, -1, -1, 4, 5, -1, 2, 3},
    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
    // CHANNEL_LAYOUT_HEXAGONAL
    {0, 1, 2, -1, 3, 4, -1, -1, 5, -1, -1},
    // CHANNEL_LAYOUT_6_1
    {0, 1, 2, 3, -1, -1, -1, -1, 6, 4, 5},
    // CHANNEL_LAYOUT_6_1_BACK
    {0, 1, 2, 3, 4, 5, -1, -1, 6, -1, -1},
    // CHANNEL_LAYOUT_6_1_FRONT
    {0, 1, -1, 6, -1, -1, 4, 5, -1, 2, 3},
    // CHANNEL_LAYOUT_7_0_FRONT
    {0, 1, 2, -1, -1, -1, 5, 6, -1, 3, 4},
    // CHANNEL_LAYOUT_7_1_WIDE_BACK
    {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1},
    // CHANNEL_LAYOUT_OCTAGONAL
    {0, 1, 2, -1, 5, 6, -1, -1, 7, 3, 4},
    // CHANNEL_LAYOUT_DISCRETE
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC
    {0, 1, 2, -1, -1, -1, -1, -1, -1, -1, -1},
    // CHANNEL_LAYOUT_4_1_QUAD_SIDE
    {0, 1, -1, 4, -1, -1, -1, -1, -1, 2, 3},
    // CHANNEL_LAYOUT_BITSTREAM
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
};
// Returns the number of channels used by |layout| via table lookup. Debug
// builds check that |layout| indexes the table and that the result never
// exceeds kMaxConcurrentChannels.
int ChannelLayoutToChannelCount(ChannelLayout layout) {
  RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
  RTC_DCHECK_LE(kLayoutToChannels[layout], kMaxConcurrentChannels);
  return kLayoutToChannels[layout];
}
// Converts a channel count into a channel layout. Counts 1 through 8 map to
// the conventional default layout for that count; anything else is reported
// as unsupported (with a debug-only warning).
ChannelLayout GuessChannelLayout(int channels) {
  static constexpr ChannelLayout kDefaultLayouts[] = {
      CHANNEL_LAYOUT_MONO,  // 1
      CHANNEL_LAYOUT_STEREO,  // 2
      CHANNEL_LAYOUT_SURROUND,  // 3
      CHANNEL_LAYOUT_QUAD,  // 4
      CHANNEL_LAYOUT_5_0,  // 5
      CHANNEL_LAYOUT_5_1,  // 6
      CHANNEL_LAYOUT_6_1,  // 7
      CHANNEL_LAYOUT_7_1,  // 8
  };
  if (channels >= 1 && channels <= 8) {
    return kDefaultLayouts[channels - 1];
  }
  RTC_DLOG(LS_WARNING) << "Unsupported channel count: " << channels;
  return CHANNEL_LAYOUT_UNSUPPORTED;
}
// Returns the interleaved-stream position of |channel| within |layout|, or
// -1 if that channel is not part of the layout. Debug builds bounds-check
// both indices against the ordering table.
int ChannelOrder(ChannelLayout layout, Channels channel) {
  RTC_DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
  RTC_DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
  return kChannelOrderings[layout][channel];
}
// Returns a human-readable name for |layout|. Note that CHANNEL_LAYOUT_2_2
// stringifies as "QUAD_SIDE". An out-of-range value trips RTC_NOTREACHED and
// yields the empty string.
const char* ChannelLayoutToString(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      return "NONE";
    case CHANNEL_LAYOUT_UNSUPPORTED:
      return "UNSUPPORTED";
    case CHANNEL_LAYOUT_MONO:
      return "MONO";
    case CHANNEL_LAYOUT_STEREO:
      return "STEREO";
    case CHANNEL_LAYOUT_2_1:
      return "2.1";
    case CHANNEL_LAYOUT_SURROUND:
      return "SURROUND";
    case CHANNEL_LAYOUT_4_0:
      return "4.0";
    case CHANNEL_LAYOUT_2_2:
      return "QUAD_SIDE";
    case CHANNEL_LAYOUT_QUAD:
      return "QUAD";
    case CHANNEL_LAYOUT_5_0:
      return "5.0";
    case CHANNEL_LAYOUT_5_1:
      return "5.1";
    case CHANNEL_LAYOUT_5_0_BACK:
      return "5.0_BACK";
    case CHANNEL_LAYOUT_5_1_BACK:
      return "5.1_BACK";
    case CHANNEL_LAYOUT_7_0:
      return "7.0";
    case CHANNEL_LAYOUT_7_1:
      return "7.1";
    case CHANNEL_LAYOUT_7_1_WIDE:
      return "7.1_WIDE";
    case CHANNEL_LAYOUT_STEREO_DOWNMIX:
      return "STEREO_DOWNMIX";
    case CHANNEL_LAYOUT_2POINT1:
      return "2POINT1";
    case CHANNEL_LAYOUT_3_1:
      return "3.1";
    case CHANNEL_LAYOUT_4_1:
      return "4.1";
    case CHANNEL_LAYOUT_6_0:
      return "6.0";
    case CHANNEL_LAYOUT_6_0_FRONT:
      return "6.0_FRONT";
    case CHANNEL_LAYOUT_HEXAGONAL:
      return "HEXAGONAL";
    case CHANNEL_LAYOUT_6_1:
      return "6.1";
    case CHANNEL_LAYOUT_6_1_BACK:
      return "6.1_BACK";
    case CHANNEL_LAYOUT_6_1_FRONT:
      return "6.1_FRONT";
    case CHANNEL_LAYOUT_7_0_FRONT:
      return "7.0_FRONT";
    case CHANNEL_LAYOUT_7_1_WIDE_BACK:
      return "7.1_WIDE_BACK";
    case CHANNEL_LAYOUT_OCTAGONAL:
      return "OCTAGONAL";
    case CHANNEL_LAYOUT_DISCRETE:
      return "DISCRETE";
    case CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC:
      return "STEREO_AND_KEYBOARD_MIC";
    case CHANNEL_LAYOUT_4_1_QUAD_SIDE:
      return "4.1_QUAD_SIDE";
    case CHANNEL_LAYOUT_BITSTREAM:
      return "BITSTREAM";
  }
  RTC_NOTREACHED() << "Invalid channel layout provided: " << layout;
  return "";
}
} // namespace webrtc

165
third_party/libwebrtc/api/audio/channel_layout.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,165 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CHANNEL_LAYOUT_H_
#define API_AUDIO_CHANNEL_LAYOUT_H_
namespace webrtc {
// This file is derived from Chromium's base/channel_layout.h.
// Enumerates the various representations of the ordering of audio channels.
// Logged to UMA, so never reuse a value, always add new/greater ones!
enum ChannelLayout {
  CHANNEL_LAYOUT_NONE = 0,
  CHANNEL_LAYOUT_UNSUPPORTED = 1,

  // Front C
  CHANNEL_LAYOUT_MONO = 2,

  // Front L, Front R
  CHANNEL_LAYOUT_STEREO = 3,

  // Front L, Front R, Back C
  CHANNEL_LAYOUT_2_1 = 4,

  // Front L, Front R, Front C
  CHANNEL_LAYOUT_SURROUND = 5,

  // Front L, Front R, Front C, Back C
  CHANNEL_LAYOUT_4_0 = 6,

  // Front L, Front R, Side L, Side R
  CHANNEL_LAYOUT_2_2 = 7,

  // Front L, Front R, Back L, Back R
  CHANNEL_LAYOUT_QUAD = 8,

  // Front L, Front R, Front C, Side L, Side R
  CHANNEL_LAYOUT_5_0 = 9,

  // Front L, Front R, Front C, LFE, Side L, Side R
  CHANNEL_LAYOUT_5_1 = 10,

  // Front L, Front R, Front C, Back L, Back R
  CHANNEL_LAYOUT_5_0_BACK = 11,

  // Front L, Front R, Front C, LFE, Back L, Back R
  CHANNEL_LAYOUT_5_1_BACK = 12,

  // Front L, Front R, Front C, Side L, Side R, Back L, Back R
  CHANNEL_LAYOUT_7_0 = 13,

  // Front L, Front R, Front C, LFE, Side L, Side R, Back L, Back R
  CHANNEL_LAYOUT_7_1 = 14,

  // Front L, Front R, Front C, LFE, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_1_WIDE = 15,

  // Stereo L, Stereo R
  CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,

  // Stereo L, Stereo R, LFE
  CHANNEL_LAYOUT_2POINT1 = 17,

  // Stereo L, Stereo R, Front C, LFE
  CHANNEL_LAYOUT_3_1 = 18,

  // Stereo L, Stereo R, Front C, Rear C, LFE
  CHANNEL_LAYOUT_4_1 = 19,

  // Stereo L, Stereo R, Front C, Side L, Side R, Back C
  CHANNEL_LAYOUT_6_0 = 20,

  // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_6_0_FRONT = 21,

  // Stereo L, Stereo R, Front C, Rear L, Rear R, Rear C
  CHANNEL_LAYOUT_HEXAGONAL = 22,

  // Stereo L, Stereo R, Front C, LFE, Side L, Side R, Rear Center
  CHANNEL_LAYOUT_6_1 = 23,

  // Stereo L, Stereo R, Front C, LFE, Back L, Back R, Rear Center
  CHANNEL_LAYOUT_6_1_BACK = 24,

  // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
  CHANNEL_LAYOUT_6_1_FRONT = 25,

  // Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_0_FRONT = 26,

  // Front L, Front R, Front C, LFE, Back L, Back R, Front LofC, Front RofC
  CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,

  // Front L, Front R, Front C, Side L, Side R, Back L, Back R, Back C.
  CHANNEL_LAYOUT_OCTAGONAL = 28,

  // Channels are not explicitly mapped to speakers.
  CHANNEL_LAYOUT_DISCRETE = 29,

  // Front L, Front R, Front C. Front C contains the keyboard mic audio. This
  // layout is only intended for input for WebRTC. The Front C channel
  // is stripped away in the WebRTC audio input pipeline and never seen outside
  // of that.
  CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC = 30,

  // Front L, Front R, Side L, Side R, LFE
  CHANNEL_LAYOUT_4_1_QUAD_SIDE = 31,

  // Actual channel layout is specified in the bitstream and the actual channel
  // count is unknown at Chromium media pipeline level (useful for audio
  // pass-through mode).
  CHANNEL_LAYOUT_BITSTREAM = 32,

  // Max value, must always equal the largest entry ever logged.
  CHANNEL_LAYOUT_MAX = CHANNEL_LAYOUT_BITSTREAM
};
// Note: Do not reorder or reassign these values; other code depends on their
// ordering to operate correctly. E.g., CoreAudio channel layout computations.
// These name the columns of kChannelOrderings in channel_layout.cc.
enum Channels {
  LEFT = 0,
  RIGHT,
  CENTER,
  LFE,
  BACK_LEFT,
  BACK_RIGHT,
  LEFT_OF_CENTER,
  RIGHT_OF_CENTER,
  BACK_CENTER,
  SIDE_LEFT,
  SIDE_RIGHT,
  CHANNELS_MAX =
      SIDE_RIGHT,  // Must always equal the largest value ever logged.
};
// The maximum number of concurrently active channels for all possible layouts.
// ChannelLayoutToChannelCount() will never return a value higher than this.
constexpr int kMaxConcurrentChannels = 8;

// Returns the expected channel position in an interleaved stream. Values of -1
// mean the channel at that index is not used for that layout. Values range
// from 0 to ChannelLayoutToChannelCount(layout) - 1.
int ChannelOrder(ChannelLayout layout, Channels channel);

// Returns the number of channels in a given ChannelLayout.
int ChannelLayoutToChannelCount(ChannelLayout layout);

// Given the number of channels, return the best layout,
// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
ChannelLayout GuessChannelLayout(int channels);

// Returns a string representation of the channel layout.
const char* ChannelLayoutToString(ChannelLayout layout);
} // namespace webrtc
#endif // API_AUDIO_CHANNEL_LAYOUT_H_

270
third_party/libwebrtc/api/audio/echo_canceller3_config.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,270 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_config.h"
#include <algorithm>
#include <cmath>
#include "rtc_base/checks.h"
#include "rtc_base/numerics/safe_minmax.h"
namespace webrtc {
namespace {

// Clamps |*value| to [min, max]; a non-finite clamped result (inf/NaN)
// collapses to |min|. Returns true iff |*value| was already valid (i.e. is
// left unchanged). Note: a NaN input always returns false, since NaN never
// compares equal.
bool Limit(float* value, float min, float max) {
  float clamped = rtc::SafeClamp(*value, min, max);
  clamped = std::isfinite(clamped) ? clamped : min;
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

// Clamps |*value| to [min, max]. Returns true iff it was already in range.
bool Limit(size_t* value, size_t min, size_t max) {
  size_t clamped = rtc::SafeClamp(*value, min, max);
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

// Clamps |*value| to [min, max]. Returns true iff it was already in range.
bool Limit(int* value, int min, int max) {
  int clamped = rtc::SafeClamp(*value, min, max);
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

// Raises |*value| to at least |min|. Returns true iff it was already >= min.
bool FloorLimit(size_t* value, size_t min) {
  size_t clamped = *value >= min ? *value : min;
  bool res = *value == clamped;
  *value = clamped;
  return res;
}

}  // namespace
// The config and its nested structs are plain data carriers; their special
// member functions are simply defaulted out-of-line here.
EchoCanceller3Config::EchoCanceller3Config() = default;
EchoCanceller3Config::EchoCanceller3Config(const EchoCanceller3Config& e) =
    default;
EchoCanceller3Config& EchoCanceller3Config::operator=(
    const EchoCanceller3Config& e) = default;
EchoCanceller3Config::Delay::Delay() = default;
EchoCanceller3Config::Delay::Delay(const EchoCanceller3Config::Delay& e) =
    default;
EchoCanceller3Config::Delay& EchoCanceller3Config::Delay::operator=(
    const Delay& e) = default;
EchoCanceller3Config::EchoModel::EchoModel() = default;
EchoCanceller3Config::EchoModel::EchoModel(
    const EchoCanceller3Config::EchoModel& e) = default;
EchoCanceller3Config::EchoModel& EchoCanceller3Config::EchoModel::operator=(
    const EchoModel& e) = default;
EchoCanceller3Config::Suppressor::Suppressor() = default;
EchoCanceller3Config::Suppressor::Suppressor(
    const EchoCanceller3Config::Suppressor& e) = default;
EchoCanceller3Config::Suppressor& EchoCanceller3Config::Suppressor::operator=(
    const Suppressor& e) = default;

// MaskingThresholds and Tuning take their members by value in declaration
// order.
EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
    float enr_transparent,
    float enr_suppress,
    float emr_transparent)
    : enr_transparent(enr_transparent),
      enr_suppress(enr_suppress),
      emr_transparent(emr_transparent) {}
EchoCanceller3Config::Suppressor::MaskingThresholds::MaskingThresholds(
    const EchoCanceller3Config::Suppressor::MaskingThresholds& e) = default;
EchoCanceller3Config::Suppressor::MaskingThresholds&
EchoCanceller3Config::Suppressor::MaskingThresholds::operator=(
    const MaskingThresholds& e) = default;
EchoCanceller3Config::Suppressor::Tuning::Tuning(MaskingThresholds mask_lf,
                                                 MaskingThresholds mask_hf,
                                                 float max_inc_factor,
                                                 float max_dec_factor_lf)
    : mask_lf(mask_lf),
      mask_hf(mask_hf),
      max_inc_factor(max_inc_factor),
      max_dec_factor_lf(max_dec_factor_lf) {}
EchoCanceller3Config::Suppressor::Tuning::Tuning(
    const EchoCanceller3Config::Suppressor::Tuning& e) = default;
EchoCanceller3Config::Suppressor::Tuning&
EchoCanceller3Config::Suppressor::Tuning::operator=(const Tuning& e) = default;
// Sanitizes `config` in place and reports whether it was already valid.
// Each field is passed through Limit()/FloorLimit() (file-local helpers
// defined earlier in this file, which appear to clamp the value into the
// given range and return whether it was already in range — confirm against
// their definitions), and a few cross-field constraints are enforced
// (initial filter lengths may not exceed the converged ones; erle.min may
// not exceed either erle max).
// NOTE: bitwise & is used instead of the short-circuiting && throughout so
// that every check runs and every field gets sanitized even after an earlier
// check has already failed.
bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) {
  RTC_DCHECK(config);
  EchoCanceller3Config* c = config;
  bool res = true;
  // Delay estimation parameters. Only down-sampling factors 4 and 8 are
  // accepted; anything else is reset to 4.
  if (c->delay.down_sampling_factor != 4 &&
      c->delay.down_sampling_factor != 8) {
    c->delay.down_sampling_factor = 4;
    res = false;
  }
  res = res & Limit(&c->delay.default_delay, 0, 5000);
  res = res & Limit(&c->delay.num_filters, 0, 5000);
  res = res & Limit(&c->delay.delay_headroom_samples, 0, 5000);
  res = res & Limit(&c->delay.hysteresis_limit_blocks, 0, 5000);
  res = res & Limit(&c->delay.fixed_capture_delay_samples, 0, 5000);
  res = res & Limit(&c->delay.delay_estimate_smoothing, 0.f, 1.f);
  res = res & Limit(&c->delay.delay_candidate_detection_threshold, 0.f, 1.f);
  res = res & Limit(&c->delay.delay_selection_thresholds.initial, 1, 250);
  res = res & Limit(&c->delay.delay_selection_thresholds.converged, 1, 250);
  // Refined filter parameters.
  res = res & FloorLimit(&c->filter.refined.length_blocks, 1);
  res = res & Limit(&c->filter.refined.leakage_converged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.leakage_diverged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.error_floor, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined.error_ceil, 0.f, 100000000.f);
  res = res & Limit(&c->filter.refined.noise_gate, 0.f, 100000000.f);
  res = res & FloorLimit(&c->filter.refined_initial.length_blocks, 1);
  res = res & Limit(&c->filter.refined_initial.leakage_converged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.leakage_diverged, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.error_floor, 0.f, 1000.f);
  res = res & Limit(&c->filter.refined_initial.error_ceil, 0.f, 100000000.f);
  res = res & Limit(&c->filter.refined_initial.noise_gate, 0.f, 100000000.f);
  // The initial refined filter may not be longer than the converged one.
  if (c->filter.refined.length_blocks <
      c->filter.refined_initial.length_blocks) {
    c->filter.refined_initial.length_blocks = c->filter.refined.length_blocks;
    res = false;
  }
  // Coarse filter parameters.
  res = res & FloorLimit(&c->filter.coarse.length_blocks, 1);
  res = res & Limit(&c->filter.coarse.rate, 0.f, 1.f);
  res = res & Limit(&c->filter.coarse.noise_gate, 0.f, 100000000.f);
  res = res & FloorLimit(&c->filter.coarse_initial.length_blocks, 1);
  res = res & Limit(&c->filter.coarse_initial.rate, 0.f, 1.f);
  res = res & Limit(&c->filter.coarse_initial.noise_gate, 0.f, 100000000.f);
  // The initial coarse filter may not be longer than the converged one.
  if (c->filter.coarse.length_blocks < c->filter.coarse_initial.length_blocks) {
    c->filter.coarse_initial.length_blocks = c->filter.coarse.length_blocks;
    res = false;
  }
  res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000);
  res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f);
  // ERLE estimator parameters; min must not exceed either max.
  res = res & Limit(&c->erle.min, 1.f, 100000.f);
  res = res & Limit(&c->erle.max_l, 1.f, 100000.f);
  res = res & Limit(&c->erle.max_h, 1.f, 100000.f);
  if (c->erle.min > c->erle.max_l || c->erle.min > c->erle.max_h) {
    c->erle.min = std::min(c->erle.max_l, c->erle.max_h);
    res = false;
  }
  res = res & Limit(&c->erle.num_sections, 1, c->filter.refined.length_blocks);
  res = res & Limit(&c->ep_strength.default_gain, 0.f, 1000000.f);
  res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
  // Power-like quantities below are capped at 32768.f * 32768.f (presumably
  // full-scale 16-bit audio, squared).
  res =
      res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
  res = res &
        Limit(&c->echo_audibility.normal_render_limit, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.floor_power, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_lf, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_mf, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->echo_audibility.audibility_threshold_hf, 0.f,
                    32768.f * 32768.f);
  res = res &
        Limit(&c->render_levels.active_render_limit, 0.f, 32768.f * 32768.f);
  res = res & Limit(&c->render_levels.poor_excitation_render_limit, 0.f,
                    32768.f * 32768.f);
  res = res & Limit(&c->render_levels.poor_excitation_render_limit_ds8, 0.f,
                    32768.f * 32768.f);
  // Echo model parameters.
  res = res & Limit(&c->echo_model.noise_floor_hold, 0, 1000);
  res = res & Limit(&c->echo_model.min_noise_floor_power, 0, 2000000.f);
  res = res & Limit(&c->echo_model.stationary_gate_slope, 0, 1000000.f);
  res = res & Limit(&c->echo_model.noise_gate_power, 0, 1000000.f);
  res = res & Limit(&c->echo_model.noise_gate_slope, 0, 1000000.f);
  res = res & Limit(&c->echo_model.render_pre_window_size, 0, 100);
  res = res & Limit(&c->echo_model.render_post_window_size, 0, 100);
  res = res & Limit(&c->comfort_noise.noise_floor_dbfs, -200.f, 0.f);
  // Suppressor parameters.
  res = res & Limit(&c->suppressor.nearend_average_blocks, 1, 5000);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.enr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.enr_suppress, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_lf.emr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.enr_transparent, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.enr_suppress, 0.f, 100.f);
  res = res &
        Limit(&c->suppressor.normal_tuning.mask_hf.emr_transparent, 0.f, 100.f);
  res = res & Limit(&c->suppressor.normal_tuning.max_inc_factor, 0.f, 100.f);
  res = res & Limit(&c->suppressor.normal_tuning.max_dec_factor_lf, 0.f, 100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.enr_transparent, 0.f,
                    100.f);
  res = res &
        Limit(&c->suppressor.nearend_tuning.mask_lf.enr_suppress, 0.f, 100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_lf.emr_transparent, 0.f,
                    100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.enr_transparent, 0.f,
                    100.f);
  res = res &
        Limit(&c->suppressor.nearend_tuning.mask_hf.enr_suppress, 0.f, 100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.mask_hf.emr_transparent, 0.f,
                    100.f);
  res = res & Limit(&c->suppressor.nearend_tuning.max_inc_factor, 0.f, 100.f);
  res =
      res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold,
                    0.f, 1000000.f);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.snr_threshold,
                    0.f, 1000000.f);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.hold_duration, 0,
                    10000);
  res = res & Limit(&c->suppressor.dominant_nearend_detection.trigger_threshold,
                    0, 10000);
  // Note: each subband's `high` bound is limited from below by its own `low`.
  res = res &
        Limit(&c->suppressor.subband_nearend_detection.nearend_average_blocks,
              1, 1024);
  res =
      res & Limit(&c->suppressor.subband_nearend_detection.subband1.low, 0, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.subband1.high,
                    c->suppressor.subband_nearend_detection.subband1.low, 65);
  res =
      res & Limit(&c->suppressor.subband_nearend_detection.subband2.low, 0, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.subband2.high,
                    c->suppressor.subband_nearend_detection.subband2.low, 65);
  res = res & Limit(&c->suppressor.subband_nearend_detection.nearend_threshold,
                    0.f, 1.e24f);
  res = res & Limit(&c->suppressor.subband_nearend_detection.snr_threshold, 0.f,
                    1.e24f);
  res = res & Limit(&c->suppressor.high_bands_suppression.enr_threshold, 0.f,
                    1000000.f);
  res = res & Limit(&c->suppressor.high_bands_suppression.max_gain_during_echo,
                    0.f, 1.f);
  res = res & Limit(&c->suppressor.high_bands_suppression
                         .anti_howling_activation_threshold,
                    0.f, 32768.f * 32768.f);
  res = res & Limit(&c->suppressor.high_bands_suppression.anti_howling_gain,
                    0.f, 1.f);
  res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f);
  return res;
}
} // namespace webrtc

227
third_party/libwebrtc/api/audio/echo_canceller3_config.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,227 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
#define API_AUDIO_ECHO_CANCELLER3_CONFIG_H_
#include <stddef.h> // size_t
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Configuration struct for EchoCanceller3.
// All defaults are given as in-class member initializers; Validate() (defined
// in echo_canceller3_config.cc) clamps every field into its accepted range.
// Nested structs that declare explicit constructors/assignment operators do
// so only so that the definitions live in the .cc file; they behave exactly
// like the compiler defaults.
struct RTC_EXPORT EchoCanceller3Config {
  // Checks and updates the config parameters to lie within (mostly) reasonable
  // ranges. Returns true if and only if the config did not need to be changed.
  static bool Validate(EchoCanceller3Config* config);
  EchoCanceller3Config();
  EchoCanceller3Config(const EchoCanceller3Config& e);
  EchoCanceller3Config& operator=(const EchoCanceller3Config& other);
  // Render buffering parameters.
  struct Buffering {
    size_t excess_render_detection_interval_blocks = 250;
    size_t max_allowed_excess_render_blocks = 8;
  } buffering;
  // Delay estimation parameters.
  struct Delay {
    Delay();
    Delay(const Delay& e);
    Delay& operator=(const Delay& e);
    size_t default_delay = 5;
    size_t down_sampling_factor = 4;  // Validate() accepts only 4 or 8.
    size_t num_filters = 5;
    size_t delay_headroom_samples = 32;
    size_t hysteresis_limit_blocks = 1;
    size_t fixed_capture_delay_samples = 0;
    float delay_estimate_smoothing = 0.7f;
    float delay_candidate_detection_threshold = 0.2f;
    struct DelaySelectionThresholds {
      int initial;
      int converged;
    } delay_selection_thresholds = {5, 20};
    bool use_external_delay_estimator = false;
    bool log_warning_on_delay_changes = false;
    // How render/capture channels are mixed down for delay alignment.
    struct AlignmentMixing {
      bool downmix;
      bool adaptive_selection;
      float activity_power_threshold;
      bool prefer_first_two_channels;
    };
    AlignmentMixing render_alignment_mixing = {false, true, 10000.f, true};
    AlignmentMixing capture_alignment_mixing = {false, true, 10000.f, false};
  } delay;
  // Linear filter parameters (refined and coarse adaptive filters, each with
  // a converged and an initial configuration).
  struct Filter {
    struct RefinedConfiguration {
      size_t length_blocks;
      float leakage_converged;
      float leakage_diverged;
      float error_floor;
      float error_ceil;
      float noise_gate;
    };
    struct CoarseConfiguration {
      size_t length_blocks;
      float rate;
      float noise_gate;
    };
    RefinedConfiguration refined = {13, 0.00005f, 0.05f,
                                    0.001f, 2.f, 20075344.f};
    CoarseConfiguration coarse = {13, 0.7f, 20075344.f};
    RefinedConfiguration refined_initial = {12, 0.005f, 0.5f,
                                            0.001f, 2.f, 20075344.f};
    CoarseConfiguration coarse_initial = {12, 0.9f, 20075344.f};
    size_t config_change_duration_blocks = 250;
    float initial_state_seconds = 2.5f;
    bool conservative_initial_phase = false;
    bool enable_coarse_filter_output_usage = true;
    bool use_linear_filter = true;
    bool export_linear_aec_output = false;
  } filter;
  // ERLE estimator parameters (Validate() enforces min <= max_l and
  // min <= max_h).
  struct Erle {
    float min = 1.f;
    float max_l = 4.f;
    float max_h = 1.5f;
    bool onset_detection = true;
    size_t num_sections = 1;
    bool clamp_quality_estimate_to_zero = true;
    bool clamp_quality_estimate_to_one = true;
  } erle;
  struct EpStrength {
    float default_gain = 1.f;
    float default_len = 0.83f;
    bool echo_can_saturate = true;
    bool bounded_erl = false;
  } ep_strength;
  struct EchoAudibility {
    float low_render_limit = 4 * 64.f;
    float normal_render_limit = 64.f;
    float floor_power = 2 * 64.f;
    float audibility_threshold_lf = 10;
    float audibility_threshold_mf = 10;
    float audibility_threshold_hf = 10;
    bool use_stationarity_properties = false;
    bool use_stationarity_properties_at_init = false;
  } echo_audibility;
  struct RenderLevels {
    float active_render_limit = 100.f;
    float poor_excitation_render_limit = 150.f;
    float poor_excitation_render_limit_ds8 = 20.f;
    float render_power_gain_db = 0.f;
  } render_levels;
  struct EchoRemovalControl {
    bool has_clock_drift = false;
    bool linear_and_stable_echo_path = false;
  } echo_removal_control;
  struct EchoModel {
    EchoModel();
    EchoModel(const EchoModel& e);
    EchoModel& operator=(const EchoModel& e);
    size_t noise_floor_hold = 50;
    float min_noise_floor_power = 1638400.f;
    float stationary_gate_slope = 10.f;
    float noise_gate_power = 27509.42f;
    float noise_gate_slope = 0.3f;
    size_t render_pre_window_size = 1;
    size_t render_post_window_size = 1;
  } echo_model;
  struct ComfortNoise {
    float noise_floor_dbfs = -96.03406f;
  } comfort_noise;
  // Echo suppressor parameters.
  struct Suppressor {
    Suppressor();
    Suppressor(const Suppressor& e);
    Suppressor& operator=(const Suppressor& e);
    size_t nearend_average_blocks = 4;
    struct MaskingThresholds {
      MaskingThresholds(float enr_transparent,
                        float enr_suppress,
                        float emr_transparent);
      MaskingThresholds(const MaskingThresholds& e);
      MaskingThresholds& operator=(const MaskingThresholds& e);
      float enr_transparent;
      float enr_suppress;
      float emr_transparent;
    };
    struct Tuning {
      Tuning(MaskingThresholds mask_lf,
             MaskingThresholds mask_hf,
             float max_inc_factor,
             float max_dec_factor_lf);
      Tuning(const Tuning& e);
      Tuning& operator=(const Tuning& e);
      MaskingThresholds mask_lf;
      MaskingThresholds mask_hf;
      float max_inc_factor;
      float max_dec_factor_lf;
    };
    Tuning normal_tuning = Tuning(MaskingThresholds(.3f, .4f, .3f),
                                  MaskingThresholds(.07f, .1f, .3f),
                                  2.0f,
                                  0.25f);
    Tuning nearend_tuning = Tuning(MaskingThresholds(1.09f, 1.1f, .3f),
                                   MaskingThresholds(.1f, .3f, .3f),
                                   2.0f,
                                   0.25f);
    struct DominantNearendDetection {
      float enr_threshold = .25f;
      float enr_exit_threshold = 10.f;
      float snr_threshold = 30.f;
      int hold_duration = 50;
      int trigger_threshold = 12;
      bool use_during_initial_phase = true;
    } dominant_nearend_detection;
    struct SubbandNearendDetection {
      size_t nearend_average_blocks = 1;
      struct SubbandRegion {
        size_t low;
        size_t high;
      };
      SubbandRegion subband1 = {1, 1};
      SubbandRegion subband2 = {1, 1};
      float nearend_threshold = 1.f;
      float snr_threshold = 1.f;
    } subband_nearend_detection;
    bool use_subband_nearend_detection = false;
    struct HighBandsSuppression {
      float enr_threshold = 1.f;
      float max_gain_during_echo = 1.f;
      float anti_howling_activation_threshold = 25.f;
      float anti_howling_gain = 0.01f;
    } high_bands_suppression;
    float floor_first_increase = 0.00001f;
  } suppressor;
};
} // namespace webrtc
#endif // API_AUDIO_ECHO_CANCELLER3_CONFIG_H_

682
third_party/libwebrtc/api/audio/echo_canceller3_config_json.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,682 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_config_json.h"
#include <stddef.h>
#include <string>
#include <vector>
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/json.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Overwrites `*param` with the boolean JSON field `param_name` if present;
// otherwise `*param` keeps its current value.
void ReadParam(const Json::Value& root, std::string param_name, bool* param) {
  RTC_DCHECK(param);
  bool parsed = false;
  if (!rtc::GetBoolFromJsonObject(root, param_name, &parsed)) {
    return;
  }
  *param = parsed;
}
// Overwrites `*param` with the integer JSON field `param_name` if it is
// present and non-negative; otherwise `*param` keeps its current value.
void ReadParam(const Json::Value& root, std::string param_name, size_t* param) {
  RTC_DCHECK(param);
  int parsed = 0;
  if (!rtc::GetIntFromJsonObject(root, param_name, &parsed) || parsed < 0) {
    return;
  }
  *param = parsed;
}
// Overwrites `*param` with the integer JSON field `param_name` if present;
// otherwise `*param` keeps its current value.
void ReadParam(const Json::Value& root, std::string param_name, int* param) {
  RTC_DCHECK(param);
  int parsed = 0;
  if (!rtc::GetIntFromJsonObject(root, param_name, &parsed)) {
    return;
  }
  *param = parsed;
}
// Overwrites `*param` with the numeric JSON field `param_name` (parsed as
// double, narrowed to float) if present; otherwise it keeps its value.
void ReadParam(const Json::Value& root, std::string param_name, float* param) {
  RTC_DCHECK(param);
  double parsed = 0.0;
  if (!rtc::GetDoubleFromJsonObject(root, param_name, &parsed)) {
    return;
  }
  *param = static_cast<float>(parsed);
}
// Parses a 6-element JSON array into a refined filter configuration:
// [length_blocks, leakage_converged, leakage_diverged, error_floor,
//  error_ceil, noise_gate]. Logs and leaves `*param` untouched if the array
// is missing or has the wrong size.
void ReadParam(const Json::Value& root,
               std::string param_name,
               EchoCanceller3Config::Filter::RefinedConfiguration* param) {
  RTC_DCHECK(param);
  Json::Value json_array;
  if (!rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
    return;
  }
  std::vector<double> values;
  rtc::JsonArrayToDoubleVector(json_array, &values);
  if (values.size() != 6) {
    RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
    return;
  }
  param->length_blocks = static_cast<size_t>(values[0]);
  param->leakage_converged = static_cast<float>(values[1]);
  param->leakage_diverged = static_cast<float>(values[2]);
  param->error_floor = static_cast<float>(values[3]);
  param->error_ceil = static_cast<float>(values[4]);
  param->noise_gate = static_cast<float>(values[5]);
}
// Parses a 3-element JSON array into a coarse filter configuration:
// [length_blocks, rate, noise_gate]. Logs and leaves `*param` untouched if
// the array is missing or has the wrong size.
void ReadParam(const Json::Value& root,
               std::string param_name,
               EchoCanceller3Config::Filter::CoarseConfiguration* param) {
  RTC_DCHECK(param);
  Json::Value json_array;
  if (!rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
    return;
  }
  std::vector<double> values;
  rtc::JsonArrayToDoubleVector(json_array, &values);
  if (values.size() != 3) {
    RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
    return;
  }
  param->length_blocks = static_cast<size_t>(values[0]);
  param->rate = static_cast<float>(values[1]);
  param->noise_gate = static_cast<float>(values[2]);
}
// Parses the JSON object `param_name` into an AlignmentMixing sub-config,
// delegating each member to the scalar ReadParam overloads (so absent
// members keep their current values).
void ReadParam(const Json::Value& root,
               std::string param_name,
               EchoCanceller3Config::Delay::AlignmentMixing* param) {
  RTC_DCHECK(param);
  Json::Value mixing_section;
  if (!rtc::GetValueFromJsonObject(root, param_name, &mixing_section)) {
    return;
  }
  ReadParam(mixing_section, "downmix", &param->downmix);
  ReadParam(mixing_section, "adaptive_selection", &param->adaptive_selection);
  ReadParam(mixing_section, "activity_power_threshold",
            &param->activity_power_threshold);
  ReadParam(mixing_section, "prefer_first_two_channels",
            &param->prefer_first_two_channels);
}
// Parses a 2-element JSON array [low, high] into a subband region. Logs and
// leaves `*param` untouched if the array is missing or has the wrong size.
void ReadParam(
    const Json::Value& root,
    std::string param_name,
    EchoCanceller3Config::Suppressor::SubbandNearendDetection::SubbandRegion*
        param) {
  RTC_DCHECK(param);
  Json::Value json_array;
  if (!rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
    return;
  }
  std::vector<int> values;
  rtc::JsonArrayToIntVector(json_array, &values);
  if (values.size() != 2) {
    RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
    return;
  }
  param->low = static_cast<size_t>(values[0]);
  param->high = static_cast<size_t>(values[1]);
}
// Parses a 3-element JSON array [enr_transparent, enr_suppress,
// emr_transparent] into the masking thresholds. Logs and leaves `*param`
// untouched if the array is missing or has the wrong size.
void ReadParam(const Json::Value& root,
               std::string param_name,
               EchoCanceller3Config::Suppressor::MaskingThresholds* param) {
  RTC_DCHECK(param);
  Json::Value json_array;
  if (!rtc::GetValueFromJsonObject(root, param_name, &json_array)) {
    return;
  }
  std::vector<double> values;
  rtc::JsonArrayToDoubleVector(json_array, &values);
  if (values.size() != 3) {
    RTC_LOG(LS_ERROR) << "Incorrect array size for " << param_name;
    return;
  }
  param->enr_transparent = static_cast<float>(values[0]);
  param->enr_suppress = static_cast<float>(values[1]);
  param->emr_transparent = static_cast<float>(values[2]);
}
} // namespace
// Parses the "aec3" object inside `json_string` into `*config`.
// The config is first reset to its defaults; fields that are missing from the
// JSON (or fail to parse) silently keep their default values, since each
// ReadParam overload leaves its output untouched on failure.
// `*parsing_successful` is set to false only when the JSON itself is
// malformed or the top-level "aec3" key is missing; in that case `*config` is
// left default-constructed.
void Aec3ConfigFromJsonString(absl::string_view json_string,
                              EchoCanceller3Config* config,
                              bool* parsing_successful) {
  RTC_DCHECK(config);
  RTC_DCHECK(parsing_successful);
  EchoCanceller3Config& cfg = *config;
  cfg = EchoCanceller3Config();  // Start from the default configuration.
  *parsing_successful = true;
  Json::Value root;
  bool success = Json::Reader().parse(std::string(json_string), root);
  if (!success) {
    RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << json_string;
    *parsing_successful = false;
    return;
  }
  Json::Value aec3_root;
  success = rtc::GetValueFromJsonObject(root, "aec3", &aec3_root);
  if (!success) {
    RTC_LOG(LS_ERROR) << "Missing AEC3 config field: " << json_string;
    *parsing_successful = false;
    return;
  }
  // "buffering" section.
  Json::Value section;
  if (rtc::GetValueFromJsonObject(aec3_root, "buffering", &section)) {
    ReadParam(section, "excess_render_detection_interval_blocks",
              &cfg.buffering.excess_render_detection_interval_blocks);
    ReadParam(section, "max_allowed_excess_render_blocks",
              &cfg.buffering.max_allowed_excess_render_blocks);
  }
  // "delay" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "delay", &section)) {
    ReadParam(section, "default_delay", &cfg.delay.default_delay);
    ReadParam(section, "down_sampling_factor", &cfg.delay.down_sampling_factor);
    ReadParam(section, "num_filters", &cfg.delay.num_filters);
    ReadParam(section, "delay_headroom_samples",
              &cfg.delay.delay_headroom_samples);
    ReadParam(section, "hysteresis_limit_blocks",
              &cfg.delay.hysteresis_limit_blocks);
    ReadParam(section, "fixed_capture_delay_samples",
              &cfg.delay.fixed_capture_delay_samples);
    ReadParam(section, "delay_estimate_smoothing",
              &cfg.delay.delay_estimate_smoothing);
    ReadParam(section, "delay_candidate_detection_threshold",
              &cfg.delay.delay_candidate_detection_threshold);
    Json::Value subsection;
    if (rtc::GetValueFromJsonObject(section, "delay_selection_thresholds",
                                    &subsection)) {
      ReadParam(subsection, "initial",
                &cfg.delay.delay_selection_thresholds.initial);
      ReadParam(subsection, "converged",
                &cfg.delay.delay_selection_thresholds.converged);
    }
    ReadParam(section, "use_external_delay_estimator",
              &cfg.delay.use_external_delay_estimator);
    ReadParam(section, "log_warning_on_delay_changes",
              &cfg.delay.log_warning_on_delay_changes);
    ReadParam(section, "render_alignment_mixing",
              &cfg.delay.render_alignment_mixing);
    ReadParam(section, "capture_alignment_mixing",
              &cfg.delay.capture_alignment_mixing);
  }
  // "filter" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "filter", &section)) {
    ReadParam(section, "refined", &cfg.filter.refined);
    ReadParam(section, "coarse", &cfg.filter.coarse);
    ReadParam(section, "refined_initial", &cfg.filter.refined_initial);
    ReadParam(section, "coarse_initial", &cfg.filter.coarse_initial);
    ReadParam(section, "config_change_duration_blocks",
              &cfg.filter.config_change_duration_blocks);
    ReadParam(section, "initial_state_seconds",
              &cfg.filter.initial_state_seconds);
    ReadParam(section, "conservative_initial_phase",
              &cfg.filter.conservative_initial_phase);
    ReadParam(section, "enable_coarse_filter_output_usage",
              &cfg.filter.enable_coarse_filter_output_usage);
    ReadParam(section, "use_linear_filter", &cfg.filter.use_linear_filter);
    ReadParam(section, "export_linear_aec_output",
              &cfg.filter.export_linear_aec_output);
  }
  // "erle" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "erle", &section)) {
    ReadParam(section, "min", &cfg.erle.min);
    ReadParam(section, "max_l", &cfg.erle.max_l);
    ReadParam(section, "max_h", &cfg.erle.max_h);
    ReadParam(section, "onset_detection", &cfg.erle.onset_detection);
    ReadParam(section, "num_sections", &cfg.erle.num_sections);
    ReadParam(section, "clamp_quality_estimate_to_zero",
              &cfg.erle.clamp_quality_estimate_to_zero);
    ReadParam(section, "clamp_quality_estimate_to_one",
              &cfg.erle.clamp_quality_estimate_to_one);
  }
  // "ep_strength" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", &section)) {
    ReadParam(section, "default_gain", &cfg.ep_strength.default_gain);
    ReadParam(section, "default_len", &cfg.ep_strength.default_len);
    ReadParam(section, "echo_can_saturate", &cfg.ep_strength.echo_can_saturate);
    ReadParam(section, "bounded_erl", &cfg.ep_strength.bounded_erl);
  }
  // "echo_audibility" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "echo_audibility", &section)) {
    ReadParam(section, "low_render_limit",
              &cfg.echo_audibility.low_render_limit);
    ReadParam(section, "normal_render_limit",
              &cfg.echo_audibility.normal_render_limit);
    ReadParam(section, "floor_power", &cfg.echo_audibility.floor_power);
    ReadParam(section, "audibility_threshold_lf",
              &cfg.echo_audibility.audibility_threshold_lf);
    ReadParam(section, "audibility_threshold_mf",
              &cfg.echo_audibility.audibility_threshold_mf);
    ReadParam(section, "audibility_threshold_hf",
              &cfg.echo_audibility.audibility_threshold_hf);
    ReadParam(section, "use_stationarity_properties",
              &cfg.echo_audibility.use_stationarity_properties);
    ReadParam(section, "use_stationarity_properties_at_init",
              &cfg.echo_audibility.use_stationarity_properties_at_init);
  }
  // "render_levels" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "render_levels", &section)) {
    ReadParam(section, "active_render_limit",
              &cfg.render_levels.active_render_limit);
    ReadParam(section, "poor_excitation_render_limit",
              &cfg.render_levels.poor_excitation_render_limit);
    ReadParam(section, "poor_excitation_render_limit_ds8",
              &cfg.render_levels.poor_excitation_render_limit_ds8);
    ReadParam(section, "render_power_gain_db",
              &cfg.render_levels.render_power_gain_db);
  }
  // "echo_removal_control" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "echo_removal_control",
                                  &section)) {
    ReadParam(section, "has_clock_drift",
              &cfg.echo_removal_control.has_clock_drift);
    ReadParam(section, "linear_and_stable_echo_path",
              &cfg.echo_removal_control.linear_and_stable_echo_path);
  }
  // "echo_model" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "echo_model", &section)) {
    Json::Value subsection;
    ReadParam(section, "noise_floor_hold", &cfg.echo_model.noise_floor_hold);
    ReadParam(section, "min_noise_floor_power",
              &cfg.echo_model.min_noise_floor_power);
    ReadParam(section, "stationary_gate_slope",
              &cfg.echo_model.stationary_gate_slope);
    ReadParam(section, "noise_gate_power", &cfg.echo_model.noise_gate_power);
    ReadParam(section, "noise_gate_slope", &cfg.echo_model.noise_gate_slope);
    ReadParam(section, "render_pre_window_size",
              &cfg.echo_model.render_pre_window_size);
    ReadParam(section, "render_post_window_size",
              &cfg.echo_model.render_post_window_size);
  }
  // "comfort_noise" section.
  if (rtc::GetValueFromJsonObject(aec3_root, "comfort_noise", &section)) {
    ReadParam(section, "noise_floor_dbfs", &cfg.comfort_noise.noise_floor_dbfs);
  }
  // "suppressor" section (with nested tuning/detection subsections).
  Json::Value subsection;
  if (rtc::GetValueFromJsonObject(aec3_root, "suppressor", &section)) {
    ReadParam(section, "nearend_average_blocks",
              &cfg.suppressor.nearend_average_blocks);
    if (rtc::GetValueFromJsonObject(section, "normal_tuning", &subsection)) {
      ReadParam(subsection, "mask_lf", &cfg.suppressor.normal_tuning.mask_lf);
      ReadParam(subsection, "mask_hf", &cfg.suppressor.normal_tuning.mask_hf);
      ReadParam(subsection, "max_inc_factor",
                &cfg.suppressor.normal_tuning.max_inc_factor);
      ReadParam(subsection, "max_dec_factor_lf",
                &cfg.suppressor.normal_tuning.max_dec_factor_lf);
    }
    if (rtc::GetValueFromJsonObject(section, "nearend_tuning", &subsection)) {
      ReadParam(subsection, "mask_lf", &cfg.suppressor.nearend_tuning.mask_lf);
      ReadParam(subsection, "mask_hf", &cfg.suppressor.nearend_tuning.mask_hf);
      ReadParam(subsection, "max_inc_factor",
                &cfg.suppressor.nearend_tuning.max_inc_factor);
      ReadParam(subsection, "max_dec_factor_lf",
                &cfg.suppressor.nearend_tuning.max_dec_factor_lf);
    }
    if (rtc::GetValueFromJsonObject(section, "dominant_nearend_detection",
                                    &subsection)) {
      ReadParam(subsection, "enr_threshold",
                &cfg.suppressor.dominant_nearend_detection.enr_threshold);
      ReadParam(subsection, "enr_exit_threshold",
                &cfg.suppressor.dominant_nearend_detection.enr_exit_threshold);
      ReadParam(subsection, "snr_threshold",
                &cfg.suppressor.dominant_nearend_detection.snr_threshold);
      ReadParam(subsection, "hold_duration",
                &cfg.suppressor.dominant_nearend_detection.hold_duration);
      ReadParam(subsection, "trigger_threshold",
                &cfg.suppressor.dominant_nearend_detection.trigger_threshold);
      ReadParam(
          subsection, "use_during_initial_phase",
          &cfg.suppressor.dominant_nearend_detection.use_during_initial_phase);
    }
    if (rtc::GetValueFromJsonObject(section, "subband_nearend_detection",
                                    &subsection)) {
      ReadParam(
          subsection, "nearend_average_blocks",
          &cfg.suppressor.subband_nearend_detection.nearend_average_blocks);
      ReadParam(subsection, "subband1",
                &cfg.suppressor.subband_nearend_detection.subband1);
      ReadParam(subsection, "subband2",
                &cfg.suppressor.subband_nearend_detection.subband2);
      ReadParam(subsection, "nearend_threshold",
                &cfg.suppressor.subband_nearend_detection.nearend_threshold);
      ReadParam(subsection, "snr_threshold",
                &cfg.suppressor.subband_nearend_detection.snr_threshold);
    }
    ReadParam(section, "use_subband_nearend_detection",
              &cfg.suppressor.use_subband_nearend_detection);
    if (rtc::GetValueFromJsonObject(section, "high_bands_suppression",
                                    &subsection)) {
      ReadParam(subsection, "enr_threshold",
                &cfg.suppressor.high_bands_suppression.enr_threshold);
      ReadParam(subsection, "max_gain_during_echo",
                &cfg.suppressor.high_bands_suppression.max_gain_during_echo);
      ReadParam(subsection, "anti_howling_activation_threshold",
                &cfg.suppressor.high_bands_suppression
                     .anti_howling_activation_threshold);
      ReadParam(subsection, "anti_howling_gain",
                &cfg.suppressor.high_bands_suppression.anti_howling_gain);
    }
    ReadParam(section, "floor_first_increase",
              &cfg.suppressor.floor_first_increase);
  }
}
// Convenience overload that discards the parsing-success flag: unparsable
// input yields a default-constructed config.
EchoCanceller3Config Aec3ConfigFromJsonString(absl::string_view json_string) {
  EchoCanceller3Config parsed_config;
  bool parse_ok_unused;
  Aec3ConfigFromJsonString(json_string, &parsed_config, &parse_ok_unused);
  return parsed_config;
}
// Serializes `config` to the JSON format understood by
// Aec3ConfigFromJsonString(): field names mirror the EchoCanceller3Config
// substruct/member names, nested under a single top-level "aec3" node.
// Fix: `use_during_initial_phase` and `use_subband_nearend_detection` were
// the only two bool fields streamed raw (emitting 0/1 instead of JSON
// true/false); they are now serialized like every other bool in this
// function, so a bool-typed JSON parser round-trips them correctly.
std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) {
  rtc::StringBuilder ost;
  ost << "{";
  ost << "\"aec3\": {";
  ost << "\"buffering\": {";
  ost << "\"excess_render_detection_interval_blocks\": "
      << config.buffering.excess_render_detection_interval_blocks << ",";
  ost << "\"max_allowed_excess_render_blocks\": "
      << config.buffering.max_allowed_excess_render_blocks;
  ost << "},";
  ost << "\"delay\": {";
  ost << "\"default_delay\": " << config.delay.default_delay << ",";
  ost << "\"down_sampling_factor\": " << config.delay.down_sampling_factor
      << ",";
  ost << "\"num_filters\": " << config.delay.num_filters << ",";
  ost << "\"delay_headroom_samples\": " << config.delay.delay_headroom_samples
      << ",";
  ost << "\"hysteresis_limit_blocks\": " << config.delay.hysteresis_limit_blocks
      << ",";
  ost << "\"fixed_capture_delay_samples\": "
      << config.delay.fixed_capture_delay_samples << ",";
  ost << "\"delay_estimate_smoothing\": "
      << config.delay.delay_estimate_smoothing << ",";
  ost << "\"delay_candidate_detection_threshold\": "
      << config.delay.delay_candidate_detection_threshold << ",";
  ost << "\"delay_selection_thresholds\": {";
  ost << "\"initial\": " << config.delay.delay_selection_thresholds.initial
      << ",";
  ost << "\"converged\": " << config.delay.delay_selection_thresholds.converged;
  ost << "},";
  ost << "\"use_external_delay_estimator\": "
      << (config.delay.use_external_delay_estimator ? "true" : "false") << ",";
  ost << "\"log_warning_on_delay_changes\": "
      << (config.delay.log_warning_on_delay_changes ? "true" : "false") << ",";
  ost << "\"render_alignment_mixing\": {";
  ost << "\"downmix\": "
      << (config.delay.render_alignment_mixing.downmix ? "true" : "false")
      << ",";
  ost << "\"adaptive_selection\": "
      << (config.delay.render_alignment_mixing.adaptive_selection ? "true"
                                                                  : "false")
      << ",";
  ost << "\"activity_power_threshold\": "
      << config.delay.render_alignment_mixing.activity_power_threshold << ",";
  ost << "\"prefer_first_two_channels\": "
      << (config.delay.render_alignment_mixing.prefer_first_two_channels
              ? "true"
              : "false");
  ost << "},";
  ost << "\"capture_alignment_mixing\": {";
  ost << "\"downmix\": "
      << (config.delay.capture_alignment_mixing.downmix ? "true" : "false")
      << ",";
  ost << "\"adaptive_selection\": "
      << (config.delay.capture_alignment_mixing.adaptive_selection ? "true"
                                                                   : "false")
      << ",";
  ost << "\"activity_power_threshold\": "
      << config.delay.capture_alignment_mixing.activity_power_threshold << ",";
  ost << "\"prefer_first_two_channels\": "
      << (config.delay.capture_alignment_mixing.prefer_first_two_channels
              ? "true"
              : "false");
  ost << "}";
  ost << "},";
  ost << "\"filter\": {";
  // Filter sub-configs are serialized as positional JSON arrays; the order
  // here must match the reading code's array layout.
  ost << "\"refined\": [";
  ost << config.filter.refined.length_blocks << ",";
  ost << config.filter.refined.leakage_converged << ",";
  ost << config.filter.refined.leakage_diverged << ",";
  ost << config.filter.refined.error_floor << ",";
  ost << config.filter.refined.error_ceil << ",";
  ost << config.filter.refined.noise_gate;
  ost << "],";
  ost << "\"coarse\": [";
  ost << config.filter.coarse.length_blocks << ",";
  ost << config.filter.coarse.rate << ",";
  ost << config.filter.coarse.noise_gate;
  ost << "],";
  ost << "\"refined_initial\": [";
  ost << config.filter.refined_initial.length_blocks << ",";
  ost << config.filter.refined_initial.leakage_converged << ",";
  ost << config.filter.refined_initial.leakage_diverged << ",";
  ost << config.filter.refined_initial.error_floor << ",";
  ost << config.filter.refined_initial.error_ceil << ",";
  ost << config.filter.refined_initial.noise_gate;
  ost << "],";
  ost << "\"coarse_initial\": [";
  ost << config.filter.coarse_initial.length_blocks << ",";
  ost << config.filter.coarse_initial.rate << ",";
  ost << config.filter.coarse_initial.noise_gate;
  ost << "],";
  ost << "\"config_change_duration_blocks\": "
      << config.filter.config_change_duration_blocks << ",";
  ost << "\"initial_state_seconds\": " << config.filter.initial_state_seconds
      << ",";
  ost << "\"conservative_initial_phase\": "
      << (config.filter.conservative_initial_phase ? "true" : "false") << ",";
  ost << "\"enable_coarse_filter_output_usage\": "
      << (config.filter.enable_coarse_filter_output_usage ? "true" : "false")
      << ",";
  ost << "\"use_linear_filter\": "
      << (config.filter.use_linear_filter ? "true" : "false") << ",";
  ost << "\"export_linear_aec_output\": "
      << (config.filter.export_linear_aec_output ? "true" : "false");
  ost << "},";
  ost << "\"erle\": {";
  ost << "\"min\": " << config.erle.min << ",";
  ost << "\"max_l\": " << config.erle.max_l << ",";
  ost << "\"max_h\": " << config.erle.max_h << ",";
  ost << "\"onset_detection\": "
      << (config.erle.onset_detection ? "true" : "false") << ",";
  ost << "\"num_sections\": " << config.erle.num_sections << ",";
  ost << "\"clamp_quality_estimate_to_zero\": "
      << (config.erle.clamp_quality_estimate_to_zero ? "true" : "false") << ",";
  ost << "\"clamp_quality_estimate_to_one\": "
      << (config.erle.clamp_quality_estimate_to_one ? "true" : "false");
  ost << "},";
  ost << "\"ep_strength\": {";
  ost << "\"default_gain\": " << config.ep_strength.default_gain << ",";
  ost << "\"default_len\": " << config.ep_strength.default_len << ",";
  ost << "\"echo_can_saturate\": "
      << (config.ep_strength.echo_can_saturate ? "true" : "false") << ",";
  ost << "\"bounded_erl\": "
      << (config.ep_strength.bounded_erl ? "true" : "false");
  ost << "},";
  ost << "\"echo_audibility\": {";
  ost << "\"low_render_limit\": " << config.echo_audibility.low_render_limit
      << ",";
  ost << "\"normal_render_limit\": "
      << config.echo_audibility.normal_render_limit << ",";
  ost << "\"floor_power\": " << config.echo_audibility.floor_power << ",";
  ost << "\"audibility_threshold_lf\": "
      << config.echo_audibility.audibility_threshold_lf << ",";
  ost << "\"audibility_threshold_mf\": "
      << config.echo_audibility.audibility_threshold_mf << ",";
  ost << "\"audibility_threshold_hf\": "
      << config.echo_audibility.audibility_threshold_hf << ",";
  ost << "\"use_stationarity_properties\": "
      << (config.echo_audibility.use_stationarity_properties ? "true" : "false")
      << ",";
  ost << "\"use_stationarity_properties_at_init\": "
      << (config.echo_audibility.use_stationarity_properties_at_init ? "true"
                                                                     : "false");
  ost << "},";
  ost << "\"render_levels\": {";
  ost << "\"active_render_limit\": " << config.render_levels.active_render_limit
      << ",";
  ost << "\"poor_excitation_render_limit\": "
      << config.render_levels.poor_excitation_render_limit << ",";
  ost << "\"poor_excitation_render_limit_ds8\": "
      << config.render_levels.poor_excitation_render_limit_ds8 << ",";
  ost << "\"render_power_gain_db\": "
      << config.render_levels.render_power_gain_db;
  ost << "},";
  ost << "\"echo_removal_control\": {";
  ost << "\"has_clock_drift\": "
      << (config.echo_removal_control.has_clock_drift ? "true" : "false")
      << ",";
  ost << "\"linear_and_stable_echo_path\": "
      << (config.echo_removal_control.linear_and_stable_echo_path ? "true"
                                                                  : "false");
  ost << "},";
  ost << "\"echo_model\": {";
  ost << "\"noise_floor_hold\": " << config.echo_model.noise_floor_hold << ",";
  ost << "\"min_noise_floor_power\": "
      << config.echo_model.min_noise_floor_power << ",";
  ost << "\"stationary_gate_slope\": "
      << config.echo_model.stationary_gate_slope << ",";
  ost << "\"noise_gate_power\": " << config.echo_model.noise_gate_power << ",";
  ost << "\"noise_gate_slope\": " << config.echo_model.noise_gate_slope << ",";
  ost << "\"render_pre_window_size\": "
      << config.echo_model.render_pre_window_size << ",";
  ost << "\"render_post_window_size\": "
      << config.echo_model.render_post_window_size;
  ost << "},";
  ost << "\"comfort_noise\": {";
  ost << "\"noise_floor_dbfs\": " << config.comfort_noise.noise_floor_dbfs;
  ost << "},";
  ost << "\"suppressor\": {";
  ost << "\"nearend_average_blocks\": "
      << config.suppressor.nearend_average_blocks << ",";
  ost << "\"normal_tuning\": {";
  ost << "\"mask_lf\": [";
  ost << config.suppressor.normal_tuning.mask_lf.enr_transparent << ",";
  ost << config.suppressor.normal_tuning.mask_lf.enr_suppress << ",";
  ost << config.suppressor.normal_tuning.mask_lf.emr_transparent;
  ost << "],";
  ost << "\"mask_hf\": [";
  ost << config.suppressor.normal_tuning.mask_hf.enr_transparent << ",";
  ost << config.suppressor.normal_tuning.mask_hf.enr_suppress << ",";
  ost << config.suppressor.normal_tuning.mask_hf.emr_transparent;
  ost << "],";
  ost << "\"max_inc_factor\": "
      << config.suppressor.normal_tuning.max_inc_factor << ",";
  ost << "\"max_dec_factor_lf\": "
      << config.suppressor.normal_tuning.max_dec_factor_lf;
  ost << "},";
  ost << "\"nearend_tuning\": {";
  ost << "\"mask_lf\": [";
  ost << config.suppressor.nearend_tuning.mask_lf.enr_transparent << ",";
  ost << config.suppressor.nearend_tuning.mask_lf.enr_suppress << ",";
  ost << config.suppressor.nearend_tuning.mask_lf.emr_transparent;
  ost << "],";
  ost << "\"mask_hf\": [";
  ost << config.suppressor.nearend_tuning.mask_hf.enr_transparent << ",";
  ost << config.suppressor.nearend_tuning.mask_hf.enr_suppress << ",";
  ost << config.suppressor.nearend_tuning.mask_hf.emr_transparent;
  ost << "],";
  ost << "\"max_inc_factor\": "
      << config.suppressor.nearend_tuning.max_inc_factor << ",";
  ost << "\"max_dec_factor_lf\": "
      << config.suppressor.nearend_tuning.max_dec_factor_lf;
  ost << "},";
  ost << "\"dominant_nearend_detection\": {";
  ost << "\"enr_threshold\": "
      << config.suppressor.dominant_nearend_detection.enr_threshold << ",";
  ost << "\"enr_exit_threshold\": "
      << config.suppressor.dominant_nearend_detection.enr_exit_threshold << ",";
  ost << "\"snr_threshold\": "
      << config.suppressor.dominant_nearend_detection.snr_threshold << ",";
  ost << "\"hold_duration\": "
      << config.suppressor.dominant_nearend_detection.hold_duration << ",";
  ost << "\"trigger_threshold\": "
      << config.suppressor.dominant_nearend_detection.trigger_threshold << ",";
  // Serialize as a JSON boolean literal, consistent with the other bools
  // above; streaming the raw bool would emit 0/1.
  ost << "\"use_during_initial_phase\": "
      << (config.suppressor.dominant_nearend_detection.use_during_initial_phase
              ? "true"
              : "false");
  ost << "},";
  ost << "\"subband_nearend_detection\": {";
  ost << "\"nearend_average_blocks\": "
      << config.suppressor.subband_nearend_detection.nearend_average_blocks
      << ",";
  ost << "\"subband1\": [";
  ost << config.suppressor.subband_nearend_detection.subband1.low << ",";
  ost << config.suppressor.subband_nearend_detection.subband1.high;
  ost << "],";
  ost << "\"subband2\": [";
  ost << config.suppressor.subband_nearend_detection.subband2.low << ",";
  ost << config.suppressor.subband_nearend_detection.subband2.high;
  ost << "],";
  ost << "\"nearend_threshold\": "
      << config.suppressor.subband_nearend_detection.nearend_threshold << ",";
  ost << "\"snr_threshold\": "
      << config.suppressor.subband_nearend_detection.snr_threshold;
  ost << "},";
  // Serialize as a JSON boolean literal, consistent with the other bools.
  ost << "\"use_subband_nearend_detection\": "
      << (config.suppressor.use_subband_nearend_detection ? "true" : "false")
      << ",";
  ost << "\"high_bands_suppression\": {";
  ost << "\"enr_threshold\": "
      << config.suppressor.high_bands_suppression.enr_threshold << ",";
  ost << "\"max_gain_during_echo\": "
      << config.suppressor.high_bands_suppression.max_gain_during_echo << ",";
  ost << "\"anti_howling_activation_threshold\": "
      << config.suppressor.high_bands_suppression
             .anti_howling_activation_threshold
      << ",";
  ost << "\"anti_howling_gain\": "
      << config.suppressor.high_bands_suppression.anti_howling_gain;
  ost << "},";
  ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase;
  ost << "}";
  ost << "}";
  ost << "}";
  return ost.Release();
}
} // namespace webrtc

45
third_party/libwebrtc/api/audio/echo_canceller3_config_json.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,45 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_
#define API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_
#include <string>
#include "absl/strings/string_view.h"
#include "api/audio/echo_canceller3_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Parses a JSON-encoded string into an Aec3 config. Fields correspond to
// substruct names, with the addition that there must be a top-level node
// "aec3". Produces default config values for anything that cannot be parsed
// from the string. If any error was found in the parsing, parsing_successful is
// set to false.
RTC_EXPORT void Aec3ConfigFromJsonString(absl::string_view json_string,
                                         EchoCanceller3Config* config,
                                         bool* parsing_successful);
// To be deprecated.
// Parses a JSON-encoded string into an Aec3 config. Fields correspond to
// substruct names, with the addition that there must be a top-level node
// "aec3". Returns default config values for anything that cannot be parsed from
// the string.
RTC_EXPORT EchoCanceller3Config
Aec3ConfigFromJsonString(absl::string_view json_string);
// Encodes an Aec3 config in JSON format. Fields correspond to substruct names,
// with the addition that the top-level node is named "aec3".
RTC_EXPORT std::string Aec3ConfigToJsonString(
    const EchoCanceller3Config& config);
}  // namespace webrtc
#endif // API_AUDIO_ECHO_CANCELLER3_CONFIG_JSON_H_

31
third_party/libwebrtc/api/audio/echo_canceller3_factory.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,31 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_factory.h"
#include <memory>
#include "modules/audio_processing/aec3/echo_canceller3.h"
namespace webrtc {
// Constructs a factory that creates EchoCanceller3 instances with the
// default EchoCanceller3Config.
EchoCanceller3Factory::EchoCanceller3Factory() {}
// Constructs a factory whose created instances use a copy of `config`.
EchoCanceller3Factory::EchoCanceller3Factory(const EchoCanceller3Config& config)
    : config_(config) {}
// Creates an EchoCanceller3 for the given sample rate and channel counts,
// forwarding the stored configuration.
std::unique_ptr<EchoControl> EchoCanceller3Factory::Create(
    int sample_rate_hz,
    int num_render_channels,
    int num_capture_channels) {
  return std::make_unique<EchoCanceller3>(
      config_, sample_rate_hz, num_render_channels, num_capture_channels);
}
}  // namespace webrtc

41
third_party/libwebrtc/api/audio/echo_canceller3_factory.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,41 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
#define API_AUDIO_ECHO_CANCELLER3_FACTORY_H_
#include <memory>
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_control.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
class RTC_EXPORT EchoCanceller3Factory : public EchoControlFactory {
 public:
  // Factory producing EchoCanceller3 instances with the default configuration.
  EchoCanceller3Factory();
  // Factory producing EchoCanceller3 instances with the specified
  // configuration.
  explicit EchoCanceller3Factory(const EchoCanceller3Config& config);
  // Creates an EchoCanceller3 with a specified channel count and sampling rate.
  std::unique_ptr<EchoControl> Create(int sample_rate_hz,
                                      int num_render_channels,
                                      int num_capture_channels) override;

 private:
  // Configuration applied to every EchoCanceller3 this factory creates.
  const EchoCanceller3Config config_;
};
} // namespace webrtc
#endif // API_AUDIO_ECHO_CANCELLER3_FACTORY_H_

68
third_party/libwebrtc/api/audio/echo_control.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,68 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_CONTROL_H_
#define API_AUDIO_ECHO_CONTROL_H_
#include <memory>
#include "rtc_base/checks.h"
namespace webrtc {
class AudioBuffer;
// Interface for an acoustic echo cancellation (AEC) submodule.
class EchoControl {
 public:
  // Analysis (not changing) of the render signal.
  virtual void AnalyzeRender(AudioBuffer* render) = 0;
  // Analysis (not changing) of the capture signal.
  virtual void AnalyzeCapture(AudioBuffer* capture) = 0;
  // Processes the capture signal in order to remove the echo.
  virtual void ProcessCapture(AudioBuffer* capture, bool level_change) = 0;
  // As above, but also returns the linear filter output.
  virtual void ProcessCapture(AudioBuffer* capture,
                              AudioBuffer* linear_output,
                              bool level_change) = 0;
  // Metrics reported by the echo controller.
  struct Metrics {
    double echo_return_loss;              // ERL in dB.
    double echo_return_loss_enhancement;  // ERLE in dB.
    int delay_ms;                         // Estimated echo path delay.
  };
  // Collect current metrics from the echo controller.
  virtual Metrics GetMetrics() const = 0;
  // Provides an optional external estimate of the audio buffer delay.
  virtual void SetAudioBufferDelay(int delay_ms) = 0;
  // Returns whether the signal is altered.
  virtual bool ActiveProcessing() const = 0;
  virtual ~EchoControl() {}
};
// Interface for a factory that creates EchoControllers.
class EchoControlFactory {
 public:
  virtual std::unique_ptr<EchoControl> Create(int sample_rate_hz,
                                              int num_render_channels,
                                              int num_capture_channels) = 0;
  virtual ~EchoControlFactory() = default;
};
}  // namespace webrtc
#endif // API_AUDIO_ECHO_CONTROL_H_

21
third_party/libwebrtc/api/audio/echo_detector_creator.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_detector_creator.h"
#include "modules/audio_processing/residual_echo_detector.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {

// Constructs the WebRTC residual echo detector implementation, wrapped in a
// ref-counting adapter and returned through the EchoDetector interface.
rtc::scoped_refptr<EchoDetector> CreateEchoDetector() {
  rtc::scoped_refptr<EchoDetector> detector(
      new rtc::RefCountedObject<ResidualEchoDetector>());
  return detector;
}

}  // namespace webrtc

26
third_party/libwebrtc/api/audio/echo_detector_creator.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,26 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_ECHO_DETECTOR_CREATOR_H_
#define API_AUDIO_ECHO_DETECTOR_CREATOR_H_
#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"
namespace webrtc {
// Returns an instance of the WebRTC implementation of a residual echo
// detector. It can be provided to the webrtc::AudioProcessingBuilder to obtain
// the usual residual echo metrics. The returned object is reference counted;
// the caller shares ownership via the scoped_refptr.
rtc::scoped_refptr<EchoDetector> CreateEchoDetector();
}  // namespace webrtc
#endif // API_AUDIO_ECHO_DETECTOR_CREATOR_H_

31
third_party/libwebrtc/api/audio/test/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,31 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# Unit tests for the api/audio targets; only built when the WebRTC test
# targets are enabled.
if (rtc_include_tests) {
  rtc_library("audio_api_unittests") {
    testonly = true
    sources = [
      "audio_frame_unittest.cc",
      "echo_canceller3_config_json_unittest.cc",
      "echo_canceller3_config_unittest.cc",
    ]
    deps = [
      ":aec3_config",
      ":aec3_config_json",
      ":audio_frame_api",
      "../../../rtc_base:rtc_base_approved",
      "../../../test:test_support",
    ]
  }
}

186
third_party/libwebrtc/api/audio/test/audio_frame_unittest.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,186 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/audio_frame.h"
#include <stdint.h>
#include <string.h> // memcmp
#include "test/gtest.h"
namespace webrtc {
namespace {

// Returns true iff every sample of `frame`'s full backing buffer (all
// max_16bit_samples() entries, not just the active region) equals `sample`.
bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
  const int16_t* samples = frame.data();
  size_t remaining = frame.max_16bit_samples();
  while (remaining > 0) {
    --remaining;
    if (samples[remaining] != sample) {
      return false;
    }
  }
  return true;
}

// Common fixture values shared by the tests below.
constexpr uint32_t kTimestamp = 27;
constexpr int kSampleRateHz = 16000;
constexpr size_t kNumChannelsMono = 1;
constexpr size_t kNumChannelsStereo = 2;
constexpr size_t kNumChannels5_1 = 6;
constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;

}  // namespace
// A default-constructed frame must start muted with an all-zero buffer.
TEST(AudioFrameTest, FrameStartsMuted) {
  AudioFrame frame;
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

// Requesting mutable data unmutes the frame but the buffer stays zeroed.
TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
  AudioFrame frame;
  frame.mutable_data();
  EXPECT_FALSE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

// Mute() must zero out previously written sample data.
TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
  AudioFrame frame;
  int16_t* frame_data = frame.mutable_data();
  for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
    frame_data[i] = 17;
  }
  ASSERT_TRUE(AllSamplesAre(17, frame));
  frame.Mute();
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

// UpdateFrame() with mono data must copy all metadata and samples; updating
// with a null data pointer must mute the frame.
TEST(AudioFrameTest, UpdateFrameMono) {
  AudioFrame frame;
  // Aggregate init: first element is 17, the rest are value-initialized to 0.
  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                    AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannelsMono);
  EXPECT_EQ(kTimestamp, frame.timestamp_);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kSampleRateHz, frame.sample_rate_hz());
  EXPECT_EQ(AudioFrame::kPLC, frame.speech_type_);
  EXPECT_EQ(AudioFrame::kVadActive, frame.vad_activity_);
  EXPECT_EQ(kNumChannelsMono, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_MONO, frame.channel_layout());
  EXPECT_FALSE(frame.muted());
  EXPECT_EQ(0, memcmp(samples, frame.data(), sizeof(samples)));
  // A null data pointer signals "no samples": the frame becomes muted.
  frame.UpdateFrame(kTimestamp, nullptr /* data*/, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsMono);
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

// UpdateFrame() must derive the channel layout from the channel count for
// stereo and 5.1 configurations.
TEST(AudioFrameTest, UpdateFrameMultiChannel) {
  AudioFrame frame;
  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsStereo);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());
  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannels5_1);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kNumChannels5_1, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
}

// CopyFrom() must duplicate metadata, samples and mutedness.
TEST(AudioFrameTest, CopyFrom) {
  AudioFrame frame1;
  AudioFrame frame2;
  // Aggregate init: first element is 17, the rest are zero.
  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame2.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                     AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);
  EXPECT_EQ(frame2.timestamp_, frame1.timestamp_);
  EXPECT_EQ(frame2.samples_per_channel_, frame1.samples_per_channel_);
  EXPECT_EQ(frame2.sample_rate_hz_, frame1.sample_rate_hz_);
  EXPECT_EQ(frame2.speech_type_, frame1.speech_type_);
  EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
  EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);
  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
  // Copying a muted frame must also carry over the muted state.
  frame2.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                     kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);
  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
}

// swap() must exchange all state between two frames, including samples,
// metadata, channel layout and the absolute capture timestamp.
TEST(AudioFrameTest, SwapFrames) {
  AudioFrame frame1, frame2;
  int16_t samples1[kNumChannelsMono * kSamplesPerChannel];
  for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
    samples1[i] = i;
  }
  frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz,
                     AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.set_absolute_capture_timestamp_ms(12345678);
  const auto frame1_channel_layout = frame1.channel_layout();
  // frame2 deliberately uses different sizes/metadata to make the swap
  // observable in every field.
  int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)];
  for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
       ++i) {
    samples2[i] = 1000 + i;
  }
  frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1,
                     kSampleRateHz + 1, AudioFrame::kNormalSpeech,
                     AudioFrame::kVadPassive, kNumChannelsMono + 1);
  const auto frame2_channel_layout = frame2.channel_layout();

  swap(frame1, frame2);

  // frame1 now carries frame2's original state...
  EXPECT_EQ(kTimestamp + 1, frame1.timestamp_);
  ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_);
  EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_);
  EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_);
  ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_);
  for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
       ++i) {
    EXPECT_EQ(samples2[i], frame1.data()[i]);
  }
  // ...including frame2's unset capture timestamp.
  EXPECT_FALSE(frame1.absolute_capture_timestamp_ms());
  EXPECT_EQ(frame2_channel_layout, frame1.channel_layout());

  // ...and frame2 carries frame1's original state.
  EXPECT_EQ(kTimestamp, frame2.timestamp_);
  ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_);
  EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_);
  EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_);
  EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_);
  ASSERT_EQ(kNumChannelsMono, frame2.num_channels_);
  for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
    EXPECT_EQ(samples1[i], frame2.data()[i]);
  }
  EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms());
  EXPECT_EQ(frame1_channel_layout, frame2.channel_layout());
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,70 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_config_json.h"
#include "api/audio/echo_canceller3_config.h"
#include "test/gtest.h"
namespace webrtc {
// Round-trip test: serialize a non-default config to JSON, parse it back, and
// verify that unchanged fields keep their defaults while changed fields
// survive the transformation.
// Fix: the original test assigned subband1 twice ({1, 3} then {4, 5}) and
// never set subband2, so the subband2 round-trip expectations below compared
// default values against default values and could not catch serialization
// bugs. subband2 is now set to a non-default value.
TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) {
  EchoCanceller3Config cfg;
  cfg.delay.down_sampling_factor = 1u;
  cfg.delay.log_warning_on_delay_changes = true;
  cfg.filter.refined.error_floor = 2.f;
  cfg.filter.coarse_initial.length_blocks = 3u;
  cfg.comfort_noise.noise_floor_dbfs = 100.f;
  cfg.suppressor.normal_tuning.mask_hf.enr_suppress = .5f;
  cfg.suppressor.subband_nearend_detection.nearend_average_blocks = 3;
  cfg.suppressor.subband_nearend_detection.subband1 = {1, 3};
  cfg.suppressor.subband_nearend_detection.subband2 = {4, 5};
  cfg.suppressor.subband_nearend_detection.nearend_threshold = 2.f;
  cfg.suppressor.subband_nearend_detection.snr_threshold = 100.f;
  std::string json_string = Aec3ConfigToJsonString(cfg);
  EchoCanceller3Config cfg_transformed = Aec3ConfigFromJsonString(json_string);
  // Expect unchanged values to remain default.
  EXPECT_EQ(cfg.ep_strength.default_len,
            cfg_transformed.ep_strength.default_len);
  EXPECT_EQ(cfg.suppressor.normal_tuning.mask_lf.enr_suppress,
            cfg_transformed.suppressor.normal_tuning.mask_lf.enr_suppress);
  // Expect changed values to carry through the transformation.
  EXPECT_EQ(cfg.delay.down_sampling_factor,
            cfg_transformed.delay.down_sampling_factor);
  EXPECT_EQ(cfg.delay.log_warning_on_delay_changes,
            cfg_transformed.delay.log_warning_on_delay_changes);
  EXPECT_EQ(cfg.filter.coarse_initial.length_blocks,
            cfg_transformed.filter.coarse_initial.length_blocks);
  EXPECT_EQ(cfg.filter.refined.error_floor,
            cfg_transformed.filter.refined.error_floor);
  EXPECT_EQ(cfg.comfort_noise.noise_floor_dbfs,
            cfg_transformed.comfort_noise.noise_floor_dbfs);
  EXPECT_EQ(cfg.suppressor.normal_tuning.mask_hf.enr_suppress,
            cfg_transformed.suppressor.normal_tuning.mask_hf.enr_suppress);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.nearend_average_blocks,
            cfg_transformed.suppressor.subband_nearend_detection
                .nearend_average_blocks);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.subband1.low,
            cfg_transformed.suppressor.subband_nearend_detection.subband1.low);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.subband1.high,
            cfg_transformed.suppressor.subband_nearend_detection.subband1.high);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.subband2.low,
            cfg_transformed.suppressor.subband_nearend_detection.subband2.low);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.subband2.high,
            cfg_transformed.suppressor.subband_nearend_detection.subband2.high);
  EXPECT_EQ(
      cfg.suppressor.subband_nearend_detection.nearend_threshold,
      cfg_transformed.suppressor.subband_nearend_detection.nearend_threshold);
  EXPECT_EQ(cfg.suppressor.subband_nearend_detection.snr_threshold,
            cfg_transformed.suppressor.subband_nearend_detection.snr_threshold);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,46 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/echo_canceller3_config.h"
#include "api/audio/echo_canceller3_config_json.h"
#include "test/gtest.h"
namespace webrtc {
// Validate() must leave an already-valid (default) config untouched; equality
// is checked via the JSON serialization of both configs.
TEST(EchoCanceller3Config, ValidConfigIsNotModified) {
  EchoCanceller3Config config;
  EXPECT_TRUE(EchoCanceller3Config::Validate(&config));
  EchoCanceller3Config default_config;
  EXPECT_EQ(Aec3ConfigToJsonString(config),
            Aec3ConfigToJsonString(default_config));
}

// Validate() must report and correct an out-of-range field while leaving all
// other fields unchanged.
TEST(EchoCanceller3Config, InvalidConfigIsCorrected) {
  // Change a parameter and validate.
  EchoCanceller3Config config;
  config.echo_model.min_noise_floor_power = -1600000.f;
  EXPECT_FALSE(EchoCanceller3Config::Validate(&config));
  EXPECT_GE(config.echo_model.min_noise_floor_power, 0.f);
  // Verify remaining parameters are unchanged.
  EchoCanceller3Config default_config;
  config.echo_model.min_noise_floor_power =
      default_config.echo_model.min_noise_floor_power;
  EXPECT_EQ(Aec3ConfigToJsonString(config),
            Aec3ConfigToJsonString(default_config));
}

// A config corrected by Validate() must itself pass a second validation.
TEST(EchoCanceller3Config, ValidatedConfigsAreValid) {
  EchoCanceller3Config config;
  config.delay.down_sampling_factor = 983;
  // First call flags the invalid value and fixes it in place...
  EXPECT_FALSE(EchoCanceller3Config::Validate(&config));
  // ...so the second call sees a valid config.
  EXPECT_TRUE(EchoCanceller3Config::Validate(&config));
}
} // namespace webrtc

146
third_party/libwebrtc/api/audio_codecs/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,146 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# Core audio-codec API: encoder/decoder base classes, factory interfaces
# and factory templates, the SDP audio format type, and the codec-pair ID
# used to link encoder/decoder instances.
rtc_library("audio_codecs_api") {
  visibility = [ "*" ]
  sources = [
    "audio_codec_pair_id.cc",
    "audio_codec_pair_id.h",
    "audio_decoder.cc",
    "audio_decoder.h",
    "audio_decoder_factory.h",
    "audio_decoder_factory_template.h",
    "audio_encoder.cc",
    "audio_encoder.h",
    "audio_encoder_factory.h",
    "audio_encoder_factory_template.h",
    "audio_format.cc",
    "audio_format.h",
  ]
  deps = [
    "..:array_view",
    "..:bitrate_allocation",
    "..:scoped_refptr",
    "../../rtc_base:checks",
    "../../rtc_base:deprecation",
    "../../rtc_base:rtc_base_approved",
    "../../rtc_base:sanitizer",
    "../../rtc_base/system:rtc_export",
    "../units:time_delta",
  ]
  # Abseil dependencies are declared separately per WebRTC GN convention.
  absl_deps = [
    "//third_party/abseil-cpp/absl/strings",
    "//third_party/abseil-cpp/absl/types:optional",
  ]
}
# Factory for the decoders built into WebRTC: L16, G.711, G.722 and iSAC
# always; iLBC and Opus optionally, gated on the rtc_include_ilbc and
# rtc_include_opus GN args (mirrored into WEBRTC_USE_BUILTIN_* defines).
rtc_library("builtin_audio_decoder_factory") {
  visibility = [ "*" ]
  allow_poison = [ "audio_codecs" ]
  sources = [
    "builtin_audio_decoder_factory.cc",
    "builtin_audio_decoder_factory.h",
  ]
  deps = [
    ":audio_codecs_api",
    "..:scoped_refptr",
    "../../rtc_base:rtc_base_approved",
    "L16:audio_decoder_L16",
    "g711:audio_decoder_g711",
    "g722:audio_decoder_g722",
    "isac:audio_decoder_isac",
  ]
  defines = []
  if (rtc_include_ilbc) {
    deps += [ "ilbc:audio_decoder_ilbc" ]
    defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
  } else {
    defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
  }
  if (rtc_include_opus) {
    deps += [
      "opus:audio_decoder_multiopus",
      "opus:audio_decoder_opus",
    ]
    defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
  } else {
    defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
  }
}
# Factory for the encoders built into WebRTC; encoder-side counterpart of
# builtin_audio_decoder_factory, with the same rtc_include_ilbc /
# rtc_include_opus gating and WEBRTC_USE_BUILTIN_* defines.
rtc_library("builtin_audio_encoder_factory") {
  visibility = [ "*" ]
  allow_poison = [ "audio_codecs" ]
  sources = [
    "builtin_audio_encoder_factory.cc",
    "builtin_audio_encoder_factory.h",
  ]
  deps = [
    ":audio_codecs_api",
    "..:scoped_refptr",
    "../../rtc_base:rtc_base_approved",
    "L16:audio_encoder_L16",
    "g711:audio_encoder_g711",
    "g722:audio_encoder_g722",
    "isac:audio_encoder_isac",
  ]
  defines = []
  if (rtc_include_ilbc) {
    deps += [ "ilbc:audio_encoder_ilbc" ]
    defines += [ "WEBRTC_USE_BUILTIN_ILBC=1" ]
  } else {
    defines += [ "WEBRTC_USE_BUILTIN_ILBC=0" ]
  }
  if (rtc_include_opus) {
    deps += [
      "opus:audio_encoder_multiopus",
      "opus:audio_encoder_opus",
    ]
    defines += [ "WEBRTC_USE_BUILTIN_OPUS=1" ]
  } else {
    defines += [ "WEBRTC_USE_BUILTIN_OPUS=0" ]
  }
}
# Factory that creates only Opus decoders (regular and multistream),
# for builds that do not want the full builtin decoder set.
rtc_library("opus_audio_decoder_factory") {
  visibility = [ "*" ]
  allow_poison = [ "audio_codecs" ]
  sources = [
    "opus_audio_decoder_factory.cc",
    "opus_audio_decoder_factory.h",
  ]
  deps = [
    ":audio_codecs_api",
    "..:scoped_refptr",
    "../../rtc_base:rtc_base_approved",
    "opus:audio_decoder_multiopus",
    "opus:audio_decoder_opus",
  ]
}
# Factory that creates only Opus encoders (regular and multistream);
# encoder-side counterpart of opus_audio_decoder_factory.
rtc_library("opus_audio_encoder_factory") {
  visibility = [ "*" ]
  allow_poison = [ "audio_codecs" ]
  sources = [
    "opus_audio_encoder_factory.cc",
    "opus_audio_encoder_factory.h",
  ]
  deps = [
    ":audio_codecs_api",
    "..:scoped_refptr",
    "../../rtc_base:rtc_base_approved",
    "opus:audio_encoder_multiopus",
    "opus:audio_encoder_opus",
  ]
}

52
third_party/libwebrtc/api/audio_codecs/L16/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,52 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# L16 (linear PCM) encoder wrapper, usable as a template parameter to the
# audio encoder factory templates; backed by the pcm16b codec module.
rtc_library("audio_encoder_L16") {
  visibility = [ "*" ]
  poisonous = [ "audio_codecs" ]
  sources = [
    "audio_encoder_L16.cc",
    "audio_encoder_L16.h",
  ]
  deps = [
    "..:audio_codecs_api",
    "../../../modules/audio_coding:pcm16b",
    "../../../rtc_base:rtc_base_approved",
    "../../../rtc_base:safe_minmax",
    "../../../rtc_base/system:rtc_export",
  ]
  absl_deps = [
    "//third_party/abseil-cpp/absl/strings",
    "//third_party/abseil-cpp/absl/types:optional",
  ]
}
# L16 (linear PCM) decoder wrapper, usable as a template parameter to the
# audio decoder factory templates; backed by the pcm16b codec module.
rtc_library("audio_decoder_L16") {
  visibility = [ "*" ]
  poisonous = [ "audio_codecs" ]
  sources = [
    "audio_decoder_L16.cc",
    "audio_decoder_L16.h",
  ]
  deps = [
    "..:audio_codecs_api",
    "../../../modules/audio_coding:pcm16b",
    "../../../rtc_base:rtc_base_approved",
    "../../../rtc_base/system:rtc_export",
  ]
  absl_deps = [
    "//third_party/abseil-cpp/absl/strings",
    "//third_party/abseil-cpp/absl/types:optional",
  ]
}

45
third_party/libwebrtc/api/audio_codecs/L16/audio_decoder_L16.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,45 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/L16/audio_decoder_L16.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderL16::Config> AudioDecoderL16::SdpToConfig(
const SdpAudioFormat& format) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
return absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()
? absl::optional<Config>(config)
: absl::nullopt;
}
void AudioDecoderL16::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
Pcm16BAppendSupportedCodecSpecs(specs);
}
std::unique_ptr<AudioDecoder> AudioDecoderL16::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
return config.IsOk() ? std::make_unique<AudioDecoderPcm16B>(
config.sample_rate_hz, config.num_channels)
: nullptr;
}
} // namespace webrtc

46
third_party/libwebrtc/api/audio_codecs/L16/audio_decoder_L16.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,46 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
#define API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {

// L16 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderL16 {
  struct Config {
    // A config is valid for one of the four supported sample rates and at
    // least one channel.
    bool IsOk() const {
      return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
              sample_rate_hz == 32000 || sample_rate_hz == 48000) &&
             num_channels >= 1;
    }
    int sample_rate_hz = 8000;  // Defaults to narrowband mono.
    int num_channels = 1;
  };
  // Parses an SdpAudioFormat into a Config; empty if the format does not
  // describe a valid L16 decoder.
  static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
  // Appends the codec specs this decoder supports.
  static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
  // Creates a decoder for |config|. |codec_pair_id| links the decoder with
  // a matching encoder (see AudioDecoderFactory for its semantics).
  static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
      const Config& config,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};

}  // namespace webrtc
#endif // API_AUDIO_CODECS_L16_AUDIO_DECODER_L16_H_

70
third_party/libwebrtc/api/audio_codecs/L16/audio_encoder_L16.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,70 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/L16/audio_encoder_L16.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
#include "modules/audio_coding/codecs/pcm16b/pcm16b_common.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {

// Maps an SDP "L16" format to an encoder Config. Returns nullopt if the
// channel count does not fit in an int, the name is not L16
// (case-insensitive), or the resulting config fails Config::IsOk(). A
// positive "ptime" parameter, rounded down to a multiple of 10 ms and
// clamped to [10, 60] ms, selects the frame size.
absl::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
    const SdpAudioFormat& format) {
  if (!rtc::IsValueInRangeForNumericType<int>(format.num_channels)) {
    return absl::nullopt;
  }
  Config parsed;
  parsed.sample_rate_hz = format.clockrate_hz;
  parsed.num_channels = rtc::dchecked_cast<int>(format.num_channels);
  const auto ptime_entry = format.parameters.find("ptime");
  if (ptime_entry != format.parameters.end()) {
    const auto ptime = rtc::StringToNumber<int>(ptime_entry->second);
    if (ptime && *ptime > 0) {
      parsed.frame_size_ms = rtc::SafeClamp(10 * (*ptime / 10), 10, 60);
    }
  }
  if (absl::EqualsIgnoreCase(format.name, "L16") && parsed.IsOk()) {
    return parsed;
  }
  return absl::nullopt;
}

// Appends the supported L16 codec specs via the shared PCM16B helper.
void AudioEncoderL16::AppendSupportedEncoders(
    std::vector<AudioCodecSpec>* specs) {
  Pcm16BAppendSupportedCodecSpecs(specs);
}

// Reports rate/channel/bitrate info for a valid config. The bitrate is
// 16 bits per sample per channel at the configured sample rate.
AudioCodecInfo AudioEncoderL16::QueryAudioEncoder(
    const AudioEncoderL16::Config& config) {
  RTC_DCHECK(config.IsOk());
  const int bitrate_bps = config.sample_rate_hz * config.num_channels * 16;
  return {config.sample_rate_hz,
          rtc::dchecked_cast<size_t>(config.num_channels), bitrate_bps};
}

// Builds an AudioEncoderPcm16B from a valid config and RTP payload type.
// The codec pair ID is not used by this codec.
std::unique_ptr<AudioEncoder> AudioEncoderL16::MakeAudioEncoder(
    const AudioEncoderL16::Config& config,
    int payload_type,
    absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
  RTC_DCHECK(config.IsOk());
  AudioEncoderPcm16B::Config encoder_config;
  encoder_config.sample_rate_hz = config.sample_rate_hz;
  encoder_config.num_channels = config.num_channels;
  encoder_config.frame_size_ms = config.frame_size_ms;
  encoder_config.payload_type = payload_type;
  return std::make_unique<AudioEncoderPcm16B>(encoder_config);
}

}  // namespace webrtc

Просмотреть файл

@ -14,17 +14,17 @@
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/optional.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// L16 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
//
// NOTE: This struct is still under development and may change without notice.
struct AudioEncoderL16 {
struct RTC_EXPORT AudioEncoderL16 {
struct Config {
bool IsOk() const {
return (sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
@ -36,11 +36,13 @@ struct AudioEncoderL16 {
int num_channels = 1;
int frame_size_ms = 10;
};
static rtc::Optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(const Config& config,
int payload_type);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc

1
third_party/libwebrtc/api/audio_codecs/OWNERS поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
kwiberg@webrtc.org

91
third_party/libwebrtc/api/audio_codecs/audio_codec_pair_id.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,91 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_codec_pair_id.h"
#include <atomic>
#include <cstdint>
#include "rtc_base/checks.h"
namespace webrtc {
namespace {

// Returns a new value that it has never returned before. You may call it at
// most 2^63 times in the lifetime of the program. Note: The returned values
// may be easily predictable.
uint64_t GetNextId() {
  static std::atomic<uint64_t> next_id(0);
  // Atomically increment `next_id`, and return the previous value. Relaxed
  // memory order is sufficient, since all we care about is that different
  // callers return different values.
  const uint64_t new_id = next_id.fetch_add(1, std::memory_order_relaxed);
  // This check isn't atomic with the increment, so if we start 2^63 + 1
  // invocations of GetNextId() in parallel, the last one to do the atomic
  // increment could return the ID 0 before any of the others had time to
  // trigger this DCHECK. We blithely assume that this won't happen.
  RTC_DCHECK_LT(new_id, uint64_t{1} << 63) << "Used up all ID values";
  return new_id;
}

// Make an integer ID more unpredictable. This is a 1:1 mapping, so you can
// feed it any value, but the idea is that you can feed it a sequence such as
// 0, 1, 2, ... and get a new sequence that isn't as trivially predictable, so
// that users won't rely on it being consecutive or increasing or anything like
// that.
constexpr uint64_t ObfuscateId(uint64_t id) {
  // Any nonzero coefficient that's relatively prime to 2^64 (that is, any odd
  // number) and any constant will give a 1:1 mapping. These high-entropy
  // values will prevent the sequence from being trivially predictable.
  //
  // Both the multiplication and the addition are going to overflow almost
  // always, but that's fine---we *want* arithmetic mod 2^64.
  return uint64_t{0x85fdb20e1294309a} + uint64_t{0xc516ef5c37462469} * id;
}

// The first ten values. Verified against the Python function
//
//   def f(n):
//     return (0x85fdb20e1294309a + 0xc516ef5c37462469 * n) % 2**64
//
// Callers should obviously not depend on these exact values...
//
// (On Visual C++, we have to disable warning C4307 (integral constant
// overflow), even though unsigned integers have perfectly well-defined
// overflow behavior.)
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4307)
#endif
static_assert(ObfuscateId(0) == uint64_t{0x85fdb20e1294309a}, "");
static_assert(ObfuscateId(1) == uint64_t{0x4b14a16a49da5503}, "");
static_assert(ObfuscateId(2) == uint64_t{0x102b90c68120796c}, "");
static_assert(ObfuscateId(3) == uint64_t{0xd5428022b8669dd5}, "");
static_assert(ObfuscateId(4) == uint64_t{0x9a596f7eefacc23e}, "");
static_assert(ObfuscateId(5) == uint64_t{0x5f705edb26f2e6a7}, "");
static_assert(ObfuscateId(6) == uint64_t{0x24874e375e390b10}, "");
static_assert(ObfuscateId(7) == uint64_t{0xe99e3d93957f2f79}, "");
static_assert(ObfuscateId(8) == uint64_t{0xaeb52cefccc553e2}, "");
static_assert(ObfuscateId(9) == uint64_t{0x73cc1c4c040b784b}, "");
#ifdef _MSC_VER
#pragma warning(pop)
#endif

}  // namespace

// Combines the never-repeating counter with the obfuscation step, so each
// Create() call yields a unique, unpredictable-looking ID.
AudioCodecPairId AudioCodecPairId::Create() {
  return AudioCodecPairId(ObfuscateId(GetNextId()));
}

}  // namespace webrtc

74
third_party/libwebrtc/api/audio_codecs/audio_codec_pair_id.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,74 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
#define API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_
#include <stdint.h>
#include <utility>
namespace webrtc {
// Opaque ID used to link an encoder and a decoder created for the same
// remote entity. Instances come only from Create(); each call yields a
// value unequal to all previously created ones.
class AudioCodecPairId final {
 public:
  // Copyable and movable, but not default constructible.
  AudioCodecPairId() = delete;
  AudioCodecPairId(const AudioCodecPairId&) = default;
  AudioCodecPairId(AudioCodecPairId&&) = default;
  AudioCodecPairId& operator=(const AudioCodecPairId&) = default;
  AudioCodecPairId& operator=(AudioCodecPairId&&) = default;

  friend void swap(AudioCodecPairId& a, AudioCodecPairId& b) {
    using std::swap;
    swap(a.id_, b.id_);
  }

  // Creates a new ID, unequal to any previously created ID.
  static AudioCodecPairId Create();

  // Equality; the derived operators below are all expressed in terms of
  // the primitive == and < on the underlying integer.
  friend bool operator==(AudioCodecPairId a, AudioCodecPairId b) {
    return a.id_ == b.id_;
  }
  friend bool operator!=(AudioCodecPairId a, AudioCodecPairId b) {
    return !(a == b);
  }

  // Ordering. The order of ID values is completely arbitrary, but stable,
  // so it's useful e.g. if you want to use IDs as keys in an ordered map.
  friend bool operator<(AudioCodecPairId a, AudioCodecPairId b) {
    return a.id_ < b.id_;
  }
  friend bool operator<=(AudioCodecPairId a, AudioCodecPairId b) {
    return !(b < a);
  }
  friend bool operator>=(AudioCodecPairId a, AudioCodecPairId b) {
    return !(a < b);
  }
  friend bool operator>(AudioCodecPairId a, AudioCodecPairId b) {
    return b < a;
  }

  // Returns a numeric representation of the ID. The numeric values are
  // completely arbitrary, but stable, collision-free, and reasonably evenly
  // distributed, so they are e.g. useful as hash values in unordered maps.
  uint64_t NumericRepresentation() const { return id_; }

 private:
  explicit AudioCodecPairId(uint64_t id) : id_(id) {}
  uint64_t id_;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_CODEC_PAIR_ID_H_

Просмотреть файл

@ -11,6 +11,7 @@
#include "api/audio_codecs/audio_decoder.h"
#include <assert.h>
#include <memory>
#include <utility>
@ -33,14 +34,14 @@ class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
return ret < 0 ? 0 : static_cast<size_t>(ret);
}
rtc::Optional<DecodeResult> Decode(
absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const override {
auto speech_type = AudioDecoder::kSpeech;
const int ret = decoder_->Decode(
payload_.data(), payload_.size(), decoder_->SampleRateHz(),
decoded.size() * sizeof(int16_t), decoded.data(), &speech_type);
return ret < 0 ? rtc::nullopt
: rtc::Optional<DecodeResult>(
return ret < 0 ? absl::nullopt
: absl::optional<DecodeResult>(
{static_cast<size_t>(ret), speech_type});
}
@ -51,6 +52,10 @@ class OldStyleEncodedFrame final : public AudioDecoder::EncodedAudioFrame {
} // namespace
bool AudioDecoder::EncodedAudioFrame::IsDtxPacket() const {
return false;
}
AudioDecoder::ParseResult::ParseResult() = default;
AudioDecoder::ParseResult::ParseResult(ParseResult&& b) = default;
AudioDecoder::ParseResult::ParseResult(uint32_t timestamp,
@ -126,13 +131,9 @@ size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
return 0;
}
int AudioDecoder::IncomingPacket(const uint8_t* payload,
size_t payload_len,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp) {
return 0;
}
// TODO(bugs.webrtc.org/9676): Remove default implementation.
void AudioDecoder::GeneratePlc(size_t /*requested_samples_per_channel*/,
rtc::BufferT<int16_t>* /*concealment_audio*/) {}
int AudioDecoder::ErrorCode() {
return 0;

Просмотреть файл

@ -11,14 +11,16 @@
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/optional.h"
#include "rtc_base/buffer.h"
#include "rtc_base/constructormagic.h"
#include "typedefs.h" // NOLINT(build/include)
#include "rtc_base/constructor_magic.h"
namespace webrtc {
@ -48,13 +50,16 @@ class AudioDecoder {
// If no duration can be ascertained, returns zero.
virtual size_t Duration() const = 0;
// Returns true if this packet contains DTX.
virtual bool IsDtxPacket() const;
// Decodes this frame of audio and writes the result in |decoded|.
// |decoded| must be large enough to store as many samples as indicated by a
// call to Duration() . On success, returns an rtc::Optional containing the
// call to Duration() . On success, returns an absl::optional containing the
// total number of samples across all channels, as well as whether the
// decoder produced comfort noise or speech. On failure, returns an empty
// rtc::Optional. Decode may be called at most once per frame object.
virtual rtc::Optional<DecodeResult> Decode(
// absl::optional. Decode may be called at most once per frame object.
virtual absl::optional<DecodeResult> Decode(
rtc::ArrayView<int16_t> decoded) const = 0;
};
@ -86,6 +91,10 @@ class AudioDecoder {
virtual std::vector<ParseResult> ParsePayload(rtc::Buffer&& payload,
uint32_t timestamp);
// TODO(bugs.webrtc.org/10098): The Decode and DecodeRedundant methods are
// obsolete; callers should call ParsePayload instead. For now, subclasses
// must still implement DecodeInternal.
// Decodes |encode_len| bytes from |encoded| and writes the result in
// |decoded|. The maximum bytes allowed to be written into |decoded| is
// |max_decoded_bytes|. Returns the total number of samples across all
@ -117,16 +126,23 @@ class AudioDecoder {
// memory allocated in |decoded| should accommodate |num_frames| frames.
virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
// Asks the decoder to generate packet-loss concealment and append it to the
// end of |concealment_audio|. The concealment audio should be in
// channel-interleaved format, with as many channels as the last decoded
// packet produced. The implementation must produce at least
// requested_samples_per_channel, or nothing at all. This is a signal to the
// caller to conceal the loss with other means. If the implementation provides
// concealment samples, it is also responsible for "stitching" it together
// with the decoded audio on either side of the concealment.
// Note: The default implementation of GeneratePlc will be deleted soon. All
// implementations must provide their own, which can be a simple as a no-op.
// TODO(bugs.webrtc.org/9676): Remove default implementation.
virtual void GeneratePlc(size_t requested_samples_per_channel,
rtc::BufferT<int16_t>* concealment_audio);
// Resets the decoder state (empty buffers etc.).
virtual void Reset() = 0;
// Notifies the decoder of an incoming packet to NetEQ.
virtual int IncomingPacket(const uint8_t* payload,
size_t payload_len,
uint16_t rtp_sequence_number,
uint32_t rtp_timestamp,
uint32_t arrival_timestamp);
// Returns the last error code from the decoder.
virtual int ErrorCode();

53
third_party/libwebrtc/api/audio_codecs/audio_decoder_factory.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,53 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/ref_count.h"
namespace webrtc {
// A factory that creates AudioDecoders.
class AudioDecoderFactory : public rtc::RefCountInterface {
 public:
  // Lists the codec specs of the decoders this factory offers.
  virtual std::vector<AudioCodecSpec> GetSupportedDecoders() = 0;

  // Returns true if |format| names a decoder this factory supports.
  virtual bool IsSupportedDecoder(const SdpAudioFormat& format) = 0;

  // Create a new decoder instance. The `codec_pair_id` argument is used to
  // link encoders and decoders that talk to the same remote entity: if a
  // AudioEncoderFactory::MakeAudioEncoder() and a
  // AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
  // compare equal, the factory implementations may assume that the encoder
  // and decoder form a pair. (The intended use case for this is to set up
  // communication between the AudioEncoder and AudioDecoder instances, which
  // is needed for some codecs with built-in bandwidth adaptation.)
  //
  // Returns null if the format isn't supported.
  //
  // Note: Implementations need to be robust against combinations other than
  // one encoder, one decoder getting the same ID; such decoders must still
  // work.
  virtual std::unique_ptr<AudioDecoder> MakeAudioDecoder(
      const SdpAudioFormat& format,
      absl::optional<AudioCodecPairId> codec_pair_id) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_DECODER_FACTORY_H_

Просмотреть файл

@ -15,8 +15,8 @@
#include <vector>
#include "api/audio_codecs/audio_decoder_factory.h"
#include "rtc_base/refcountedobject.h"
#include "rtc_base/scoped_ref_ptr.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
@ -31,7 +31,8 @@ struct Helper<> {
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {}
static bool IsSupportedDecoder(const SdpAudioFormat& format) { return false; }
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format) {
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
return nullptr;
}
};
@ -46,13 +47,18 @@ struct Helper<T, Ts...> {
}
static bool IsSupportedDecoder(const SdpAudioFormat& format) {
auto opt_config = T::SdpToConfig(format);
static_assert(std::is_same<decltype(opt_config),
absl::optional<typename T::Config>>::value,
"T::SdpToConfig() must return a value of type "
"absl::optional<T::Config>");
return opt_config ? true : Helper<Ts...>::IsSupportedDecoder(format);
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format) {
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
auto opt_config = T::SdpToConfig(format);
return opt_config ? T::MakeAudioDecoder(*opt_config)
: Helper<Ts...>::MakeAudioDecoder(format);
return opt_config ? T::MakeAudioDecoder(*opt_config, codec_pair_id)
: Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id);
}
};
@ -70,8 +76,9 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
}
std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const SdpAudioFormat& format) override {
return Helper<Ts...>::MakeAudioDecoder(format);
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioDecoder(format, codec_pair_id);
}
};
@ -83,9 +90,9 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
// be a struct with the following static member functions:
//
// // Converts |audio_format| to a ConfigType instance. Returns an empty
// // optional if |audio_format| doesn't correctly specify an decoder of our
// // optional if |audio_format| doesn't correctly specify a decoder of our
// // type.
// rtc::Optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
// // Appends zero or more AudioCodecSpecs to the list that will be returned
// // by AudioDecoderFactory::GetSupportedDecoders().
@ -93,17 +100,18 @@ class AudioDecoderFactoryT : public AudioDecoderFactory {
//
// // Creates an AudioDecoder for the specified format. Used to implement
// // AudioDecoderFactory::MakeAudioDecoder().
// std::unique_ptr<AudioDecoder> MakeAudioDecoder(const ConfigType& config);
// std::unique_ptr<AudioDecoder> MakeAudioDecoder(
// const ConfigType& config,
// absl::optional<AudioCodecPairId> codec_pair_id);
//
// ConfigType should be a type that encapsulates all the settings needed to
// create an AudioDecoder.
// create an AudioDecoder. T::Config (where T is the decoder struct) should
// either be the config type, or an alias for it.
//
// Whenever it tries to do something, the new factory will try each of the
// decoder types in the order they were specified in the template argument
// list, stopping at the first one that claims to be able to do the job.
//
// NOTE: This function is still under development and may change without notice.
//
// TODO(kwiberg): Point at CreateBuiltinAudioDecoderFactory() for an example of
// how it is used.
template <typename... Ts>

Просмотреть файл

@ -82,15 +82,22 @@ void AudioEncoder::OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction) {}
void AudioEncoder::OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction) {}
float uplink_recoverable_packet_loss_fraction) {
RTC_NOTREACHED();
}
void AudioEncoder::OnReceivedTargetAudioBitrate(int target_audio_bitrate_bps) {
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, rtc::nullopt);
OnReceivedUplinkBandwidth(target_audio_bitrate_bps, absl::nullopt);
}
void AudioEncoder::OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
rtc::Optional<int64_t> bwe_period_ms) {}
absl::optional<int64_t> bwe_period_ms) {}
void AudioEncoder::OnReceivedUplinkAllocation(BitrateAllocationUpdate update) {
OnReceivedUplinkBandwidth(update.target_bitrate.bps(),
update.bwe_period.ms());
}
void AudioEncoder::OnReceivedRtt(int rtt_ms) {}

Просмотреть файл

@ -11,16 +11,17 @@
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_H_
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/optional.h"
#include "api/call/bitrate_allocation.h"
#include "api/units/time_delta.h"
#include "rtc_base/buffer.h"
#include "rtc_base/deprecation.h"
#include "typedefs.h" // NOLINT(build/include)
namespace webrtc {
@ -34,30 +35,30 @@ struct ANAStats {
// Number of actions taken by the ANA bitrate controller since the start of
// the call. If this value is not set, it indicates that the bitrate
// controller is disabled.
rtc::Optional<uint32_t> bitrate_action_counter;
absl::optional<uint32_t> bitrate_action_counter;
// Number of actions taken by the ANA channel controller since the start of
// the call. If this value is not set, it indicates that the channel
// controller is disabled.
rtc::Optional<uint32_t> channel_action_counter;
absl::optional<uint32_t> channel_action_counter;
// Number of actions taken by the ANA DTX controller since the start of the
// call. If this value is not set, it indicates that the DTX controller is
// disabled.
rtc::Optional<uint32_t> dtx_action_counter;
absl::optional<uint32_t> dtx_action_counter;
// Number of actions taken by the ANA FEC controller since the start of the
// call. If this value is not set, it indicates that the FEC controller is
// disabled.
rtc::Optional<uint32_t> fec_action_counter;
absl::optional<uint32_t> fec_action_counter;
// Number of times the ANA frame length controller decided to increase the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
rtc::Optional<uint32_t> frame_length_increase_counter;
absl::optional<uint32_t> frame_length_increase_counter;
// Number of times the ANA frame length controller decided to decrease the
// frame length since the start of the call. If this value is not set, it
// indicates that the frame length controller is disabled.
rtc::Optional<uint32_t> frame_length_decrease_counter;
absl::optional<uint32_t> frame_length_decrease_counter;
// The uplink packet loss fractions as set by the ANA FEC controller. If this
// value is not set, it indicates that the ANA FEC controller is not active.
rtc::Optional<float> uplink_packet_loss_fraction;
absl::optional<float> uplink_packet_loss_fraction;
};
// This is the interface class for encoders in AudioCoding module. Each codec
@ -209,10 +210,7 @@ class AudioEncoder {
virtual void OnReceivedUplinkPacketLossFraction(
float uplink_packet_loss_fraction);
// Provides 1st-order-FEC-recoverable uplink packet loss rate to this encoder
// to allow it to adapt.
// |uplink_recoverable_packet_loss_fraction| is in the range [0.0, 1.0].
virtual void OnReceivedUplinkRecoverablePacketLossFraction(
RTC_DEPRECATED virtual void OnReceivedUplinkRecoverablePacketLossFraction(
float uplink_recoverable_packet_loss_fraction);
// Provides target audio bitrate to this encoder to allow it to adapt.
@ -220,9 +218,12 @@ class AudioEncoder {
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkBandwidth(
int target_audio_bitrate_bps,
rtc::Optional<int64_t> bwe_period_ms);
virtual void OnReceivedUplinkBandwidth(int target_audio_bitrate_bps,
absl::optional<int64_t> bwe_period_ms);
// Provides target audio bitrate and corresponding probing interval of
// the bandwidth estimator to this encoder to allow it to adapt.
virtual void OnReceivedUplinkAllocation(BitrateAllocationUpdate update);
// Provides RTT to this encoder to allow it to adapt.
virtual void OnReceivedRtt(int rtt_ms);
@ -239,6 +240,12 @@ class AudioEncoder {
// Get statistics related to audio network adaptation.
virtual ANAStats GetANAStats() const;
// The range of frame lengths that are supported or nullopt if there's no sch
// information. This is used to calculated the full bitrate range, including
// overhead.
virtual absl::optional<std::pair<TimeDelta, TimeDelta>> GetFrameLengthRange()
const = 0;
protected:
// Subclasses implement this to perform the actual encoding. Called by
// Encode().

62
third_party/libwebrtc/api/audio_codecs/audio_encoder_factory.h поставляемый Normal file
Просмотреть файл

@ -0,0 +1,62 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/ref_count.h"
namespace webrtc {
// A factory that creates AudioEncoders.
class AudioEncoderFactory : public rtc::RefCountInterface {
public:
// Returns a prioritized list of audio codecs, to use for signaling etc.
virtual std::vector<AudioCodecSpec> GetSupportedEncoders() = 0;
// Returns information about how this format would be encoded, provided it's
// supported. More format and format variations may be supported than those
// returned by GetSupportedEncoders().
virtual absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) = 0;
// Creates an AudioEncoder for the specified format. The encoder will tags its
// payloads with the specified payload type. The `codec_pair_id` argument is
// used to link encoders and decoders that talk to the same remote entity: if
// a AudioEncoderFactory::MakeAudioEncoder() and a
// AudioDecoderFactory::MakeAudioDecoder() call receive non-null IDs that
// compare equal, the factory implementations may assume that the encoder and
// decoder form a pair. (The intended use case for this is to set up
// communication between the AudioEncoder and AudioDecoder instances, which is
// needed for some codecs with built-in bandwidth adaptation.)
//
// Returns null if the format isn't supported.
//
// Note: Implementations need to be robust against combinations other than
// one encoder, one decoder getting the same ID; such encoders must still
// work.
//
// TODO(ossu): Try to avoid audio encoders having to know their payload type.
virtual std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) = 0;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_ENCODER_FACTORY_H_

Просмотреть файл

@ -15,8 +15,8 @@
#include <vector>
#include "api/audio_codecs/audio_encoder_factory.h"
#include "rtc_base/refcountedobject.h"
#include "rtc_base/scoped_ref_ptr.h"
#include "api/scoped_refptr.h"
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
@ -29,13 +29,14 @@ struct Helper;
template <>
struct Helper<> {
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {}
static rtc::Optional<AudioCodecInfo> QueryAudioEncoder(
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) {
return rtc::nullopt;
return absl::nullopt;
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format) {
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
return nullptr;
}
};
@ -48,21 +49,27 @@ struct Helper<T, Ts...> {
T::AppendSupportedEncoders(specs);
Helper<Ts...>::AppendSupportedEncoders(specs);
}
static rtc::Optional<AudioCodecInfo> QueryAudioEncoder(
static absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) {
auto opt_config = T::SdpToConfig(format);
return opt_config ? rtc::Optional<AudioCodecInfo>(
static_assert(std::is_same<decltype(opt_config),
absl::optional<typename T::Config>>::value,
"T::SdpToConfig() must return a value of type "
"absl::optional<T::Config>");
return opt_config ? absl::optional<AudioCodecInfo>(
T::QueryAudioEncoder(*opt_config))
: Helper<Ts...>::QueryAudioEncoder(format);
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format) {
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) {
auto opt_config = T::SdpToConfig(format);
if (opt_config) {
return T::MakeAudioEncoder(*opt_config, payload_type);
return T::MakeAudioEncoder(*opt_config, payload_type, codec_pair_id);
} else {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format);
return Helper<Ts...>::MakeAudioEncoder(payload_type, format,
codec_pair_id);
}
}
};
@ -76,15 +83,16 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
return specs;
}
rtc::Optional<AudioCodecInfo> QueryAudioEncoder(
absl::optional<AudioCodecInfo> QueryAudioEncoder(
const SdpAudioFormat& format) override {
return Helper<Ts...>::QueryAudioEncoder(format);
}
std::unique_ptr<AudioEncoder> MakeAudioEncoder(
int payload_type,
const SdpAudioFormat& format) override {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format);
const SdpAudioFormat& format,
absl::optional<AudioCodecPairId> codec_pair_id) override {
return Helper<Ts...>::MakeAudioEncoder(payload_type, format, codec_pair_id);
}
};
@ -98,7 +106,7 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
// // Converts |audio_format| to a ConfigType instance. Returns an empty
// // optional if |audio_format| doesn't correctly specify an encoder of our
// // type.
// rtc::Optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
// absl::optional<ConfigType> SdpToConfig(const SdpAudioFormat& audio_format);
//
// // Appends zero or more AudioCodecSpecs to the list that will be returned
// // by AudioEncoderFactory::GetSupportedEncoders().
@ -110,18 +118,19 @@ class AudioEncoderFactoryT : public AudioEncoderFactory {
//
// // Creates an AudioEncoder for the specified format. Used to implement
// // AudioEncoderFactory::MakeAudioEncoder().
// std::unique_ptr<AudioEncoder> MakeAudioEncoder(const ConfigType& config,
// int payload_type);
// std::unique_ptr<AudioDecoder> MakeAudioEncoder(
// const ConfigType& config,
// int payload_type,
// absl::optional<AudioCodecPairId> codec_pair_id);
//
// ConfigType should be a type that encapsulates all the settings needed to
// create an AudioDecoder.
// create an AudioEncoder. T::Config (where T is the encoder struct) should
// either be the config type, or an alias for it.
//
// Whenever it tries to do something, the new factory will try each of the
// encoders in the order they were specified in the template argument list,
// stopping at the first one that claims to be able to do the job.
//
// NOTE: This function is still under development and may change without notice.
//
// TODO(kwiberg): Point at CreateBuiltinAudioEncoderFactory() for an example of
// how it is used.
template <typename... Ts>

86
third_party/libwebrtc/api/audio_codecs/audio_format.cc поставляемый Normal file
Просмотреть файл

@ -0,0 +1,86 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/audio_format.h"
#include <utility>
#include "absl/strings/match.h"
namespace webrtc {
SdpAudioFormat::SdpAudioFormat(const SdpAudioFormat&) = default;
SdpAudioFormat::SdpAudioFormat(SdpAudioFormat&&) = default;
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels)
: name(name), clockrate_hz(clockrate_hz), num_channels(num_channels) {}
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
const Parameters& param)
: name(name),
clockrate_hz(clockrate_hz),
num_channels(num_channels),
parameters(param) {}
SdpAudioFormat::SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
Parameters&& param)
: name(name),
clockrate_hz(clockrate_hz),
num_channels(num_channels),
parameters(std::move(param)) {}
bool SdpAudioFormat::Matches(const SdpAudioFormat& o) const {
return absl::EqualsIgnoreCase(name, o.name) &&
clockrate_hz == o.clockrate_hz && num_channels == o.num_channels;
}
SdpAudioFormat::~SdpAudioFormat() = default;
SdpAudioFormat& SdpAudioFormat::operator=(const SdpAudioFormat&) = default;
SdpAudioFormat& SdpAudioFormat::operator=(SdpAudioFormat&&) = default;
bool operator==(const SdpAudioFormat& a, const SdpAudioFormat& b) {
return absl::EqualsIgnoreCase(a.name, b.name) &&
a.clockrate_hz == b.clockrate_hz && a.num_channels == b.num_channels &&
a.parameters == b.parameters;
}
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
size_t num_channels,
int bitrate_bps)
: AudioCodecInfo(sample_rate_hz,
num_channels,
bitrate_bps,
bitrate_bps,
bitrate_bps) {}
AudioCodecInfo::AudioCodecInfo(int sample_rate_hz,
size_t num_channels,
int default_bitrate_bps,
int min_bitrate_bps,
int max_bitrate_bps)
: sample_rate_hz(sample_rate_hz),
num_channels(num_channels),
default_bitrate_bps(default_bitrate_bps),
min_bitrate_bps(min_bitrate_bps),
max_bitrate_bps(max_bitrate_bps) {
RTC_DCHECK_GT(sample_rate_hz, 0);
RTC_DCHECK_GT(num_channels, 0);
RTC_DCHECK_GE(min_bitrate_bps, 0);
RTC_DCHECK_LE(min_bitrate_bps, default_bitrate_bps);
RTC_DCHECK_GE(max_bitrate_bps, default_bitrate_bps);
}
} // namespace webrtc

Просмотреть файл

@ -11,34 +11,32 @@
#ifndef API_AUDIO_CODECS_AUDIO_FORMAT_H_
#define API_AUDIO_CODECS_AUDIO_FORMAT_H_
#include <map>
#include <ostream>
#include <string>
#include <utility>
#include <stddef.h>
#include "api/optional.h"
#include <map>
#include <string>
#include "absl/strings/string_view.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// SDP specification for a single audio codec.
// NOTE: This class is still under development and may change without notice.
struct SdpAudioFormat {
struct RTC_EXPORT SdpAudioFormat {
using Parameters = std::map<std::string, std::string>;
SdpAudioFormat(const SdpAudioFormat&);
SdpAudioFormat(SdpAudioFormat&&);
SdpAudioFormat(const char* name, int clockrate_hz, size_t num_channels);
SdpAudioFormat(const std::string& name,
int clockrate_hz,
size_t num_channels);
SdpAudioFormat(const char* name,
SdpAudioFormat(absl::string_view name, int clockrate_hz, size_t num_channels);
SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
const Parameters& param);
SdpAudioFormat(const std::string& name,
SdpAudioFormat(absl::string_view name,
int clockrate_hz,
size_t num_channels,
const Parameters& param);
Parameters&& param);
~SdpAudioFormat();
// Returns true if this format is compatible with |o|. In SDP terminology:
@ -60,9 +58,6 @@ struct SdpAudioFormat {
Parameters parameters;
};
void swap(SdpAudioFormat& a, SdpAudioFormat& b);
std::ostream& operator<<(std::ostream& os, const SdpAudioFormat& saf);
// Information about how an audio format is treated by the codec implementation.
// Contains basic information, such as sample rate and number of channels, which
// isn't uniformly presented by SDP. Also contains flags indicating support for
@ -120,8 +115,6 @@ struct AudioCodecInfo {
// network conditions.
};
std::ostream& operator<<(std::ostream& os, const AudioCodecInfo& aci);
// AudioCodecSpec ties an audio format to specific information about the codec
// and its implementation.
struct AudioCodecSpec {
@ -135,8 +128,6 @@ struct AudioCodecSpec {
AudioCodecInfo info;
};
std::ostream& operator<<(std::ostream& os, const AudioCodecSpec& acs);
} // namespace webrtc
#endif // API_AUDIO_CODECS_AUDIO_FORMAT_H_

Просмотреть файл

@ -22,6 +22,7 @@
#endif
#include "api/audio_codecs/isac/audio_decoder_isac.h"
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h" // nogncheck
#endif
@ -33,14 +34,17 @@ namespace {
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static rtc::Optional<Config> SdpToConfig(const SdpAudioFormat& audio_format) {
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs) {
// Don't advertise support for anything.
}
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(const Config& config) {
return T::MakeAudioDecoder(config);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
return T::MakeAudioDecoder(config, codec_pair_id);
}
};
@ -50,7 +54,7 @@ rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory() {
return CreateAudioDecoderFactory<
#if WEBRTC_USE_BUILTIN_OPUS
AudioDecoderOpus,
AudioDecoderOpus, NotAdvertised<AudioDecoderMultiChannelOpus>,
#endif
AudioDecoderIsac, AudioDecoderG722,

Просмотреть файл

@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
#define API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_
#include "api/audio_codecs/audio_decoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of audio decoders.
// Note: This will link with all the code implementing those codecs, so if you
// only need a subset of the codecs, consider using
// CreateAudioDecoderFactory<...codecs listed here...>() or
// CreateOpusAudioDecoderFactory() instead.
rtc::scoped_refptr<AudioDecoderFactory> CreateBuiltinAudioDecoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_DECODER_FACTORY_H_

Просмотреть файл

@ -22,6 +22,7 @@
#endif
#include "api/audio_codecs/isac/audio_encoder_isac.h"
#if WEBRTC_USE_BUILTIN_OPUS
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h" // nogncheck
#endif
@ -33,7 +34,8 @@ namespace {
template <typename T>
struct NotAdvertised {
using Config = typename T::Config;
static rtc::Optional<Config> SdpToConfig(const SdpAudioFormat& audio_format) {
static absl::optional<Config> SdpToConfig(
const SdpAudioFormat& audio_format) {
return T::SdpToConfig(audio_format);
}
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs) {
@ -42,9 +44,11 @@ struct NotAdvertised {
static AudioCodecInfo QueryAudioEncoder(const Config& config) {
return T::QueryAudioEncoder(config);
}
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(const Config& config,
int payload_type) {
return T::MakeAudioEncoder(config, payload_type);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt) {
return T::MakeAudioEncoder(config, payload_type, codec_pair_id);
}
};
@ -54,7 +58,7 @@ rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory() {
return CreateAudioEncoderFactory<
#if WEBRTC_USE_BUILTIN_OPUS
AudioEncoderOpus,
AudioEncoderOpus, NotAdvertised<AudioEncoderMultiChannelOpus>,
#endif
AudioEncoderIsac, AudioEncoderG722,

Просмотреть файл

@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
#define API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/scoped_refptr.h"
namespace webrtc {
// Creates a new factory that can create the built-in types of audio encoders.
// Note: This will link with all the code implementing those codecs, so if you
// only need a subset of the codecs, consider using
// CreateAudioEncoderFactory<...codecs listed here...>() or
// CreateOpusAudioEncoderFactory() instead.
rtc::scoped_refptr<AudioEncoderFactory> CreateBuiltinAudioEncoderFactory();
} // namespace webrtc
#endif // API_AUDIO_CODECS_BUILTIN_AUDIO_ENCODER_FACTORY_H_

52
third_party/libwebrtc/api/audio_codecs/g711/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,52 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_encoder_g711") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_g711.cc",
"audio_encoder_g711.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:g711",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_minmax",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_g711") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_g711.cc",
"audio_decoder_g711.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:g711",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

Просмотреть файл

@ -0,0 +1,59 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g711/audio_decoder_g711.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderG711::Config> AudioDecoderG711::SdpToConfig(
const SdpAudioFormat& format) {
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
(is_pcmu || is_pcma)) {
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
RTC_DCHECK(config.IsOk());
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderG711::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
for (const char* type : {"PCMU", "PCMA"}) {
specs->push_back({{type, 8000, 1}, {8000, 1, 64000}});
}
}
std::unique_ptr<AudioDecoder> AudioDecoderG711::MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
switch (config.type) {
case Config::Type::kPcmU:
return std::make_unique<AudioDecoderPcmU>(config.num_channels);
case Config::Type::kPcmA:
return std::make_unique<AudioDecoderPcmA>(config.num_channels);
default:
return nullptr;
}
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,45 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
#define API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G711 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderG711 {
struct Config {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
return (type == Type::kPcmU || type == Type::kPcmA) && num_channels >= 1;
}
Type type;
int num_channels;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
const Config& config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G711_AUDIO_DECODER_G711_H_

Просмотреть файл

@ -13,19 +13,18 @@
#include <memory>
#include <vector>
#include "common_types.h" // NOLINT(build/include)
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
rtc::Optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
absl::optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
const SdpAudioFormat& format) {
const bool is_pcmu = STR_CASE_CMP(format.name.c_str(), "PCMU") == 0;
const bool is_pcma = STR_CASE_CMP(format.name.c_str(), "PCMA") == 0;
const bool is_pcmu = absl::EqualsIgnoreCase(format.name, "PCMU");
const bool is_pcma = absl::EqualsIgnoreCase(format.name, "PCMA");
if (format.clockrate_hz == 8000 && format.num_channels >= 1 &&
(is_pcmu || is_pcma)) {
Config config;
@ -42,7 +41,7 @@ rtc::Optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
RTC_DCHECK(config.IsOk());
return config;
} else {
return rtc::nullopt;
return absl::nullopt;
}
}
@ -61,7 +60,8 @@ AudioCodecInfo AudioEncoderG711::QueryAudioEncoder(const Config& config) {
std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
const Config& config,
int payload_type) {
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
switch (config.type) {
case Config::Type::kPcmU: {
@ -69,16 +69,18 @@ std::unique_ptr<AudioEncoder> AudioEncoderG711::MakeAudioEncoder(
impl_config.num_channels = config.num_channels;
impl_config.frame_size_ms = config.frame_size_ms;
impl_config.payload_type = payload_type;
return rtc::MakeUnique<AudioEncoderPcmU>(impl_config);
return std::make_unique<AudioEncoderPcmU>(impl_config);
}
case Config::Type::kPcmA: {
AudioEncoderPcmA::Config impl_config;
impl_config.num_channels = config.num_channels;
impl_config.frame_size_ms = config.frame_size_ms;
impl_config.payload_type = payload_type;
return rtc::MakeUnique<AudioEncoderPcmA>(impl_config);
return std::make_unique<AudioEncoderPcmA>(impl_config);
}
default: {
return nullptr;
}
default: { return nullptr; }
}
}

Просмотреть файл

@ -14,17 +14,17 @@
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/optional.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G711 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
//
// NOTE: This struct is still under development and may change without notice.
struct AudioEncoderG711 {
struct RTC_EXPORT AudioEncoderG711 {
struct Config {
enum class Type { kPcmU, kPcmA };
bool IsOk() const {
@ -35,12 +35,14 @@ struct AudioEncoderG711 {
int num_channels = 1;
int frame_size_ms = 20;
};
static rtc::Optional<AudioEncoderG711::Config> SdpToConfig(
static absl::optional<AudioEncoderG711::Config> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(const Config& config,
int payload_type);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc

58
third_party/libwebrtc/api/audio_codecs/g722/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,58 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_source_set("audio_encoder_g722_config") {
visibility = [ "*" ]
sources = [ "audio_encoder_g722_config.h" ]
}
rtc_library("audio_encoder_g722") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_g722.cc",
"audio_encoder_g722.h",
]
deps = [
":audio_encoder_g722_config",
"..:audio_codecs_api",
"../../../modules/audio_coding:g722",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_minmax",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_g722") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_g722.cc",
"audio_decoder_g722.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:g722",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

Просмотреть файл

@ -0,0 +1,50 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g722/audio_decoder_g722.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g722/audio_decoder_g722.h"
#include "rtc_base/numerics/safe_conversions.h"
namespace webrtc {
absl::optional<AudioDecoderG722::Config> AudioDecoderG722::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "G722") &&
format.clockrate_hz == 8000 &&
(format.num_channels == 1 || format.num_channels == 2)
? absl::optional<Config>(
Config{rtc::dchecked_cast<int>(format.num_channels)})
: absl::nullopt;
}
void AudioDecoderG722::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"G722", 8000, 1}, {16000, 1, 64000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderG722::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
switch (config.num_channels) {
case 1:
return std::make_unique<AudioDecoderG722Impl>();
case 2:
return std::make_unique<AudioDecoderG722StereoImpl>();
default:
return nullptr;
}
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,41 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
#define API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G722 decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderG722 {
struct Config {
bool IsOk() const { return num_channels == 1 || num_channels == 2; }
int num_channels;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G722_AUDIO_DECODER_G722_H_

Просмотреть файл

@ -0,0 +1,67 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/g722/audio_encoder_g722.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
absl::optional<AudioEncoderG722Config> AudioEncoderG722::SdpToConfig(
const SdpAudioFormat& format) {
if (!absl::EqualsIgnoreCase(format.name, "g722") ||
format.clockrate_hz != 8000) {
return absl::nullopt;
}
AudioEncoderG722Config config;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
const int whole_packets = *ptime / 10;
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 10, 60);
}
}
return config.IsOk() ? absl::optional<AudioEncoderG722Config>(config)
: absl::nullopt;
}
void AudioEncoderG722::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"G722", 8000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderG722::QueryAudioEncoder(
const AudioEncoderG722Config& config) {
RTC_DCHECK(config.IsOk());
return {16000, rtc::dchecked_cast<size_t>(config.num_channels),
64000 * config.num_channels};
}
std::unique_ptr<AudioEncoder> AudioEncoderG722::MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
return std::make_unique<AudioEncoderG722Impl>(config, payload_type);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,42 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
#define API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/g722/audio_encoder_g722_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// G722 encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderG722 {
using Config = AudioEncoderG722Config;
static absl::optional<AudioEncoderG722Config> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderG722Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderG722Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_G722_AUDIO_ENCODER_G722_H_

Просмотреть файл

@ -13,7 +13,6 @@
namespace webrtc {
// NOTE: This struct is still under development and may change without notice.
struct AudioEncoderG722Config {
bool IsOk() const {
return frame_size_ms > 0 && frame_size_ms % 10 == 0 && num_channels >= 1;

56
third_party/libwebrtc/api/audio_codecs/ilbc/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,56 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_source_set("audio_encoder_ilbc_config") {
visibility = [ "*" ]
sources = [ "audio_encoder_ilbc_config.h" ]
}
rtc_library("audio_encoder_ilbc") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_ilbc.cc",
"audio_encoder_ilbc.h",
]
deps = [
":audio_encoder_ilbc_config",
"..:audio_codecs_api",
"../../../modules/audio_coding:ilbc",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:safe_minmax",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_ilbc") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_ilbc.cc",
"audio_decoder_ilbc.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:ilbc",
"../../../rtc_base:rtc_base_approved",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

Просмотреть файл

@ -0,0 +1,40 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/ilbc/audio_decoder_ilbc.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
namespace webrtc {
absl::optional<AudioDecoderIlbc::Config> AudioDecoderIlbc::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "ILBC") &&
format.clockrate_hz == 8000 && format.num_channels == 1
? absl::optional<Config>(Config())
: absl::nullopt;
}
void AudioDecoderIlbc::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ILBC", 8000, 1}, {8000, 1, 13300}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIlbc::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
return std::make_unique<AudioDecoderIlbcImpl>();
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,37 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
#define API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
namespace webrtc {
// ILBC decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct AudioDecoderIlbc {
struct Config {}; // Empty---no config values needed!
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ILBC_AUDIO_DECODER_ILBC_H_

Просмотреть файл

@ -0,0 +1,81 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/ilbc/audio_encoder_ilbc.h"
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
namespace {
int GetIlbcBitrate(int ptime) {
switch (ptime) {
case 20:
case 40:
// 38 bytes per frame of 20 ms => 15200 bits/s.
return 15200;
case 30:
case 60:
// 50 bytes per frame of 30 ms => (approx) 13333 bits/s.
return 13333;
default:
FATAL();
}
}
} // namespace
absl::optional<AudioEncoderIlbcConfig> AudioEncoderIlbc::SdpToConfig(
const SdpAudioFormat& format) {
if (!absl::EqualsIgnoreCase(format.name.c_str(), "ILBC") ||
format.clockrate_hz != 8000 || format.num_channels != 1) {
return absl::nullopt;
}
AudioEncoderIlbcConfig config;
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
auto ptime = rtc::StringToNumber<int>(ptime_iter->second);
if (ptime && *ptime > 0) {
const int whole_packets = *ptime / 10;
config.frame_size_ms = rtc::SafeClamp<int>(whole_packets * 10, 20, 60);
}
}
return config.IsOk() ? absl::optional<AudioEncoderIlbcConfig>(config)
: absl::nullopt;
}
void AudioEncoderIlbc::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
const SdpAudioFormat fmt = {"ILBC", 8000, 1};
const AudioCodecInfo info = QueryAudioEncoder(*SdpToConfig(fmt));
specs->push_back({fmt, info});
}
AudioCodecInfo AudioEncoderIlbc::QueryAudioEncoder(
const AudioEncoderIlbcConfig& config) {
RTC_DCHECK(config.IsOk());
return {8000, 1, GetIlbcBitrate(config.frame_size_ms)};
}
std::unique_ptr<AudioEncoder> AudioEncoderIlbc::MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
return std::make_unique<AudioEncoderIlbcImpl>(config, payload_type);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,41 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
#define API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/ilbc/audio_encoder_ilbc_config.h"
namespace webrtc {
// ILBC encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct AudioEncoderIlbc {
using Config = AudioEncoderIlbcConfig;
static absl::optional<AudioEncoderIlbcConfig> SdpToConfig(
const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const AudioEncoderIlbcConfig& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const AudioEncoderIlbcConfig& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_

Просмотреть файл

@ -13,7 +13,6 @@
namespace webrtc {
// NOTE: This struct is still under development and may change without notice.
struct AudioEncoderIlbcConfig {
bool IsOk() const {
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||

133
third_party/libwebrtc/api/audio_codecs/isac/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,133 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
# The targets with _fix and _float suffixes unconditionally use the
# fixed-point and floating-point iSAC implementations, respectively.
# The targets without suffixes pick one of the implementations based
# on cleverly chosen criteria.
rtc_source_set("audio_encoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_encoder_isac_fix" ]
} else {
deps = [ ":audio_encoder_isac_float" ]
}
}
rtc_source_set("audio_decoder_isac") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_decoder_isac.h" ]
public_configs = [ ":isac_config" ]
if (current_cpu == "arm") {
deps = [ ":audio_decoder_isac_fix" ]
} else {
deps = [ ":audio_decoder_isac_float" ]
}
}
config("isac_config") {
visibility = [ ":*" ]
if (current_cpu == "arm") {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=1",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=0",
]
} else {
defines = [
"WEBRTC_USE_BUILTIN_ISAC_FIX=0",
"WEBRTC_USE_BUILTIN_ISAC_FLOAT=1",
]
}
}
rtc_library("audio_encoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_fix.cc",
"audio_encoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_fix") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_fix.cc",
"audio_decoder_isac_fix.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac_fix",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_encoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_encoder_isac_float.cc",
"audio_encoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_isac_float") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_isac_float.cc",
"audio_decoder_isac_float.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:isac",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

Просмотреть файл

@ -0,0 +1,41 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_fix.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFix::Config> AudioDecoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
return absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1
? absl::optional<Config>(Config())
: absl::nullopt;
}
void AudioDecoderIsacFix::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFix::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
AudioDecoderIsacFixImpl::Config c;
c.sample_rate_hz = 16000;
return std::make_unique<AudioDecoderIsacFixImpl>(c);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,38 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFix {
struct Config {}; // Empty---no config values needed!
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FIX_H_

Просмотреть файл

@ -0,0 +1,48 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/isac/audio_decoder_isac_float.h"
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
namespace webrtc {
absl::optional<AudioDecoderIsacFloat::Config>
AudioDecoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
return config;
} else {
return absl::nullopt;
}
}
void AudioDecoderIsacFloat::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
specs->push_back({{"ISAC", 16000, 1}, {16000, 1, 32000, 10000, 32000}});
specs->push_back({{"ISAC", 32000, 1}, {32000, 1, 56000, 10000, 56000}});
}
std::unique_ptr<AudioDecoder> AudioDecoderIsacFloat::MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioDecoderIsacFloatImpl::Config c;
c.sample_rate_hz = config.sample_rate_hz;
return std::make_unique<AudioDecoderIsacFloatImpl>(c);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,43 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC decoder API (floating-point implementation) for use as a template
// parameter to CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderIsacFloat {
struct Config {
bool IsOk() const {
return sample_rate_hz == 16000 || sample_rate_hz == 32000;
}
int sample_rate_hz = 16000;
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);
static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
Config config,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_DECODER_ISAC_FLOAT_H_

Просмотреть файл

@ -10,16 +10,17 @@
#include "api/audio_codecs/isac/audio_encoder_isac_fix.h"
#include "common_types.h" // NOLINT(build/include)
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
rtc::Optional<AudioEncoderIsacFix::Config> AudioEncoderIsacFix::SdpToConfig(
absl::optional<AudioEncoderIsacFix::Config> AudioEncoderIsacFix::SdpToConfig(
const SdpAudioFormat& format) {
if (STR_CASE_CMP(format.name.c_str(), "ISAC") == 0 &&
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
format.clockrate_hz == 16000 && format.num_channels == 1) {
Config config;
const auto ptime_iter = format.parameters.find("ptime");
@ -31,7 +32,7 @@ rtc::Optional<AudioEncoderIsacFix::Config> AudioEncoderIsacFix::SdpToConfig(
}
return config;
} else {
return rtc::nullopt;
return absl::nullopt;
}
}
@ -50,12 +51,14 @@ AudioCodecInfo AudioEncoderIsacFix::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderIsacFix::MakeAudioEncoder(
AudioEncoderIsacFix::Config config,
int payload_type) {
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioEncoderIsacFixImpl::Config c;
c.frame_size_ms = config.frame_size_ms;
c.bit_rate = config.bit_rate;
c.payload_type = payload_type;
return rtc::MakeUnique<AudioEncoderIsacFixImpl>(c);
return std::make_unique<AudioEncoderIsacFixImpl>(c);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,52 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (fixed-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFix {
struct Config {
bool IsOk() const {
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
}
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(Config config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
Config config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FIX_H_

Просмотреть файл

@ -10,20 +10,22 @@
#include "api/audio_codecs/isac/audio_encoder_isac_float.h"
#include "common_types.h" // NOLINT(build/include)
#include <memory>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/string_to_number.h"
namespace webrtc {
rtc::Optional<AudioEncoderIsacFloat::Config> AudioEncoderIsacFloat::SdpToConfig(
const SdpAudioFormat& format) {
if (STR_CASE_CMP(format.name.c_str(), "ISAC") == 0 &&
absl::optional<AudioEncoderIsacFloat::Config>
AudioEncoderIsacFloat::SdpToConfig(const SdpAudioFormat& format) {
if (absl::EqualsIgnoreCase(format.name, "ISAC") &&
(format.clockrate_hz == 16000 || format.clockrate_hz == 32000) &&
format.num_channels == 1) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.bit_rate = format.clockrate_hz == 16000 ? 32000 : 56000;
if (config.sample_rate_hz == 16000) {
// For sample rate 16 kHz, optionally use 60 ms frames, instead of the
// default 30 ms.
@ -37,7 +39,7 @@ rtc::Optional<AudioEncoderIsacFloat::Config> AudioEncoderIsacFloat::SdpToConfig(
}
return config;
} else {
return rtc::nullopt;
return absl::nullopt;
}
}
@ -61,13 +63,15 @@ AudioCodecInfo AudioEncoderIsacFloat::QueryAudioEncoder(
std::unique_ptr<AudioEncoder> AudioEncoderIsacFloat::MakeAudioEncoder(
const AudioEncoderIsacFloat::Config& config,
int payload_type) {
int payload_type,
absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
RTC_DCHECK(config.IsOk());
AudioEncoderIsacFloatImpl::Config c;
c.payload_type = payload_type;
c.sample_rate_hz = config.sample_rate_hz;
c.frame_size_ms = config.frame_size_ms;
c.payload_type = payload_type;
return rtc::MakeUnique<AudioEncoderIsacFloatImpl>(c);
c.bit_rate = config.bit_rate;
return std::make_unique<AudioEncoderIsacFloatImpl>(c);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,66 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#define API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// iSAC encoder API (floating-point implementation) for use as a template
// parameter to CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderIsacFloat {
struct Config {
bool IsOk() const {
switch (sample_rate_hz) {
case 16000:
if (frame_size_ms != 30 && frame_size_ms != 60) {
return false;
}
if (bit_rate < 10000 || bit_rate > 32000) {
return false;
}
return true;
case 32000:
if (frame_size_ms != 30) {
return false;
}
if (bit_rate < 10000 || bit_rate > 56000) {
return false;
}
return true;
default:
return false;
}
}
int sample_rate_hz = 16000;
int frame_size_ms = 30;
int bit_rate = 32000; // Limit on short-term average bit rate, in bits/s.
};
static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);
static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);
static AudioCodecInfo QueryAudioEncoder(const Config& config);
static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
const Config& config,
int payload_type,
absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_ISAC_AUDIO_ENCODER_ISAC_FLOAT_H_

112
third_party/libwebrtc/api/audio_codecs/opus/BUILD.gn поставляемый Normal file
Просмотреть файл

@ -0,0 +1,112 @@
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../../webrtc.gni")
if (is_android) {
import("//build/config/android/config.gni")
import("//build/config/android/rules.gni")
}
rtc_library("audio_encoder_opus_config") {
visibility = [ "*" ]
sources = [
"audio_encoder_multi_channel_opus_config.cc",
"audio_encoder_multi_channel_opus_config.h",
"audio_encoder_opus_config.cc",
"audio_encoder_opus_config.h",
]
deps = [
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
defines = []
if (rtc_opus_variable_complexity) {
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=1" ]
} else {
defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=0" ]
}
}
rtc_source_set("audio_decoder_opus_config") {
visibility = [ "*" ]
sources = [ "audio_decoder_multi_channel_opus_config.h" ]
}
rtc_library("audio_encoder_opus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_opus.h" ]
sources = [ "audio_encoder_opus.cc" ]
deps = [
":audio_encoder_opus_config",
"..:audio_codecs_api",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_decoder_opus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_opus.cc",
"audio_decoder_opus.h",
]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:webrtc_opus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
rtc_library("audio_encoder_multiopus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
public = [ "audio_encoder_multi_channel_opus.h" ]
sources = [ "audio_encoder_multi_channel_opus.cc" ]
deps = [
"..:audio_codecs_api",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
"../opus:audio_encoder_opus_config",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("audio_decoder_multiopus") {
visibility = [ "*" ]
poisonous = [ "audio_codecs" ]
sources = [
"audio_decoder_multi_channel_opus.cc",
"audio_decoder_multi_channel_opus.h",
]
deps = [
":audio_decoder_opus_config",
"..:audio_codecs_api",
"../../../modules/audio_coding:webrtc_multiopus",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base/system:rtc_export",
]
absl_deps = [
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}

Просмотреть файл

@ -0,0 +1,70 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/opus/audio_decoder_multi_channel_opus_impl.h"
namespace webrtc {
absl::optional<AudioDecoderMultiChannelOpusConfig>
AudioDecoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
return AudioDecoderMultiChannelOpusImpl::SdpToConfig(format);
}
void AudioDecoderMultiChannelOpus::AppendSupportedDecoders(
std::vector<AudioCodecSpec>* specs) {
// To get full utilization of the surround support of the Opus lib, we can
// mark which channel is the low frequency effects (LFE). But that is not done
// ATM.
{
AudioCodecInfo surround_5_1_opus_info{48000, 6,
/* default_bitrate_bps= */ 128000};
surround_5_1_opus_info.allow_comfort_noise = false;
surround_5_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
6,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,4,1,2,3,5"},
{"num_streams", "4"},
{"coupled_streams", "2"}}});
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
}
{
AudioCodecInfo surround_7_1_opus_info{48000, 8,
/* default_bitrate_bps= */ 200000};
surround_7_1_opus_info.allow_comfort_noise = false;
surround_7_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
8,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,6,1,2,3,4,5,7"},
{"num_streams", "5"},
{"coupled_streams", "3"}}});
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
}
}
// Builds a decoder from `config`. The codec pair id parameter is accepted
// for API symmetry but is not used by the Opus implementation.
std::unique_ptr<AudioDecoder> AudioDecoderMultiChannelOpus::MakeAudioDecoder(
    AudioDecoderMultiChannelOpusConfig config,
    absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
  auto decoder = AudioDecoderMultiChannelOpusImpl::MakeAudioDecoder(config);
  return decoder;
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,40 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_decoder_multi_channel_opus_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderMultiChannelOpus {
  using Config = AudioDecoderMultiChannelOpusConfig;

  // Parses an SDP audio format; returns absl::nullopt if `audio_format` is
  // not a valid multi-channel ("multiopus") Opus format.
  static absl::optional<AudioDecoderMultiChannelOpusConfig> SdpToConfig(
      const SdpAudioFormat& audio_format);

  // Appends the default surround decoder specs (5.1 and 7.1) to `specs`.
  static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);

  // Creates a decoder for `config`. `codec_pair_id` is accepted for API
  // symmetry but is ignored by the implementation.
  static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
      AudioDecoderMultiChannelOpusConfig config,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_H_

Просмотреть файл

@ -0,0 +1,63 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#include <vector>
namespace webrtc {
// Configuration for a multi-channel Opus decoder, following the channel
// mapping model of RFC 7845 section 5.1.1.
struct AudioDecoderMultiChannelOpusConfig {
  // The number of channels that the decoder will output.
  int num_channels;

  // Number of mono or stereo encoded Opus streams.
  int num_streams;

  // Number of channel pairs coupled together, see RFC 7845 section
  // 5.1.1. Has to be less than the number of streams.
  int coupled_streams;

  // Channel mapping table, defines the mapping from encoded streams to output
  // channels. See RFC 7845 section 5.1.1.
  std::vector<unsigned char> channel_mapping;

  // Sanity-checks the configuration against the constraints above.
  bool IsOk() const {
    const bool counts_non_negative =
        num_channels >= 0 && num_streams >= 0 && coupled_streams >= 0;
    if (!counts_non_negative || coupled_streams > num_streams) {
      return false;
    }
    // The mapping table must have one entry per output channel.
    if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
      return false;
    }
    // Every mono stream codes one channel and every coupled stream codes
    // two, so this is the total coded channel count:
    const int max_coded_channel = num_streams + coupled_streams;
    for (const unsigned char mapped : channel_mapping) {
      // Entries at or above `max_coded_channel` refer to coded channels that
      // don't exist — except 255, which tells Opus to emit silence on that
      // output channel.
      if (mapped != 255 && mapped >= max_coded_channel) {
        return false;
      }
    }
    return num_channels <= 255 && max_coded_channel < 255;
  }
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_MULTI_CHANNEL_OPUS_CONFIG_H_

Просмотреть файл

@ -0,0 +1,79 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "modules/audio_coding/codecs/opus/audio_decoder_opus.h"
namespace webrtc {
// Validates the decoder config. Only the sample rates and channel counts
// supported by the impl wrapper are accepted. (libopus supports a few other
// rates as well; we can add support for them when needed.)
bool AudioDecoderOpus::Config::IsOk() const {
  const bool rate_supported =
      sample_rate_hz == 16000 || sample_rate_hz == 48000;
  const bool channels_supported = num_channels == 1 || num_channels == 2;
  return rate_supported && channels_supported;
}
// Parses an SDP "opus" format into a decoder config. Returns absl::nullopt
// for anything that is not opus/48000/2 or that carries a malformed "stereo"
// parameter.
absl::optional<AudioDecoderOpus::Config> AudioDecoderOpus::SdpToConfig(
    const SdpAudioFormat& format) {
  // Output channel count comes from the optional "stereo" fmtp parameter:
  // absent means mono, "0" means mono, "1" means stereo, anything else is
  // rejected.
  absl::optional<int> num_channels = 1;
  const auto stereo_param = format.parameters.find("stereo");
  if (stereo_param != format.parameters.end()) {
    if (stereo_param->second == "0") {
      num_channels = 1;
    } else if (stereo_param->second == "1") {
      num_channels = 2;
    } else {
      num_channels = absl::nullopt;  // Bad stereo parameter.
    }
  }
  // Opus is signaled in SDP as 48000 Hz with 2 channels regardless of the
  // actual encoded configuration.
  const bool format_ok = absl::EqualsIgnoreCase(format.name, "opus") &&
                         format.clockrate_hz == 48000 &&
                         format.num_channels == 2 && num_channels.has_value();
  if (!format_ok) {
    return absl::nullopt;
  }
  Config config;
  config.num_channels = *num_channels;
  RTC_DCHECK(config.IsOk());
  return config;
}
// Appends the single default Opus decoder spec: 48 kHz, advertised as mono
// with in-band FEC offered.
void AudioDecoderOpus::AppendSupportedDecoders(
    std::vector<AudioCodecSpec>* specs) {
  SdpAudioFormat opus_format(
      {"opus", 48000, 2, {{"minptime", "10"}, {"useinbandfec", "1"}}});
  AudioCodecInfo opus_info{48000, 1, /* default_bitrate_bps= */ 64000, 6000,
                           510000};
  opus_info.supports_network_adaption = true;
  opus_info.allow_comfort_noise = false;
  specs->push_back({std::move(opus_format), opus_info});
}
// Builds a decoder from a validated config. `codec_pair_id` is unused by the
// Opus implementation.
std::unique_ptr<AudioDecoder> AudioDecoderOpus::MakeAudioDecoder(
    Config config,
    absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
  // SdpToConfig() only produces valid configs; a failure here points at a
  // hand-constructed config.
  RTC_DCHECK(config.IsOk());
  const int channels = config.num_channels;
  const int sample_rate_hz = config.sample_rate_hz;
  return std::make_unique<AudioDecoderOpusImpl>(channels, sample_rate_hz);
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,42 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_decoder.h"
#include "api/audio_codecs/audio_format.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus decoder API for use as a template parameter to
// CreateAudioDecoderFactory<...>().
struct RTC_EXPORT AudioDecoderOpus {
  struct Config {
    bool IsOk() const;  // Checks if the values are currently OK.
    int sample_rate_hz = 48000;  // Accepted by IsOk(): 16000 or 48000.
    int num_channels = 1;        // Accepted by IsOk(): 1 or 2.
  };

  // Parses an SDP "opus" format; absl::nullopt if it is not valid.
  static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);

  // Appends the default Opus decoder spec to `specs`.
  static void AppendSupportedDecoders(std::vector<AudioCodecSpec>* specs);

  // Creates a decoder for `config`. `codec_pair_id` is accepted for API
  // symmetry but is ignored by the implementation.
  static std::unique_ptr<AudioDecoder> MakeAudioDecoder(
      Config config,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_DECODER_OPUS_H_

Просмотреть файл

@ -0,0 +1,74 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus.h"
#include <utility>
#include "modules/audio_coding/codecs/opus/audio_encoder_multi_channel_opus_impl.h"
namespace webrtc {
// Parses an SDP audio format into a multi-channel Opus encoder config.
// Returns absl::nullopt when `format` is not a valid "multiopus" format.
// All parsing logic lives in AudioEncoderMultiChannelOpusImpl.
absl::optional<AudioEncoderMultiChannelOpusConfig>
AudioEncoderMultiChannelOpus::SdpToConfig(const SdpAudioFormat& format) {
  auto parsed_config = AudioEncoderMultiChannelOpusImpl::SdpToConfig(format);
  return parsed_config;
}
void AudioEncoderMultiChannelOpus::AppendSupportedEncoders(
std::vector<AudioCodecSpec>* specs) {
// To get full utilization of the surround support of the Opus lib, we can
// mark which channel is the low frequency effects (LFE). But that is not done
// ATM.
{
AudioCodecInfo surround_5_1_opus_info{48000, 6,
/* default_bitrate_bps= */ 128000};
surround_5_1_opus_info.allow_comfort_noise = false;
surround_5_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
6,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,4,1,2,3,5"},
{"num_streams", "4"},
{"coupled_streams", "2"}}});
specs->push_back({std::move(opus_format), surround_5_1_opus_info});
}
{
AudioCodecInfo surround_7_1_opus_info{48000, 8,
/* default_bitrate_bps= */ 200000};
surround_7_1_opus_info.allow_comfort_noise = false;
surround_7_1_opus_info.supports_network_adaption = false;
SdpAudioFormat opus_format({"multiopus",
48000,
8,
{{"minptime", "10"},
{"useinbandfec", "1"},
{"channel_mapping", "0,6,1,2,3,4,5,7"},
{"num_streams", "5"},
{"coupled_streams", "3"}}});
specs->push_back({std::move(opus_format), surround_7_1_opus_info});
}
}
// Derives codec info for `config`; the derivation lives entirely in the
// implementation class.
AudioCodecInfo AudioEncoderMultiChannelOpus::QueryAudioEncoder(
    const AudioEncoderMultiChannelOpusConfig& config) {
  const AudioCodecInfo info =
      AudioEncoderMultiChannelOpusImpl::QueryAudioEncoder(config);
  return info;
}
// Builds an encoder from `config` for RTP payload type `payload_type`.
// The codec pair id is not used by the Opus implementation.
std::unique_ptr<AudioEncoder> AudioEncoderMultiChannelOpus::MakeAudioEncoder(
    const AudioEncoderMultiChannelOpusConfig& config,
    int payload_type,
    absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
  auto encoder =
      AudioEncoderMultiChannelOpusImpl::MakeAudioEncoder(config, payload_type);
  return encoder;
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,41 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_
#include <memory>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/audio_codec_pair_id.h"
#include "api/audio_codecs/audio_encoder.h"
#include "api/audio_codecs/audio_format.h"
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Opus encoder API for use as a template parameter to
// CreateAudioEncoderFactory<...>().
struct RTC_EXPORT AudioEncoderMultiChannelOpus {
  using Config = AudioEncoderMultiChannelOpusConfig;

  // Parses an SDP audio format; returns absl::nullopt if `audio_format` is
  // not a valid multi-channel ("multiopus") Opus format.
  static absl::optional<Config> SdpToConfig(const SdpAudioFormat& audio_format);

  // Appends the default surround encoder specs (5.1 and 7.1) to `specs`.
  static void AppendSupportedEncoders(std::vector<AudioCodecSpec>* specs);

  // Derives codec info (rates, bitrates, capabilities) from `config`.
  static AudioCodecInfo QueryAudioEncoder(const Config& config);

  // Creates an encoder for `config` and `payload_type`. `codec_pair_id` is
  // accepted for API symmetry but is ignored by the implementation.
  static std::unique_ptr<AudioEncoder> MakeAudioEncoder(
      const Config& config,
      int payload_type,
      absl::optional<AudioCodecPairId> codec_pair_id = absl::nullopt);
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_H_

Просмотреть файл

@ -0,0 +1,106 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.h"
namespace webrtc {
namespace {
// Default Opus encoder complexity (0-10 scale; see IsOk()).
constexpr int kDefaultComplexity = 9;
}  // namespace

// Defaults: mono VoIP at 32 kbps with FEC/CBR/DTX off. `num_streams` and
// `coupled_streams` start at -1 (invalid) so an unconfigured surround layout
// fails IsOk() instead of silently encoding the wrong thing.
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig()
    : frame_size_ms(kDefaultFrameSizeMs),
      num_channels(1),
      application(ApplicationMode::kVoip),
      bitrate_bps(32000),
      fec_enabled(false),
      cbr_enabled(false),
      dtx_enabled(false),
      max_playback_rate_hz(48000),
      complexity(kDefaultComplexity),
      num_streams(-1),
      coupled_streams(-1) {}

// Copy/destroy/assign are defaulted; the config only holds value members.
AudioEncoderMultiChannelOpusConfig::AudioEncoderMultiChannelOpusConfig(
    const AudioEncoderMultiChannelOpusConfig&) = default;
AudioEncoderMultiChannelOpusConfig::~AudioEncoderMultiChannelOpusConfig() =
    default;
AudioEncoderMultiChannelOpusConfig& AudioEncoderMultiChannelOpusConfig::
    operator=(const AudioEncoderMultiChannelOpusConfig&) = default;
// Validates the full encoder configuration: basic scalar ranges first, then
// the RFC 7845-style stream/channel-mapping consistency rules.
bool AudioEncoderMultiChannelOpusConfig::IsOk() const {
  // Frame duration must be a positive multiple of 10 ms.
  if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
    return false;
  // NOTE(review): num_channels is declared size_t in the header, so the
  // `num_channels < 0` comparisons here are always false — confirm whether
  // the field was meant to be signed.
  if (num_channels < 0 || num_channels >= 255) {
    return false;
  }
  if (bitrate_bps < kMinBitrateBps || bitrate_bps > kMaxBitrateBps)
    return false;
  // Opus complexity knob is defined on a 0-10 scale.
  if (complexity < 0 || complexity > 10)
    return false;
  // Check the lengths:
  if (num_channels < 0 || num_streams < 0 || coupled_streams < 0) {
    return false;
  }
  // Coupled streams are a subset of all streams.
  if (num_streams < coupled_streams) {
    return false;
  }
  // The mapping table must have one entry per input channel.
  if (channel_mapping.size() != static_cast<size_t>(num_channels)) {
    return false;
  }
  // Every mono stream codes one channel, every coupled stream codes two. This
  // is the total coded channel count:
  const int max_coded_channel = num_streams + coupled_streams;
  for (const auto& x : channel_mapping) {
    // Coded channels >= max_coded_channel don't exist. Except for 255, which
    // tells Opus to ignore input channel x.
    if (x >= max_coded_channel && x != 255) {
      return false;
    }
  }
  // Inverse mapping: reconstruct, for every coded channel, which input
  // channel feeds it, rejecting duplicates.
  constexpr int kNotSet = -1;
  std::vector<int> coded_channels_to_input_channels(max_coded_channel, kNotSet);
  for (size_t i = 0; i < num_channels; ++i) {
    if (channel_mapping[i] == 255) {
      // Input channel i is deliberately ignored.
      continue;
    }
    // If it's not ignored, put it in the inverted mapping. But first check if
    // we've told Opus to use another input channel for this coded channel:
    const int coded_channel = channel_mapping[i];
    if (coded_channels_to_input_channels[coded_channel] != kNotSet) {
      // Coded channel `coded_channel` comes from both input channels
      // `coded_channels_to_input_channels[coded_channel]` and `i`.
      return false;
    }
    coded_channels_to_input_channels[coded_channel] = i;
  }
  // Check that we specified what input the encoder should use to produce
  // every coded channel.
  for (int i = 0; i < max_coded_channel; ++i) {
    if (coded_channels_to_input_channels[i] == kNotSet) {
      // Coded channel `i` has unspecified input channel.
      return false;
    }
  }
  // 255 is reserved as the "ignore" marker, so neither count may reach it.
  if (num_channels > 255 || max_coded_channel >= 255) {
    return false;
  }
  return true;
}
} // namespace webrtc

Просмотреть файл

@ -0,0 +1,66 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#define API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_
#include <stddef.h>
#include <vector>
#include "absl/types/optional.h"
#include "api/audio_codecs/opus/audio_encoder_opus_config.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Configuration for the multi-channel Opus encoder; validated by IsOk().
struct RTC_EXPORT AudioEncoderMultiChannelOpusConfig {
  static constexpr int kDefaultFrameSizeMs = 20;

  // Opus API allows a min bitrate of 500bps, but Opus documentation suggests
  // bitrate should be in the range of 6000 to 510000, inclusive.
  static constexpr int kMinBitrateBps = 6000;
  static constexpr int kMaxBitrateBps = 510000;

  AudioEncoderMultiChannelOpusConfig();
  AudioEncoderMultiChannelOpusConfig(const AudioEncoderMultiChannelOpusConfig&);
  ~AudioEncoderMultiChannelOpusConfig();
  AudioEncoderMultiChannelOpusConfig& operator=(
      const AudioEncoderMultiChannelOpusConfig&);

  // Frame duration in ms; IsOk() requires a positive multiple of 10.
  int frame_size_ms;
  // Number of input channels. NOTE(review): unsigned here, but IsOk()
  // compares it against 0 — confirm whether a signed type was intended.
  size_t num_channels;
  // Encoder application preset; presumably maps onto the Opus
  // voip/audio application modes — verify against the impl.
  enum class ApplicationMode { kVoip, kAudio };
  ApplicationMode application;
  // Target bitrate; IsOk() requires [kMinBitrateBps, kMaxBitrateBps].
  int bitrate_bps;
  bool fec_enabled;
  bool cbr_enabled;
  bool dtx_enabled;
  int max_playback_rate_hz;
  std::vector<int> supported_frame_lengths_ms;
  // Opus complexity, 0-10 (enforced by IsOk()); defaults to 9.
  int complexity;
  // Number of mono/stereo Opus streams.
  int num_streams;
  // Number of channel pairs coupled together, see RFC 7845 section
  // 5.1.1. Has to be less than the number of streams
  int coupled_streams;
  // Channel mapping table, defines the mapping from encoded streams to input
  // channels. See RFC 7845 section 5.1.1.
  std::vector<unsigned char> channel_mapping;
  // Validates every field above; see the .cc for the exact rules.
  bool IsOk() const;
};
} // namespace webrtc
#endif // API_AUDIO_CODECS_OPUS_AUDIO_ENCODER_MULTI_CHANNEL_OPUS_CONFIG_H_

Просмотреть файл

@ -0,0 +1,39 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
namespace webrtc {
// Parses an SDP "opus" format into an encoder config; absl::nullopt when
// `format` is not valid. All parsing logic lives in AudioEncoderOpusImpl.
absl::optional<AudioEncoderOpusConfig> AudioEncoderOpus::SdpToConfig(
    const SdpAudioFormat& format) {
  auto parsed_config = AudioEncoderOpusImpl::SdpToConfig(format);
  return parsed_config;
}
// Appends the default Opus encoder specs to `specs`; the list is owned by
// the implementation class.
void AudioEncoderOpus::AppendSupportedEncoders(
    std::vector<AudioCodecSpec>* specs) {
  AudioEncoderOpusImpl::AppendSupportedEncoders(specs);
}
// Derives codec info for `config`; the derivation lives entirely in the
// implementation class.
AudioCodecInfo AudioEncoderOpus::QueryAudioEncoder(
    const AudioEncoderOpusConfig& config) {
  const AudioCodecInfo info = AudioEncoderOpusImpl::QueryAudioEncoder(config);
  return info;
}
// Builds an encoder from `config` for RTP payload type `payload_type`.
// The codec pair id is not used by the Opus implementation.
std::unique_ptr<AudioEncoder> AudioEncoderOpus::MakeAudioEncoder(
    const AudioEncoderOpusConfig& config,
    int payload_type,
    absl::optional<AudioCodecPairId> /*codec_pair_id*/) {
  auto encoder = AudioEncoderOpusImpl::MakeAudioEncoder(config, payload_type);
  return encoder;
}
} // namespace webrtc

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше