int const AudioContentProperty::STREAMS = 200;
int const AudioContentProperty::GAIN = 201;
int const AudioContentProperty::DELAY = 202;
+int const AudioContentProperty::FADE_IN = 203;
+int const AudioContentProperty::FADE_OUT = 204;
AudioContent::AudioContent (Content* parent)
{
_gain = node->number_child<double> ("AudioGain");
_delay = node->number_child<int> ("AudioDelay");
+ _fade_in = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeIn").get_value_or(0));
+ _fade_out = ContentTime(node->optional_number_child<ContentTime::Type>("AudioFadeOut").get_value_or(0));
/* Backwards compatibility */
auto r = node->optional_number_child<double>("AudioVideoFrameRate");
boost::mutex::scoped_lock lm (_mutex);
node->add_child("AudioGain")->add_child_text(raw_convert<string>(_gain));
node->add_child("AudioDelay")->add_child_text(raw_convert<string>(_delay));
+ node->add_child("AudioFadeIn")->add_child_text(raw_convert<string>(_fade_in.get()));
+ node->add_child("AudioFadeOut")->add_child_text(raw_convert<string>(_fade_out.get()));
}
/* XXX: we're in trouble if streams have different rates */
trim = trim.round (_streams.front()->frame_rate());
}
+
+
+/** Set the duration of the fade at the start of this content's audio.
+ *  maybe_set() stores the value and signals AudioContentProperty::FADE_IN
+ *  (presumably only when the value actually changes — see maybe_set).
+ *  @param t New fade-in duration.
+ */
+void
+AudioContent::set_fade_in (ContentTime t)
+{
+	maybe_set (_fade_in, t, AudioContentProperty::FADE_IN);
+}
+
+
+/** Set the duration of the fade at the end of this content's audio.
+ *  maybe_set() stores the value and signals AudioContentProperty::FADE_OUT
+ *  (presumably only when the value actually changes — see maybe_set).
+ *  @param t New fade-out duration.
+ */
+void
+AudioContent::set_fade_out (ContentTime t)
+{
+	maybe_set (_fade_out, t, AudioContentProperty::FADE_OUT);
+}
+
+
+/** Compute fade coefficients for a section of this content's audio.
+ *  One multiplicative gain per sample is returned; an empty vector means the
+ *  section needs no fade (every coefficient would be 1).  Samples lying in the
+ *  start/end trim come out as 0, because the fade curves clamp their argument
+ *  to [0, 1] (see logarithmic_fade_{in,out}_curve).
+ *  @param frame First frame of the section, within the whole (untrimmed) content.
+ *  @param length Number of frames in the section.
+ *  @param frame_rate Frame rate of the audio (it may have been resampled).
+ */
+vector<float>
+AudioContent::fade (Frame frame, Frame length, int frame_rate) const
+{
+	/* Fade lengths in frames at frame_rate */
+	auto const in = fade_in().frames_round(frame_rate);
+	auto const out = fade_out().frames_round(frame_rate);
+
+	/* Where the start trim ends, at frame_rate */
+	auto const trim_start = _parent->trim_start().frames_round(frame_rate);
+	/* Where the end trim starts within the whole length of the content, at frame_rate */
+	auto const trim_end = ContentTime(ContentTime::from_frames(stream()->length(), stream()->frame_rate()) - _parent->trim_end()).frames_round(frame_rate);
+
+	if (
+		(in == 0 || (frame >= (trim_start + in))) &&
+		(out == 0 || ((frame + length) < (trim_end - out)))
+	   ) {
+		/* This section starts after the fade in and ends before the fade out */
+		return {};
+	}
+
+	/* Start position relative to the start of the fade in */
+	auto in_start = frame - trim_start;
+	/* Start position relative to the start of the fade out */
+	auto out_start = frame - (trim_end - out);
+
+	vector<float> coeffs(length);
+	for (auto coeff = 0; coeff < length; ++coeff) {
+		coeffs[coeff] = 1.0;
+		if (in) {
+			/* Curve clamps to [0, 1]: samples before the fade (including the
+			   start trim) get 0, samples after it get 1 */
+			coeffs[coeff] *= logarithmic_fade_in_curve(static_cast<float>(in_start + coeff) / in);
+		}
+		if (out) {
+			/* Likewise: samples after the fade (including the end trim) get 0 */
+			coeffs[coeff] *= logarithmic_fade_out_curve(static_cast<float>(out_start + coeff) / out);
+		}
+	}
+
+	return coeffs;
+}
+
static int const STREAMS;
static int const GAIN;
static int const DELAY;
+ static int const FADE_IN;
+ static int const FADE_OUT;
};
return _delay;
}
+	/** @return Duration of the fade at the start of the audio (thread-safe copy) */
+	dcpomatic::ContentTime fade_in () const {
+		boost::mutex::scoped_lock lm (_mutex);
+		return _fade_in;
+	}
+
+	/** @return Duration of the fade at the end of the audio (thread-safe copy) */
+	dcpomatic::ContentTime fade_out () const {
+		boost::mutex::scoped_lock lm (_mutex);
+		return _fade_out;
+	}
+
+ void set_fade_in (dcpomatic::ContentTime time);
+ void set_fade_out (dcpomatic::ContentTime time);
+
std::string processing_description (std::shared_ptr<const Film> film) const;
std::vector<AudioStreamPtr> streams () const {
void modify_position (std::shared_ptr<const Film> film, dcpomatic::DCPTime& pos) const;
void modify_trim_start (dcpomatic::ContentTime& pos) const;
+	/** @param frame First frame of the section, within the whole (untrimmed) content.
+	 *  @param length Number of frames in the section.
+	 *  @param frame_rate The frame rate of the audio (it may have been resampled).
+	 *  @return a fade coefficient for each of @ref length samples starting at an offset
+	 *  @ref frame within the content, or an empty vector if the given section has no fade.
+	 */
+ std::vector<float> fade (Frame frame, Frame length, int frame_rate) const;
+
static std::shared_ptr<AudioContent> from_xml (Content* parent, cxml::ConstNodePtr, int version);
private:
double _gain = 0;
/** Delay to apply to audio (positive moves audio later) in milliseconds */
int _delay = 0;
+ dcpomatic::ContentTime _fade_in;
+ dcpomatic::ContentTime _fade_out;
std::vector<AudioStreamPtr> _streams;
};
*/
+#include "maths_util.h"
#include <cmath>
return 20 * log10(linear);
}
+
+/** Logarithmic fade-in curve.  The argument is clamped to [0, 1], so the
+ *  result is 0 for t <= 0 and 1 for t >= 1.
+ *  @param t Position in the fade: 0 = start, 1 = end.
+ */
+float
+logarithmic_fade_in_curve (float t)
+{
+	auto const c = clamp(t, 0.0f, 1.0f);
+	/* e^(2(c-1)) * c: 0 at c == 0, 1 at c == 1 */
+	return std::exp(2 * (c - 1)) * c;
+}
+
+
+/** Logarithmic fade-out curve.  The argument is clamped to [0, 1], so the
+ *  result is 1 for t <= 0 and 0 for t >= 1.
+ *  @param t Position in the fade: 0 = start, 1 = end.
+ */
+float
+logarithmic_fade_out_curve (float t)
+{
+	auto const c = clamp(t, 0.0f, 1.0f);
+	/* e^(-2c) * (1 - c): 1 at c == 0, 0 at c == 1 */
+	return std::exp(-2 * c) * (1 - c);
+}
+
extern double db_to_linear (double db);
extern double linear_to_db (double linear);
+/** @return linear gain according to a logarithmic curve, for fading in.
+ *  t < 0: linear gain of 0
+ *  0 <= t <= 1: logarithmic fade in curve
+ *  t > 1: linear gain of 1
+ */
+extern float logarithmic_fade_in_curve (float t);
+
+
+/** @return linear gain according to a logarithmic curve, for fading out.
+ *  t > 1: linear gain of 0
+ *  0 <= t <= 1: logarithmic fade out curve
+ *  t < 0: linear gain of 1
+ */
+extern float logarithmic_fade_out_curve (float t);
+
template <class T>
T clamp (T val, T minimum, T maximum)
DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
- /* Gain */
-
- if (content->gain() != 0) {
- auto gain = make_shared<AudioBuffers>(content_audio.audio);
- gain->apply_gain (content->gain());
- content_audio.audio = gain;
+ /* Gain and fade */
+
+ auto const fade_coeffs = content->fade (content_audio.frame, content_audio.audio->frames(), rfr);
+ if (content->gain() != 0 || !fade_coeffs.empty()) {
+ auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
+ if (!fade_coeffs.empty()) {
+ /* Apply both fade and gain */
+ DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
+ auto const channels = gain_buffers->channels();
+ auto const frames = fade_coeffs.size();
+ auto data = gain_buffers->data();
+ auto const gain = db_to_linear (content->gain());
+ for (auto channel = 0; channel < channels; ++channel) {
+ for (auto frame = 0U; frame < frames; ++frame) {
+ data[channel][frame] *= gain * fade_coeffs[frame];
+ }
+ }
+ } else {
+ /* Just apply gain */
+ gain_buffers->apply_gain (content->gain());
+ }
+ content_audio.audio = gain_buffers;
}
/* Remap */
#include "gain_calculator_dialog.h"
#include "static_text.h"
#include "wx_util.h"
+#include "lib/audio_content.h"
+#include "lib/cinema_sound_processor.h"
#include "lib/config.h"
+#include "lib/dcp_content.h"
#include "lib/ffmpeg_audio_stream.h"
#include "lib/ffmpeg_content.h"
-#include "lib/cinema_sound_processor.h"
#include "lib/job_manager.h"
-#include "lib/dcp_content.h"
-#include "lib/audio_content.h"
#include "lib/maths_util.h"
#include <wx/spinctrl.h>
#include <iostream>
-using std::vector;
using std::cout;
-using std::string;
+using std::dynamic_pointer_cast;
using std::list;
-using std::pair;
using std::make_shared;
-using std::dynamic_pointer_cast;
+using std::pair;
+using std::set;
using std::shared_ptr;
+using std::string;
+using std::vector;
using boost::optional;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
+using namespace dcpomatic;
AudioPanel::AudioPanel (ContentPanel* p)
/// TRANSLATORS: this is an abbreviation for milliseconds, the unit of time
_delay_ms_label = create_label (this, _("ms"), false);
+ _fade_in_label = create_label (this, _("Fade in"), true);
+ _fade_in = new Timecode<ContentTime> (this);
+
+ _fade_out_label = create_label (this, _("Fade out"), true);
+ _fade_out = new Timecode<ContentTime> (this);
+
_mapping = new AudioMappingView (this, _("Content"), _("content"), _("DCP"), _("DCP"));
_sizer->Add (_mapping, 1, wxEXPAND | wxALL, 6);
_show->Bind (wxEVT_BUTTON, boost::bind (&AudioPanel::show_clicked, this));
_gain_calculate_button->Bind (wxEVT_BUTTON, boost::bind (&AudioPanel::gain_calculate_button_clicked, this));
+ _fade_in->Changed.connect (boost::bind(&AudioPanel::fade_in_changed, this));
+ _fade_out->Changed.connect (boost::bind(&AudioPanel::fade_out_changed, this));
+
_mapping_connection = _mapping->Changed.connect (boost::bind (&AudioPanel::mapping_changed, this, _1));
_active_jobs_connection = JobManager::instance()->ActiveJobsChanged.connect (boost::bind (&AudioPanel::active_jobs_changed, this, _1, _2));
_sizer->Layout ();
}
+
void
AudioPanel::add_to_grid ()
{
s->Add (_delay_ms_label, 0, wxALIGN_CENTER_VERTICAL);
_grid->Add (s, wxGBPosition(r, 1));
++r;
+
+ add_label_to_sizer (_grid, _fade_in_label, true, wxGBPosition(r, 0));
+ _grid->Add (_fade_in, wxGBPosition(r, 1), wxGBSpan(1, 3));
+ ++r;
+
+ add_label_to_sizer (_grid, _fade_out_label, true, wxGBPosition(r, 0));
+ _grid->Add (_fade_out, wxGBPosition(r, 1), wxGBSpan(1, 3));
+ ++r;
}
+
AudioPanel::~AudioPanel ()
{
if (_audio_dialog) {
setup_sensitivity ();
} else if (property == ContentProperty::VIDEO_FRAME_RATE) {
setup_description ();
+ } else if (property == AudioContentProperty::FADE_IN) {
+ set<Frame> check;
+ for (auto i: ac) {
+ check.insert (i->audio->fade_in().get());
+ }
+
+ if (check.size() == 1) {
+ _fade_in->set (
+ ac.front()->audio->fade_in(),
+ ac.front()->active_video_frame_rate(_parent->film())
+ );
+ } else {
+ _fade_in->clear ();
+ }
+ } else if (property == AudioContentProperty::FADE_OUT) {
+ set<Frame> check;
+ for (auto i: ac) {
+ check.insert (i->audio->fade_out().get());
+ }
+
+ if (check.size() == 1) {
+ _fade_out->set (
+ ac.front()->audio->fade_out(),
+ ac.front()->active_video_frame_rate(_parent->film())
+ );
+ } else {
+ _fade_out->clear ();
+ }
}
}
film_content_changed (AudioContentProperty::STREAMS);
film_content_changed (AudioContentProperty::GAIN);
+ film_content_changed (AudioContentProperty::FADE_IN);
+ film_content_changed (AudioContentProperty::FADE_OUT);
film_content_changed (DCPContentProperty::REFERENCE_AUDIO);
setup_sensitivity ();
}
}
+
+/** Called when the user changes the fade-in timecode control; applies the
+ *  new value to every selected piece of audio content.
+ */
+void
+AudioPanel::fade_in_changed ()
+{
+	auto const hmsf = _fade_in->get();
+	for (auto i: _parent->selected_audio()) {
+		/* Interpret the HH:MM:SS:FF value at this content's active video frame rate */
+		auto const vfr = i->active_video_frame_rate(_parent->film());
+		i->audio->set_fade_in (dcpomatic::ContentTime(hmsf, vfr));
+	}
+}
+
+
+/** Called when the user changes the fade-out timecode control; applies the
+ *  new value to every selected piece of audio content.
+ */
+void
+AudioPanel::fade_out_changed ()
+{
+	auto const hmsf = _fade_out->get();
+	for (auto i: _parent->selected_audio()) {
+		/* Interpret the HH:MM:SS:FF value at this content's active video frame rate */
+		auto const vfr = i->active_video_frame_rate (_parent->film());
+		i->audio->set_fade_out (dcpomatic::ContentTime(hmsf, vfr));
+	}
+}
+
/*
- Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2012-2022 Carl Hetherington <cth@carlh.net>
This file is part of DCP-o-matic.
#include "lib/audio_mapping.h"
#include "content_sub_panel.h"
#include "content_widget.h"
+#include "timecode.h"
class wxSpinCtrlDouble;
void reference_clicked ();
void add_to_grid () override;
boost::optional<float> peak () const;
+ void fade_in_changed ();
+ void fade_out_changed ();
wxCheckBox* _reference;
wxStaticText* _reference_note;
wxStaticText* _delay_label;
wxStaticText* _delay_ms_label;
ContentSpinCtrl<AudioContent>* _delay;
+ wxStaticText* _fade_in_label;
+ Timecode<dcpomatic::ContentTime>* _fade_in;
+ wxStaticText* _fade_out_label;
+ Timecode<dcpomatic::ContentTime>* _fade_out;
AudioMappingView* _mapping;
wxStaticText* _description;
AudioDialog* _audio_dialog;
*/
+
#ifndef DCPOMATIC_WX_TIMECODE_H
#define DCPOMATIC_WX_TIMECODE_H
+
#include "wx_util.h"
#include "lib/dcpomatic_time.h"
#include "lib/types.h"
#include <wx/wx.h>
#include <boost/signals2.hpp>
+
class TimecodeBase : public wxPanel
{
public:
bool _ignore_changed = false;
};
+
template <class T>
class Timecode : public TimecodeBase
{
--- /dev/null
+/*
+ Copyright (C) 2022 Carl Hetherington <cth@carlh.net>
+
+ This file is part of DCP-o-matic.
+
+ DCP-o-matic is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ DCP-o-matic is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+#include "lib/audio_content.h"
+#include "lib/content_factory.h"
+#include "lib/maths_util.h"
+#include "test.h"
+#include <boost/test/unit_test.hpp>
+
+
+/* fade() with a zero-length section should return an empty vector */
+BOOST_AUTO_TEST_CASE (audio_content_fade_empty_region)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_empty_region", { content });
+
+	BOOST_CHECK (content->audio->fade(0, 0, 48000).empty());
+}
+
+
+/* With no fades set up, fade() should report no fade for any section */
+BOOST_AUTO_TEST_CASE (audio_content_fade_no_fade)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_no_fade", { content });
+
+	BOOST_CHECK (content->audio->fade(0, 2000, 48000).empty());
+	BOOST_CHECK (content->audio->fade(9999, 451, 48000).empty());
+	BOOST_CHECK (content->audio->fade(content->audio->stream()->length() + 100, 8000, 48000).empty());
+}
+
+
+/* Sections lying entirely between the fade in and the fade out should need no fade */
+BOOST_AUTO_TEST_CASE (audio_content_fade_unfaded_part)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_unfaded_part", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(2000, 48000));
+
+	BOOST_CHECK (content->audio->fade(2000, 50, 48000).empty());
+	BOOST_CHECK (content->audio->fade(12000, 99, 48000).empty());
+	BOOST_CHECK (content->audio->fade(content->audio->stream()->length() - 2051, 50, 48000).empty());
+}
+
+
+/* A section exactly covering the fade in should get the full fade-in curve */
+BOOST_AUTO_TEST_CASE (audio_content_within_the_fade_in)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_within_the_fade_in", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+
+	auto const f1 = content->audio->fade(0, 2000, 48000);
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], logarithmic_fade_in_curve(static_cast<float>(i) / 2000), 0.01);
+	}
+}
+
+
+/* A section exactly covering the fade out should get the full fade-out curve */
+BOOST_AUTO_TEST_CASE (audio_content_within_the_fade_out)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_within_the_fade_out", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(2000, 48000));
+
+	auto const f1 = content->audio->fade(content->audio->stream()->length() - 2000, 2000, 48000);
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], logarithmic_fade_out_curve(static_cast<float>(i) / 2000), 0.01);
+	}
+}
+
+
+/* A section straddling the end of the fade in: the fade-in curve for the
+ * first part, then unity gain.
+ */
+BOOST_AUTO_TEST_CASE (audio_content_overlapping_the_fade_in)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_overlapping_the_fade_in", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(2000, 48000));
+
+	auto const f1 = content->audio->fade(1500, 2000, 48000);
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 500; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], logarithmic_fade_in_curve(static_cast<float>(i + 1500) / 2000), 0.01);
+	}
+	for (auto i = 500; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], 1.0f, 0.01);
+	}
+}
+
+
+/* A section straddling the start of the fade out: unity gain for the first
+ * part, then the fade-out curve.
+ */
+BOOST_AUTO_TEST_CASE (audio_content_overlapping_the_fade_out)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_overlapping_the_fade_out", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(4000, 48000));
+
+	auto const f1 = content->audio->fade(content->audio->stream()->length() - 4100, 2000, 48000);
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 100; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], 1.0f, 0.01);
+	}
+	for (auto i = 100; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], logarithmic_fade_out_curve(static_cast<float>(i - 100) / 4000), 0.01);
+	}
+}
+
+
+/* Fades that each cover the whole content should multiply together */
+BOOST_AUTO_TEST_CASE (audio_content_fade_in_and_out)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_in_and_out", { content });
+
+	auto const length = content->audio->stream()->length();
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(length, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(length, 48000));
+
+	auto const f1 = content->audio->fade(0, 10000, 48000);
+	BOOST_REQUIRE_EQUAL (f1.size(), 10000U);
+	for (auto i = 0; i < 10000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], logarithmic_fade_in_curve(static_cast<float>(i) / length) * logarithmic_fade_out_curve(static_cast<float>(i) / length), 0.01);
+	}
+}
+
+
+/* Fade in combined with a start trim: frames inside the trim get coefficient
+ * 0 and the fade begins where the trim ends.
+ */
+BOOST_AUTO_TEST_CASE (audio_content_fade_in_with_trim)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_in_with_trim", { content });
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(1000, 48000));
+	content->set_trim_start(dcpomatic::ContentTime::from_frames(5200, 48000));
+
+	/* In the trim */
+	auto const f1 = content->audio->fade(0, 2000, 48000);
+	/* Unsigned literal avoids a signed/unsigned comparison with size(), matching the other tests */
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], 0.0f, 0.01);
+	}
+
+	/* In the fade */
+	auto const f2 = content->audio->fade(5200, 2000, 48000);
+	BOOST_REQUIRE_EQUAL (f2.size(), 2000U);
+	for (auto i = 0; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f2[i], logarithmic_fade_in_curve(static_cast<float>(i) / 2000), 0.01);
+	}
+}
+
+
+/* Fade out combined with an end trim: frames inside the trim get coefficient
+ * 0 and the fade ends where the trim begins.
+ */
+BOOST_AUTO_TEST_CASE (audio_content_fade_out_with_trim)
+{
+	auto content = content_factory("test/data/impulse_train.wav").front();
+	auto film = new_test_film2("audio_content_fade_out_with_trim", { content });
+
+	auto const length = content->audio->stream()->length();
+
+	content->audio->set_fade_in(dcpomatic::ContentTime::from_frames(2000, 48000));
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_frames(1000, 48000));
+	content->set_trim_start(dcpomatic::ContentTime::from_frames(5200, 48000));
+	content->set_trim_end(dcpomatic::ContentTime::from_frames(9000, 48000));
+
+	/* In the trim */
+	auto const f1 = content->audio->fade(length - 6000, 2000, 48000);
+	/* Unsigned literal avoids a signed/unsigned comparison with size(), matching the other tests */
+	BOOST_REQUIRE_EQUAL (f1.size(), 2000U);
+	for (auto i = 0; i < 2000; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], 0.0f, 0.01);
+	}
+
+	/* In the fade */
+	auto const f2 = content->audio->fade(length - 9000 - 1000, 1000, 48000);
+	BOOST_REQUIRE_EQUAL (f2.size(), 1000U);
+	for (auto i = 0; i < 1000; ++i) {
+		BOOST_REQUIRE_CLOSE (f2[i], logarithmic_fade_out_curve(static_cast<float>(i) / 1000), 0.01);
+	}
+}
+
+
+/* Fade out plus end trim when the content's own sample rate (44.1kHz)
+ * differs from the rate the fade is evaluated at (48kHz).
+ */
+BOOST_AUTO_TEST_CASE (audio_content_fade_out_with_trim_at_44k1)
+{
+	/* 5s at 44.1kHz */
+	auto content = content_factory("test/data/white.wav").front();
+	auto film = new_test_film2("audio_content_fade_out_with_trim_at_44k1", { content });
+
+	/* /----- 3.5s ------|-Fade-|-Trim-\
+	 * |                 |  1s  | 0.5s |
+	 * \-----------------|------|------/
+	 */
+
+	content->audio->set_fade_out(dcpomatic::ContentTime::from_seconds(1));
+	content->set_trim_end(dcpomatic::ContentTime::from_seconds(0.5));
+
+	/* In the trim */
+	auto const f1 = content->audio->fade(std::round(48000 * 4.75), 200, 48000);
+	/* Unsigned literal avoids a signed/unsigned comparison with size(), matching the other tests */
+	BOOST_REQUIRE_EQUAL (f1.size(), 200U);
+	for (auto i = 0; i < 200; ++i) {
+		BOOST_REQUIRE_CLOSE (f1[i], 0.0f, 0.01);
+	}
+
+	/* In the fade */
+	auto const f2 = content->audio->fade(std::round(48000 * 3.5 + 200), 7000, 48000);
+	BOOST_REQUIRE_EQUAL (f2.size(), 7000U);
+	for (auto i = 0; i < 7000; ++i) {
+		BOOST_REQUIRE_CLOSE (f2[i], logarithmic_fade_out_curve(static_cast<float>(i + 200) / 48000), 0.01);
+	}
+}
+
atmos_test.cc
audio_analysis_test.cc
audio_buffers_test.cc
+ audio_content_test.cc
audio_delay_test.cc
audio_filter_test.cc
audio_mapping_test.cc