X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Faudio_decoder.cc;h=2ab527f59bc4bf535d5160f79db8be1288b04983;hp=5a33716f4fe775622e2864a7ce97192c281ed84a;hb=HEAD;hpb=89aa9d4ba69e471949f791cdafe4ae20cea554d2

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 5a33716f4..61ff5d265 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,62 +18,196 @@
 
 */
 
+
 #include "audio_decoder.h"
 #include "audio_buffers.h"
 #include "audio_content.h"
+#include "dcpomatic_log.h"
 #include "log.h"
+#include "resampler.h"
 #include "compose.hpp"
-#include <boost/foreach.hpp>
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::cout;
-using std::map;
-using boost::shared_ptr;
+using std::shared_ptr;
+using std::make_shared;
 using boost::optional;
+using namespace dcpomatic;
+
 
-AudioDecoder::AudioDecoder (Decoder* parent, shared_ptr<const AudioContent> content, shared_ptr<Log> log)
-    : DecoderPart (parent, log)
+AudioDecoder::AudioDecoder (Decoder* parent, shared_ptr<const AudioContent> content, bool fast)
+    : DecoderPart (parent)
+    , _content (content)
+    , _fast (fast)
 {
-    BOOST_FOREACH (AudioStreamPtr i, content->streams ()) {
+    /* Set up _positions so that we have one for each stream */
+    for (auto i: content->streams ()) {
         _positions[i] = 0;
     }
 }
 
+
+/** @param flushing true if we are flushing, in which case the delay is not added to time */
 void
-AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time)
+AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time, bool flushing)
 {
     if (ignore ()) {
         return;
     }
 
-    if (_positions[stream] == 0) {
-        _positions[stream] = time.frames_round (stream->frame_rate ());
+    int const resampled_rate = _content->resampled_frame_rate(film);
+    if (!flushing) {
+        time += ContentTime::from_seconds (_content->delay() / 1000.0);
+    }
+
+    /* Amount of error we will tolerate on audio timestamps; see comment below.
+     * We'll use one 24fps video frame's worth, as this seems to be roughly how ffplay does it.
+     */
+    Frame const slack_frames = resampled_rate / 24;
+
+    /* first_since_seek is set to true if this is the first data we have
+       received since initialisation or seek.  We'll set the position based
+       on the ContentTime that was given.  After this first time we just
+       count samples unless the timestamp is more than slack_frames away
+       from where we think it should be.  This is because ContentTimes seem
+       to be slightly unreliable from FFmpegDecoder (i.e. not sample
+       accurate), but we still need to obey them sometimes, otherwise we get
+       sync problems such as #1833.
+    */
+
+    auto const first_since_seek = _positions[stream] == 0;
+    auto const need_reset = !first_since_seek && (std::abs(_positions[stream] - time.frames_round(resampled_rate)) > slack_frames);
+
+    if (need_reset) {
+        LOG_GENERAL (
+            "Reset audio position: was %1, new data at %2, slack: %3 frames",
+            _positions[stream],
+            time.frames_round(resampled_rate),
+            std::abs(_positions[stream] - time.frames_round(resampled_rate))
+            );
     }
 
-    Data (stream, ContentAudio (data, _positions[stream]));
+    if (first_since_seek || need_reset) {
+        _positions[stream] = time.frames_round (resampled_rate);
+    }
+
+    if (first_since_seek && _content->delay() > 0) {
+        silence (stream, _content->delay());
+    }
+
+    shared_ptr<Resampler> resampler;
+    auto i = _resamplers.find(stream);
+    if (i != _resamplers.end()) {
+        resampler = i->second;
+    } else {
+        if (stream->frame_rate() != resampled_rate) {
+            LOG_GENERAL (
+                "Creating new resampler from %1 to %2 with %3 channels",
+                stream->frame_rate(),
+                resampled_rate,
+                stream->channels()
+                );
+
+            resampler = make_shared<Resampler>(stream->frame_rate(), resampled_rate, stream->channels());
+            if (_fast) {
+                resampler->set_fast ();
+            }
+            _resamplers[stream] = resampler;
+        }
+    }
+
+    if (resampler && !flushing) {
+        /* It can be that the data here has a different number of channels than the stream
+         * it comes from (e.g. the files decoded by FFmpegDecoder sometimes have a random
+         * frame, often at the end, with more channels).  Insert silence or discard channels
+         * here.
+         */
+        if (resampler->channels() != data->channels()) {
+            LOG_WARNING("Received audio data with an unexpected channel count of %1 instead of %2", data->channels(), resampler->channels());
+            auto data_copy = data->clone();
+            data_copy->set_channels(resampler->channels());
+            data = resampler->run(data_copy);
+        } else {
+            data = resampler->run(data);
+        }
+
+        if (data->frames() == 0) {
+            return;
+        }
+    }
+
+    Data(stream, ContentAudio (data, _positions[stream]));
     _positions[stream] += data->frames();
 }
 
+
+/** @return Time just after the last thing that was emitted from a given stream */
 ContentTime
-AudioDecoder::position () const
+AudioDecoder::stream_position (shared_ptr<const Film> film, AudioStreamPtr stream) const
+{
+    auto i = _positions.find (stream);
+    DCPOMATIC_ASSERT (i != _positions.end ());
+    return ContentTime::from_frames (i->second, _content->resampled_frame_rate(film));
+}
+
+
+boost::optional<ContentTime>
+AudioDecoder::position (shared_ptr<const Film> film) const
 {
     optional<ContentTime> p;
-    for (map<AudioStreamPtr, Frame>::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
-        ContentTime const ct = ContentTime::from_frames (i->second, i->first->frame_rate ());
+    for (auto i: _positions) {
+        auto const ct = stream_position (film, i.first);
         if (!p || ct < *p) {
             p = ct;
         }
     }
 
-    return p.get_value_or(ContentTime());
+    return p;
 }
 
+
 void
 AudioDecoder::seek ()
 {
-    for (map<AudioStreamPtr, Frame>::iterator i = _positions.begin(); i != _positions.end(); ++i) {
-        i->second = 0;
+    for (auto i: _resamplers) {
+        i.second->flush ();
+        i.second->reset ();
+    }
+
+    for (auto& i: _positions) {
+        i.second = 0;
+    }
+}
+
+
+void
+AudioDecoder::flush ()
+{
+    for (auto const& i: _resamplers) {
+        auto ro = i.second->flush ();
+        if (ro->frames() > 0) {
+            Data (i.first, ContentAudio (ro, _positions[i.first]));
+            _positions[i.first] += ro->frames();
+        }
+    }
+
+    if (_content->delay() < 0) {
+        /* Finish off with the gap caused by the delay */
+        for (auto stream: _content->streams()) {
+            silence (stream, -_content->delay());
+        }
     }
 }
+
+
+void
+AudioDecoder::silence (AudioStreamPtr stream, int milliseconds)
+{
+    int const samples = ContentTime::from_seconds(milliseconds / 1000.0).frames_round(stream->frame_rate());
+    auto silence = make_shared<AudioBuffers>(stream->channels(), samples);
+    silence->make_silent ();
+    Data (stream, ContentAudio(silence, _positions[stream]));
+}