X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Faudio_decoder.cc;h=61ff5d265c526e6de31e44d5bfb7eb6972883fba;hb=c7f45cd00b94393f6e15428a2ea256995f890412;hp=2d02043b5948d1071d0de18982e14abfcf03c12e;hpb=2e2f11b29651cffe37c64275dbd45c7563310020;p=dcpomatic.git

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 2d02043b5..61ff5d265 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -52,39 +52,36 @@ AudioDecoder::AudioDecoder (Decoder* parent, shared_ptr<const AudioContent> cont
 
 /** @param time_already_delayed true if the delay should not be added to time */
 void
-AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time, bool time_already_delayed)
+AudioDecoder::emit(shared_ptr<const Film> film, AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time, bool flushing)
 {
 	if (ignore ()) {
 		return;
 	}
 
-	/* Amount of error we will tolerate on audio timestamps; see comment below.
-	 * We'll use 1 24fps video frame at 48kHz as this seems to be roughly how
-	 * ffplay does it.
-	 */
-	static Frame const slack_frames = 48000 / 24;
-
 	int const resampled_rate = _content->resampled_frame_rate(film);
-	if (!time_already_delayed) {
+	if (!flushing) {
 		time += ContentTime::from_seconds (_content->delay() / 1000.0);
 	}
 
-	auto reset = false;
-	if (_positions[stream] == 0) {
-		/* This is the first data we have received since initialisation or seek. Set
-		   the position based on the ContentTime that was given. After this first time
-		   we just count samples unless the timestamp is more than slack_frames away
-		   from where we think it should be. This is because ContentTimes seem to be
-		   slightly unreliable from FFmpegDecoder (i.e. not sample accurate), but we still
-		   need to obey them sometimes otherwise we get sync problems such as #1833.
-		*/
-		if (_content->delay() > 0) {
-			/* Insert silence to give the delay */
-			silence (_content->delay ());
-		}
-		reset = true;
-	} else if (std::abs(_positions[stream] - time.frames_round(resampled_rate)) > slack_frames) {
-		reset = true;
+	/* Amount of error we will tolerate on audio timestamps; see comment below.
+	 * We'll use 1 24fps video frame as this seems to be roughly how ffplay does it.
+	 */
+	Frame const slack_frames = resampled_rate / 24;
+
+	/* first_since_seek is set to true if this is the first data we have
+	   received since initialisation or seek. We'll set the position based
+	   on the ContentTime that was given. After this first time we just
+	   count samples unless the timestamp is more than slack_frames away
+	   from where we think it should be. This is because ContentTimes seem
+	   to be slightly unreliable from FFmpegDecoder (i.e. not sample
+	   accurate), but we still need to obey them sometimes otherwise we get
+	   sync problems such as #1833.
+	*/
+
+	auto const first_since_seek = _positions[stream] == 0;
+	auto const need_reset = !first_since_seek && (std::abs(_positions[stream] - time.frames_round(resampled_rate)) > slack_frames);
+
+	if (need_reset) {
 		LOG_GENERAL (
 			"Reset audio position: was %1, new data at %2, slack: %3 frames",
 			_positions[stream],
@@ -93,10 +90,14 @@ AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_p
 		);
 	}
 
-	if (reset) {
+	if (first_since_seek || need_reset) {
 		_positions[stream] = time.frames_round (resampled_rate);
 	}
 
+	if (first_since_seek && _content->delay() > 0) {
+		silence (stream, _content->delay());
+	}
+
 	shared_ptr<Resampler> resampler;
 	auto i = _resamplers.find(stream);
 	if (i != _resamplers.end()) {
@@ -118,12 +119,24 @@ AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_p
 		}
 	}
 
-	if (resampler) {
-		auto ro = resampler->run (data);
-		if (ro->frames() == 0) {
+	if (resampler && !flushing) {
+		/* It can be that the data here has a different number of channels than the stream
+		 * it comes from (e.g. the files decoded by FFmpegDecoder sometimes have a random
+		 * frame, often at the end, with more channels). Insert silence or discard channels
+		 * here.
+		 */
+		if (resampler->channels() != data->channels()) {
+			LOG_WARNING("Received audio data with an unexpected channel count of %1 instead of %2", data->channels(), resampler->channels());
+			auto data_copy = data->clone();
+			data_copy->set_channels(resampler->channels());
+			data = resampler->run(data_copy);
+		} else {
+			data = resampler->run(data);
+		}
+
+		if (data->frames() == 0) {
 			return;
 		}
-		data = ro;
 	}
 
 	Data(stream, ContentAudio (data, _positions[stream]));
@@ -183,18 +196,18 @@ AudioDecoder::flush ()
 
 	if (_content->delay() < 0) {
 		/* Finish off with the gap caused by the delay */
-		silence (-_content->delay ());
+		for (auto stream: _content->streams()) {
+			silence (stream, -_content->delay());
+		}
 	}
 }
 
 
 void
-AudioDecoder::silence (int milliseconds)
+AudioDecoder::silence (AudioStreamPtr stream, int milliseconds)
 {
-	for (auto i: _content->streams()) {
-		int const samples = ContentTime::from_seconds(milliseconds / 1000.0).frames_round(i->frame_rate());
-		auto silence = make_shared<AudioBuffers>(i->channels(), samples);
-		silence->make_silent ();
-		Data (i, ContentAudio (silence, _positions[i]));
-	}
+	int const samples = ContentTime::from_seconds(milliseconds / 1000.0).frames_round(stream->frame_rate());
+	auto silence = make_shared<AudioBuffers>(stream->channels(), samples);
+	silence->make_silent ();
+	Data (stream, ContentAudio(silence, _positions[stream]));
 }
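
For reference, the position-tracking rule this patch introduces in emit() can be summarised outside the dcpomatic tree as a small, self-contained sketch. PositionTracker, place() and consumed() are hypothetical names invented for this illustration, and Frame is reduced to a plain 64-bit sample count; only the slack calculation (one 24fps frame's worth of samples at the resampled rate) and the first-since-seek / need-reset decision mirror the patched code.

#include <cstdint>
#include <cstdlib>

typedef int64_t Frame;

struct PositionTracker
{
	Frame position = 0;  // next expected sample position; 0 means nothing seen since init/seek

	/* Decide where a block of audio whose timestamp says it starts at 'timestamp'
	 * (in samples at the resampled rate) should be placed.  Trust the timestamp on
	 * the first block after a seek, or when it drifts more than one 24fps frame's
	 * worth of samples from where sample-counting says we are; otherwise keep counting.
	 */
	Frame place (Frame timestamp, int resampled_rate)
	{
		Frame const slack_frames = resampled_rate / 24;
		bool const first_since_seek = position == 0;
		bool const need_reset = !first_since_seek && std::abs(position - timestamp) > slack_frames;
		if (first_since_seek || need_reset) {
			position = timestamp;
		}
		return position;
	}

	/* Call after emitting a block of 'frames' samples so the next block is expected
	 * immediately after it.
	 */
	void consumed (Frame frames)
	{
		position += frames;
	}
};

A caller would pass each incoming block's timestamp to place() and report its length via consumed(); timestamps are then only honoured on the first block after a seek or when they drift beyond slack_frames, which is the behaviour the patch's comment describes for coping with FFmpegDecoder's not-quite-sample-accurate ContentTimes (#1833).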
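The channel-count guard added before resampling can likewise be illustrated with a short, self-contained sketch. This is not the AudioBuffers API (the patch itself uses clone() and set_channels()); coerce_channels() and its vector-of-vectors representation are hypothetical, chosen only to show the "pad with silence or discard the extras" behaviour applied when a decoded block does not match the resampler's channel count.

#include <cstddef>
#include <vector>

/* Force a block of planar audio (one vector per channel, 'frames' samples each) to
 * have exactly 'expected_channels' channels: surplus channels are dropped, missing
 * ones are filled with silence.
 */
std::vector<std::vector<float>>
coerce_channels (std::vector<std::vector<float>> block, std::size_t expected_channels, std::size_t frames)
{
	if (block.size() > expected_channels) {
		/* Too many channels: discard the extras */
		block.resize (expected_channels);
	} else if (block.size() < expected_channels) {
		/* Too few channels: pad with silent ones */
		block.resize (expected_channels, std::vector<float>(frames, 0.0f));
	}
	return block;
}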