X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Faudio_decoder.cc;h=a5e86f22b8e352d529fa39e69149ad10cca9fec4;hp=2c0388fc39318851242b96a7a672b014ba5fca27;hb=463496994d770577ff1e1ea6e7b1e4addb4252b2;hpb=f1d30fb114b3b2c6ccd8fdf5823e7cd6b26c1eef

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 2c0388fc3..a5e86f22b 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -1,175 +1,167 @@
 /*
-    Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
 #include "audio_decoder.h"
 #include "audio_buffers.h"
-#include "exceptions.h"
+#include "audio_content.h"
+#include "dcpomatic_log.h"
 #include "log.h"
 #include "resampler.h"
-#include "util.h"
-#include "film.h"
+#include "compose.hpp"
+#include <boost/foreach.hpp>
+#include <iostream>
 
 #include "i18n.h"
 
-using std::stringstream;
-using std::list;
-using std::pair;
 using std::cout;
-using std::min;
-using std::max;
-using boost::optional;
+using std::map;
+using std::pair;
 using boost::shared_ptr;
+using boost::optional;
+using namespace dcpomatic;
 
-AudioDecoder::AudioDecoder (shared_ptr<const AudioContent> content)
-	: _audio_content (content)
+AudioDecoder::AudioDecoder (Decoder* parent, shared_ptr<const AudioContent> content, bool fast)
	: DecoderPart (parent)
+	, _content (content)
+	, _fast (fast)
 {
-	if (content->resampled_audio_frame_rate() != content->audio_frame_rate() && content->audio_channels ()) {
-		_resampler.reset (new Resampler (content->audio_frame_rate(), content->resampled_audio_frame_rate(), content->audio_channels ()));
+	/* Set up _positions so that we have one for each stream */
+	BOOST_FOREACH (AudioStreamPtr i, content->streams ()) {
+		_positions[i] = 0;
 	}
-
-	reset_decoded_audio ();
 }
 
 void
-AudioDecoder::reset_decoded_audio ()
+AudioDecoder::emit (shared_ptr<const Film> film, AudioStreamPtr stream, shared_ptr<const AudioBuffers> data, ContentTime time)
 {
-	_decoded_audio = ContentAudio (shared_ptr<AudioBuffers> (new AudioBuffers (_audio_content->audio_channels(), 0)), 0);
-}
-
-shared_ptr<ContentAudio>
-AudioDecoder::get_audio (AudioFrame frame, AudioFrame length, bool accurate)
-{
-	shared_ptr<ContentAudio> dec;
+	if (ignore ()) {
+		return;
+	}
 
-	AudioFrame const end = frame + length - 1;
-
-	if (frame < _decoded_audio.frame || end > (_decoded_audio.frame + length * 4)) {
-		/* Either we have no decoded data, or what we do have is a long way from what we want: seek */
-		seek (ContentTime::from_frames (frame, _audio_content->audio_frame_rate()), accurate);
+	if (_positions[stream] == 0) {
+		/* This is the first data we have received since initialisation or seek.  Set
+		   the position based on the ContentTime that was given.  After this first time
+		   we just count samples, as it seems that ContentTimes are unreliable from
+		   FFmpegDecoder (not quite continuous; perhaps due to some rounding error).
+		*/
+		if (_content->delay() > 0) {
+			/* Insert silence to give the delay */
+			silence (_content->delay ());
+		}
+		time += ContentTime::from_seconds (_content->delay() / 1000.0);
+		_positions[stream] = time.frames_round (_content->resampled_frame_rate(film));
 	}
 
-	/* Offset of the data that we want from the start of _decoded_audio.audio
-	   (to be set up shortly)
-	*/
-	AudioFrame decoded_offset = 0;
-
-	/* Now enough pass() calls will either:
-	 * (a) give us what we want, or
-	 * (b) hit the end of the decoder.
-	 *
-	 * If we are being accurate, we want the right frames,
-	 * otherwise any frames will do.
-	 */
-	if (accurate) {
-		/* Keep stuffing data into _decoded_audio until we have enough data, or the subclass does not want to give us any more */
-		while (!pass() && (_decoded_audio.frame > frame || (_decoded_audio.frame + _decoded_audio.audio->frames()) < end)) {}
-		decoded_offset = frame - _decoded_audio.frame;
+	shared_ptr<Resampler> resampler;
+	ResamplerMap::iterator i = _resamplers.find(stream);
+	if (i != _resamplers.end ()) {
+		resampler = i->second;
 	} else {
-		while (!pass() && _decoded_audio.audio->frames() < length) {}
-		/* Use decoded_offset of 0, as we don't really care what frames we return */
+		if (stream->frame_rate() != _content->resampled_frame_rate(film)) {
+			LOG_GENERAL (
+				"Creating new resampler from %1 to %2 with %3 channels",
+				stream->frame_rate(),
+				_content->resampled_frame_rate(film),
+				stream->channels()
+				);
+
+			resampler.reset (new Resampler (stream->frame_rate(), _content->resampled_frame_rate(film), stream->channels()));
+			if (_fast) {
+				resampler->set_fast ();
+			}
+			_resamplers[stream] = resampler;
+		}
 	}
 
-	/* The amount of data available in _decoded_audio.audio starting from `frame'.  This could be -ve
-	   if pass() returned true before we got enough data.
-	*/
-	AudioFrame const available = _decoded_audio.audio->frames() - decoded_offset;
-
-	/* We will return either that, or the requested amount, whichever is smaller */
-	AudioFrame const to_return = max ((AudioFrame) 0, min (available, length));
+	if (resampler) {
+		shared_ptr<const AudioBuffers> ro = resampler->run (data);
+		if (ro->frames() == 0) {
+			return;
+		}
+		data = ro;
+	}
 
-	/* Copy our data to the output */
-	shared_ptr<AudioBuffers> out (new AudioBuffers (_decoded_audio.audio->channels(), to_return));
-	out->copy_from (_decoded_audio.audio.get(), to_return, decoded_offset, 0);
+	Data(stream, ContentAudio (data, _positions[stream]));
+	_positions[stream] += data->frames();
+}
 
-	AudioFrame const remaining = max ((AudioFrame) 0, available - to_return);
+/** @return Time just after the last thing that was emitted from a given stream */
+ContentTime
+AudioDecoder::stream_position (shared_ptr<const Film> film, AudioStreamPtr stream) const
+{
+	PositionMap::const_iterator i = _positions.find (stream);
+	DCPOMATIC_ASSERT (i != _positions.end ());
+	return ContentTime::from_frames (i->second, _content->resampled_frame_rate(film));
+}
 
-	/* Clean up decoded; first, move the data after what we just returned to the start of the buffer */
-	_decoded_audio.audio->move (decoded_offset + to_return, 0, remaining);
-	/* And set up the number of frames we have left */
-	_decoded_audio.audio->set_frames (remaining);
-	/* Also bump where those frames are in terms of the content */
-	_decoded_audio.frame += decoded_offset + to_return;
+boost::optional<ContentTime>
+AudioDecoder::position (shared_ptr<const Film> film) const
+{
+	optional<ContentTime> p;
+	for (PositionMap::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
+		ContentTime const ct = stream_position (film, i->first);
+		if (!p || ct < *p) {
+			p = ct;
+		}
+	}
 
-	return shared_ptr<ContentAudio> (new ContentAudio (out, frame));
+	return p;
 }
 
-/** Called by subclasses when audio data is ready.
- *
- * Audio timestamping is made hard by many factors, but perhaps the most entertaining is resampling.
- * We have to assume that we are feeding continuous data into the resampler, and so we get continuous
- * data out.  Hence we do the timestamping here, post-resampler, just by counting samples.
- *
- * The time is passed in here so that after a seek we can set up our _audio_position.  The
- * time is ignored once this has been done.
- */
 void
-AudioDecoder::audio (shared_ptr<const AudioBuffers> data, ContentTime time)
+AudioDecoder::seek ()
 {
-	if (_resampler) {
-		data = _resampler->run (data);
-	}
-
-	if (!_audio_position) {
-		_audio_position = time.frames (_audio_content->resampled_audio_frame_rate ());
+	for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
+		i->second->flush ();
+		i->second->reset ();
 	}
 
-	assert (_audio_position.get() >= (_decoded_audio.frame + _decoded_audio.audio->frames()));
-
-	/* Resize _decoded_audio to fit the new data */
-	int new_size = 0;
-	if (_decoded_audio.audio->frames() == 0) {
-		/* There's nothing in there, so just store the new data */
-		new_size = data->frames ();
-		_decoded_audio.frame = _audio_position.get ();
-	} else {
-		/* Otherwise we need to extend _decoded_audio to include the new stuff */
-		new_size = _audio_position.get() + data->frames() - _decoded_audio.frame;
+	for (PositionMap::iterator i = _positions.begin(); i != _positions.end(); ++i) {
+		i->second = 0;
 	}
-
-	_decoded_audio.audio->ensure_size (new_size);
-	_decoded_audio.audio->set_frames (new_size);
-
-	/* Copy new data in */
-	_decoded_audio.audio->copy_from (data.get(), data->frames(), 0, _audio_position.get() - _decoded_audio.frame);
-	_audio_position = _audio_position.get() + data->frames ();
 }
 
-/* XXX: called? */
 void
 AudioDecoder::flush ()
 {
-	if (!_resampler) {
-		return;
+	for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
+		shared_ptr<const AudioBuffers> ro = i->second->flush ();
+		if (ro->frames() > 0) {
+			Data (i->first, ContentAudio (ro, _positions[i->first]));
+			_positions[i->first] += ro->frames();
+		}
 	}
 
-	/*
-	shared_ptr<const AudioBuffers> b = _resampler->flush ();
-	if (b) {
-		_pending.push_back (shared_ptr<DecodedAudio> (new DecodedAudio (b, _audio_position.get ())));
-		_audio_position = _audio_position.get() + b->frames ();
+	if (_content->delay() < 0) {
+		/* Finish off with the gap caused by the delay */
+		silence (-_content->delay ());
 	}
-	*/
 }
 
 void
-AudioDecoder::seek (ContentTime, bool)
+AudioDecoder::silence (int milliseconds)
 {
-	_audio_position.reset ();
-	reset_decoded_audio ();
+	BOOST_FOREACH (AudioStreamPtr i, _content->streams ()) {
+		int const samples = ContentTime::from_seconds(milliseconds / 1000.0).frames_round(i->frame_rate());
+		shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), samples));
+		silence->make_silent ();
+		Data (i, ContentAudio (silence, _positions[i]));
+	}
 }
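
The new emit()/silence() bookkeeping boils down to: a positive content delay becomes a block of silent frames plus an offset applied to the first stream position, and after that the position simply advances by the number of frames in each emitted block instead of trusting per-packet ContentTimes. The standalone sketch below mirrors that arithmetic; it is illustrative only and not DCP-o-matic code: the frames_round() helper, the 48 kHz rate, the 120 ms delay and the block size are all assumptions, and the first ContentTime is taken to be zero.

// Standalone sketch (not DCP-o-matic code) of the bookkeeping in
// AudioDecoder::emit() and AudioDecoder::silence(): a positive delay in
// milliseconds becomes a block of silent frames plus an offset to the first
// stream position; afterwards the position advances by the frames emitted.
#include <cstdint>
#include <cmath>
#include <iostream>

// Assumed stand-in for ContentTime::from_seconds(s).frames_round(rate).
static int64_t frames_round (double seconds, int rate)
{
	return std::llround (seconds * rate);
}

int main ()
{
	int const rate = 48000;     // assumed resampled frame rate
	int const delay_ms = 120;   // assumed positive audio delay

	// silence(delay_ms) would emit this many silent frames per stream
	int64_t const silent_frames = frames_round (delay_ms / 1000.0, rate);

	// the first real block (its ContentTime assumed to be 0) starts after the delay...
	int64_t position = frames_round (delay_ms / 1000.0, rate);

	// ...and later blocks just advance the position by their own frame count,
	// rather than trusting per-packet ContentTimes
	int64_t const block_frames = 2000;  // assumed size of one decoded block
	position += block_frames;

	std::cout << "silent frames: " << silent_frames            // 5760
	          << ", position after first block: " << position  // 7760
	          << "\n";
	return 0;
}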