A few optimisations: hoist the per-sample stream->channels() and audio->data() calls out of the audio deinterleave loops, cache the audio packet's stream index, and synthesise timestamps for audio frames that arrive without a PTS (#1111).  Also add diagnostic detail to the "crazy timestamp" warning and pass the video size through to the SSA subtitle parser.
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index a09eab68e12e11763436cd83db1378d710a632cc..5e2cb8638804b328e36e1c89cd127e3e449d2535 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -100,6 +100,8 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log>
        if (c->subtitle) {
                subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
        }
+
+       _next_time.resize (_format_context->nb_streams);
 }
 
 void
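
The matching header change is not part of this diff; given that `_next_time[stream_index]` is later tested for emptiness and dereferenced with `*`, the new member is presumably declared along these lines (a sketch of the assumed declaration, not the actual header):

    /* In ffmpeg_decoder.h (assumed): one slot per AVStream, empty until a
       frame from that stream has been decoded, then holding the expected
       timestamp of the stream's next frame.
    */
    std::vector<boost::optional<ContentTime> > _next_time;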
@@ -203,8 +205,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
           of the block that do not form a complete sample or frame they will be dropped.
        */
        int const total_samples = size / bytes_per_audio_sample (stream);
-       int const frames = total_samples / stream->channels();
-       shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
+       int const channels = stream->channels();
+       int const frames = total_samples / channels;
+       shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
+       float** data = audio->data();
 
        switch (audio_sample_format (stream)) {
        case AV_SAMPLE_FMT_U8:
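
The hunk above is the core of the optimisation: `stream->channels()` and `audio->data(channel)` were previously re-evaluated for every sample inside the deinterleave loops, and are now hoisted into the locals `channels` and `data` once per call.  A minimal standalone sketch of the interleaved case, with hypothetical names (the `(1 << 23)` divisor in the U8 path below predates this commit and looks suspect for 8-bit samples, but it is untouched here):

    #include <cstdint>

    /* Interleaved signed 16-bit PCM -> planar float in [-1, 1), with the
       loop invariants hoisted out of the per-sample loop. */
    static void
    deinterleave_s16 (int16_t const * in, float** out, int channels, int frames)
    {
            int sample = 0;
            int channel = 0;
            for (int i = 0; i < channels * frames; ++i) {
                    out[channel][sample] = float(*in++) / (1 << 15);
                    if (++channel == channels) {
                            channel = 0;
                            ++sample;
                    }
            }
    }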
@@ -213,10 +217,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
-                       audio->data(channel)[sample] = float(*p++) / (1 << 23);
+                       data[channel][sample] = float(*p++) / (1 << 23);
 
                        ++channel;
-                       if (channel == stream->channels()) {
+                       if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
@@ -230,10 +234,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
-                       audio->data(channel)[sample] = float(*p++) / (1 << 15);
+                       data[channel][sample] = float(*p++) / (1 << 15);
 
                        ++channel;
-                       if (channel == stream->channels()) {
+                       if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
@@ -244,9 +248,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
        case AV_SAMPLE_FMT_S16P:
        {
                int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
-               for (int i = 0; i < stream->channels(); ++i) {
+               for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
-                               audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
+                               data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
                        }
                }
        }
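
For the planar formats (the `P` suffix) no channel/sample round-robin is needed: FFmpeg stores each channel contiguously in its own `_frame->data[i]` plane, so the copy is a simple per-channel loop and only the scale to float remains.  The interleaved formats above instead pack successive channels of each sample next to one another in `_frame->data[0]`, hence the two counters.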
@@ -258,10 +262,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
-                       audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648;
+                       data[channel][sample] = static_cast<float>(*p++) / 2147483648;
 
                        ++channel;
-                       if (channel == stream->channels()) {
+                       if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
@@ -272,9 +276,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
        case AV_SAMPLE_FMT_S32P:
        {
                int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
-               for (int i = 0; i < stream->channels(); ++i) {
+               for (int i = 0; i < channels; ++i) {
                        for (int j = 0; j < frames; ++j) {
-                               audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648;
+                               data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
                        }
                }
        }
@@ -286,10 +290,10 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
                int sample = 0;
                int channel = 0;
                for (int i = 0; i < total_samples; ++i) {
-                       audio->data(channel)[sample] = *p++;
+                       data[channel][sample] = *p++;
 
                        ++channel;
-                       if (channel == stream->channels()) {
+                       if (channel == channels) {
                                channel = 0;
                                ++sample;
                        }
@@ -302,9 +306,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
                float** p = reinterpret_cast<float**> (_frame->data);
                /* Sometimes there aren't as many channels in the _frame as in the stream */
                for (int i = 0; i < _frame->channels; ++i) {
-                       memcpy (audio->data(i), p[i], frames * sizeof(float));
+                       memcpy (data[i], p[i], frames * sizeof(float));
                }
-               for (int i = _frame->channels; i < stream->channels(); ++i) {
+               for (int i = _frame->channels; i < channels; ++i) {
                        audio->make_silent (i);
                }
        }
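
`AV_SAMPLE_FMT_FLTP` is already planar float, so whole planes can be block-copied with no per-sample conversion; the second loop covers frames that carry fewer channels than the stream declares by silencing the remainder.  A self-contained sketch of the pattern (names hypothetical; silence is taken to be zeroed floats, which is what `make_silent` amounts to for float PCM):

    #include <cstring>

    /* Copy the planes the frame actually has; zero-fill any channels the
       stream declares but this frame omits. */
    static void
    copy_float_planar (float const * const * in, float** out, int frame_channels, int stream_channels, int frames)
    {
            for (int i = 0; i < frame_channels; ++i) {
                    memcpy (out[i], in[i], frames * sizeof(float));
            }
            for (int i = frame_channels; i < stream_channels; ++i) {
                    memset (out[i], 0, frames * sizeof(float));
            }
    }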
@@ -392,11 +396,12 @@ FFmpegDecoder::decode_audio_packet ()
        */
 
        AVPacket copy_packet = _packet;
+       int const stream_index = copy_packet.stream_index;
 
        /* XXX: inefficient */
        vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
        vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
-       while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
+       while (stream != streams.end () && !(*stream)->uses_index (_format_context, stream_index)) {
                ++stream;
        }
 
@@ -426,13 +431,24 @@ FFmpegDecoder::decode_audio_packet ()
                }
 
                if (frame_finished) {
-                       ContentTime ct = ContentTime::from_seconds (
-                               av_frame_get_best_effort_timestamp (_frame) *
-                               av_q2d ((*stream)->stream (_format_context)->time_base))
-                               + _pts_offset;
-
                        shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
 
+                       ContentTime ct;
+                       if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+                       /* In some streams not every frame comes through with a timestamp; for those
+                          with AV_NOPTS_VALUE we have to work out the timestamp ourselves.  This is
+                          particularly noticeable with TrueHD streams (see #1111).
+                       */
+                               ct = *_next_time[stream_index];
+                       } else {
+                               ct = ContentTime::from_seconds (
+                                       av_frame_get_best_effort_timestamp (_frame) *
+                                       av_q2d ((*stream)->stream (_format_context)->time_base))
+                                       + _pts_offset;
+                       }
+
+                       _next_time[stream_index] = ct + ContentTime::from_frames(data->frames(), (*stream)->frame_rate());
+
                        if (ct < ContentTime ()) {
                                /* Discard audio data that comes before time 0 */
                                Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
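
The new logic above falls back to a synthesised timestamp only when the container supplies none: each decoded frame records, in `_next_time`, its own timestamp plus its duration in samples, and the next PTS-less frame of the same stream picks that value up.  For example, a 1152-sample frame at 48 kHz advances `_next_time` by 0.024 s.  A condensed sketch of the decision (helper name and parameters hypothetical):

    /* Prefer the extrapolated time only when the container gave us nothing. */
    ContentTime
    frame_time (AVFrame const * frame, boost::optional<ContentTime> next, AVRational time_base, ContentTime pts_offset)
    {
            if (frame->pts == AV_NOPTS_VALUE && next) {
                    return *next;
            }
            return ContentTime::from_seconds (av_frame_get_best_effort_timestamp(frame) * av_q2d(time_base)) + pts_offset;
    }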
@@ -442,7 +458,16 @@ FFmpegDecoder::decode_audio_packet ()
                        }
 
                        if (ct < ContentTime()) {
-                               LOG_WARNING ("Crazy timestamp %1", to_string (ct));
+                               LOG_WARNING (
+                                       "Crazy timestamp %1 for %2 samples in stream %3 packet pts %4 (ts=%5 tb=%6, off=%7)",
+                                       to_string(ct),
+                                       data->frames(),
+                                       stream_index,
+                                       copy_packet.pts,
+                                       av_frame_get_best_effort_timestamp(_frame),
+                                       av_q2d((*stream)->stream(_format_context)->time_base),
+                                       to_string(_pts_offset)
+                                       );
                        }
 
                        /* Give this data provided there is some, and its time is sane */
@@ -647,7 +672,12 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
        }
 
        sub::RawSubtitle base;
-       list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
+       list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
+               base,
+               bits[9],
+               _ffmpeg_content->video->size().width,
+               _ffmpeg_content->video->size().height
+               );
 
        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
                subtitle->emit_text_start (from, i);
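
The two extra arguments hand the SSA parser the video raster size.  ASS positioning values are expressed in pixels, so the parser presumably needs the target dimensions to map them onto the frame; that reading is inferred from this call site, as the changed libsub signature itself is not shown in the diff.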