More renaming: SubtitleDecoder -> TextDecoder, subtitle_content.h -> text_content.h, emit_image_start -> emit_bitmap_start and emit_text_start -> emit_plain_start in src/lib/ffmpeg_decoder.cc.
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index a5b6af7de51497550c8c1d450d20f9d04241ad68..909a9d443d1383ebca4179554fa9745c12e5acfb 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -28,7 +28,7 @@
 #include "util.h"
 #include "log.h"
 #include "ffmpeg_decoder.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
 #include "ffmpeg_audio_stream.h"
 #include "ffmpeg_subtitle_stream.h"
 #include "video_filter_graph.h"
@@ -39,7 +39,7 @@
 #include "film.h"
 #include "audio_decoder.h"
 #include "compose.hpp"
-#include "subtitle_content.h"
+#include "text_content.h"
 #include "audio_content.h"
 #include <dcp/subtitle_string.h>
 #include <sub/ssa_reader.h>
@@ -99,7 +99,7 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log>
 
        if (c->subtitle) {
                /* XXX: this time here should be the time of the first subtitle, not 0 */
-               subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
+               subtitle.reset (new TextDecoder (this, c->subtitle, log, ContentTime()));
        }
 
        _next_time.resize (_format_context->nb_streams);
@@ -138,12 +138,18 @@ FFmpegDecoder::flush ()
 
        BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
                ContentTime a = audio->stream_position(i);
-               while (a < full_length) {
-                       ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
-                       shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
-                       silence->make_silent ();
-                       audio->emit (i, silence, a);
-                       a += to_do;
+               /* Unfortunately if a is 0 that really means that we don't know the stream position since
+                  there has been no data on it since the last seek.  In this case we'll just do nothing
+                  here.  I'm not sure if that's the right idea.
+               */
+               if (a > ContentTime()) {
+                       while (a < full_length) {
+                               ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+                               shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+                               silence->make_silent ();
+                               audio->emit (i, silence, a);
+                               a += to_do;
+                       }
                }
        }
 
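For readers following the flush() change above: a minimal standalone sketch of the chunked silence-fill, using plain frame counts and illustrative names instead of the real ContentTime/AudioBuffers types (pad_with_silence and its parameters are assumptions for this sketch, not part of DCP-o-matic's API).

#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

typedef int64_t Frames;  /* stand-in for a frame count at the stream's rate */

/* Pad a stream with silence from `position' up to `full_length', emitting at most
   `chunk' frames at a time, mirroring the 0.1-second chunks used in flush() above.
*/
void
pad_with_silence (Frames position, Frames full_length, Frames chunk, int channels, std::function<void (std::vector<std::vector<float> > const &, Frames)> emit)
{
        /* As in the patch: a position of 0 means "unknown since the last seek", so do nothing */
        if (position <= 0) {
                return;
        }

        while (position < full_length) {
                Frames const to_do = std::min (full_length - position, chunk);
                /* One zeroed buffer per channel: this is the silence that gets emitted */
                std::vector<std::vector<float> > silence (channels, std::vector<float> (to_do, 0.0f));
                emit (silence, position);
                position += to_do;
        }
}
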
@@ -502,7 +508,8 @@ FFmpegDecoder::decode_video_packet ()
        }
 
        if (i == _filter_graphs.end ()) {
-               graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+               dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
+               graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
                graph->setup (_ffmpeg_content->filters ());
                _filter_graphs.push_back (graph);
                LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
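The extra constructor argument above packs the content's floating-point video frame rate into a dcp::Fraction with a fixed denominator of 1000. A small sketch of that rounding, with a stand-in Fraction struct (illustrative only; the real type is dcp::Fraction from libdcp):

#include <cmath>
#include <cstdio>

/* Stand-in for dcp::Fraction: a simple numerator/denominator pair */
struct Fraction
{
        Fraction (int n, int d) : numerator (n), denominator (d) {}
        int numerator;
        int denominator;
};

/* Round a floating-point frame rate to a rational with denominator 1000,
   as the hunk above does when constructing the VideoFilterGraph.
*/
Fraction
rate_to_fraction (double rate)
{
        return Fraction (static_cast<int> (std::lrint (rate * 1000)), 1000);
}

int
main ()
{
        Fraction const f = rate_to_fraction (23.976);
        printf ("%d/%d\n", f.numerator, f.denominator);  /* prints 23976/1000 */
        return 0;
}
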
@@ -560,11 +567,11 @@ FFmpegDecoder::decode_subtitle_packet ()
        FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
        ContentTime from;
        from = sub_period.from + _pts_offset;
-       _have_current_subtitle = true;
        if (sub_period.to) {
                _current_subtitle_to = *sub_period.to + _pts_offset;
        } else {
                _current_subtitle_to = optional<ContentTime>();
+               _have_current_subtitle = true;
        }
 
        for (unsigned int i = 0; i < sub.num_rects; ++i) {
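The hunk above now raises _have_current_subtitle only when the packet does not carry its own end time, leaving _current_subtitle_to unset until a later event closes the subtitle. A rough sketch of that pattern using boost::optional as the surrounding code does (SubtitleTracker and start() are illustrative names, not the decoder's real interface):

#include <boost/optional.hpp>
#include <cstdint>

typedef int64_t ContentTime;  /* stand-in for the real ContentTime class */

/* Illustrative tracker for a subtitle whose end time may not be known yet */
struct SubtitleTracker
{
        SubtitleTracker () : have_current (false) {}

        /* Called when a subtitle starts: keep the end time if the packet carried one,
           otherwise flag that the end is still unknown so a later event can close it.
        */
        void start (boost::optional<ContentTime> to)
        {
                if (to) {
                        current_to = *to;
                } else {
                        current_to = boost::optional<ContentTime> ();
                        have_current = true;
                }
        }

        bool have_current;
        boost::optional<ContentTime> current_to;
};
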
@@ -662,7 +669,7 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
                static_cast<double> (rect->h) / target_height
                );
 
-       subtitle->emit_image_start (from, image, scaled_rect);
+       subtitle->emit_bitmap_start (from, image, scaled_rect);
 }
 
 void
@@ -672,21 +679,29 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
           produces a single format of Dialogue: lines...
        */
 
-       vector<string> bits;
-       split (bits, ass, is_any_of (","));
-       if (bits.size() < 10) {
+       int commas = 0;
+       string text;
+       for (size_t i = 0; i < ass.length(); ++i) {
+               if (commas < 9 && ass[i] == ',') {
+                       ++commas;
+               } else if (commas == 9) {
+                       text += ass[i];
+               }
+       }
+
+       if (text.empty ()) {
                return;
        }
 
        sub::RawSubtitle base;
        list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
                base,
-               bits[9],
+               text,
                _ffmpeg_content->video->size().width,
                _ffmpeg_content->video->size().height
                );
 
        BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
-               subtitle->emit_text_start (from, i);
+               subtitle->emit_plain_start (from, i);
        }
 }
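
With the old boost::split approach, a comma inside the dialogue text itself would cut the text short at bits[9]; the new loop keeps everything after the ninth comma. A standalone sketch of the same extraction (the function name and the sample Dialogue line are illustrative only):

#include <cstdio>
#include <string>

/* Return the text field of an ASS "Dialogue:" line: everything after the ninth
   comma, so commas inside the dialogue itself are preserved.
*/
std::string
ass_dialogue_text (std::string const & line)
{
        int commas = 0;
        std::string text;
        for (size_t i = 0; i < line.length(); ++i) {
                if (commas < 9 && line[i] == ',') {
                        ++commas;
                } else if (commas == 9) {
                        text += line[i];
                }
        }
        return text;
}

int
main ()
{
        /* Illustrative Dialogue line: nine fields of metadata, then the text */
        std::string const line = "Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello, world!";
        printf ("%s\n", ass_dialogue_text (line).c_str ());  /* prints "Hello, world!" */
        return 0;
}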