Merge branch 'master' into video-player
[dcpomatic.git] / src / lib / ffmpeg_decoder.cc
index 911714d7b3c31c19876d2aaef8eefbbfcbbb1c93..0e4446a86a7eada515115b22f8e5b80277edf5ca 100644 (file)
@@ -59,7 +59,7 @@ using boost::shared_ptr;
 using boost::optional;
 using boost::dynamic_pointer_cast;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, Job* j)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const DecodeOptions> o, Job* j)
        : Decoder (f, o, j)
        , VideoDecoder (f, o, j)
        , AudioDecoder (f, o, j)
@@ -77,6 +77,10 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, J
        setup_video ();
        setup_audio ();
        setup_subtitle ();
+
+       if (!o->video_sync) {
+               _first_video = 0;
+       }
 }
 
 FFmpegDecoder::~FFmpegDecoder ()
@@ -162,14 +166,7 @@ FFmpegDecoder::setup_video ()
                throw DecodeError ("could not find video decoder");
        }
 
-       /* I think this prevents problems with green hash on decodes and
-          "changing frame properties on the fly is not supported by all filters"
-          messages with some content.  Although I'm not sure; needs checking.
-       */
-       AVDictionary* opts = 0;
-       av_dict_set (&opts, "threads", "1", 0);
-       
-       if (avcodec_open2 (_video_codec_context, _video_codec, &opts) < 0) {
+       if (avcodec_open2 (_video_codec_context, _video_codec, 0) < 0) {
                throw DecodeError ("could not open video decoder");
        }
 }
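
With the workaround gone, the codec is opened with FFmpeg's default threading. Should the "green hash" artefacts described in the removed comment ever resurface, the single-threaded open can be restored; a minimal sketch, using the same avcodec calls as this file (with an av_dict_free that the original workaround omitted):

    AVDictionary* opts = 0;
    av_dict_set (&opts, "threads", "1", 0);
    if (avcodec_open2 (_video_codec_context, _video_codec, &opts) < 0) {
            av_dict_free (&opts);
            throw DecodeError ("could not open video decoder");
    }
    av_dict_free (&opts);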
@@ -259,7 +256,7 @@ FFmpegDecoder::pass ()
        avcodec_get_frame_defaults (_frame);
 
        shared_ptr<FFmpegAudioStream> ffa = dynamic_pointer_cast<FFmpegAudioStream> (_audio_stream);
-       
+
        if (_packet.stream_index == _video_stream) {
 
                int frame_finished;
@@ -270,46 +267,10 @@ FFmpegDecoder::pass ()
                                _film->log()->log (String::compose ("Used only %1 bytes of %2 in packet", r, _packet.size));
                        }
 
-                       /* Where we are in the output, in seconds */
-                       double const out_pts_seconds = video_frame() / frames_per_second();
-
-                       /* Where we are in the source, in seconds */
-                       double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
-                               * av_frame_get_best_effort_timestamp(_frame);
-
-                       _film->log()->log (
-                               String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
-                               Log::VERBOSE
-                               );
-
-                       if (!_first_video) {
-                               _first_video = source_pts_seconds;
-                       }
-
-                       /* Difference between where we are and where we should be */
-                       double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
-                       double const one_frame = 1 / frames_per_second();
-
-                       /* Insert frames if required to get out_pts_seconds up to pts_seconds */
-                       if (delta > one_frame) {
-                               int const extra = rint (delta / one_frame);
-                               for (int i = 0; i < extra; ++i) {
-                                       repeat_last_video ();
-                                       _film->log()->log (
-                                               String::compose (
-                                                       "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
-                                                       out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
-                                                       )
-                                               );
-                               }
-                       }
-
-                       if (delta > -one_frame) {
-                               /* Process this frame */
-                               filter_and_emit_video (_frame);
+                       if (_opt->video_sync) {
+                               out_with_sync ();
                        } else {
-                               /* Otherwise we are omitting a frame to keep things right */
-                               _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+                               filter_and_emit_video (_frame);
                        }
                }
 
@@ -557,11 +518,14 @@ FFmpegDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
 {
        VideoDecoder::set_subtitle_stream (s);
        setup_subtitle ();
+       OutputChanged ();
 }
 
 void
 FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
 {
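+       /* Lock _filter_graphs; film_changed() can clear it from another thread */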
+       boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
        shared_ptr<FilterGraph> graph;
 
        list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
@@ -570,7 +534,7 @@ FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
        }
 
        if (i == _filter_graphs.end ()) {
-               graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+               graph.reset (new FilterGraph (_film, this, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
                _filter_graphs.push_back (graph);
                _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
        } else {
@@ -579,11 +543,33 @@ FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
 
        list<shared_ptr<Image> > images = graph->process (frame);
 
+       SourceFrame const sf = av_q2d (_format_context->streams[_video_stream]->time_base)
+               * av_frame_get_best_effort_timestamp(_frame) * frames_per_second();
+
        for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
-               emit_video (*i);
+               emit_video (*i, sf);
        }
 }
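
sf recovers the source frame index from the stream timestamp: av_q2d (time_base) converts ticks to seconds, and multiplying by the frame rate converts seconds to frames. A worked sketch with hypothetical values:

    AVRational const tb = { 1, 90000 };   /* hypothetical stream time base */
    int64_t const pts = 180000;           /* hypothetical best-effort timestamp */
    double const fps = 24;
    /* av_q2d (tb) * pts = 2 seconds; 2 * fps = source frame 48 */
    SourceFrame const sf = av_q2d (tb) * pts * fps;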
 
+bool
+FFmpegDecoder::seek (SourceFrame f)
+{
+       int64_t const vt = static_cast<int64_t>(f) / (av_q2d (_format_context->streams[_video_stream]->time_base) * frames_per_second());
+
+       /* This AVSEEK_FLAG_BACKWARD is a bit of a hack; without it, if we ask for a seek to the same place as last time
+          (used when we change decoder parameters and want to re-fetch the frame) we end up going forwards rather than
+          staying in the same place.
+       */
+       int const r = av_seek_frame (_format_context, _video_stream, vt, (f == last_source_frame() ? AVSEEK_FLAG_BACKWARD : 0));
+
+       avcodec_flush_buffers (_video_codec_context);
+       if (_subtitle_codec_context) {
+               avcodec_flush_buffers (_subtitle_codec_context);
+       }
+
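+       /* av_seek_frame returns a negative value on error, so true here means failure */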
+       return r < 0;
+}
+
 shared_ptr<FFmpegAudioStream>
 FFmpegAudioStream::create (string t, optional<int> v)
 {
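
seek(), above, inverts that mapping: the target frame is divided by time_base * fps to get a stream timestamp in ticks. With the same hypothetical values:

    /* hypothetical: tb = 1/90000, fps = 24 */
    SourceFrame const f = 48;
    int64_t const vt = f / (av_q2d (tb) * fps);   /* 48 / ((1/90000) * 24) = 180000 ticks */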
@@ -636,3 +622,74 @@ FFmpegAudioStream::to_string () const
        return String::compose ("ffmpeg %1 %2 %3 %4", _id, _sample_rate, _channel_layout, _name);
 }
 
+void
+FFmpegDecoder::out_with_sync ()
+{
+       /* Where we are in the output, in seconds */
+       double const out_pts_seconds = video_frame() / frames_per_second();
+
+       /* Where we are in the source, in seconds */
+       double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
+               * av_frame_get_best_effort_timestamp(_frame);
+
+       _film->log()->log (
+               String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
+               Log::VERBOSE
+               );
+
+       if (!_first_video) {
+               _first_video = source_pts_seconds;
+       }
+
+       /* Difference between where we are and where we should be */
+       double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
+       double const one_frame = 1 / frames_per_second();
+
+       /* Insert frames if required to get out_pts_seconds up to source_pts_seconds */
+       if (delta > one_frame) {
+               int const extra = rint (delta / one_frame);
+               for (int i = 0; i < extra; ++i) {
+                       repeat_last_video ();
+                       _film->log()->log (
+                               String::compose (
+                                       "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
+                                       out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
+                                       )
+                               );
+               }
+       }
+
+       if (delta > -one_frame) {
+               /* Process this frame */
+               filter_and_emit_video (_frame);
+       } else {
+               /* Otherwise we are omitting a frame to keep things right */
+               _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+       }
+}
+
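
The delta logic above keeps the output frame count locked to source PTS: more than one frame ahead inserts repeats, more than one frame behind drops the frame. The arithmetic at a hypothetical 24 fps (one_frame = 1/24, about 0.0417 s):

    /* delta = +0.10 s  ->  rint (0.10 / 0.0417) = 2 repeated frames, then emit */
    /* delta = +0.02 s  ->  within one frame either way: no repeats, just emit */
    /* delta = -0.06 s  ->  more than one frame behind: this frame is dropped  */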
+void
+FFmpegDecoder::film_changed (Film::Property p)
+{
+       switch (p) {
+       case Film::CROP:
+       case Film::FILTERS:
+       {
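+               /* Discard cached graphs under the same lock that filter_and_emit_video() takes */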
+               boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+               _filter_graphs.clear ();
+       }
+       OutputChanged ();
+       break;
+
+       default:
+               break;
+       }
+}
+
+/** @return Length (in video frames) according to our content's header */
+SourceFrame
+FFmpegDecoder::length () const
+{
+       return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
+}
+
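
length() converts the container duration, which FFmpeg reports in AV_TIME_BASE units (microseconds), into seconds and then into video frames. With hypothetical values:

    /* duration = 120000000, AV_TIME_BASE = 1000000, 24 fps:
       120000000 / 1000000 = 120 seconds; 120 * 24 = 2880 frames */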