Decoder handles crop changing.

FFmpegDecoder now connects to Film::Changed.  When the film's crop changes it drops its
cached FilterGraphs (now protected by _filter_graphs_mutex) and emits OutputChanged.
As part of this, FilterGraph no longer takes an apply_crop flag, the constructor takes
shared_ptr<const DecodeOptions> rather than Options, the PTS-based sync code moves from
pass() into out_careful() and only runs when DecodeOptions::video_sync is set, and
seek() now returns true on failure and emits OutputChanged after a successful seek.
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index f4c7d3d857f5e6cf04a92e2b23ce0ed3171aca28..51afc461a86d36a2d90bde325cf6a6f98c8d3458 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -59,7 +59,7 @@ using boost::shared_ptr;
 using boost::optional;
 using boost::dynamic_pointer_cast;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, Job* j)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const DecodeOptions> o, Job* j)
        : Decoder (f, o, j)
        , VideoDecoder (f, o, j)
        , AudioDecoder (f, o, j)
@@ -77,6 +77,8 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<Film> f, shared_ptr<const Options> o, J
        setup_video ();
        setup_audio ();
        setup_subtitle ();
+
+       f->Changed.connect (bind (&FFmpegDecoder::film_changed, this, _1));
 }
 
 FFmpegDecoder::~FFmpegDecoder ()
@@ -270,46 +272,10 @@ FFmpegDecoder::pass ()
                                _film->log()->log (String::compose ("Used only %1 bytes of %2 in packet", r, _packet.size));
                        }
 
-                       /* Where we are in the output, in seconds */
-                       double const out_pts_seconds = video_frame() / frames_per_second();
-
-                       /* Where we are in the source, in seconds */
-                       double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
-                               * av_frame_get_best_effort_timestamp(_frame);
-
-                       _film->log()->log (
-                               String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
-                               Log::VERBOSE
-                               );
-
-                       if (!_first_video) {
-                               _first_video = source_pts_seconds;
-                       }
-
-                       /* Difference between where we are and where we should be */
-                       double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
-                       double const one_frame = 1 / frames_per_second();
-
-                       /* Insert frames if required to get out_pts_seconds up to pts_seconds */
-                       if (delta > one_frame) {
-                               int const extra = rint (delta / one_frame);
-                               for (int i = 0; i < extra; ++i) {
-                                       repeat_last_video ();
-                                       _film->log()->log (
-                                               String::compose (
-                                                       "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
-                                                       out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
-                                                       )
-                                               );
-                               }
-                       }
-
-                       if (delta > -one_frame) {
-                               /* Process this frame */
-                               filter_and_emit_video (_frame);
+                       if (_opt->video_sync) {
+                               out_careful ();
                        } else {
-                               /* Otherwise we are omitting a frame to keep things right */
-                               _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+                               filter_and_emit_video (_frame);
                        }
                }
 
@@ -562,6 +528,8 @@ FFmpegDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
 void
 FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
 {
+       boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+       
        shared_ptr<FilterGraph> graph;
 
        list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
@@ -570,7 +538,7 @@ FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
        }
 
        if (i == _filter_graphs.end ()) {
-               graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+               graph.reset (new FilterGraph (_film, this, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
                _filter_graphs.push_back (graph);
                _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
        } else {
@@ -584,12 +552,18 @@ FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
        }
 }
 
-void
+bool
 FFmpegDecoder::seek (SourceFrame f)
 {
        int64_t const t = static_cast<int64_t>(f) / (av_q2d (_format_context->streams[_video_stream]->time_base) * frames_per_second());
-       av_seek_frame (_format_context, _video_stream, t, 0);
+       int const r = av_seek_frame (_format_context, _video_stream, t, 0);
        avcodec_flush_buffers (_video_codec_context);
+
+       if (r >= 0) {
+               OutputChanged ();
+       }
+       
+       return r < 0;
 }
 
 shared_ptr<FFmpegAudioStream>
@@ -645,3 +619,66 @@ FFmpegAudioStream::to_string () const
 }
 
 
+void
+FFmpegDecoder::out_careful ()
+{
+       /* Where we are in the output, in seconds */
+       double const out_pts_seconds = video_frame() / frames_per_second();
+       
+       /* Where we are in the source, in seconds */
+       double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
+               * av_frame_get_best_effort_timestamp(_frame);
+       
+       _film->log()->log (
+               String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
+               Log::VERBOSE
+               );
+       
+       if (!_first_video) {
+               _first_video = source_pts_seconds;
+       }
+       
+       /* Difference between where we are and where we should be */
+       double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
+       double const one_frame = 1 / frames_per_second();
+       
+       /* Insert repeated frames if required to bring out_pts_seconds up to source_pts_seconds */
+       if (delta > one_frame) {
+               int const extra = rint (delta / one_frame);
+               for (int i = 0; i < extra; ++i) {
+                       repeat_last_video ();
+                       _film->log()->log (
+                               String::compose (
+                                       "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
+                                       out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
+                                       )
+                               );
+               }
+       }
+       
+       if (delta > -one_frame) {
+               /* Process this frame */
+               filter_and_emit_video (_frame);
+       } else {
+               /* Otherwise we are omitting a frame to keep things right */
+               _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+       }
+}
+
+void
+FFmpegDecoder::film_changed (Film::Property p)
+{
+       switch (p) {
+       case Film::CROP:
+       {
+               boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+               _filter_graphs.clear ();
+       }
+       OutputChanged ();
+       break;
+
+       default:
+               break;
+       }
+}
+
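A minimal, self-contained sketch of the pattern this commit relies on (not code from the
repository): a film-like object emits a Changed(property) signal, and the decoder reacts
to a crop change by clearing its cached filter graphs under a mutex and then emitting an
OutputChanged signal of its own.  All Mock* names are hypothetical stand-ins for the real
Film, FFmpegDecoder and FilterGraph classes.

/*
 * Build (assuming Boost is installed):
 *   g++ -std=c++11 crop_signal_sketch.cc -lboost_thread -lboost_system
 */
#include <boost/bind/bind.hpp>
#include <boost/signals2.hpp>
#include <boost/thread/mutex.hpp>
#include <iostream>
#include <list>

enum Property { CROP, CONTENT };

/* Stand-in for Film: owns a Changed signal fired when a property is altered */
struct MockFilm
{
	boost::signals2::signal<void (Property)> Changed;
};

/* Stand-in for the decoder: caches "filter graphs" and invalidates them on a crop change */
class MockDecoder
{
public:
	explicit MockDecoder (MockFilm& film)
	{
		film.Changed.connect (boost::bind (&MockDecoder::film_changed, this, boost::placeholders::_1));
	}

	/* Fired when previously-emitted output should be considered stale */
	boost::signals2::signal<void ()> OutputChanged;

private:
	void film_changed (Property p)
	{
		if (p != CROP) {
			return;
		}

		{
			/* Same locking shape as FFmpegDecoder::film_changed in the diff above */
			boost::mutex::scoped_lock lm (_filter_graphs_mutex);
			_filter_graphs.clear ();
		}

		OutputChanged ();
	}

	boost::mutex _filter_graphs_mutex;
	std::list<int> _filter_graphs;   /* stands in for list<shared_ptr<FilterGraph> > */
};

int main ()
{
	MockFilm film;
	MockDecoder decoder (film);

	decoder.OutputChanged.connect ([] { std::cout << "output changed; re-seek or re-filter as needed\n"; });

	/* Changing the crop triggers the whole chain */
	film.Changed (CROP);
	return 0;
}

Clearing the graphs rather than reconfiguring them in place keeps filter_and_emit_video()
simple: the next frame that arrives finds no matching graph and builds a fresh one for the
new crop.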