+int
+FFmpegDecoder::bytes_per_audio_sample () const
+{
+ return av_get_bytes_per_sample (audio_sample_format ());
+}
+
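+/** Set the stream from which we decode audio.
+ * @param s Audio stream to use.
+ */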
+void
+FFmpegDecoder::set_audio_stream (shared_ptr<AudioStream> s)
+{
+ AudioDecoder::set_audio_stream (s);
+ setup_audio ();
+}
+
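+/** Set the stream from which we decode subtitles, and tell listeners that our output has changed.
+ * @param s Subtitle stream to use.
+ */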
+void
+FFmpegDecoder::set_subtitle_stream (shared_ptr<SubtitleStream> s)
+{
+ VideoDecoder::set_subtitle_stream (s);
+ setup_subtitle ();
+ OutputChanged ();
+}
+
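+/** Run a decoded frame through a filter graph which matches its size and pixel format
+ * (creating the graph if it does not yet exist) and emit the resulting images.
+ * @param frame Frame to filter.
+ */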
+void
+FFmpegDecoder::filter_and_emit_video (AVFrame* frame)
+{
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
+ shared_ptr<FilterGraph> graph;
+
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+ graph.reset (new FilterGraph (_film, this, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
+ _filter_graphs.push_back (graph);
+ _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
+ } else {
+ graph = *i;
+ }
+
+ list<shared_ptr<Image> > images = graph->process (frame);
+
+ for (list<shared_ptr<Image> >::iterator j = images.begin(); j != images.end(); ++j) {
+ emit_video (*j, frame_time ());
+ }
+}
+
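+/** Seek to a time in the source.
+ * @param p Time in seconds.
+ * @return true on error.
+ */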
+bool
+FFmpegDecoder::seek (double p)
+{
+ return do_seek (p, false);
+}
+
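+/** Seek back to the frame that was decoded last, so that it can be fetched again.
+ * @return true on error.
+ */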
+bool
+FFmpegDecoder::seek_to_last ()
+{
+ /* Passing true here forces AVSEEK_FLAG_BACKWARD in do_seek, which is a bit of a hack; without it, if we ask for a seek
+ to the same place as last time (used when we change decoder parameters and want to re-fetch the frame) we end up
+ going forwards rather than staying in the same place.
+ */
+ return do_seek (last_source_time(), true);
+}
+
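+/** Seek to a time in the source and flush the video and subtitle codecs.
+ * @param p Time in seconds.
+ * @param backwards true to pass AVSEEK_FLAG_BACKWARD to av_seek_frame.
+ * @return true on error.
+ */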
+bool
+FFmpegDecoder::do_seek (double p, bool backwards)
+{
+ int64_t const vt = p / av_q2d (_format_context->streams[_video_stream]->time_base);
+
+ int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? AVSEEK_FLAG_BACKWARD : 0);
+
+ avcodec_flush_buffers (_video_codec_context);
+ if (_subtitle_codec_context) {
+ avcodec_flush_buffers (_subtitle_codec_context);
+ }
+
+ return r < 0;
+}
+
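+/** Create an FFmpegAudioStream from a string description.
+ * @param t String describing the stream.
+ * @param v Version of the format of t, or empty if it is older than version 1.
+ * @return Stream, or 0 if t does not describe an FFmpeg audio stream.
+ */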
+shared_ptr<FFmpegAudioStream>
+FFmpegAudioStream::create (string t, optional<int> v)
+{
+ if (!v) {
+ /* version < 1; no type in the string, and there are only FFmpeg streams anyway */
+ return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
+ }
+
+ stringstream s (t);
+ string type;
+ s >> type;
+ if (type != "ffmpeg") {
+ return shared_ptr<FFmpegAudioStream> ();
+ }
+
+ return shared_ptr<FFmpegAudioStream> (new FFmpegAudioStream (t, v));
+}
+
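+/** Construct an FFmpegAudioStream from a string description.
+ * @param t String describing the stream.
+ * @param version Version of the format of t, or empty if it is older than version 1.
+ */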
+FFmpegAudioStream::FFmpegAudioStream (string t, optional<int> version)
+{
+ stringstream n (t);
+
+ int name_index = 4;
+ if (!version) {
+ name_index = 2;
+ int channels;
+ n >> _id >> channels;
+ _channel_layout = av_get_default_channel_layout (channels);
+ _sample_rate = 0;
+ } else {
+ string type;
+ /* Current (marked version 1) */
+ n >> type >> _id >> _sample_rate >> _channel_layout;
+ assert (type == "ffmpeg");
+ }
+
+ for (int i = 0; i < name_index; ++i) {
+ size_t const s = t.find (' ');
+ if (s != string::npos) {
+ t = t.substr (s + 1);
+ }
+ }
+
+ _name = t;
+}
+
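+/** @return A string describing this stream, which can be passed to create() */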
+string
+FFmpegAudioStream::to_string () const
+{
+ return String::compose ("ffmpeg %1 %2 %3 %4", _id, _sample_rate, _channel_layout, _name);
+}
+
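+/** Emit the frame that has just been decoded, repeating or dropping frames as necessary
+ * to keep the output in sync with the source timestamps.
+ */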
+void
+FFmpegDecoder::out_with_sync ()
+{
+ /* Where we are in the output, in seconds */
+ double const out_pts_seconds = video_frame() / frames_per_second();
+
+ /* Where we are in the source, in seconds */
+ double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base)
+ * av_frame_get_best_effort_timestamp(_frame);
+
+ _film->log()->log (
+ String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds),
+ Log::VERBOSE
+ );
+
+ if (!_first_video) {
+ _first_video = source_pts_seconds;
+ }
+
+ /* Difference between where we are and where we should be */
+ double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds;
+ double const one_frame = 1 / frames_per_second();
+
+ /* Insert frames if required to bring out_pts_seconds up to source_pts_seconds */
+ if (delta > one_frame) {
+ int const extra = rint (delta / one_frame);
+ for (int i = 0; i < extra; ++i) {
+ repeat_last_video ();
+ _film->log()->log (
+ String::compose (
+ "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)",
+ out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second()
+ )
+ );
+ }
+ }
+
+ if (delta > -one_frame) {
+ /* Process this frame */
+ filter_and_emit_video (_frame);
+ } else {
+ /* Otherwise, omit this frame to keep the output in sync */
+ _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds));
+ }
+}
+
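+/** Handle a change to one of our Film's properties.
+ * @param p Property that changed.
+ */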
+void
+FFmpegDecoder::film_changed (Film::Property p)
+{
+ switch (p) {
+ case Film::CROP:
+ case Film::FILTERS:
+ {
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+ _filter_graphs.clear ();
+ }
+ OutputChanged ();
+ break;
+
+ default:
+ break;
+ }
+}
+
+/** @return Length (in video frames) according to our content's header */
+SourceFrame
+FFmpegDecoder::length () const
+{
+ return (double(_format_context->duration) / AV_TIME_BASE) * frames_per_second();
+}
+
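+/** @return Time of the frame we have just decoded, in seconds, from its best-effort timestamp */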
+double
+FFmpegDecoder::frame_time () const
+{
+ return av_frame_get_best_effort_timestamp(_frame) * av_q2d (_format_context->streams[_video_stream]->time_base);
+}
+