X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=adf16c9408a36d39976d730e86e8c29a894ec505;hb=0c66eaeac227d6aeb63a7a36e202ef87081dc222;hp=3c607a7fb260f741392b42d221177e9857a72848;hpb=513ee257dfcac679b346a872e4446ab2ef453f67;p=dcpomatic.git diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc index 3c607a7fb..adf16c940 100644 --- a/src/lib/ffmpeg_decoder.cc +++ b/src/lib/ffmpeg_decoder.cc @@ -1,3 +1,5 @@ +/* -*- c-basic-offset: 8; default-tab-width: 8; -*- */ + /* Copyright (C) 2012 Carl Hetherington @@ -41,7 +43,6 @@ extern "C" { #include "transcoder.h" #include "job.h" #include "filter.h" -#include "options.h" #include "exceptions.h" #include "image.h" #include "util.h" @@ -49,6 +50,9 @@ extern "C" { #include "ffmpeg_decoder.h" #include "filter_graph.h" #include "subtitle.h" +#include "audio_buffers.h" + +#include "i18n.h" using std::cout; using std::string; @@ -58,11 +62,15 @@ using std::list; using boost::shared_ptr; using boost::optional; using boost::dynamic_pointer_cast; +using libdcp::Size; + +boost::mutex FFmpegDecoder::_mutex; -FFmpegDecoder::FFmpegDecoder (shared_ptr f, shared_ptr o, Job* j) - : Decoder (f, o, j) - , VideoDecoder (f, o, j) - , AudioDecoder (f, o, j) +FFmpegDecoder::FFmpegDecoder (shared_ptr f, shared_ptr c, bool video, bool audio, bool subtitles) + : Decoder (f) + , VideoDecoder (f) + , AudioDecoder (f, c) + , _ffmpeg_content (c) , _format_context (0) , _video_stream (-1) , _frame (0) @@ -72,6 +80,9 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr f, shared_ptr o, J , _audio_codec (0) , _subtitle_codec_context (0) , _subtitle_codec (0) + , _decode_video (video) + , _decode_audio (audio) + , _decode_subtitles (subtitles) { setup_general (); setup_video (); @@ -81,10 +92,12 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr f, shared_ptr o, J FFmpegDecoder::~FFmpegDecoder () { + boost::mutex::scoped_lock lm (_mutex); + if (_audio_codec_context) { avcodec_close (_audio_codec_context); } - + if (_video_codec_context) { avcodec_close (_video_codec_context); } @@ -103,15 +116,15 @@ FFmpegDecoder::setup_general () { av_register_all (); - if (avformat_open_input (&_format_context, _film->content_path().c_str(), 0, 0) < 0) { - throw OpenFileError (_film->content_path ()); + if (avformat_open_input (&_format_context, _ffmpeg_content->file().string().c_str(), 0, 0) < 0) { + throw OpenFileError (_ffmpeg_content->file().string ()); } if (avformat_find_stream_info (_format_context, 0) < 0) { - throw DecodeError ("could not find stream information"); + throw DecodeError (_("could not find stream information")); } - /* Find video, audio and subtitle streams and choose the first of each */ + /* Find video, audio and subtitle streams */ for (uint32_t i = 0; i < _format_context->nb_streams; ++i) { AVStream* s = _format_context->streams[i]; @@ -122,96 +135,86 @@ FFmpegDecoder::setup_general () /* This is a hack; sometimes it seems that _audio_codec_context->channel_layout isn't set up, so bodge it here. No idea why we should have to do this. 
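		   Presumably av_get_default_channel_layout() makes a reasonable best
		   guess from the channel count alone (e.g. 1 gives AV_CH_LAYOUT_MONO
		   and 2 gives AV_CH_LAYOUT_STEREO), so the bodge at least leaves
		   downstream code with a usable layout.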
*/ - + if (s->codec->channel_layout == 0) { s->codec->channel_layout = av_get_default_channel_layout (s->codec->channels); } _audio_streams.push_back ( - shared_ptr ( - new FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channel_layout) - ) + FFmpegAudioStream (stream_name (s), i, s->codec->sample_rate, s->codec->channels) ); - + } else if (s->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { - _subtitle_streams.push_back ( - shared_ptr ( - new SubtitleStream (stream_name (s), i) - ) - ); + _subtitle_streams.push_back (FFmpegSubtitleStream (stream_name (s), i)); } } if (_video_stream < 0) { - throw DecodeError ("could not find video stream"); + throw DecodeError (N_("could not find video stream")); } _frame = avcodec_alloc_frame (); if (_frame == 0) { - throw DecodeError ("could not allocate frame"); + throw DecodeError (N_("could not allocate frame")); } } void FFmpegDecoder::setup_video () { + boost::mutex::scoped_lock lm (_mutex); + _video_codec_context = _format_context->streams[_video_stream]->codec; _video_codec = avcodec_find_decoder (_video_codec_context->codec_id); if (_video_codec == 0) { - throw DecodeError ("could not find video decoder"); + throw DecodeError (_("could not find video decoder")); } - /* I think this prevents problems with green hash on decodes and - "changing frame properties on the fly is not supported by all filters" - messages with some content. Although I'm not sure; needs checking. - */ - AVDictionary* opts = 0; - av_dict_set (&opts, "threads", "1", 0); - - if (avcodec_open2 (_video_codec_context, _video_codec, &opts) < 0) { - throw DecodeError ("could not open video decoder"); + if (avcodec_open2 (_video_codec_context, _video_codec, 0) < 0) { + throw DecodeError (N_("could not open video decoder")); } } void FFmpegDecoder::setup_audio () { - if (!_audio_stream) { + boost::mutex::scoped_lock lm (_mutex); + + if (!_ffmpeg_content->audio_stream ()) { return; } - shared_ptr ffa = dynamic_pointer_cast (_audio_stream); - assert (ffa); - - _audio_codec_context = _format_context->streams[ffa->id()]->codec; + _audio_codec_context = _format_context->streams[_ffmpeg_content->audio_stream()->id]->codec; _audio_codec = avcodec_find_decoder (_audio_codec_context->codec_id); if (_audio_codec == 0) { - throw DecodeError ("could not find audio decoder"); + throw DecodeError (_("could not find audio decoder")); } if (avcodec_open2 (_audio_codec_context, _audio_codec, 0) < 0) { - throw DecodeError ("could not open audio decoder"); + throw DecodeError (N_("could not open audio decoder")); } } void FFmpegDecoder::setup_subtitle () { - if (!_subtitle_stream) { + boost::mutex::scoped_lock lm (_mutex); + + if (!_ffmpeg_content->subtitle_stream() || _ffmpeg_content->subtitle_stream()->id >= int (_format_context->nb_streams)) { return; } - _subtitle_codec_context = _format_context->streams[_subtitle_stream->id()]->codec; + _subtitle_codec_context = _format_context->streams[_ffmpeg_content->subtitle_stream()->id]->codec; _subtitle_codec = avcodec_find_decoder (_subtitle_codec_context->codec_id); if (_subtitle_codec == 0) { - throw DecodeError ("could not find subtitle decoder"); + throw DecodeError (_("could not find subtitle decoder")); } if (avcodec_open2 (_subtitle_codec_context, _subtitle_codec, 0) < 0) { - throw DecodeError ("could not open subtitle decoder"); + throw DecodeError (N_("could not open subtitle decoder")); } } @@ -220,149 +223,40 @@ bool FFmpegDecoder::pass () { int r = av_read_frame (_format_context, &_packet); - + if (r < 0) { if (r != 
AVERROR_EOF) { /* Maybe we should fail here, but for now we'll just finish off instead */ char buf[256]; av_strerror (r, buf, sizeof(buf)); - _film->log()->log (String::compose ("error on av_read_frame (%1) (%2)", buf, r)); + _film->log()->log (String::compose (N_("error on av_read_frame (%1) (%2)"), buf, r)); } - + /* Get any remaining frames */ _packet.data = 0; _packet.size = 0; - + /* XXX: should we reset _packet.data and size after each *_decode_* call? */ - - int frame_finished; - - while (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) { - filter_and_emit_video (_frame); + + if (_decode_video) { + while (decode_video_packet ()); } - if (_audio_stream && _opt->decode_audio) { - while (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) { - int const data_size = av_samples_get_buffer_size ( - 0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1 - ); - - assert (_audio_codec_context->channels == _film->audio_channels()); - Audio (deinterleave_audio (_frame->data[0], data_size)); - } + if (_ffmpeg_content->audio_stream() && _decode_audio) { + decode_audio_packet (); } - + return true; } avcodec_get_frame_defaults (_frame); - shared_ptr ffa = dynamic_pointer_cast (_audio_stream); - - if (_packet.stream_index == _video_stream) { - - int frame_finished; - int const r = avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet); - if (r >= 0 && frame_finished) { - - if (r != _packet.size) { - _film->log()->log (String::compose ("Used only %1 bytes of %2 in packet", r, _packet.size)); - } - - /* Where we are in the output, in seconds */ - double const out_pts_seconds = video_frame() / frames_per_second(); - - /* Where we are in the source, in seconds */ - double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base) - * av_frame_get_best_effort_timestamp(_frame); - - _film->log()->log ( - String::compose ("Source video frame ready; source at %1, output at %2", source_pts_seconds, out_pts_seconds), - Log::VERBOSE - ); - - if (!_first_video) { - _first_video = source_pts_seconds; - } - - /* Difference between where we are and where we should be */ - double const delta = source_pts_seconds - _first_video.get() - out_pts_seconds; - double const one_frame = 1 / frames_per_second(); - - /* Insert frames if required to get out_pts_seconds up to pts_seconds */ - if (delta > one_frame) { - int const extra = rint (delta / one_frame); - for (int i = 0; i < extra; ++i) { - repeat_last_video (); - _film->log()->log ( - String::compose ( - "Extra video frame inserted at %1s; source frame %2, source PTS %3 (at %4 fps)", - out_pts_seconds, video_frame(), source_pts_seconds, frames_per_second() - ) - ); - } - } - - if (delta > -one_frame) { - /* Process this frame */ - filter_and_emit_video (_frame); - } else { - /* Otherwise we are omitting a frame to keep things right */ - _film->log()->log (String::compose ("Frame removed at %1s", out_pts_seconds)); - } - } - - } else if (ffa && _packet.stream_index == ffa->id() && _opt->decode_audio) { - - int frame_finished; - if (avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, &_packet) >= 0 && frame_finished) { - - /* Where we are in the source, in seconds */ - double const source_pts_seconds = av_q2d (_format_context->streams[_packet.stream_index]->time_base) - * av_frame_get_best_effort_timestamp(_frame); - - /* We only decode audio if we've had our 
first video packet through, and if it - was before this packet. Until then audio is thrown away. - */ - - if (_first_video && _first_video.get() <= source_pts_seconds) { - - if (!_first_audio) { - _first_audio = source_pts_seconds; - - /* This is our first audio frame, and if we've arrived here we must have had our - first video frame. Push some silence to make up any gap between our first - video frame and our first audio. - */ - - /* frames of silence that we must push */ - int const s = rint ((_first_audio.get() - _first_video.get()) * ffa->sample_rate ()); - - _film->log()->log ( - String::compose ( - "First video at %1, first audio at %2, pushing %3 audio frames of silence for %4 channels (%5 bytes per sample)", - _first_video.get(), _first_audio.get(), s, ffa->channels(), bytes_per_audio_sample() - ) - ); - - if (s) { - shared_ptr audio (new AudioBuffers (ffa->channels(), s)); - audio->make_silent (); - Audio (audio); - } - } - - int const data_size = av_samples_get_buffer_size ( - 0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1 - ); - - assert (_audio_codec_context->channels == _film->audio_channels()); - Audio (deinterleave_audio (_frame->data[0], data_size)); - } - } - - } else if (_subtitle_stream && _packet.stream_index == _subtitle_stream->id() && _opt->decode_subtitles && _first_video) { + if (_packet.stream_index == _video_stream && _decode_video) { + decode_video_packet (); + } else if (_ffmpeg_content->audio_stream() && _packet.stream_index == _ffmpeg_content->audio_stream()->id && _decode_audio) { + decode_audio_packet (); + } else if (_ffmpeg_content->subtitle_stream() && _packet.stream_index == _ffmpeg_content->subtitle_stream()->id && _decode_subtitles) { int got_subtitle; AVSubtitle sub; @@ -373,7 +267,7 @@ FFmpegDecoder::pass () if (sub.num_rects > 0) { shared_ptr ts; try { - emit_subtitle (shared_ptr (new TimedSubtitle (sub, _first_video.get()))); + emit_subtitle (shared_ptr (new TimedSubtitle (sub))); } catch (...) { /* some problem with the subtitle; we probably didn't understand it */ } @@ -388,34 +282,34 @@ FFmpegDecoder::pass () return false; } +/** @param data pointer to array of pointers to buffers. + * Only the first buffer will be used for non-planar data, otherwise there will be one per channel. 
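+ *
+ * For example, with two channels an interleaved format such as AV_SAMPLE_FMT_S16
+ * arrives as data[0] = L0 R0 L1 R1 ..., whereas a planar format such as
+ * AV_SAMPLE_FMT_FLTP arrives as data[0] = L0 L1 L2 ... and data[1] = R0 R1 R2 ...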
+ */ shared_ptr -FFmpegDecoder::deinterleave_audio (uint8_t* data, int size) +FFmpegDecoder::deinterleave_audio (uint8_t** data, int size) { - assert (_film->audio_channels()); + assert (_ffmpeg_content->audio_channels()); assert (bytes_per_audio_sample()); - shared_ptr ffa = dynamic_pointer_cast (_audio_stream); - assert (ffa); - /* Deinterleave and convert to float */ - assert ((size % (bytes_per_audio_sample() * ffa->channels())) == 0); + assert ((size % (bytes_per_audio_sample() * _ffmpeg_content->audio_channels())) == 0); int const total_samples = size / bytes_per_audio_sample(); - int const frames = total_samples / _film->audio_channels(); - shared_ptr audio (new AudioBuffers (ffa->channels(), frames)); + int const frames = total_samples / _ffmpeg_content->audio_channels(); + shared_ptr audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames)); switch (audio_sample_format()) { case AV_SAMPLE_FMT_S16: { - int16_t* p = (int16_t *) data; + int16_t* p = reinterpret_cast (data[0]); int sample = 0; int channel = 0; for (int i = 0; i < total_samples; ++i) { audio->data(channel)[sample] = float(*p++) / (1 << 15); ++channel; - if (channel == _film->audio_channels()) { + if (channel == _ffmpeg_content->audio_channels()) { channel = 0; ++sample; } @@ -423,16 +317,27 @@ FFmpegDecoder::deinterleave_audio (uint8_t* data, int size) } break; + case AV_SAMPLE_FMT_S16P: + { + int16_t** p = reinterpret_cast (data); + for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) { + for (int j = 0; j < frames; ++j) { + audio->data(i)[j] = static_cast(p[i][j]) / (1 << 15); + } + } + } + break; + case AV_SAMPLE_FMT_S32: { - int32_t* p = (int32_t *) data; + int32_t* p = reinterpret_cast (data[0]); int sample = 0; int channel = 0; for (int i = 0; i < total_samples; ++i) { - audio->data(channel)[sample] = float(*p++) / (1 << 31); + audio->data(channel)[sample] = static_cast(*p++) / (1 << 31); ++channel; - if (channel == _film->audio_channels()) { + if (channel == _ffmpeg_content->audio_channels()) { channel = 0; ++sample; } @@ -442,14 +347,14 @@ FFmpegDecoder::deinterleave_audio (uint8_t* data, int size) case AV_SAMPLE_FMT_FLT: { - float* p = reinterpret_cast (data); + float* p = reinterpret_cast (data[0]); int sample = 0; int channel = 0; for (int i = 0; i < total_samples; ++i) { audio->data(channel)[sample] = *p++; ++channel; - if (channel == _film->audio_channels()) { + if (channel == _ffmpeg_content->audio_channels()) { channel = 0; ++sample; } @@ -459,23 +364,22 @@ FFmpegDecoder::deinterleave_audio (uint8_t* data, int size) case AV_SAMPLE_FMT_FLTP: { - float* p = reinterpret_cast (data); - for (int i = 0; i < _film->audio_channels(); ++i) { - memcpy (audio->data(i), p, frames * sizeof(float)); - p += frames; + float** p = reinterpret_cast (data); + for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) { + memcpy (audio->data(i), p[i], frames * sizeof(float)); } } break; default: - throw DecodeError (String::compose ("Unrecognised audio sample format (%1)", static_cast (audio_sample_format()))); + throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast (audio_sample_format()))); } return audio; } float -FFmpegDecoder::frames_per_second () const +FFmpegDecoder::video_frame_rate () const { AVStream* s = _format_context->streams[_video_stream]; @@ -496,10 +400,10 @@ FFmpegDecoder::audio_sample_format () const return _audio_codec_context->sample_fmt; } -Size +libdcp::Size FFmpegDecoder::native_size () const { - return Size (_video_codec_context->width, 
_video_codec_context->height); + return libdcp::Size (_video_codec_context->width, _video_codec_context->height); } PixelFormat @@ -536,22 +440,24 @@ string FFmpegDecoder::stream_name (AVStream* s) const { stringstream n; - - AVDictionaryEntry const * lang = av_dict_get (s->metadata, "language", 0, 0); - if (lang) { - n << lang->value; - } - - AVDictionaryEntry const * title = av_dict_get (s->metadata, "title", 0, 0); - if (title) { - if (!n.str().empty()) { - n << " "; + + if (s->metadata) { + AVDictionaryEntry const * lang = av_dict_get (s->metadata, N_("language"), 0, 0); + if (lang) { + n << lang->value; + } + + AVDictionaryEntry const * title = av_dict_get (s->metadata, N_("title"), 0, 0); + if (title) { + if (!n.str().empty()) { + n << N_(" "); + } + n << title->value; } - n << title->value; } if (n.str().empty()) { - n << "unknown"; + n << N_("unknown"); } return n.str (); @@ -563,94 +469,167 @@ FFmpegDecoder::bytes_per_audio_sample () const return av_get_bytes_per_sample (audio_sample_format ()); } -void -FFmpegDecoder::set_audio_stream (shared_ptr s) +bool +FFmpegDecoder::seek (double p) { - AudioDecoder::set_audio_stream (s); - setup_audio (); + return do_seek (p, false, false); } -void -FFmpegDecoder::set_subtitle_stream (shared_ptr s) +bool +FFmpegDecoder::seek_back () { - VideoDecoder::set_subtitle_stream (s); - setup_subtitle (); + if (last_content_time() < 2.5) { + return true; + } + + return do_seek (last_content_time() - 2.5 / video_frame_rate(), true, true); } -void -FFmpegDecoder::filter_and_emit_video (AVFrame* frame) +bool +FFmpegDecoder::seek_forward () { - shared_ptr graph; - - list >::iterator i = _filter_graphs.begin(); - while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) { - ++i; + if (last_content_time() >= (video_length() - video_frame_rate())) { + return true; } + + return do_seek (last_content_time() - 0.5 / video_frame_rate(), true, true); +} - if (i == _filter_graphs.end ()) { - graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format)); - _filter_graphs.push_back (graph); - _film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format)); - } else { - graph = *i; - } +bool +FFmpegDecoder::do_seek (double p, bool backwards, bool accurate) +{ + int64_t const vt = p / av_q2d (_format_context->streams[_video_stream]->time_base); - list > images = graph->process (frame); + int const r = av_seek_frame (_format_context, _video_stream, vt, backwards ? 
AVSEEK_FLAG_BACKWARD : 0); - for (list >::iterator i = images.begin(); i != images.end(); ++i) { - emit_video (*i); + avcodec_flush_buffers (_video_codec_context); + if (_subtitle_codec_context) { + avcodec_flush_buffers (_subtitle_codec_context); } + + if (accurate) { + while (1) { + int r = av_read_frame (_format_context, &_packet); + if (r < 0) { + return true; + } + + avcodec_get_frame_defaults (_frame); + + if (_packet.stream_index == _video_stream) { + int finished = 0; + int const r = avcodec_decode_video2 (_video_codec_context, _frame, &finished, &_packet); + if (r >= 0 && finished) { + int64_t const bet = av_frame_get_best_effort_timestamp (_frame); + if (bet > vt) { + break; + } + } + } + + av_free_packet (&_packet); + } + } + + return r < 0; } -shared_ptr -FFmpegAudioStream::create (string t, optional v) +void +FFmpegDecoder::film_changed (Film::Property p) { - if (!v) { - /* version < 1; no type in the string, and there's only FFmpeg streams anyway */ - return shared_ptr (new FFmpegAudioStream (t, v)); + switch (p) { + case Film::CROP: + case Film::FILTERS: + { + boost::mutex::scoped_lock lm (_filter_graphs_mutex); + _filter_graphs.clear (); } + break; - stringstream s (t); - string type; - s >> type; - if (type != "ffmpeg") { - return shared_ptr (); + default: + break; } +} - return shared_ptr (new FFmpegAudioStream (t, v)); +/** @return Length (in video frames) according to our content's header */ +ContentVideoFrame +FFmpegDecoder::video_length () const +{ + return (double(_format_context->duration) / AV_TIME_BASE) * video_frame_rate(); } -FFmpegAudioStream::FFmpegAudioStream (string t, optional version) +void +FFmpegDecoder::decode_audio_packet () { - stringstream n (t); + /* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4 + several times. 
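+	   On success, avcodec_decode_audio4() returns the number of bytes it
+	   consumed from the input packet, so the loop below steps copy_packet.data
+	   and copy_packet.size on by that amount and decodes again until the whole
+	   packet has been used.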
+ */ - int name_index = 4; - if (!version) { - name_index = 2; - int channels; - n >> _id >> channels; - _channel_layout = av_get_default_channel_layout (channels); - _sample_rate = 0; - } else { - string type; - /* Current (marked version 1) */ - n >> type >> _id >> _sample_rate >> _channel_layout; - assert (type == "ffmpeg"); - } + AVPacket copy_packet = _packet; + + while (copy_packet.size > 0) { - for (int i = 0; i < name_index; ++i) { - size_t const s = t.find (' '); - if (s != string::npos) { - t = t.substr (s + 1); + int frame_finished; + int const decode_result = avcodec_decode_audio4 (_audio_codec_context, _frame, &frame_finished, ©_packet); + if (decode_result >= 0) { + if (frame_finished) { + + /* Where we are in the source, in seconds */ + double const source_pts_seconds = av_q2d (_format_context->streams[copy_packet.stream_index]->time_base) + * av_frame_get_best_effort_timestamp(_frame); + + int const data_size = av_samples_get_buffer_size ( + 0, _audio_codec_context->channels, _frame->nb_samples, audio_sample_format (), 1 + ); + + assert (_audio_codec_context->channels == _ffmpeg_content->audio_channels()); + Audio (deinterleave_audio (_frame->data, data_size), source_pts_seconds); + } + + copy_packet.data += decode_result; + copy_packet.size -= decode_result; } } - - _name = t; } -string -FFmpegAudioStream::to_string () const +bool +FFmpegDecoder::decode_video_packet () { - return String::compose ("ffmpeg %1 %2 %3 %4", _id, _sample_rate, _channel_layout, _name); -} + int frame_finished; + if (avcodec_decode_video2 (_video_codec_context, _frame, &frame_finished, &_packet) < 0 || !frame_finished) { + return false; + } + + boost::mutex::scoped_lock lm (_filter_graphs_mutex); + + shared_ptr graph; + + list >::iterator i = _filter_graphs.begin(); + while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) { + ++i; + } + + if (i == _filter_graphs.end ()) { + graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)); + _filter_graphs.push_back (graph); + _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format)); + } else { + graph = *i; + } + + list > images = graph->process (_frame); + + for (list >::iterator i = images.begin(); i != images.end(); ++i) { + int64_t const bet = av_frame_get_best_effort_timestamp (_frame); + if (bet != AV_NOPTS_VALUE) { + /* XXX: may need to insert extra frames / remove frames here ... + (as per old Matcher) + */ + emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base) * TIME_HZ); + } else { + _film->log()->log ("Dropping frame without PTS"); + } + } + return true; +}
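
A stand-alone sketch of the "accurate seek" pattern that FFmpegDecoder::do_seek() implements above, written against the same era of FFmpeg API (avcodec_decode_video2 and friends). The names here (seek_accurately, format_context, codec_context, video_stream, target_seconds) are illustrative placeholders rather than anything from the patch, and error handling is pared down:

/* Sketch only, not dcpomatic's actual code: seek to the nearest key frame at or
   before target_seconds, then decode forward until the decoder produces a frame
   whose best-effort timestamp reaches the target.
*/

#define __STDC_CONSTANT_MACROS 1 /* needed for AV_NOPTS_VALUE in old FFmpeg headers under C++ */
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

static bool
seek_accurately (AVFormatContext* format_context, AVCodecContext* codec_context, int video_stream, double target_seconds)
{
	int64_t const target = target_seconds / av_q2d (format_context->streams[video_stream]->time_base);

	/* Land on the key frame at or before the target ... */
	if (av_seek_frame (format_context, video_stream, target, AVSEEK_FLAG_BACKWARD) < 0) {
		return false;
	}

	/* ... drop any half-decoded state from the codec ... */
	avcodec_flush_buffers (codec_context);

	/* ... then decode forward until we reach the target */
	AVFrame* frame = avcodec_alloc_frame ();
	AVPacket packet;
	while (av_read_frame (format_context, &packet) >= 0) {
		bool reached = false;
		if (packet.stream_index == video_stream) {
			avcodec_get_frame_defaults (frame);
			int finished = 0;
			if (avcodec_decode_video2 (codec_context, frame, &finished, &packet) >= 0 && finished) {
				int64_t const bet = av_frame_get_best_effort_timestamp (frame);
				reached = bet != AV_NOPTS_VALUE && bet >= target;
			}
		}
		av_free_packet (&packet);
		if (reached) {
			break;
		}
	}

	av_free (frame);
	return true;
}

AVSEEK_FLAG_BACKWARD is what makes the decode-forward loop necessary: it lands on a key frame at or before the requested time, and only decoding on from there gives frame-accurate positioning.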