void
FFmpegDecoder::filter_and_emit_video ()
{
- boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+ int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
+ if (bet == AV_NOPTS_VALUE) {
+ _film->log()->log ("Dropping frame without PTS");
+ return;
+ }
- shared_ptr<FilterGraph> graph;
-
- list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
- while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
- ++i;
+ if (_film->crop() == Crop() && _film->filters().empty()) {
+ /* No filter graph needed; just emit.
+    NOTE(review): this FrameImage does NOT own _frame (own == false), so the
+    emitted image is only valid until the next decode overwrites _frame —
+    confirm that emit_video() copies or fully consumes it synchronously. */
+ emit_video (shared_ptr<Image> (new FrameImage (_frame, false)), false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
+ return;
}
+
+ shared_ptr<FilterGraph> graph;
- if (i == _filter_graphs.end ()) {
- graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
- _filter_graphs.push_back (graph);
- _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
- } else {
- graph = *i;
+ {
+ boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+
+ list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
+ while (i != _filter_graphs.end() && !(*i)->can_process (libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
+ ++i;
+ }
+
+ if (i == _filter_graphs.end ()) {
+ graph.reset (new FilterGraph (_film, this, libdcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+ _filter_graphs.push_back (graph);
+ _film->log()->log (String::compose (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format));
+ } else {
+ graph = *i;
+ }
}
list<shared_ptr<Image> > images = graph->process (_frame);
for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
- int64_t const bet = av_frame_get_best_effort_timestamp (_frame);
- if (bet != AV_NOPTS_VALUE) {
- emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
- } else {
- _film->log()->log ("Dropping frame without PTS");
- }
+ emit_video (*i, false, bet * av_q2d (_format_context->streams[_video_stream]->time_base));
}
}
}
stringstream a;
- a << _size.width << N_(":")
- << _size.height << N_(":")
- << _pixel_format << N_(":")
- << decoder->time_base_numerator() << N_(":")
- << decoder->time_base_denominator() << N_(":")
- << decoder->sample_aspect_ratio_numerator() << N_(":")
- << decoder->sample_aspect_ratio_denominator();
+ a << "video_size=" << _size.width << "x" << _size.height << ":"
+ << "pix_fmt=" << _pixel_format << ":"
+ << "time_base=" << decoder->time_base_numerator() << "/" << decoder->time_base_denominator() << ":"
+ << "pixel_aspect=" << decoder->sample_aspect_ratio_numerator() << "/" << decoder->sample_aspect_ratio_denominator();
int r;
- if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, N_("in"), a.str().c_str(), 0, graph)) < 0) {
+ if ((r = avfilter_graph_create_filter (&_buffer_src_context, buffer_src, "in", a.str().c_str(), 0, graph)) < 0) {
throw DecodeError (N_("could not create buffer source"));
}
throw DecodeError (N_("could not create buffer sink."));
}
+ /* NOTE(review): this fixes the leak on the success path, but sink_params
+    still leaks if avfilter_graph_create_filter() fails and we throw above —
+    consider freeing it before the throw as well. */
+ av_free (sink_params);
+
AVFilterInOut* outputs = avfilter_inout_alloc ();
outputs->name = av_strdup(N_("in"));
outputs->filter_ctx = _buffer_src_context;
FilterGraph::process (AVFrame* frame)
{
list<shared_ptr<Image> > images;
-
+
if (av_buffersrc_write_frame (_buffer_src_context, frame) < 0) {
throw DecodeError (N_("could not push buffer into filter chain."));
}
}
/* This takes ownership of the AVFrame */
- images.push_back (shared_ptr<Image> (new FrameImage (frame)));
+ images.push_back (shared_ptr<Image> (new FrameImage (frame, true)));
}
return images;
return _aligned;
}
-FrameImage::FrameImage (AVFrame* frame)
+FrameImage::FrameImage (AVFrame* frame, bool own)
: Image (static_cast<AVPixelFormat> (frame->format))
, _frame (frame)
+ , _own (own)
{
_line_size = (int *) av_malloc (4 * sizeof (int));
_line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
FrameImage::~FrameImage ()
{
- av_frame_free (&_frame);
+ if (_own) {
+ av_frame_free (&_frame);
+ }
+
av_free (_line_size);
}
class FrameImage : public Image
{
public:
- FrameImage (AVFrame *);
+ FrameImage (AVFrame *, bool);
~FrameImage ();
uint8_t ** data () const;
FrameImage& operator= (FrameImage const &);
AVFrame* _frame;
+ bool _own;
int* _line_size;
};
f->width = 640;
f->height = 480;
f->format = static_cast<int> (i->format);
- FrameImage t (f);
+ FrameImage t (f, true);
BOOST_CHECK_EQUAL(t.components(), i->components);
BOOST_CHECK_EQUAL(t.lines(0), i->lines[0]);
BOOST_CHECK_EQUAL(t.lines(1), i->lines[1]);