Extract crop.h
diff --git a/src/lib/video_filter_graph.cc b/src/lib/video_filter_graph.cc
index c24d9673d592b24b86679b8743e3634bf59b4f3b..89467ae94e50746dccd5704f8be099bc78b9e9a2 100644
--- a/src/lib/video_filter_graph.cc
+++ b/src/lib/video_filter_graph.cc
 
 
 #include "compose.hpp"
+#include "dcpomatic_assert.h"
+#include "exceptions.h"
 #include "image.h"
+#include "scope_guard.h"
 #include "video_filter_graph.h"
-#include "warnings.h"
 extern "C" {
 #include <libavfilter/buffersrc.h>
 #include <libavfilter/buffersink.h>
+#include <libavutil/opt.h>
 }
 
 #include "i18n.h"
@@ -37,7 +40,6 @@ using std::make_shared;
 using std::pair;
 using std::shared_ptr;
 using std::string;
-using std::vector;
 
 
 VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction r)
@@ -49,16 +51,59 @@ VideoFilterGraph::VideoFilterGraph (dcp::Size s, AVPixelFormat p, dcp::Fraction
 }
 
 
+list<shared_ptr<const Image>>
+VideoFilterGraph::process(shared_ptr<const Image> image)
+{
+       if (_copy) {
+               return { image };
+       }
+
+       auto frame = av_frame_alloc();
+       if (!frame) {
+               throw std::bad_alloc();
+       }
+
+       ScopeGuard sg = [&frame]() { av_frame_free(&frame); };
+
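+	/* Point the AVFrame at the Image's planes; no pixel data is copied here */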
+       for (int i = 0; i < image->planes(); ++i) {
+               frame->data[i] = image->data()[i];
+               frame->linesize[i] = image->stride()[i];
+       }
+
+       frame->width = image->size().width;
+       frame->height = image->size().height;
+       frame->format = image->pixel_format();
+
+       int r = av_buffersrc_write_frame(_buffer_src_context, frame);
+       if (r < 0) {
+               throw DecodeError(String::compose(N_("could not push buffer into filter chain (%1)."), r));
+       }
+
+       list<shared_ptr<const Image>> images;
+
+       while (true) {
+               if (av_buffersink_get_frame(_buffer_sink_context, _frame) < 0) {
+                       break;
+               }
+
+               images.push_back(make_shared<Image>(_frame, Image::Alignment::PADDED));
+               av_frame_unref (_frame);
+       }
+
+       return images;
+}
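
For context, a minimal sketch of how the new Image-based overload might be called.  The construction arguments and the handle() call are placeholders, and the actual filter-chain configuration lives in the FilterGraph base class, which this change does not touch:

	// Sketch only: assumes `image` matches the size and pixel format the
	// graph was built with, and that the filter chain is already configured.
	auto graph = std::make_shared<VideoFilterGraph>(
		dcp::Size(1998, 1080), AV_PIX_FMT_RGB24, dcp::Fraction(24, 1)
		);

	for (auto const& filtered: graph->process(image)) {
		// Each `filtered` is a shared_ptr<const Image> produced by the
		// chain; with no filters configured the input comes straight back.
		handle(filtered);  // placeholder for the caller's own code
	}
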
+
+
 /** Take an AVFrame and process it using our configured filters, returning a
  *  set of Images.  Caller handles memory management of the input frame.
  */
-list<pair<shared_ptr<Image>, int64_t>>
+list<pair<shared_ptr<const Image>, int64_t>>
 VideoFilterGraph::process (AVFrame* frame)
 {
-       list<pair<shared_ptr<Image>, int64_t>> images;
+       list<pair<shared_ptr<const Image>, int64_t>> images;
 
        if (_copy) {
-               images.push_back (make_pair(make_shared<Image>(frame), frame->best_effort_timestamp));
+               images.push_back (make_pair(make_shared<Image>(frame, Image::Alignment::PADDED), frame->best_effort_timestamp));
        } else {
                int r = av_buffersrc_write_frame (_buffer_src_context, frame);
                if (r < 0) {
@@ -70,7 +115,7 @@ VideoFilterGraph::process (AVFrame* frame)
                                break;
                        }
 
-                       images.push_back (make_pair(make_shared<Image>(_frame), _frame->best_effort_timestamp));
+			images.push_back (make_pair(make_shared<Image>(_frame, Image::Alignment::PADDED), _frame->best_effort_timestamp));
                        av_frame_unref (_frame);
                }
        }
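
The comment above notes that the caller owns the input AVFrame; a hedged sketch of a typical caller, where `codec_context` and `graph` are assumed to exist and error handling is omitted:

	AVFrame* frame = av_frame_alloc();
	DCPOMATIC_ASSERT (frame);

	while (avcodec_receive_frame(codec_context, frame) == 0) {
		for (auto const& image_and_ts: graph->process(frame)) {
			// image_and_ts.first is the filtered Image,
			// image_and_ts.second its best-effort timestamp
		}
		av_frame_unref (frame);   // the graph does not take ownership of `frame`
	}

	av_frame_free (&frame);
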
@@ -105,15 +150,12 @@ VideoFilterGraph::src_parameters () const
 }
 
 
-void *
-VideoFilterGraph::sink_parameters () const
+void
+VideoFilterGraph::set_parameters (AVFilterContext* context) const
 {
-       auto sink_params = av_buffersink_params_alloc ();
-       auto pixel_fmts = new AVPixelFormat[2];
-       pixel_fmts[0] = _pixel_format;
-       pixel_fmts[1] = AV_PIX_FMT_NONE;
-       sink_params->pixel_fmts = pixel_fmts;
-       return sink_params;
+       AVPixelFormat pix_fmts[] = { _pixel_format, AV_PIX_FMT_NONE };
+       int r = av_opt_set_int_list (context, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+       DCPOMATIC_ASSERT (r >= 0);
 }
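
set_parameters() replaces the removed sink_parameters(), which relied on the deprecated av_buffersink_params_alloc().  A rough sketch of where it would be called when the buffersink filter is created; the real wiring is in the FilterGraph base class (not part of this change), so the surrounding code below follows FFmpeg's usual buffersink setup rather than DCP-o-matic's actual implementation, and `filter_graph` is an assumed AVFilterGraph*:

	// Assumed to run inside the graph set-up code, where set_parameters()
	// is accessible as a member function.
	AVFilterContext* sink_context = nullptr;
	int r = avfilter_graph_create_filter(
		&sink_context, avfilter_get_by_name("buffersink"), "out", nullptr, nullptr, filter_graph
		);
	DCPOMATIC_ASSERT (r >= 0);

	// Constrain the sink to the graph's pixel format via the AVOption API
	set_parameters (sink_context);

	r = avfilter_graph_config (filter_graph, nullptr);
	DCPOMATIC_ASSERT (r >= 0);
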