*/
-#include "ffmpeg_image_proxy.h"
+
+#include "compose.hpp"
#include "cross.h"
-#include "exceptions.h"
+#include "dcpomatic_assert.h"
#include "dcpomatic_socket.h"
+#include "exceptions.h"
+#include "ffmpeg_image_proxy.h"
#include "image.h"
-#include "compose.hpp"
-#include "util.h"
+#include "memory_util.h"
+#include "video_filter_graph.h"
#include <dcp/raw_convert.h>
+#include <dcp/warnings.h>
+LIBDCP_DISABLE_WARNINGS
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
}
#include <libxml++/libxml++.h>
+LIBDCP_ENABLE_WARNINGS
#include <iostream>
#include "i18n.h"
-using std::string;
+
using std::cout;
-using std::pair;
-using std::min;
using std::make_pair;
-using boost::shared_ptr;
+using std::make_shared;
+using std::min;
+using std::pair;
+using std::shared_ptr;
+using std::string;
using boost::optional;
-using boost::dynamic_pointer_cast;
+using std::dynamic_pointer_cast;
using dcp::raw_convert;
+
FFmpegImageProxy::FFmpegImageProxy (boost::filesystem::path path)
: _data (path)
, _pos (0)
}
-FFmpegImageProxy::FFmpegImageProxy (dcp::Data data)
+FFmpegImageProxy::FFmpegImageProxy (dcp::ArrayData data)
: _data (data)
, _pos (0)
{
}
-FFmpegImageProxy::FFmpegImageProxy (shared_ptr<cxml::Node>, shared_ptr<Socket> socket)
+FFmpegImageProxy::FFmpegImageProxy (shared_ptr<Socket> socket)
: _pos (0)
{
uint32_t const size = socket->read_uint32 ();
- _data = dcp::Data (size);
- socket->read (_data.data().get(), size);
+ _data = dcp::ArrayData (size);
+ socket->read (_data.data(), size);
}
static int
int
FFmpegImageProxy::avio_read (uint8_t* buffer, int const amount)
{
- int const to_do = min(int64_t(amount), _data.size() - _pos);
+ int const to_do = min(static_cast<int64_t>(amount), static_cast<int64_t>(_data.size()) - _pos);
if (to_do == 0) {
return AVERROR_EOF;
}
- memcpy (buffer, _data.data().get() + _pos, to_do);
+ memcpy (buffer, _data.data() + _pos, to_do);
_pos += to_do;
return to_do;
}
return _pos;
}
-pair<shared_ptr<Image>, int>
-FFmpegImageProxy::image (optional<dcp::Size>) const
+
+ImageProxy::Result
+FFmpegImageProxy::image (Image::Alignment alignment, optional<dcp::Size>) const
{
+ auto constexpr name_for_errors = "FFmpegImageProxy::image";
+
boost::mutex::scoped_lock lm (_mutex);
if (_image) {
- return make_pair (_image, 0);
+ return Result (_image, 0);
}
uint8_t* avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(4096));
- AVIOContext* avio_context = avio_alloc_context (avio_buffer, 4096, 0, const_cast<FFmpegImageProxy*>(this), avio_read_wrapper, 0, avio_seek_wrapper);
+ auto avio_context = avio_alloc_context (avio_buffer, 4096, 0, const_cast<FFmpegImageProxy*>(this), avio_read_wrapper, 0, avio_seek_wrapper);
AVFormatContext* format_context = avformat_alloc_context ();
format_context->pb = avio_context;
- AVDictionary* options = 0;
+ AVDictionary* options = nullptr;
/* These durations are in microseconds, and represent how far into the content file
we will look for streams.
*/
directly from the file). This code just does enough to allow the
probe code to take a hint from "foo.tga" and so try targa format.
*/
- AVInputFormat* f = av_find_input_format ("image2");
+ auto f = av_find_input_format ("image2");
format_context = avformat_alloc_context ();
format_context->pb = avio_context;
format_context->iformat = f;
e = avformat_open_input (&format_context, "foo.tga", f, &options);
}
if (e < 0) {
- throw OpenFileError (_path->string(), e, true);
+ if (_path) {
+ throw OpenFileError (_path->string(), e, OpenFileError::READ);
+ } else {
+ boost::throw_exception(DecodeError(String::compose(_("Could not decode image (%1)"), e)));
+ }
}
-	if (avformat_find_stream_info(format_context, 0) < 0) {
-		throw DecodeError (_("could not find stream information"));
+	int r = avformat_find_stream_info(format_context, 0);
+	if (r < 0) {
+		throw DecodeError (N_("avformat_find_stream_info"), name_for_errors, r, *_path);
 	}
DCPOMATIC_ASSERT (format_context->nb_streams == 1);
-	AVFrame* frame = av_frame_alloc ();
+	auto frame = av_frame_alloc ();
 	if (!frame) {
-		throw DecodeError (N_("could not allocate frame"));
+		throw std::bad_alloc ();
 	}
- AVCodecContext* codec_context = format_context->streams[0]->codec;
- AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);
+ auto codec = avcodec_find_decoder (format_context->streams[0]->codecpar->codec_id);
DCPOMATIC_ASSERT (codec);
- if (avcodec_open2 (codec_context, codec, 0) < 0) {
- throw DecodeError (N_("could not open decoder"));
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw DecodeError (N_("avcodec_alloc_context3"), name_for_errors, *_path);
+ }
+
+ r = avcodec_open2 (context, codec, 0);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_open2"), name_for_errors, r, *_path);
}
AVPacket packet;
- int r = av_read_frame (format_context, &packet);
+ r = av_read_frame (format_context, &packet);
if (r < 0) {
- throw DecodeError (N_("could not read frame"));
+ throw DecodeError (N_("av_read_frame"), name_for_errors, r, *_path);
}
- int frame_finished;
- if (avcodec_decode_video2(codec_context, frame, &frame_finished, &packet) < 0 || !frame_finished) {
- throw DecodeError (N_("could not decode video"));
+ r = avcodec_send_packet (context, &packet);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_send_packet"), name_for_errors, r, *_path);
}
- _image.reset (new Image (frame));
+ r = avcodec_receive_frame (context, frame);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_receive_frame"), name_for_errors, r, *_path);
+ }
+
+ if (av_pix_fmt_desc_get(context->pix_fmt)->flags & AV_PIX_FMT_FLAG_ALPHA) {
+	/* XXX: this repeated setup of the filter graph could be really slow
+ * (haven't measured it though).
+ */
+ VideoFilterGraph graph(dcp::Size(frame->width, frame->height), context->pix_fmt, dcp::Fraction(24, 1));
+ auto filter = Filter::from_id("premultiply");
+ DCPOMATIC_ASSERT(filter);
+ graph.setup({*filter});
+ auto images = graph.process(frame);
+ DCPOMATIC_ASSERT(images.size() == 1);
+ _image = images.front().first;
+ } else {
+ _image = make_shared<Image>(frame, alignment);
+ }
av_packet_unref (&packet);
av_frame_free (&frame);
- avcodec_close (codec_context);
+ avcodec_free_context (&context);
avformat_close_input (&format_context);
av_free (avio_context->buffer);
av_free (avio_context);
- return make_pair (_image, 0);
+ return Result (_image, 0);
}
+
void
FFmpegImageProxy::add_metadata (xmlpp::Node* node) const
{
}
void
-FFmpegImageProxy::send_binary (shared_ptr<Socket> socket) const
+FFmpegImageProxy::write_to_socket (shared_ptr<Socket> socket) const
{
socket->write (_data.size());
- socket->write (_data.data().get(), _data.size());
+ socket->write (_data.data(), _data.size());
}
bool
FFmpegImageProxy::same (shared_ptr<const ImageProxy> other) const
{
- shared_ptr<const FFmpegImageProxy> mp = dynamic_pointer_cast<const FFmpegImageProxy> (other);
+ auto mp = dynamic_pointer_cast<const FFmpegImageProxy>(other);
if (!mp) {
return false;
}
- if (_data.size() != mp->_data.size()) {
- return false;
- }
-
- return memcmp (_data.data().get(), mp->_data.data().get(), _data.size()) == 0;
+ return _data == mp->_data;
}
size_t