X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fffmpeg.cc;h=503f8e51cf13b146d25d81d443688428a87e5f97;hp=af6c8e1674e66c12b01dcafbbc2f428ddc96d561;hb=fc1441eeaa3c0805c37809685ea7a3f5ca173666;hpb=4750382795aaf7964cfc4af9483d30dd4e565b4c

diff --git a/src/lib/ffmpeg.cc b/src/lib/ffmpeg.cc
index af6c8e167..503f8e51c 100644
--- a/src/lib/ffmpeg.cc
+++ b/src/lib/ffmpeg.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -18,16 +18,19 @@
 
 */
 
+
 #include "ffmpeg.h"
 #include "ffmpeg_content.h"
 #include "film.h"
 #include "exceptions.h"
 #include "util.h"
 #include "log.h"
+#include "dcpomatic_log.h"
 #include "ffmpeg_subtitle_stream.h"
 #include "ffmpeg_audio_stream.h"
 #include "digester.h"
 #include "compose.hpp"
+#include "config.h"
 #include <dcp/raw_convert.h>
 extern "C" {
 #include <libavcodec/avcodec.h>
@@ -35,58 +38,59 @@ extern "C" {
 #include <libavformat/avformat.h>
 }
 #include <boost/algorithm/string.hpp>
-#include <boost/foreach.hpp>
 #include <iostream>
 
 #include "i18n.h"
 
+
 using std::string;
 using std::cout;
 using std::cerr;
 using std::vector;
-using boost::shared_ptr;
+using std::shared_ptr;
 using boost::optional;
 using dcp::raw_convert;
+using namespace dcpomatic;
+
 
 boost::mutex FFmpeg::_mutex;
-boost::weak_ptr<Log> FFmpeg::_ffmpeg_log;
 
-FFmpeg::FFmpeg (boost::shared_ptr<const FFmpegContent> c)
+
+FFmpeg::FFmpeg (std::shared_ptr<const FFmpegContent> c)
 	: _ffmpeg_content (c)
-	, _avio_buffer (0)
-	, _avio_buffer_size (4096)
-	, _avio_context (0)
-	, _format_context (0)
-	, _frame (0)
 {
 	setup_general ();
 	setup_decoders ();
 }
 
+
 FFmpeg::~FFmpeg ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
-	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		avcodec_close (_format_context->streams[i]->codec);
+	for (auto& i: _codec_context) {
+		avcodec_free_context (&i);
 	}
 
 	av_frame_free (&_frame);
 	avformat_close_input (&_format_context);
}
 
+
 static int
 avio_read_wrapper (void* data, uint8_t* buffer, int amount)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_read (buffer, amount);
 }
 
+
 static int64_t
 avio_seek_wrapper (void* data, int64_t offset, int whence)
 {
 	return reinterpret_cast<FFmpeg*>(data)->avio_seek (offset, whence);
 }
 
+
 void
 FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 {
@@ -97,43 +101,36 @@ FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
 	char line[1024];
 	static int prefix = 0;
 	av_log_format_line (ptr, level, fmt, vl, line, sizeof (line), &prefix);
-	shared_ptr<Log> log = _ffmpeg_log.lock ();
-	if (log) {
-		string str (line);
-		boost::algorithm::trim (str);
-		log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
-	} else {
-		cerr << line;
-	}
+	string str (line);
+	boost::algorithm::trim (str);
+	dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
 }
 
+
 void
 FFmpeg::setup_general ()
 {
-	av_register_all ();
-
 	/* This might not work too well in some cases of multiple FFmpeg decoders,
 	   but it's probably good enough.
 	*/
-	_ffmpeg_log = _ffmpeg_content->film()->log ();
 	av_log_set_callback (FFmpeg::ffmpeg_log_callback);
 
 	_file_group.set_paths (_ffmpeg_content->paths ());
-	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc (_avio_buffer_size));
+	_avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(_avio_buffer_size));
 	_avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper);
+	if (!_avio_context) {
+		throw std::bad_alloc ();
+	}
 	_format_context = avformat_alloc_context ();
+	if (!_format_context) {
+		throw std::bad_alloc ();
+	}
 	_format_context->pb = _avio_context;
 
-	AVDictionary* options = 0;
-	/* These durations are in microseconds, and represent how far into the content file
-	   we will look for streams.
-	*/
-	av_dict_set (&options, "analyzeduration", raw_convert<string> (5 * 60 * 1000000).c_str(), 0);
-	av_dict_set (&options, "probesize", raw_convert<string> (5 * 60 * 1000000).c_str(), 0);
-
+	AVDictionary* options = nullptr;
 	int e = avformat_open_input (&_format_context, 0, 0, &options);
 	if (e < 0) {
-		throw OpenFileError (_ffmpeg_content->path(0).string(), e, true);
+		throw OpenFileError (_ffmpeg_content->path(0).string(), e, OpenFileError::READ);
 	}
 
 	if (avformat_find_stream_info (_format_context, 0) < 0) {
@@ -145,8 +142,8 @@ FFmpeg::setup_general ()
 	optional<int> video_stream_undefined_frame_rate;
 
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		AVStream* s = _format_context->streams[i];
-		if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+		auto s = _format_context->streams[i];
+		if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
 			if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
 				/* This is definitely our video stream */
 				_video_stream = i;
@@ -164,6 +161,11 @@ FFmpeg::setup_general ()
 		_video_stream = video_stream_undefined_frame_rate.get();
 	}
 
+	/* Ignore video streams with crazy frame rates.  These are usually things like album art on MP3s. */
+	if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) {
+		_video_stream = optional<int>();
+	}
+
 	/* Hack: if the AVStreams have duplicate IDs, replace them with our own.  We use the IDs
 	   so that we can cope with VOBs, in which streams move about in index but remain
 	   with the same ID in different
@@ -188,22 +190,35 @@
 
 	_frame = av_frame_alloc ();
 	if (_frame == 0) {
-		throw DecodeError (N_("could not allocate frame"));
+		throw std::bad_alloc ();
 	}
 }
 
+
 void
 FFmpeg::setup_decoders ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
+	_codec_context.resize (_format_context->nb_streams);
 	for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
-		AVCodecContext* context = _format_context->streams[i]->codec;
-
-		AVCodec* codec = avcodec_find_decoder (context->codec_id);
+		auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
 		if (codec) {
+			auto context = avcodec_alloc_context3 (codec);
+			if (!context) {
+				throw std::bad_alloc ();
+			}
+			_codec_context[i] = context;
 
-			AVDictionary* options = 0;
+			int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+			if (r < 0) {
+				throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+			}
+
+			context->thread_count = 8;
+			context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+
+			AVDictionary* options = nullptr;
 			/* This option disables decoding of DCA frame footers in our patched version of FFmpeg.
 			   I believe these footers are of no use to us, and they can cause problems when
 			   FFmpeg fails to decode them (mantis #352).
@@ -213,42 +228,50 @@ FFmpeg::setup_decoders ()
 			   https://trac.ffmpeg.org/ticket/5681
 			*/
 			av_dict_set_int (&options, "strict", FF_COMPLIANCE_EXPERIMENTAL, 0);
+			/* Enable following of links in files */
+			av_dict_set_int (&options, "enable_drefs", 1, 0);
 
-			if (avcodec_open2 (context, codec, &options) < 0) {
-				throw DecodeError (N_("could not open decoder"));
+			r = avcodec_open2 (context, codec, &options);
+			if (r < 0) {
+				throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
 			}
+		} else {
+			dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
 		}
-
-		/* We are silently ignoring any failures to find suitable decoders here */
 	}
 }
 
+
 AVCodecContext *
 FFmpeg::video_codec_context () const
 {
 	if (!_video_stream) {
-		return 0;
+		return nullptr;
 	}
 
-	return _format_context->streams[_video_stream.get()]->codec;
+	return _codec_context[_video_stream.get()];
 }
 
+
 AVCodecContext *
 FFmpeg::subtitle_codec_context () const
 {
-	if (!_ffmpeg_content->subtitle_stream ()) {
-		return 0;
+	auto str = _ffmpeg_content->subtitle_stream();
+	if (!str) {
+		return nullptr;
 	}
 
-	return _ffmpeg_content->subtitle_stream()->stream(_format_context)->codec;
+	return _codec_context[str->index(_format_context)];
 }
 
+
 int
 FFmpeg::avio_read (uint8_t* buffer, int const amount)
 {
 	return _file_group.read (buffer, amount);
 }
 
+
 int64_t
 FFmpeg::avio_seek (int64_t const pos, int whence)
 {
@@ -259,12 +282,13 @@ FFmpeg::avio_seek (int64_t const pos, int whence)
 	return _file_group.seek (pos, whence);
 }
 
+
 FFmpegSubtitlePeriod
-FFmpeg::subtitle_period (AVSubtitle const & sub)
+FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
 {
-	ContentTime const packet_time = ContentTime::from_seconds (static_cast<double> (sub.pts) / AV_TIME_BASE);
+	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));
 
-	if (sub.end_display_time == static_cast<unsigned> (-1)) {
+	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<unsigned>(-1)) {
 		/* End time is not known */
 		return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
 	}
@@ -275,64 +299,6 @@
 	);
 }
 
-string
-FFmpeg::subtitle_id (AVSubtitle const & sub)
-{
-	Digester digester;
-	digester.add (sub.pts);
-	for (unsigned int i = 0; i < sub.num_rects; ++i) {
-		AVSubtitleRect* rect = sub.rects[i];
-		if (rect->type == SUBTITLE_BITMAP) {
-			digester.add (rect->x);
-			digester.add (rect->y);
-			digester.add (rect->w);
-			digester.add (rect->h);
-#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
-			int const line = rect->pict.linesize[0];
-			for (int j = 0; j < rect->h; ++j) {
-				digester.add (rect->pict.data[0] + j * line, line);
-			}
-#else
-			int const line = rect->linesize[0];
-			for (int j = 0; j < rect->h; ++j) {
-				digester.add (rect->data[0] + j * line, line);
-			}
-#endif
-		} else if (rect->type == SUBTITLE_TEXT) {
-			digester.add (string (rect->text));
-		} else if (rect->type == SUBTITLE_ASS) {
-			digester.add (string (rect->ass));
-		}
-	}
-	return digester.get ();
-}
-
-/** @return true if sub starts a new image subtitle */
-bool
-FFmpeg::subtitle_starts_image (AVSubtitle const & sub)
-{
-	bool image = false;
-	bool text = false;
-
-	for (unsigned int i = 0; i < sub.num_rects; ++i) {
-		switch (sub.rects[i]->type) {
-		case SUBTITLE_BITMAP:
-			image = true;
-			break;
-		case SUBTITLE_TEXT:
-		case SUBTITLE_ASS:
-			text = true;
-			break;
-		default:
-			break;
-		}
-	}
-
-	/* We can't cope with mixed image/text in one AVSubtitle */
-	DCPOMATIC_ASSERT (!image || !text);
-
-	return image;
-}
 
 /** Compute the pts offset to use given a set of audio streams and some video details.
  *  Sometimes these parameters will have just been determined by an Examiner, sometimes
@@ -340,7 +306,7 @@ FFmpeg::subtitle_starts_image (AVSubtitle const & sub)
  *  in FFmpeg.
  */
 ContentTime
-FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
+FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream>> audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
 {
 	/* Audio and video frame PTS values may not start with 0.  We want
 	   to fiddle them so that:
@@ -358,13 +324,13 @@ FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, option
 
 	/* First, make one of them start at 0 */
 
-	ContentTime po = ContentTime::min ();
+	auto po = ContentTime::min ();
 
 	if (first_video) {
 		po = - first_video.get ();
 	}
 
-	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, audio_streams) {
+	for (auto i: audio_streams) {
 		if (i->first_audio) {
 			po = max (po, - i->first_audio.get ());
 		}
@@ -380,8 +346,8 @@ FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, option
 
 	/* Now adjust so that the video pts starts on a frame */
 	if (first_video) {
-		ContentTime const fvc = first_video.get() + po;
-		po += fvc.round_up (video_frame_rate) - fvc;
+		auto const fvc = first_video.get() + po;
+		po += fvc.ceil (video_frame_rate) - fvc;
 	}
 
 	return po;
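
The central change in the patch above is the move away from the long-deprecated AVStream::codec field: each stream now gets its own AVCodecContext, built from the stream's AVCodecParameters and freed with avcodec_free_context() in the destructor. The standalone sketch below is not DCP-o-matic code; the file handling, error reporting and lack of threading options are simplifications, and it assumes stock FFmpeg 4.x headers rather than the project's patched build. It only illustrates the same find-decoder / alloc-context / parameters-to-context / open2 / free sequence used by the new setup_decoders().

// Minimal sketch of per-stream decoder setup with the codecpar API.
// Illustrative only; not part of the patch above.
#include <cstdio>
#include <vector>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

int main (int argc, char* argv[])
{
	if (argc < 2) {
		fprintf (stderr, "Syntax: %s <file>\n", argv[0]);
		return 1;
	}

	AVFormatContext* format_context = nullptr;
	if (avformat_open_input (&format_context, argv[1], nullptr, nullptr) < 0) {
		fprintf (stderr, "Could not open %s\n", argv[1]);
		return 1;
	}
	if (avformat_find_stream_info (format_context, nullptr) < 0) {
		fprintf (stderr, "Could not find stream information\n");
		avformat_close_input (&format_context);
		return 1;
	}

	/* One decoder context per stream, as in FFmpeg::setup_decoders() */
	std::vector<AVCodecContext*> codec_context (format_context->nb_streams, nullptr);
	for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
		auto par = format_context->streams[i]->codecpar;
		auto codec = avcodec_find_decoder (par->codec_id);
		if (!codec) {
			fprintf (stderr, "No codec found for stream %u\n", i);
			continue;
		}
		auto context = avcodec_alloc_context3 (codec);
		if (!context) {
			continue;
		}
		/* Copy the demuxer's stream parameters into the fresh context, then open it */
		if (avcodec_parameters_to_context (context, par) < 0 || avcodec_open2 (context, codec, nullptr) < 0) {
			avcodec_free_context (&context);
			continue;
		}
		codec_context[i] = context;
	}

	/* ... decode using codec_context[stream_index] ... */

	for (auto& c: codec_context) {
		if (c) {
			avcodec_free_context (&c);
		}
	}
	avformat_close_input (&format_context);
	return 0;
}

To build the sketch, link against libavformat and libavcodec (for example via pkg-config's libavformat and libavcodec packages); exact package names may vary by distribution.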