/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
+
#include "ffmpeg.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "exceptions.h"
#include "util.h"
-#include "raw_convert.h"
#include "log.h"
+#include "dcpomatic_log.h"
#include "ffmpeg_subtitle_stream.h"
#include "ffmpeg_audio_stream.h"
-#include "md5_digester.h"
+#include "digester.h"
#include "compose.hpp"
+#include "config.h"
+#include <dcp/raw_convert.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <boost/algorithm/string.hpp>
-#include <boost/foreach.hpp>
#include <iostream>
#include "i18n.h"
+
using std::string;
using std::cout;
using std::cerr;
using std::vector;
-using boost::shared_ptr;
+using std::shared_ptr;
using boost::optional;
+using dcp::raw_convert;
+using namespace dcpomatic;
+
boost::mutex FFmpeg::_mutex;
-boost::weak_ptr<Log> FFmpeg::_ffmpeg_log;
-FFmpeg::FFmpeg (boost::shared_ptr<const FFmpegContent> c)
+
+FFmpeg::FFmpeg (std::shared_ptr<const FFmpegContent> c)
: _ffmpeg_content (c)
- , _avio_buffer (0)
- , _avio_buffer_size (4096)
- , _avio_context (0)
- , _format_context (0)
- , _frame (0)
- , _video_stream (-1)
{
setup_general ();
setup_decoders ();
}
+
/** Free each stream's codec context, the decoded-frame buffer and the
 *  libavformat context.  Takes the global mutex as the avcodec calls
 *  are not thread-safe.
 */
FFmpeg::~FFmpeg ()
{
	boost::mutex::scoped_lock lm (_mutex);

	/* avcodec_free_context() is null-safe and zeroes the pointer */
	for (auto& i: _codec_context) {
		avcodec_free_context (&i);
	}

	av_frame_free (&_frame);
	avformat_close_input (&_format_context);
}
+
/** Trampoline used as the AVIOContext read callback: forward to the
 *  FFmpeg object passed as the opaque pointer.
 */
static int
avio_read_wrapper (void* data, uint8_t* buffer, int amount)
{
	/* static_cast is the correct (and sufficient) cast back from the void* opaque pointer */
	return static_cast<FFmpeg*>(data)->avio_read (buffer, amount);
}
+
/** Trampoline used as the AVIOContext seek callback: forward to the
 *  FFmpeg object passed as the opaque pointer.
 */
static int64_t
avio_seek_wrapper (void* data, int64_t offset, int whence)
{
	/* static_cast is the correct (and sufficient) cast back from the void* opaque pointer */
	return static_cast<FFmpeg*>(data)->avio_seek (offset, whence);
}
+
/** Callback given to libav to handle its log output: format the message,
 *  trim surrounding whitespace and forward it to the DCP-o-matic log.
 *  NOTE(review): the static `prefix` makes this callback not re-entrant;
 *  presumably libav serialises log calls — confirm before relying on it.
 */
void
FFmpeg::ffmpeg_log_callback (void* ptr, int level, const char* fmt, va_list vl)
{
	char line[1024];
	static int prefix = 0;
	av_log_format_line (ptr, level, fmt, vl, line, sizeof (line), &prefix);
	string str (line);
	boost::algorithm::trim (str);
	dcpomatic_log->log (String::compose ("FFmpeg: %1", str), LogEntry::TYPE_GENERAL);
}
+
void
FFmpeg::setup_general ()
{
- av_register_all ();
-
/* This might not work too well in some cases of multiple FFmpeg decoders,
but it's probably good enough.
*/
- _ffmpeg_log = _ffmpeg_content->film()->log ();
av_log_set_callback (FFmpeg::ffmpeg_log_callback);
_file_group.set_paths (_ffmpeg_content->paths ());
- _avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc (_avio_buffer_size));
+ _avio_buffer = static_cast<uint8_t*> (wrapped_av_malloc(_avio_buffer_size));
_avio_context = avio_alloc_context (_avio_buffer, _avio_buffer_size, 0, this, avio_read_wrapper, 0, avio_seek_wrapper);
+ if (!_avio_context) {
+ throw std::bad_alloc ();
+ }
_format_context = avformat_alloc_context ();
+ if (!_format_context) {
+ throw std::bad_alloc ();
+ }
_format_context->pb = _avio_context;
- AVDictionary* options = 0;
- /* These durations are in microseconds, and represent how far into the content file
- we will look for streams.
- */
- av_dict_set (&options, "analyzeduration", raw_convert<string> (5 * 60 * 1000000).c_str(), 0);
- av_dict_set (&options, "probesize", raw_convert<string> (5 * 60 * 1000000).c_str(), 0);
-
- if (avformat_open_input (&_format_context, 0, 0, &options) < 0) {
- throw OpenFileError (_ffmpeg_content->path(0).string ());
+ AVDictionary* options = nullptr;
+ int e = avformat_open_input (&_format_context, 0, 0, &options);
+ if (e < 0) {
+ throw OpenFileError (_ffmpeg_content->path(0).string(), e, OpenFileError::READ);
}
if (avformat_find_stream_info (_format_context, 0) < 0) {
/* Find video stream */
- int video_stream_undefined_frame_rate = -1;
+ optional<int> video_stream_undefined_frame_rate;
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- AVStream* s = _format_context->streams[i];
- if (s->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ auto s = _format_context->streams[i];
+ if (s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && avcodec_find_decoder(s->codecpar->codec_id)) {
if (s->avg_frame_rate.num > 0 && s->avg_frame_rate.den > 0) {
/* This is definitely our video stream */
_video_stream = i;
/* Files from iTunes sometimes have two video streams, one with the avg_frame_rate.num and .den set
to zero. Only use such a stream if there is no alternative.
*/
- if (_video_stream == -1 && video_stream_undefined_frame_rate != -1) {
- _video_stream = video_stream_undefined_frame_rate;
+ if (!_video_stream && video_stream_undefined_frame_rate) {
+ _video_stream = video_stream_undefined_frame_rate.get();
}
- if (_video_stream < 0) {
- throw DecodeError (N_("could not find video stream"));
+ /* Ignore video streams with crazy frame rates. These are usually things like album art on MP3s. */
+ if (_video_stream && av_q2d(av_guess_frame_rate(_format_context, _format_context->streams[_video_stream.get()], 0)) > 1000) {
+ _video_stream = optional<int>();
}
/* Hack: if the AVStreams have duplicate IDs, replace them with our
_frame = av_frame_alloc ();
if (_frame == 0) {
- throw DecodeError (N_("could not allocate frame"));
+ throw std::bad_alloc ();
}
}
+
void
FFmpeg::setup_decoders ()
{
boost::mutex::scoped_lock lm (_mutex);
+ _codec_context.resize (_format_context->nb_streams);
for (uint32_t i = 0; i < _format_context->nb_streams; ++i) {
- AVCodecContext* context = _format_context->streams[i]->codec;
-
- AVCodec* codec = avcodec_find_decoder (context->codec_id);
+ auto codec = avcodec_find_decoder (_format_context->streams[i]->codecpar->codec_id);
if (codec) {
+ auto context = avcodec_alloc_context3 (codec);
+ if (!context) {
+ throw std::bad_alloc ();
+ }
+ _codec_context[i] = context;
+
+ int r = avcodec_parameters_to_context (context, _format_context->streams[i]->codecpar);
+ if (r < 0) {
+ throw DecodeError ("avcodec_parameters_to_context", "FFmpeg::setup_decoders", r);
+ }
+ context->thread_count = 8;
+ context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;
+
+ AVDictionary* options = nullptr;
/* This option disables decoding of DCA frame footers in our patched version
of FFmpeg. I believe these footers are of no use to us, and they can cause
problems when FFmpeg fails to decode them (mantis #352).
*/
- AVDictionary* options = 0;
av_dict_set (&options, "disable_footer", "1", 0);
+ /* This allows decoding of some DNxHR 444 and HQX files; see
+ https://trac.ffmpeg.org/ticket/5681
+ */
+ av_dict_set_int (&options, "strict", FF_COMPLIANCE_EXPERIMENTAL, 0);
+ /* Enable following of links in files */
+ av_dict_set_int (&options, "enable_drefs", 1, 0);
- if (avcodec_open2 (context, codec, &options) < 0) {
- throw DecodeError (N_("could not open decoder"));
+ r = avcodec_open2 (context, codec, &options);
+ if (r < 0) {
+ throw DecodeError (N_("avcodec_open2"), N_("FFmpeg::setup_decoders"), r);
}
+ } else {
+ dcpomatic_log->log (String::compose ("No codec found for stream %1", i), LogEntry::TYPE_WARNING);
}
-
- /* We are silently ignoring any failures to find suitable decoders here */
}
}
+
/** @return codec context for the video stream, or nullptr if there is no video stream */
AVCodecContext *
FFmpeg::video_codec_context () const
{
	if (!_video_stream) {
		return nullptr;
	}

	return _codec_context[_video_stream.get()];
}
+
/** @return codec context for the content's selected subtitle stream,
 *  or nullptr if there is no subtitle stream.
 */
AVCodecContext *
FFmpeg::subtitle_codec_context () const
{
	auto str = _ffmpeg_content->subtitle_stream();
	if (!str) {
		return nullptr;
	}

	return _codec_context[str->index(_format_context)];
}
+
/** Read callback for our AVIOContext: read from the content's file group.
 *  @param buffer Buffer to read into.
 *  @param amount Maximum number of bytes to read.
 *  @return Number of bytes read, as returned by FileGroup::read().
 */
int
FFmpeg::avio_read (uint8_t* buffer, int const amount)
{
	return _file_group.read (buffer, amount);
}
+
/** Seek callback for our AVIOContext: seek within the content's file group.
 *  @param pos Offset to seek to.
 *  @param whence SEEK_SET / SEEK_CUR / SEEK_END (or AVSEEK_SIZE), passed straight through.
 *  @return New position, as returned by FileGroup::seek().
 */
int64_t
FFmpeg::avio_seek (int64_t const pos, int whence)
{
	return _file_group.seek (pos, whence);
}
+
/** Compute the period which an AVSubtitle covers.
 *  @param packet Packet the subtitle was decoded from (its pts gives the base time).
 *  @param stream Stream the packet came from (for its time base).
 *  @param sub The decoded subtitle.
 *  NOTE(review): the from-period with a known end time was truncated in the
 *  diff; reconstructed here from the orphaned `);` as the symmetric
 *  two-argument FFmpegSubtitlePeriod — confirm against upstream.
 */
FFmpegSubtitlePeriod
FFmpeg::subtitle_period (AVPacket const* packet, AVStream const* stream, AVSubtitle const & sub)
{
	auto const packet_time = ContentTime::from_seconds (packet->pts * av_q2d(stream->time_base));

	if (sub.end_display_time == 0 || sub.end_display_time == static_cast<uint32_t>(-1)) {
		/* End time is not known */
		return FFmpegSubtitlePeriod (packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3));
	}

	/* display times are in milliseconds relative to the packet time */
	return FFmpegSubtitlePeriod (
		packet_time + ContentTime::from_seconds (sub.start_display_time / 1e3),
		packet_time + ContentTime::from_seconds (sub.end_display_time / 1e3)
		);
}
-string
-FFmpeg::subtitle_id (AVSubtitle const & sub)
-{
- MD5Digester digester;
- digester.add (sub.pts);
- for (unsigned int i = 0; i < sub.num_rects; ++i) {
- AVSubtitleRect* rect = sub.rects[i];
- digester.add (rect->x);
- digester.add (rect->y);
- digester.add (rect->w);
- digester.add (rect->h);
- int const line = rect->pict.linesize[0];
- for (int j = 0; j < rect->h; ++j) {
- digester.add (rect->pict.data[0] + j * line, line);
- }
- }
- return digester.get ();
-}
-
-bool
-FFmpeg::subtitle_is_image (AVSubtitle const & sub)
-{
- bool image = false;
- bool text = false;
-
- for (unsigned int i = 0; i < sub.num_rects; ++i) {
- switch (sub.rects[i]->type) {
- case SUBTITLE_BITMAP:
- image = true;
- break;
- case SUBTITLE_TEXT:
- case SUBTITLE_ASS:
- text = true;
- break;
- default:
- break;
- }
- }
-
- /* We can't cope with mixed image/text in one AVSubtitle */
- DCPOMATIC_ASSERT (!image || !text);
-
- return image;
-}
/** Compute the pts offset to use given a set of audio streams and some video details.
* Sometimes these parameters will have just been determined by an Examiner, sometimes
* in FFmpeg.
*/
ContentTime
-FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream> > audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
+FFmpeg::pts_offset (vector<shared_ptr<FFmpegAudioStream>> audio_streams, optional<ContentTime> first_video, double video_frame_rate) const
{
/* Audio and video frame PTS values may not start with 0. We want
to fiddle them so that:
/* First, make one of them start at 0 */
- ContentTime po = ContentTime::min ();
+ auto po = ContentTime::min ();
if (first_video) {
po = - first_video.get ();
}
- BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, audio_streams) {
+ for (auto i: audio_streams) {
if (i->first_audio) {
po = max (po, - i->first_audio.get ());
}
/* Now adjust so that the video pts starts on a frame */
if (first_video) {
- ContentTime const fvc = first_video.get() + po;
- po += fvc.round_up (video_frame_rate) - fvc;
+ auto const fvc = first_video.get() + po;
+ po += fvc.ceil (video_frame_rate) - fvc;
}
return po;