/*
    Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/
/** @file src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
#include "exceptions.h"
#include "ffmpeg_decoder.h"
#include "subtitle_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "md5_digester.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/foreach.hpp>
#include <boost/algorithm/string.hpp>
#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
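/* A usage sketch for the macros above: the first argument is a format string whose
   %1, %2, ... placeholders String::compose fills with the remaining arguments in
   order, e.g. (as used later in this file)

       LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);

   The _NC variant is presumably `no compose': it logs the given string as-is.
*/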
using boost::shared_ptr;
using boost::is_any_of;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
video.reset (new VideoDecoder (this, c, log));
_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
_pts_offset = ContentTime ();
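/* A note on the offset, based on how it is used below: _pts_offset converts stream
   timestamps into content time, i.e. content_time is (roughly) pts_in_seconds + _pts_offset
   (see decode_video_packet()), and seek() applies the inverse, time - _pts_offset,
   to get back to stream time.
*/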
audio.reset (new AudioDecoder (this, c->audio, fast, log));
bind (&FFmpegDecoder::image_subtitles_during, this, _1, _2),
bind (&FFmpegDecoder::text_subtitles_during, this, _1, _2)
FFmpegDecoder::flush ()
/* Get any remaining frames */
/* XXX: should we reset _packet.data and size after each *_decode_* call? */
while (decode_video_packet ()) {}
decode_audio_packet ();
FFmpegDecoder::pass (PassReason reason, bool accurate)
int r = av_read_frame (_format_context, &_packet);
/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
   has pretty much succeeded (and hence generated data which should be processed).
   Hence it makes sense to continue here in that case.
*/
if (r < 0 && r != AVERROR_INVALIDDATA) {
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
av_strerror (r, buf, sizeof(buf));
LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
int const si = _packet.stream_index;
shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
if (_video_stream && si == _video_stream.get() && !video->ignore_video() && (accurate || reason != PASS_REASON_SUBTITLE)) {
decode_video_packet ();
} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
decode_subtitle_packet ();
} else if (accurate || reason != PASS_REASON_SUBTITLE) {
decode_audio_packet ();
av_packet_unref (&_packet);
/** @param data pointer to array of pointers to buffers.
 *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
 */
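/* A rough illustration of the two layouts handled below: for a non-planar (interleaved)
   format such as AV_SAMPLE_FMT_S16, _frame->data[0] holds all channels interleaved
   (L0 R0 L1 R1 ... for stereo); for a planar format such as AV_SAMPLE_FMT_FLTP,
   _frame->data[0] holds L0 L1 ... and _frame->data[1] holds R0 R1 ...
*/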
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
int const size = av_samples_get_buffer_size (
0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
/* Deinterleave and convert to float */
/* total_samples and frames will be rounded down here, so if there are stray samples at the end
   of the block that do not form a complete sample or frame they will be dropped.
*/
int const total_samples = size / bytes_per_audio_sample (stream);
int const frames = total_samples / stream->channels();
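/* A worked example with hypothetical numbers: 1024 samples per channel of interleaved
   S16 stereo gives size = 4096 bytes and bytes_per_audio_sample = 2, so
   total_samples = 2048 and frames = 2048 / 2 = 1024.
*/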
shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
switch (audio_sample_format (stream)) {
case AV_SAMPLE_FMT_U8:
uint8_t* p = reinterpret_cast<uint8_t *> (_frame->data[0]);
for (int i = 0; i < total_samples; ++i) {
audio->data(channel)[sample] = float(*p++) / (1 << 23);
if (channel == stream->channels()) {
case AV_SAMPLE_FMT_S16:
int16_t* p = reinterpret_cast<int16_t *> (_frame->data[0]);
for (int i = 0; i < total_samples; ++i) {
audio->data(channel)[sample] = float(*p++) / (1 << 15);
if (channel == stream->channels()) {
case AV_SAMPLE_FMT_S16P:
int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
for (int i = 0; i < stream->channels(); ++i) {
for (int j = 0; j < frames; ++j) {
audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
case AV_SAMPLE_FMT_S32:
int32_t* p = reinterpret_cast<int32_t *> (_frame->data[0]);
for (int i = 0; i < total_samples; ++i) {
audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
if (channel == stream->channels()) {
case AV_SAMPLE_FMT_S32P:
int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
for (int i = 0; i < stream->channels(); ++i) {
for (int j = 0; j < frames; ++j) {
audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 31);
case AV_SAMPLE_FMT_FLT:
float* p = reinterpret_cast<float*> (_frame->data[0]);
for (int i = 0; i < total_samples; ++i) {
audio->data(channel)[sample] = *p++;
if (channel == stream->channels()) {
case AV_SAMPLE_FMT_FLTP:
float** p = reinterpret_cast<float**> (_frame->data);
/* Sometimes there aren't as many channels in the _frame as in the stream */
for (int i = 0; i < _frame->channels; ++i) {
memcpy (audio->data(i), p[i], frames * sizeof(float));
for (int i = _frame->channels; i < stream->channels(); ++i) {
audio->make_silent (i);
throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format (stream))));
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
return stream->stream (_format_context)->codec->sample_fmt;
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
return av_get_bytes_per_sample (audio_sample_format (stream));
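/* For reference, av_get_bytes_per_sample() returns e.g. 1 for AV_SAMPLE_FMT_U8,
   2 for S16/S16P and 4 for S32, FLT and their planar variants.
*/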
FFmpegDecoder::seek (ContentTime time, bool accurate)
video->seek (time, accurate);
audio->seek (time, accurate);
subtitle->seek (time, accurate);
/* If we are doing an `accurate' seek, we need to use pre-roll, as
   we don't really know what the seek will give us.
*/
ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
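/* For example (a sketch; the code that applies pre_roll is not shown here): an accurate
   seek to 60s would use pre_roll = 2s, presumably so that the demuxer is pointed a little
   before the requested time and anything decoded before 60s can be discarded downstream,
   rather than trusting the seek to land exactly where we asked.
*/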
/* XXX: it seems debatable whether PTS should be used here...
   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
*/
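/* The conversion below is just seconds divided by the video stream's time_base: e.g.
   with a hypothetical time_base of 1/90000, a target of 58s becomes the stream
   timestamp 58 / (1/90000) = 5220000.
*/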
DCPOMATIC_ASSERT (_video_stream);
ContentTime u = time - _pts_offset;
if (u < ContentTime ()) {
u.seconds() / av_q2d (_format_context->streams[_video_stream.get()]->time_base),
avcodec_flush_buffers (video_codec_context());
/* XXX: should we be flushing audio buffers? */
if (subtitle_codec_context ()) {
avcodec_flush_buffers (subtitle_codec_context ());
FFmpegDecoder::decode_audio_packet ()
/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
   more than once.
*/
AVPacket copy_packet = _packet;
/* XXX: inefficient */
vector<shared_ptr<FFmpegAudioStream> > streams = ffmpeg_content()->ffmpeg_audio_streams ();
vector<shared_ptr<FFmpegAudioStream> >::const_iterator stream = streams.begin ();
while (stream != streams.end () && !(*stream)->uses_index (_format_context, copy_packet.stream_index)) {
if (stream == streams.end ()) {
/* The packet's stream may not be an audio one; just ignore it in this method if so */
while (copy_packet.size > 0) {
int decode_result = avcodec_decode_audio4 ((*stream)->stream (_format_context)->codec, _frame, &frame_finished, &copy_packet);
if (decode_result < 0) {
/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
   some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
   if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
   even in the face of such an error, so I think we should too.

   Returning from the method here caused mantis #352.
*/
LOG_WARNING ("avcodec_decode_audio4 failed (%1)", decode_result);
/* Fudge decode_result so that we come out of the while loop when
   we've processed this data.
*/
decode_result = copy_packet.size;
if (frame_finished) {
ContentTime ct = ContentTime::from_seconds (
av_frame_get_best_effort_timestamp (_frame) *
av_q2d ((*stream)->stream (_format_context)->time_base))
shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
if (ct < ContentTime ()) {
/* Discard audio data that comes before time 0 */
Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
data->move (remove, 0, data->frames() - remove);
data->set_frames (data->frames() - remove);
ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
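/* A worked example with hypothetical numbers: if ct is -0.5s and the stream runs at
   48kHz, remove = min (data->frames(), 24000); those frames are shifted out, and ct
   advances by 0.5s so the remaining audio starts at content time 0.
*/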
if (data->frames() > 0) {
audio->audio (*stream, data, ct);
copy_packet.data += decode_result;
copy_packet.size -= decode_result;
FFmpegDecoder::decode_video_packet ()
DCPOMATIC_ASSERT (_video_stream);
if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
boost::mutex::scoped_lock lm (_filter_graphs_mutex);
shared_ptr<VideoFilterGraph> graph;
list<shared_ptr<VideoFilterGraph> >::iterator i = _filter_graphs.begin();
while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
if (i == _filter_graphs.end ()) {
graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
graph->setup (_ffmpeg_content->filters ());
_filter_graphs.push_back (graph);
LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
list<pair<shared_ptr<Image>, int64_t> > images = graph->process (_frame);
for (list<pair<shared_ptr<Image>, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) {
shared_ptr<Image> image = i->first;
if (i->second != AV_NOPTS_VALUE) {
double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
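/* For example (hypothetical values): a PTS of 180000 in a 1/90000 time_base with a zero
   _pts_offset gives pts = 2.0s, which at 25fps becomes frame llrint (2.0 * 25) = 50.
*/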
shared_ptr<ImageProxy> (new RawImageProxy (image)),
llrint (pts * _ffmpeg_content->active_video_frame_rate ())
LOG_WARNING_NC ("Dropping frame without PTS");
FFmpegDecoder::decode_subtitle_packet ()
if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) {
if (sub.num_rects <= 0) {
/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
   indicate that the previous subtitle should stop.  We can ignore it here.
*/
/* Subtitle PTS (within the source, not taking into account any of the
   source that we may have chopped off for the DCP).
*/
FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
ContentTimePeriod period;
period.from = sub_period.from + _pts_offset;
/* We already know the subtitle period `to' time */
period.to = sub_period.to.get() + _pts_offset;
/* We have to look up the `to' time in the stream's records */
period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
for (unsigned int i = 0; i < sub.num_rects; ++i) {
AVSubtitleRect const * rect = sub.rects[i];
switch (rect->type) {
case SUBTITLE_BITMAP:
decode_bitmap_subtitle (rect, period);
cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
decode_ass_subtitle (rect->ass, period);
avsubtitle_free (&sub);
list<ContentTimePeriod>
FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
return _ffmpeg_content->image_subtitles_during (p, starting);
list<ContentTimePeriod>
FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
return _ffmpeg_content->text_subtitles_during (p, starting);
FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
   G, third B, fourth A.
*/
shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->pict.data[0];
/* sub_p looks up into a BGRA palette which is here
   (i.e. first byte B, second G, third R, fourth A)
*/
uint32_t const * palette = (uint32_t *) rect->pict.data[1];
#else
/* Start of the first line in the subtitle */
uint8_t* sub_p = rect->data[0];
/* sub_p looks up into a BGRA palette which is here
   (i.e. first byte B, second G, third R, fourth A)
*/
uint32_t const * palette = (uint32_t *) rect->data[1];
#endif
/* And the stream has a map of those palette colours to colours
   chosen by the user; create a `mapped' palette from those settings.
*/
map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
vector<RGBA> mapped_palette (rect->nb_colors);
for (int i = 0; i < rect->nb_colors; ++i) {
RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
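/* For example (assuming a little-endian host, as the palette layout noted above implies):
   an opaque pure-red entry is stored as the bytes B=0x00 G=0x00 R=0xff A=0xff, read here
   as the word 0xffff0000, so the shifts above give RGBA (0xff, 0x00, 0x00, 0xff).
*/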
map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
if (j != colour_map.end ()) {
mapped_palette[i] = j->second;
/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
   it is from a project that was created before this stuff was added.  Just use the
   colour straight from the original palette.
*/
mapped_palette[i] = c;
/* Start of the output data */
uint32_t* out_p = (uint32_t *) image->data()[0];
for (int y = 0; y < rect->h; ++y) {
uint8_t* sub_line_p = sub_p;
uint32_t* out_line_p = out_p;
for (int x = 0; x < rect->w; ++x) {
RGBA const p = mapped_palette[*sub_line_p++];
/* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */
*out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
sub_p += rect->pict.linesize[0];
#else
sub_p += rect->linesize[0];
#endif
out_p += image->stride()[0] / sizeof (uint32_t);
dcp::Size const vs = _ffmpeg_content->video->size ();
dcpomatic::Rect<double> const scaled_rect (
static_cast<double> (rect->x) / vs.width,
static_cast<double> (rect->y) / vs.height,
static_cast<double> (rect->w) / vs.width,
static_cast<double> (rect->h) / vs.height
subtitle->image_subtitle (period, image, scaled_rect);
FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
/* We have no styles and no Format: line, so I'm assuming that FFmpeg
   produces a single format of Dialogue: lines...
*/
split (bits, ass, is_any_of (","));
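/* A hypothetical example of the kind of line we expect here:

       Dialogue: 0,0:00:01.50,0:00:03.00,Default,,0,0,0,,Hello world

   i.e. at least ten comma-separated fields with the subtitle text in field index 9,
   which is what the checks below rely on.
*/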
if (bits.size() < 10) {
sub::RawSubtitle base;
list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (base, bits[9]);
list<sub::Subtitle> subs = sub::collect<list<sub::Subtitle> > (raw);
/* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */
/* Highest line index in this subtitle */
BOOST_FOREACH (sub::Subtitle i, subs) {
BOOST_FOREACH (sub::Line j, i.lines) {
DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE);
DCPOMATIC_ASSERT (j.vertical_position.line);
highest = max (highest, j.vertical_position.line.get());
list<dcp::SubtitleString> ss;
BOOST_FOREACH (sub::Subtitle i, sub::collect<list<sub::Subtitle> > (sub::SSAReader::parse_line (base, bits[9]))) {
BOOST_FOREACH (sub::Line j, i.lines) {
BOOST_FOREACH (sub::Block k, j.blocks) {
dcp::SubtitleString (
boost::optional<string> (),
dcp::Colour (255, 255, 255),
/* 48pt is 1/22nd of the screen height */
dcp::Time (i.from.seconds(), 1000),
dcp::Time (i.to.seconds(), 1000),
/* This 1.015 is an arbitrary value to lift the bottom sub off the bottom
   of the screen a bit to a pleasing degree; a worked check of this formula
   is at the end of this function.
*/
1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
static_cast<dcp::Effect> (0),
dcp::Colour (255, 255, 255),
subtitle->text_subtitle (period, ss);
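/* A worked check of the vertical position formula used above: for a single-line subtitle
   (highest = 0, line 0) it gives 1.015 - (1 * 1.5 / 22), about 0.947 of the screen height;
   for the upper line of a two-line subtitle (highest = 1, line 0) it gives
   1.015 - (2 * 1.5 / 22), about 0.879, so lines stack upwards from near the bottom of the screen.
*/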