/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */

#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "dcpomatic_log.h"
#include "exceptions.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_content.h"
#include "ffmpeg_decoder.h"
#include "ffmpeg_subtitle_stream.h"
#include "film.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "text_content.h"
#include "text_decoder.h"
#include "video_decoder.h"
#include "video_filter_graph.h"
#include <dcp/subtitle_string.h>
#include <sub/collect.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <cstring>
#include <iostream>

#include "i18n.h"

using std::cout;
using std::dynamic_pointer_cast;
using std::make_shared;
using std::min;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::is_any_of;
using boost::optional;
using namespace dcpomatic;

FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
	: FFmpeg (c)
	, Decoder (film)
{
	if (c->video && c->video->use()) {
		video = make_shared<VideoDecoder>(this, c);
		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
		_black_image->make_black ();
	}

	if (c->audio) {
		audio = make_shared<AudioDecoder>(this, c->audio, fast);
	}

	if (c->only_text()) {
		/* XXX: this time here should be the time of the first subtitle, not 0 */
		text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
	}

	for (auto i: c->ffmpeg_audio_streams()) {
		_next_time[i] = boost::optional<dcpomatic::ContentTime>();
	}
}
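
/* flush() below squeezes any remaining frames out of the decoders, then pads video
 * with black and audio with silence up to the content's full length.  The return
 * convention is inferred from the did_something logic in the body: false means
 * `call me again', true means flushing is finished.
 */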

bool
FFmpegDecoder::flush ()
{
	/* Flush video and audio once */

	bool did_something = false;
	if (video) {
		if (decode_and_process_video_packet(nullptr)) {
			did_something = true;
		}
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		auto context = _codec_context[i->index(_format_context)];
		int r = avcodec_send_packet (context, nullptr);
		if (r < 0 && r != AVERROR_EOF) {
			/* EOF can happen if we've already sent a flush packet */
			throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
		}
		r = avcodec_receive_frame (context, _frame);
		if (r >= 0) {
			process_audio_frame (i);
			did_something = true;
		}
	}

	if (did_something) {
		/* We want to be called again */
		return false;
	}

	/* Make sure all streams are the same length and round up to the next video frame */

	auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
	ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
	full_length = full_length.ceil (frc.source);

	if (video) {
		double const vfr = _ffmpeg_content->video_frame_rate().get();
		auto const f = full_length.frames_round (vfr);
		auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
		while (v < f) {
			video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
			++v;
		}
	}

	for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
		auto a = audio->stream_position(film(), i);
		/* Unfortunately if a is 0 that really means that we don't know the stream position since
		   there has been no data on it since the last seek.  In this case we'll just do nothing
		   here.  I'm not sure if that's the right idea.
		*/
		if (a > ContentTime()) {
			while (a < full_length) {
				auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
				auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
				silence->make_silent ();
				audio->emit (film(), i, silence, a, true);
				a += to_do;
			}
		}
	}

	return true;
}
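
/* pass() below reads one packet and dispatches it to the video, audio or subtitle
 * path.  Judging from the return paths, the return value mirrors flush(): false to
 * request another call, true once the content is exhausted.
 */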

bool
FFmpegDecoder::pass ()
{
	auto packet = av_packet_alloc();
	DCPOMATIC_ASSERT (packet);

	int r = av_read_frame (_format_context, packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
		}

		av_packet_free (&packet);
		return flush ();
	}

	int const si = packet->stream_index;
	auto fc = _ffmpeg_content;

	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
		decode_and_process_video_packet (packet);
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
		decode_and_process_subtitle_packet (packet);
	} else {
		decode_and_process_audio_packet (packet);
	}

	av_packet_free (&packet);
	return false;
}
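
/* A worked example of the deinterleave below: a packed (non-planar) stereo frame
 * arrives as one buffer holding L0 R0 L1 R1 ... and leaves as data[0] = {L0, L1, ...}
 * and data[1] = {R0, R1, ...}, with integer samples scaled to floats in [-1, 1).
 * Planar formats already have one buffer per channel and are copied (and scaled,
 * where necessary) channel by channel.
 */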

/** @param frame Frame from FFmpeg.  Only the first data buffer is used for
 *  non-planar formats, otherwise there is one buffer per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (AVFrame* frame)
{
	auto format = static_cast<AVSampleFormat>(frame->format);

	/* XXX: can't we use swr_convert() to do the format conversion? */

	int const channels = frame->channels;
	int const frames = frame->nb_samples;
	int const total_samples = frames * channels;
	auto audio = make_shared<AudioBuffers>(channels, frames);
	auto data = audio->data();

	switch (format) {
	case AV_SAMPLE_FMT_U8:
	{
		auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			/* U8 samples are unsigned with a mid-point of 128, so shift and scale to [-1, 1) */
			data[channel][sample] = (float(*p++) - 128.0f) / 128.0f;

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16:
	{
		auto p = reinterpret_cast<int16_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = float(*p++) / (1 << 15);

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16P:
	{
		auto p = reinterpret_cast<int16_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32:
	{
		auto p = reinterpret_cast<int32_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = static_cast<float>(*p++) / 2147483648;

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32P:
	{
		auto p = reinterpret_cast<int32_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLT:
	{
		auto p = reinterpret_cast<float*> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = *p++;

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLTP:
	{
		auto p = reinterpret_cast<float**> (frame->data);
		DCPOMATIC_ASSERT (frame->channels <= channels);
		/* Sometimes there aren't as many channels in the frame as in the stream */
		for (int i = 0; i < frame->channels; ++i) {
			memcpy (data[i], p[i], frames * sizeof(float));
		}
		for (int i = frame->channels; i < channels; ++i) {
			audio->make_silent (i);
		}
	}
	break;

	default:
		throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
	}

	return audio;
}

AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
	return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}


int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
	return av_get_bytes_per_sample (audio_sample_format (stream));
}
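
/* A note on the seek below: an `accurate' seek to time T first seeks to T minus
 * 2 seconds of pre-roll, since with inter-frame codecs av_seek_frame() lands on a
 * keyframe that may be well before T; the decoder then runs forward and material
 * before T is discarded downstream, which is what makes the seek frame-accurate.
 */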

void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
	Decoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/

	auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;

	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/

	optional<int> stream;

	if (_video_stream) {
		stream = _video_stream;
	} else {
		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
		auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
		if (s) {
			stream = s->index (_format_context);
		}
	}

	DCPOMATIC_ASSERT (stream);

	auto u = time - _pts_offset;
	if (u < ContentTime ()) {
		u = ContentTime ();
	}
	av_seek_frame (
		_format_context,
		stream.get(),
		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
		AVSEEK_FLAG_BACKWARD
		);

	{
		/* Force re-creation of filter graphs to reset them and hence to make sure
		   they don't have any pre-seek frames knocking about.
		*/
		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
		_filter_graphs.clear ();
	}

	if (video_codec_context ()) {
		avcodec_flush_buffers (video_codec_context());
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
	}

	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}

	_have_current_subtitle = false;

	for (auto& i: _next_time) {
		i.second = boost::optional<dcpomatic::ContentTime>();
	}

	/* We find that we get some errors from avcodec_send_packet after a seek.  Perhaps
	 * we should ignore all of them (which seems risky), or perhaps we should have some
	 * proper fix.  But instead let's just ignore the next 2 errors.
	 */
	_errors_to_ignore = 2;
}

shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
	/* XXX: inefficient */
	auto streams = ffmpeg_content()->ffmpeg_audio_streams();
	auto stream = streams.begin();
	while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
		++stream;
	}

	if (stream == streams.end ()) {
		return {};
	}

	return *stream;
}
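
/* Timing note for process_audio_frame(): some streams (TrueHD in particular; see
 * #1111) deliver frames without timestamps.  _next_time[stream] holds a running
 * prediction (the previous frame's time plus its duration), so, for example, a
 * 48kHz frame of 1920 samples starting at 1.00s predicts 1.04s for a PTS-less
 * successor.
 */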

void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
	auto data = deinterleave_audio (_frame);

	ContentTime ct;
	if (_frame->pts == AV_NOPTS_VALUE) {
		/* In some streams we see not every frame coming through with a timestamp; for those
		   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
		   particularly noticeable with TrueHD streams (see #1111).
		*/
		if (_next_time[stream]) {
			ct = *_next_time[stream];
		}
	} else {
		ct = ContentTime::from_seconds (
			_frame->best_effort_timestamp *
			av_q2d (stream->stream(_format_context)->time_base))
			+ _pts_offset;
	}

	_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());

	if (ct < ContentTime()) {
		/* Discard audio data that comes before time 0 */
		auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
		data->move (data->frames() - remove, remove, 0);
		data->set_frames (data->frames() - remove);
		ct += ContentTime::from_frames (remove, stream->frame_rate());
	}

	if (ct < ContentTime()) {
		LOG_WARNING (
			"Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
			to_string(ct),
			data->frames(),
			stream->name,
			_frame->best_effort_timestamp,
			av_q2d(stream->stream(_format_context)->time_base),
			to_string(_pts_offset)
			);
	}

	/* Give this data provided there is some, and its time is sane */
	if (ct >= ContentTime() && data->frames() > 0) {
		audio->emit (film(), stream, data, ct);
	}
}
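
/* With FFmpeg's send/receive API used below, one sent packet can yield zero or more
 * decoded frames, so after a successful avcodec_send_packet() we keep calling
 * avcodec_receive_frame() until it asks for more input with AVERROR(EAGAIN).
 */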

void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
	auto stream = audio_stream_from_index (packet->stream_index);
	if (!stream) {
		return;
	}

	auto context = _codec_context[stream->index(_format_context)];

	int r = avcodec_send_packet (context, packet);
	if (r < 0) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * Likewise I think AVERROR_EOF should not happen.
		 */
		if (_errors_to_ignore > 0) {
			/* We see errors here after a seek, which are hopefully nothing to worry about */
			--_errors_to_ignore;
			LOG_GENERAL("Ignoring error %1 from avcodec_send_packet after seek; will ignore %2 more", r, _errors_to_ignore);
			return;
		}
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_audio_packet"), r);
	}

	while (true) {
		r = avcodec_receive_frame (context, _frame);
		if (r == AVERROR(EAGAIN)) {
			/* More input is required */
			return;
		}

		/* We choose to be relaxed here about other errors; it seems that there may be valid
		 * data to decode even if an error occurred.  #352 may be related (though this was
		 * when we were using an old version of the FFmpeg API).
		 */
		process_audio_frame (stream);
	}
}
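
/* The video path below keeps a cache of VideoFilterGraphs, one per (size, pixel
 * format) combination seen in the content, so a graph is only built when the
 * incoming frames actually change shape.
 */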

bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
	DCPOMATIC_ASSERT (_video_stream);

	auto context = video_codec_context();

	int r = avcodec_send_packet (context, packet);
	if (r < 0 && !(r == AVERROR_EOF && !packet)) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * AVERROR_EOF can happen during flush if we've already sent a flush packet.
		 */
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
	}

	r = avcodec_receive_frame (context, _frame);
	if (r == AVERROR(EAGAIN) || r == AVERROR_EOF || (r < 0 && !packet)) {
		/* More input is required, no more frames are coming, or we are flushing and there was
		 * some error which we just want to ignore.
		 */
		return false;
	} else if (r < 0) {
		throw DecodeError (N_("avcodec_receive_frame"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
	}

	/* We assume we'll only get one frame here, which I think is safe */

	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<VideoFilterGraph> graph;

	auto i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
		graph = make_shared<VideoFilterGraph>(dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr);
		graph->setup (_ffmpeg_content->filters ());
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}

	auto images = graph->process (_frame);

	for (auto const& i: images) {

		auto image = i.first;

		if (i.second != AV_NOPTS_VALUE) {
			double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

			video->emit (
				film(),
				make_shared<RawImageProxy>(image),
				llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}
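
/* Subtitle timing below: if a subtitle arrives with a known `to' time its stop is
 * emitted straight away (stamped with that future time); if not, we note that a
 * subtitle is open and end it when the next subtitle packet arrives, at whichever
 * of the two candidate stop times is sooner.
 */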

void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
		return;
	}

	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
	if (_have_current_subtitle) {
		if (_current_subtitle_to) {
			only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
		} else {
			only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
		}
		_have_current_subtitle = false;
	}

	if (sub.num_rects <= 0) {
		/* Nothing new in this subtitle */
		avsubtitle_free (&sub);
		return;
	}

	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP).
	*/
	auto sub_period = subtitle_period (sub);

	ContentTime from = sub_period.from + _pts_offset;
	if (sub_period.to) {
		_current_subtitle_to = *sub_period.to + _pts_offset;
	} else {
		_current_subtitle_to = optional<ContentTime>();
		_have_current_subtitle = true;
	}

	for (unsigned int i = 0; i < sub.num_rects; ++i) {
		auto const rect = sub.rects[i];

		switch (rect->type) {
		case SUBTITLE_NONE:
			break;
		case SUBTITLE_BITMAP:
			process_bitmap_subtitle (rect, from);
			break;
		case SUBTITLE_TEXT:
			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
			break;
		case SUBTITLE_ASS:
			process_ass_subtitle (rect->ass, from);
			break;
		}
	}

	if (_current_subtitle_to) {
		only_text()->emit_stop (*_current_subtitle_to);
	}

	avsubtitle_free (&sub);
}
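
/* In the bitmap path below each source pixel is an index into a BGRA palette
 * carried with the rect: palette entry 3 might be, say, B=0 G=0 R=255 A=255, and
 * any per-project colour override from the FFmpegSubtitleStream's colour map is
 * applied before the indices are expanded into the output image.
 */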

void
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
	/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
	   G, third R, fourth A.
	*/
	auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	/* Start of the first line in the subtitle */
	auto sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	auto const* palette = rect->pict.data[1];
#else
	/* Start of the first line in the subtitle */
	auto sub_p = rect->data[0];
	/* sub_p looks up into a BGRA palette which is at rect->data[1].
	   (first byte B, second G, third R, fourth A)
	*/
	auto const* palette = rect->data[1];
#endif
	/* And the stream has a map of those palette colours to colours
	   chosen by the user; create a `mapped' palette from those settings.
	*/
	auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
	vector<RGBA> mapped_palette (rect->nb_colors);
	for (int i = 0; i < rect->nb_colors; ++i) {
		RGBA c (palette[2], palette[1], palette[0], palette[3]);
		auto j = colour_map.find (c);
		if (j != colour_map.end ()) {
			mapped_palette[i] = j->second;
		} else {
			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
			   it is from a project that was created before this stuff was added.  Just use the
			   colour straight from the original palette.
			*/
			mapped_palette[i] = c;
		}
		palette += 4;
	}

	/* Start of the output data */
	auto out_p = image->data()[0];

	for (int y = 0; y < rect->h; ++y) {
		auto sub_line_p = sub_p;
		auto out_line_p = out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			auto const p = mapped_palette[*sub_line_p++];
			/* Write the pixel out in BGRA byte order */
			*out_line_p++ = p.b;
			*out_line_p++ = p.g;
			*out_line_p++ = p.r;
			*out_line_p++ = p.a;
		}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		sub_p += rect->pict.linesize[0];
#else
		sub_p += rect->linesize[0];
#endif
		out_p += image->stride()[0];
	}

	int target_width = subtitle_codec_context()->width;
	if (target_width == 0 && video_codec_context()) {
		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
		   know if it's supposed to mean something from FFmpeg's point of view.
		*/
		target_width = video_codec_context()->width;
	}
	int target_height = subtitle_codec_context()->height;
	if (target_height == 0 && video_codec_context()) {
		target_height = video_codec_context()->height;
	}
	DCPOMATIC_ASSERT (target_width);
	DCPOMATIC_ASSERT (target_height);

	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double>(rect->x) / target_width,
		static_cast<double>(rect->y) / target_height,
		static_cast<double>(rect->w) / target_width,
		static_cast<double>(rect->h) / target_height
		);

	only_text()->emit_bitmap_start (from, image, scaled_rect);
}
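
/* Example of the Dialogue: line format assumed below:
 *   Dialogue: 0,0:00:01.00,0:00:02.00,Default,,0,0,0,,Hello\Nworld
 * The text payload is everything after the ninth comma, which is what the
 * comma-counting loop extracts.
 */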

void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
	   produces a single format of Dialogue: lines...
	*/

	int commas = 0;
	string text;
	for (size_t i = 0; i < ass.length(); ++i) {
		if (commas < 9 && ass[i] == ',') {
			++commas;
		} else if (commas == 9) {
			text += ass[i];
		}
	}

	if (text.empty ()) {
		return;
	}

	sub::RawSubtitle base;
	auto raw = sub::SSAReader::parse_line (
		base,
		text,
		_ffmpeg_content->video->size().width,
		_ffmpeg_content->video->size().height
		);

	for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
		only_text()->emit_plain_start (from, i);
	}
}