/*
    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
/** @file  src/ffmpeg_decoder.cc
 *  @brief A decoder using FFmpeg to decode content.
 */
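/* Overview: pass() reads one packet at a time from the container and sends it down the
 * video, audio or subtitle path; flush() drains the codecs at end-of-content and then
 * pads every stream with black frames or silence up to the same rounded-up length.
 */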
#include "exceptions.h"
#include "image.h"
#include "dcpomatic_log.h"
#include "ffmpeg_decoder.h"
#include "text_decoder.h"
#include "ffmpeg_audio_stream.h"
#include "ffmpeg_subtitle_stream.h"
#include "video_filter_graph.h"
#include "audio_buffers.h"
#include "ffmpeg_content.h"
#include "raw_image_proxy.h"
#include "video_decoder.h"
#include "audio_decoder.h"
#include "compose.hpp"
#include "text_content.h"
#include "audio_content.h"
#include "frame_interval_checker.h"
#include <dcp/subtitle_string.h>
#include <sub/ssa_reader.h>
#include <sub/subtitle.h>
#include <sub/collect.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <boost/algorithm/string.hpp>
#include <cstring>
#include <iostream>
using std::cout;
using std::min;
using std::string;
using std::vector;
using std::shared_ptr;
using std::make_shared;
using std::dynamic_pointer_cast;
using boost::is_any_of;
using boost::optional;
using namespace dcpomatic;
FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
	: FFmpeg (c)
	, Decoder (film)
{
	if (c->video && c->video->use()) {
		video = make_shared<VideoDecoder>(this, c);
		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
		_black_image->make_black ();
	}

	if (c->audio) {
		audio = make_shared<AudioDecoder>(this, c->audio, fast);
	}

	if (c->only_text()) {
		/* XXX: this time here should be the time of the first subtitle, not 0 */
		text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
	}

	for (auto i: c->ffmpeg_audio_streams()) {
		_next_time[i] = boost::optional<dcpomatic::ContentTime>();
	}
}
/** @return true if the decoder should be run again */
bool
FFmpegDecoder::flush ()
{
	/* Flush video and audio once */

	bool did_something = false;
	if (video) {
		if (decode_and_process_video_packet(nullptr)) {
			did_something = true;
		}
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		auto context = _codec_context[i->index(_format_context)];
		int r = avcodec_send_packet (context, nullptr);
		if (r < 0 && r != AVERROR_EOF) {
			/* EOF can happen if we've already sent a flush packet */
			throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
		}
		r = avcodec_receive_frame (context, _frame);
		if (r >= 0) {
			process_audio_frame (i);
			did_something = true;
		}
	}

	if (did_something) {
		/* We want to be called again */
		return true;
	}

	/* Make sure all streams are the same length and round up to the next video frame */

	auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
	ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
	full_length = full_length.ceil (frc.source);
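	/* For example: content lasting 9.97s with 25fps video rounds up to 250 frames
	   (10.00s); below we pad the video with black frames and each audio stream with
	   silence until everything reaches that length.
	*/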
	if (video) {
		double const vfr = _ffmpeg_content->video_frame_rate().get();
		auto const f = full_length.frames_round (vfr);
		auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
		while (v < f) {
			video->emit (film(), shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
			++v;
		}
	}
	for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
		auto a = audio->stream_position(film(), i);
		/* Unfortunately if a is 0 that really means that we don't know the stream position since
		   there has been no data on it since the last seek.  In this case we'll just do nothing
		   here.  I'm not sure if that's the right idea.
		*/
		if (a > ContentTime()) {
			while (a < full_length) {
				auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
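				/* e.g. 0.35s of missing audio is emitted as 0.1 + 0.1 + 0.1 + 0.05s chunks */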
				auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
				silence->make_silent ();
				audio->emit (film(), i, silence, a, true);
				a += to_do;
			}
		}
	}

	if (audio) {
		audio->flush ();
	}

	return false;
}
bool
FFmpegDecoder::pass ()
{
	auto packet = av_packet_alloc();
	DCPOMATIC_ASSERT (packet);
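	/* We own this packet, so it must be freed on every path out of this function */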
	int r = av_read_frame (_format_context, packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
	   has pretty-much succeeded (and hence generated data which should be processed).
	   Hence it makes sense to continue here in that case.
	*/
	if (r < 0 && r != AVERROR_INVALIDDATA) {
		if (r != AVERROR_EOF) {
			/* Maybe we should fail here, but for now we'll just finish off instead */
			char buf[256];
			av_strerror (r, buf, sizeof(buf));
			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
		}

		av_packet_free (&packet);
		return flush ();
	}

	int const si = packet->stream_index;
	auto fc = _ffmpeg_content;

	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
		decode_and_process_video_packet (packet);
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
		decode_and_process_subtitle_packet (packet);
	} else {
		decode_and_process_audio_packet (packet);
	}

	av_packet_free (&packet);
	return false;
}
/** @param frame Frame with audio data; for non-planar formats all channels are
 *  interleaved in data[0], otherwise there is one plane per channel.
 */
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (AVFrame* frame)
{
	auto format = static_cast<AVSampleFormat>(frame->format);

	/* XXX: can't we use swr_convert() to do the format conversion? */
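	/* A sketch of how swr_convert() might replace the loops below (an untested
	 * assumption, not what this file actually does): convert from `format' to planar
	 * float at the same rate and layout, straight into the AudioBuffers planes:
	 *
	 *   SwrContext* swr = swr_alloc_set_opts (
	 *           nullptr,
	 *           av_get_default_channel_layout (frame->channels), AV_SAMPLE_FMT_FLTP, frame->sample_rate,
	 *           av_get_default_channel_layout (frame->channels), format, frame->sample_rate,
	 *           0, nullptr
	 *           );
	 *   swr_init (swr);
	 *   swr_convert (
	 *           swr,
	 *           reinterpret_cast<uint8_t**> (audio->data()), frame->nb_samples,
	 *           const_cast<const uint8_t**> (frame->data), frame->nb_samples
	 *           );
	 *   swr_free (&swr);
	 *
	 * For now the conversion is done by hand, one case per sample format.
	 */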
	int const channels = frame->channels;
	int const frames = frame->nb_samples;
	int const total_samples = frames * channels;
	auto audio = make_shared<AudioBuffers>(channels, frames);
	auto data = audio->data();
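	/* Interleaved formats keep every channel in data[0] (e.g. stereo S16 is stored
	   L0 R0 L1 R1 ...), so we walk a single pointer and advance `channel' round-robin;
	   planar formats (suffix P) already have one plane per channel.  Integer samples
	   are scaled by 2^(bits - 1) so that the floats lie in [-1, 1).
	*/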
	switch (format) {
	case AV_SAMPLE_FMT_U8:
	{
		auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			/* U8 is unsigned (offset-binary): 0x80 is silence, so remove the
			   offset and scale by 2^7.
			*/
			data[channel][sample] = (float(*p++) - 0x80) / (1 << 7);

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16:
	{
		auto p = reinterpret_cast<int16_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = float(*p++) / (1 << 15);

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S16P:
	{
		auto p = reinterpret_cast<int16_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32:
	{
		auto p = reinterpret_cast<int32_t *> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = static_cast<float>(*p++) / 2147483648;

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_S32P:
	{
		auto p = reinterpret_cast<int32_t **> (frame->data);
		for (int i = 0; i < channels; ++i) {
			for (int j = 0; j < frames; ++j) {
				data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLT:
	{
		auto p = reinterpret_cast<float*> (frame->data[0]);
		int sample = 0;
		int channel = 0;
		for (int i = 0; i < total_samples; ++i) {
			data[channel][sample] = *p++;

			++channel;
			if (channel == channels) {
				channel = 0;
				++sample;
			}
		}
	}
	break;

	case AV_SAMPLE_FMT_FLTP:
	{
		auto p = reinterpret_cast<float**> (frame->data);
		DCPOMATIC_ASSERT (frame->channels <= channels);
		/* Sometimes there aren't as many channels in the frame as in the stream */
		for (int i = 0; i < frame->channels; ++i) {
			memcpy (data[i], p[i], frames * sizeof(float));
		}
		for (int i = frame->channels; i < channels; ++i) {
			audio->make_silent (i);
		}
	}
	break;

	default:
		throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
	}

	return audio;
}
AVSampleFormat
FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
{
	return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
}
int
FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
{
	return av_get_bytes_per_sample (audio_sample_format (stream));
}
void
FFmpegDecoder::seek (ContentTime time, bool accurate)
{
	Decoder::seek (time, accurate);

	/* If we are doing an `accurate' seek, we need to use pre-roll, as
	   we don't really know what the seek will give us.
	*/

	auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
	time -= pre_roll;
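	/* e.g. an accurate seek to 00:00:10 asks FFmpeg for 00:00:08; we then decode
	   forwards and the layers above discard output until the requested time, so we
	   land on the exact frame.
	*/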
	/* XXX: it seems debatable whether PTS should be used here...
	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
	*/

	optional<int> stream;

	if (_video_stream) {
		stream = _video_stream;
	} else {
		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
		auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
		if (s) {
			stream = s->index (_format_context);
		}
	}

	DCPOMATIC_ASSERT (stream);

	auto u = time - _pts_offset;
	if (u < ContentTime ()) {
		u = ContentTime ();
	}
	av_seek_frame (
		_format_context,
		stream.get(),
		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
		AVSEEK_FLAG_BACKWARD
		);

	{
		/* Force re-creation of filter graphs to reset them and hence to make sure
		   they don't have any pre-seek frames knocking about.
		*/
		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
		_filter_graphs.clear ();
	}

	if (video_codec_context ()) {
		avcodec_flush_buffers (video_codec_context());
	}

	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
		avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
	}

	if (subtitle_codec_context ()) {
		avcodec_flush_buffers (subtitle_codec_context ());
	}

	_have_current_subtitle = false;

	for (auto& i: _next_time) {
		i.second = boost::optional<dcpomatic::ContentTime>();
	}
}
shared_ptr<FFmpegAudioStream>
FFmpegDecoder::audio_stream_from_index (int index) const
{
	/* XXX: inefficient */
	auto streams = ffmpeg_content()->ffmpeg_audio_streams();
	auto stream = streams.begin();
	while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
		++stream;
	}

	if (stream == streams.end ()) {
		return {};
	}

	return *stream;
}
void
FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
{
	auto data = deinterleave_audio (_frame);

	ContentTime ct;
	if (_frame->pts == AV_NOPTS_VALUE) {
		/* In some streams we see not every frame coming through with a timestamp; for those
		   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
		   particularly noticeable with TrueHD streams (see #1111).
		*/
		if (_next_time[stream]) {
			ct = *_next_time[stream];
		}
	} else {
		ct = ContentTime::from_seconds (
			_frame->best_effort_timestamp *
			av_q2d (stream->stream(_format_context)->time_base))
			+ _pts_offset;
	}

	_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());
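	/* e.g. for a 48kHz stream, a frame of 480 samples starting at 1.0s predicts 1.01s
	   for the next frame; that prediction is used if the next frame has no PTS of its own.
	*/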
	if (ct < ContentTime()) {
		/* Discard audio data that comes before time 0 */
		auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
		data->move (data->frames() - remove, remove, 0);
		data->set_frames (data->frames() - remove);
		ct += ContentTime::from_frames (remove, stream->frame_rate());
	}

	if (ct < ContentTime()) {
		LOG_WARNING (
			"Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
			to_string(ct),
			data->frames(),
			stream->name,
			_frame->best_effort_timestamp,
			av_q2d(stream->stream(_format_context)->time_base),
			to_string(_pts_offset)
			);
	}

	/* Give this data provided there is some, and its time is sane */
	if (ct >= ContentTime() && data->frames() > 0) {
		audio->emit (film(), stream, data, ct);
	}
}
void
FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
{
	auto stream = audio_stream_from_index (packet->stream_index);
	if (!stream) {
		return;
	}

	auto context = _codec_context[stream->index(_format_context)];

	int r = avcodec_send_packet (context, packet);
	if (r < 0) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * Likewise I think AVERROR_EOF should not happen.
		 */
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_audio_packet"), r);
	}
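	/* Drain every frame this packet produced: avcodec_receive_frame() gives EAGAIN
	   once it needs the next packet.
	*/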
	while (true) {
		r = avcodec_receive_frame (context, _frame);
		if (r == AVERROR(EAGAIN)) {
			/* More input is required */
			return;
		}

		/* We choose to be relaxed here about other errors; it seems that there may be valid
		 * data to decode even if an error occurred.  #352 may be related (though this was
		 * when we were using an old version of the FFmpeg API).
		 */
		process_audio_frame (stream);
	}
}
bool
FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
{
	DCPOMATIC_ASSERT (_video_stream);

	auto context = video_codec_context();

	int r = avcodec_send_packet (context, packet);
	if (r < 0 && !(r == AVERROR_EOF && !packet)) {
		/* We could cope with AVERROR(EAGAIN) and re-send the packet but I think it should never happen.
		 * AVERROR_EOF can happen during flush if we've already sent a flush packet.
		 */
		throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::decode_and_process_video_packet"), r);
	}

	r = avcodec_receive_frame (context, _frame);
	if (r == AVERROR(EAGAIN) || r == AVERROR_EOF) {
		/* More input is required, or no more frames are coming */
		return false;
	}

	/* We assume we'll only get one frame here, which I think is safe */
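	/* Frames can change size or pixel format mid-stream, so we keep one configured
	   VideoFilterGraph per (size, pixel format) pair that we have seen.
	*/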
	boost::mutex::scoped_lock lm (_filter_graphs_mutex);

	shared_ptr<VideoFilterGraph> graph;

	auto i = _filter_graphs.begin();
	while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) {
		++i;
	}

	if (i == _filter_graphs.end ()) {
		dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
		graph = make_shared<VideoFilterGraph>(dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr);
		graph->setup (_ffmpeg_content->filters ());
		_filter_graphs.push_back (graph);
		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
	} else {
		graph = *i;
	}

	auto images = graph->process (_frame);

	for (auto const& i: images) {

		auto image = i.first;

		if (i.second != AV_NOPTS_VALUE) {
			double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds();

			video->emit (
				film(),
				make_shared<RawImageProxy>(image),
				llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
				);
		} else {
			LOG_WARNING_NC ("Dropping frame without PTS");
		}
	}

	return true;
}
void
FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet)
{
	int got_subtitle;
	AVSubtitle sub;
	if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) {
		return;
	}

	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
	if (_have_current_subtitle) {
		if (_current_subtitle_to) {
			only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
		} else {
			only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
		}
		_have_current_subtitle = false;
	}

	if (sub.num_rects <= 0) {
		/* Nothing new in this subtitle */
		avsubtitle_free (&sub);
		return;
	}

	/* Subtitle PTS (within the source, not taking into account any of the
	   source that we may have chopped off for the DCP).
	*/
	auto sub_period = subtitle_period (sub);
	ContentTime from;
	from = sub_period.from + _pts_offset;
	if (sub_period.to) {
		_current_subtitle_to = *sub_period.to + _pts_offset;
	} else {
		_current_subtitle_to = optional<ContentTime>();
	}
	_have_current_subtitle = true;

	for (unsigned int i = 0; i < sub.num_rects; ++i) {
		auto const rect = sub.rects[i];

		switch (rect->type) {
		case SUBTITLE_NONE:
			break;
		case SUBTITLE_BITMAP:
			process_bitmap_subtitle (rect, from);
			break;
		case SUBTITLE_TEXT:
			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
			break;
		case SUBTITLE_ASS:
			process_ass_subtitle (rect->ass, from);
			break;
		}
	}

	if (_current_subtitle_to) {
		only_text()->emit_stop (*_current_subtitle_to);
	}

	avsubtitle_free (&sub);
}
void
FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
{
	/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
	   G, third R, fourth A.
	*/
	auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), true);

#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	/* Start of the first line in the subtitle */
	auto sub_p = rect->pict.data[0];
	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
	   (i.e. first byte B, second G, third R, fourth A)
	*/
	auto const palette = rect->pict.data[1];
#else
	/* Start of the first line in the subtitle */
	auto sub_p = rect->data[0];
	/* sub_p looks up into a BGRA palette which is at rect->data[1].
	   (first byte B, second G, third R, fourth A)
	*/
	auto const* palette = rect->data[1];
#endif
	/* And the stream has a map of those palette colours to colours
	   chosen by the user; create a `mapped' palette from those settings.
	*/
	auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
	vector<RGBA> mapped_palette (rect->nb_colors);
	for (int i = 0; i < rect->nb_colors; ++i) {
		RGBA c (palette[2], palette[1], palette[0], palette[3]);
		auto j = colour_map.find (c);
		if (j != colour_map.end ()) {
			mapped_palette[i] = j->second;
		} else {
			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
			   it is from a project that was created before this stuff was added.  Just use the
			   colour straight from the original palette.
			*/
			mapped_palette[i] = c;
		}
		palette += 4;
	}

	/* Start of the output data */
	auto out_p = image->data()[0];

	for (int y = 0; y < rect->h; ++y) {
		auto sub_line_p = sub_p;
		auto out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			auto const p = mapped_palette[*sub_line_p++];
			*out_line_p++ = p.b;
			*out_line_p++ = p.g;
			*out_line_p++ = p.r;
			*out_line_p++ = p.a;
		}
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		sub_p += rect->pict.linesize[0];
#else
		sub_p += rect->linesize[0];
#endif
		out_p += image->stride()[0];
	}
	int target_width = subtitle_codec_context()->width;
	if (target_width == 0 && video_codec_context()) {
		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
		   know if it's supposed to mean something from FFmpeg's point of view.
		*/
		target_width = video_codec_context()->width;
	}
	int target_height = subtitle_codec_context()->height;
	if (target_height == 0 && video_codec_context()) {
		target_height = video_codec_context()->height;
	}
	DCPOMATIC_ASSERT (target_width);
	DCPOMATIC_ASSERT (target_height);
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double>(rect->x) / target_width,
		static_cast<double>(rect->y) / target_height,
		static_cast<double>(rect->w) / target_width,
		static_cast<double>(rect->h) / target_height
		);
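	/* scaled_rect is in proportions of the whole frame, e.g. a 192-pixel-wide rect on
	   a 1920-pixel-wide stream has w = 0.1, so it can be re-scaled to any DCP resolution.
	*/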
	only_text()->emit_bitmap_start (from, image, scaled_rect);
}
void
FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
{
	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
	   produces a single format of Dialogue: lines...
	*/
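	/* A typical line is
	   Dialogue: 0,0:00:01.00,0:00:04.00,Default,,0,0,0,,Hello world
	   where the text payload starts after the ninth comma; the loop below skips up to
	   there and keeps the rest.
	*/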
	int commas = 0;
	string text;
	for (size_t i = 0; i < ass.length(); ++i) {
		if (commas < 9 && ass[i] == ',') {
			++commas;
		} else if (commas == 9) {
			text += ass[i];
		}
	}

	if (text.empty ()) {
		return;
	}

	sub::RawSubtitle base;
	auto raw = sub::SSAReader::parse_line (
		base,
		text,
		_ffmpeg_content->video->size().width,
		_ffmpeg_content->video->size().height
		);

	for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
		only_text()->emit_plain_start (from, i);
	}
}