X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=57d300e18cf0b18e0c863df0aa26fc38ca5ea014;hb=b703142e8750c509174b4d964009aecf93f3d834;hp=15443c346b3d302cce263165588044e641879160;hpb=cb990adba9c57e5107ef2aa9716cf0a26c1df83d;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 15443c346..e5685f661 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,203 +1,263 @@
 /*
-    Copyright (C) 2012-2014 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2018 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
+
 /** @file  src/ffmpeg_decoder.cc
  *  @brief A decoder using FFmpeg to decode content.
  */
 
-#include <stdexcept>
-#include <vector>
-#include <sstream>
-#include <iomanip>
-#include <iostream>
-#include <stdint.h>
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-}
-#include "filter.h"
+
+#include "audio_buffers.h"
+#include "audio_content.h"
+#include "audio_decoder.h"
+#include "compose.hpp"
+#include "dcpomatic_log.h"
 #include "exceptions.h"
-#include "image.h"
-#include "util.h"
-#include "log.h"
-#include "ffmpeg_decoder.h"
 #include "ffmpeg_audio_stream.h"
-#include "ffmpeg_subtitle_stream.h"
-#include "filter_graph.h"
-#include "audio_buffers.h"
 #include "ffmpeg_content.h"
-#include "raw_image_proxy.h"
+#include "ffmpeg_decoder.h"
+#include "ffmpeg_subtitle_stream.h"
 #include "film.h"
-#include "timer.h"
+#include "filter.h"
+#include "frame_interval_checker.h"
+#include "image.h"
+#include "log.h"
+#include "raw_image_proxy.h"
+#include "text_content.h"
+#include "text_decoder.h"
+#include "util.h"
+#include "video_decoder.h"
+#include "video_filter_graph.h"
+#include <dcp/raw_convert.h>
+#include <sub/collect.h>
+#include <sub/ssa_reader.h>
+#include <sub/subtitle.h>
+extern "C" {
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+}
+#include <boost/algorithm/string.hpp>
+#include <iomanip>
+#include <iostream>
+#include <stdint.h>
+#include <vector>
 
 #include "i18n.h"
 
-#define LOG_GENERAL(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL);
-#define LOG_ERROR(...) _video_content->film()->log()->log (String::compose (__VA_ARGS__), Log::TYPE_ERROR);
-#define LOG_WARNING(...) _video_content->film()->log()->log (__VA_ARGS__, Log::TYPE_WARNING);
 
 using std::cout;
+using std::dynamic_pointer_cast;
+using std::make_shared;
+using std::min;
+using std::shared_ptr;
 using std::string;
 using std::vector;
-using std::list;
-using std::min;
-using std::pair;
-using std::make_pair;
-using boost::shared_ptr;
 using boost::optional;
-using boost::dynamic_pointer_cast;
 using dcp::Size;
+using namespace dcpomatic;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
-	: VideoDecoder (c)
-	, AudioDecoder (c)
-	, SubtitleDecoder (c)
-	, FFmpeg (c)
-	, _log (log)
+
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
	: FFmpeg (c)
+	, Decoder (film)
 {
-	/* Audio and video frame PTS values may not start with 0.  We want
-	   to fiddle them so that:
+	if (c->video && c->video->use()) {
+		video = make_shared<VideoDecoder>(this, c);
+		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
+		/* It doesn't matter what size or pixel format this is, it just needs to be black */
+		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, dcp::Size (128, 128), Image::Alignment::PADDED);
+		_black_image->make_black ();
+	} else {
+		_pts_offset = {};
+	}
 
-	   1. One of them starts at time 0.
-	   2. The first video PTS value ends up on a frame boundary.
+	if (c->audio) {
+		audio = make_shared<AudioDecoder>(this, c->audio, fast);
+	}
 
-	   Then we remove big initial gaps in PTS and we allow our
-	   insertion of black frames to work.
+	if (c->only_text()) {
+		/* XXX: this time here should be the time of the first subtitle, not 0 */
+		text.push_back (make_shared<TextDecoder>(this, c->only_text(), ContentTime()));
+	}
 
-	   We will do pts_to_use = pts_from_ffmpeg + pts_offset;
-	*/
+	for (auto i: c->ffmpeg_audio_streams()) {
+		_next_time[i] = boost::optional<ContentTime>();
+	}
+}
 
-	bool const have_video = c->first_video();
-	bool const have_audio = c->audio_stream () && c->audio_stream()->first_audio;
 
-	/* First, make one of them start at 0 */
+bool
+FFmpegDecoder::flush ()
+{
+	/* Flush video and audio once */
 
-	if (have_audio && have_video) {
-		_pts_offset = - min (c->first_video().get(), c->audio_stream()->first_audio.get());
-	} else if (have_video) {
-		_pts_offset = - c->first_video().get();
-	} else if (have_audio) {
-		_pts_offset = - c->audio_stream()->first_audio.get();
+	bool did_something = false;
+	if (video) {
+		if (decode_and_process_video_packet(nullptr)) {
+			did_something = true;
+		}
 	}
 
-	/* Now adjust both so that the video pts starts on a frame */
-	if (have_video && have_audio) {
-		ContentTime first_video = c->first_video().get() + _pts_offset;
-		ContentTime const old_first_video = first_video;
-		_pts_offset += first_video.round_up (c->video_frame_rate ()) - old_first_video;
+	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
+		auto context = _codec_context[i->index(_format_context)];
+		int r = avcodec_send_packet (context, nullptr);
+		if (r < 0 && r != AVERROR_EOF) {
+			/* EOF can happen if we've already sent a flush packet */
+			throw DecodeError (N_("avcodec_send_packet"), N_("FFmpegDecoder::flush"), r);
+		}
+		r = avcodec_receive_frame (context, audio_frame(i));
+		if (r >= 0) {
+			process_audio_frame (i);
+			did_something = true;
+		}
 	}
-}
 
-void
-FFmpegDecoder::flush ()
-{
-	/* Get any remaining frames */
-
-	_packet.data = 0;
-	_packet.size = 0;
-
-	/* XXX: should we reset _packet.data and size after each *_decode_* call?
-	*/
-
-	while (decode_video_packet ()) {}
-
-	if (_ffmpeg_content->audio_stream()) {
-		decode_audio_packet ();
-		AudioDecoder::flush ();
+	if (did_something) {
+		/* We want to be called again */
+		return false;
+	}
+
+	/* Make sure all streams are the same length and round up to the next video frame */
+
+	auto const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
+	ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
+	full_length = full_length.ceil (frc.source);
+	if (video) {
+		double const vfr = _ffmpeg_content->video_frame_rate().get();
+		auto const f = full_length.frames_round (vfr);
+		auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
+		while (v < f) {
+			video->emit (film(), shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
+			++v;
+		}
+	}
+
+	for (auto i: _ffmpeg_content->ffmpeg_audio_streams ()) {
+		auto a = audio->stream_position(film(), i);
+		/* Unfortunately if a is 0 that really means that we don't know the stream position since
+		   there has been no data on it since the last seek.  In this case we'll just do nothing
+		   here.  I'm not sure if that's the right idea.
+		*/
+		if (a > ContentTime()) {
+			while (a < full_length) {
+				auto to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+				auto silence = make_shared<AudioBuffers>(i->channels(), to_do.frames_ceil (i->frame_rate()));
+				silence->make_silent ();
+				audio->emit (film(), i, silence, a, true);
+				a += to_do;
+			}
+		}
+	}
+
+	if (audio) {
+		audio->flush ();
 	}
+
+	return true;
 }
 
+
 bool
 FFmpegDecoder::pass ()
 {
-	int r = av_read_frame (_format_context, &_packet);
+	auto packet = av_packet_alloc();
+	DCPOMATIC_ASSERT (packet);
 
-	if (r < 0) {
+	int r = av_read_frame (_format_context, packet);
+
+	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
+	   has pretty-much succeeded (and hence generated data which should be processed).
+	   Hence it makes sense to continue here in that case.
+	*/
+	if (r < 0 && r != AVERROR_INVALIDDATA) {
 		if (r != AVERROR_EOF) {
 			/* Maybe we should fail here, but for now we'll just finish off instead */
 			char buf[256];
 			av_strerror (r, buf, sizeof(buf));
-			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
+			LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
 		}
 
-		flush ();
-		return true;
+		av_packet_free (&packet);
+		return flush ();
 	}
 
-	int const si = _packet.stream_index;
+	int const si = packet->stream_index;
+	auto fc = _ffmpeg_content;
 
-	if (si == _video_stream) {
-		decode_video_packet ();
-	} else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si)) {
-		decode_audio_packet ();
-	} else if (_ffmpeg_content->subtitle_stream() && _ffmpeg_content->subtitle_stream()->uses_index (_format_context, si)) {
-		decode_subtitle_packet ();
+	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
+		decode_and_process_video_packet (packet);
+	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
+		decode_and_process_subtitle_packet (packet);
+	} else {
+		decode_and_process_audio_packet (packet);
 	}
 
-	av_free_packet (&_packet);
+	av_packet_free (&packet);
 	return false;
 }
 
+
 /** @param data pointer to array of pointers to buffers.
  *  Only the first buffer will be used for non-planar data, otherwise there will be one per channel.
  */
 shared_ptr<AudioBuffers>
-FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
+FFmpegDecoder::deinterleave_audio (AVFrame* frame)
 {
-	assert (_ffmpeg_content->audio_channels());
-	assert (bytes_per_audio_sample());
+	auto format = static_cast<AVSampleFormat>(frame->format);
 
-	/* Deinterleave and convert to float */
+	/* XXX: can't we use swr_convert() to do the format conversion? */
 
-	assert ((size % (bytes_per_audio_sample() * _ffmpeg_content->audio_channels())) == 0);
+	int const channels = frame->channels;
+	int const frames = frame->nb_samples;
+	int const total_samples = frames * channels;
+	auto audio = make_shared<AudioBuffers>(channels, frames);
+	auto data = audio->data();
 
-	int const total_samples = size / bytes_per_audio_sample();
-	int const frames = total_samples / _ffmpeg_content->audio_channels();
-	shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames));
-
-	switch (audio_sample_format()) {
+	switch (format) {
 	case AV_SAMPLE_FMT_U8:
 	{
-		uint8_t* p = reinterpret_cast<uint8_t *> (data[0]);
+		auto p = reinterpret_cast<uint8_t *> (frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = float(*p++) / (1 << 23);
+			data[channel][sample] = float(*p++) / (1 << 23);
 			++channel;
-			if (channel == _ffmpeg_content->audio_channels()) {
+			if (channel == channels) {
 				channel = 0;
 				++sample;
 			}
 		}
 	}
 	break;
-
+
 	case AV_SAMPLE_FMT_S16:
 	{
-		int16_t* p = reinterpret_cast<int16_t *> (data[0]);
+		auto p = reinterpret_cast<int16_t *> (frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = float(*p++) / (1 << 15);
+			data[channel][sample] = float(*p++) / (1 << 15);
 			++channel;
-			if (channel == _ffmpeg_content->audio_channels()) {
+			if (channel == channels) {
 				channel = 0;
 				++sample;
 			}
@@ -207,25 +267,25 @@ FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
 
 	case AV_SAMPLE_FMT_S16P:
 	{
-		int16_t** p = reinterpret_cast<int16_t **> (data);
-		for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
+		auto p = reinterpret_cast<int16_t **> (frame->data);
+		for (int i = 0; i < channels; ++i) {
 			for (int j = 0; j < frames; ++j) {
-				audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
+				data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
 			}
 		}
 	}
 	break;
-
+
 	case AV_SAMPLE_FMT_S32:
 	{
-		int32_t* p = reinterpret_cast<int32_t *> (data[0]);
+		auto p = reinterpret_cast<int32_t *> (frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
+			data[channel][sample] = static_cast<float>(*p++) / 2147483648;
 			++channel;
-			if (channel == _ffmpeg_content->audio_channels()) {
+			if (channel == channels) {
 				channel = 0;
 				++sample;
@@ -233,249 +293,488 @@ FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
 	}
 	break;
 
+	case AV_SAMPLE_FMT_S32P:
+	{
+		auto p = reinterpret_cast<int32_t **> (frame->data);
+		for (int i = 0; i < channels; ++i) {
+			for (int j = 0; j < frames; ++j) {
+				data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
+			}
+		}
+	}
+	break;
+
 	case AV_SAMPLE_FMT_FLT:
 	{
-		float* p = reinterpret_cast<float*> (data[0]);
+		auto p = reinterpret_cast<float*> (frame->data[0]);
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = *p++;
+			data[channel][sample] = *p++;
 			++channel;
-			if (channel == _ffmpeg_content->audio_channels()) {
+			if (channel == channels) {
 				channel = 0;
 				++sample;
 			}
 		}
 	}
 	break;
-
+
 	case AV_SAMPLE_FMT_FLTP:
 	{
-		float** p = reinterpret_cast<float**> (data);
-		for (int i = 0; i < _ffmpeg_content->audio_channels(); ++i) {
-			memcpy (audio->data(i), p[i], frames * sizeof(float));
+		auto p = reinterpret_cast<float**> (frame->data);
+		DCPOMATIC_ASSERT (frame->channels <= channels);
+		/* Sometimes there aren't as many channels in the frame as in the stream */
+		for (int i = 0; i < frame->channels; ++i) {
+			memcpy (data[i], p[i], frames * sizeof(float));
+		}
+		for (int i = frame->channels; i < channels; ++i) {
+			audio->make_silent (i);
 		}
 	}
 	break;
 
 	default:
-		throw DecodeError (String::compose (_("Unrecognised audio sample format (%1)"), static_cast<int> (audio_sample_format())));
+		throw DecodeError (String::compose(_("Unrecognised audio sample format (%1)"), static_cast<int>(format)));
 	}
 
 	return audio;
 }
 
+
 AVSampleFormat
-FFmpegDecoder::audio_sample_format () const
+FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
 {
-	if (!_ffmpeg_content->audio_stream()) {
-		return (AVSampleFormat) 0;
-	}
-
-	return audio_codec_context()->sample_fmt;
+	return static_cast<AVSampleFormat>(stream->stream(_format_context)->codecpar->format);
 }
 
+
 int
-FFmpegDecoder::bytes_per_audio_sample () const
+FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) const
 {
-	return av_get_bytes_per_sample (audio_sample_format ());
+	return av_get_bytes_per_sample (audio_sample_format (stream));
 }
 
+
 void
 FFmpegDecoder::seek (ContentTime time, bool accurate)
 {
-	VideoDecoder::seek (time, accurate);
-	AudioDecoder::seek (time, accurate);
-
+	Decoder::seek (time, accurate);
+
 	/* If we are doing an `accurate' seek, we need to use pre-roll, as
 	   we don't really know what the seek will give us.
 	*/
 
-	ContentTime pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
+	auto pre_roll = accurate ? ContentTime::from_seconds (2) : ContentTime (0);
 	time -= pre_roll;
 
 	/* XXX: it seems debatable whether PTS should be used here...
 	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
 	*/
-
-	ContentTime const u = time - _pts_offset;
-	int64_t s = u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base);
 
-	if (_ffmpeg_content->audio_stream ()) {
-		s = min (
-			s, int64_t (u.seconds() / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
-			);
+	optional<int> stream;
+
+	if (_video_stream) {
+		stream = _video_stream;
+	} else {
+		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
+		auto s = dynamic_pointer_cast<FFmpegAudioStream>(_ffmpeg_content->audio->stream());
+		if (s) {
+			stream = s->index (_format_context);
+		}
+	}
+
+	DCPOMATIC_ASSERT (stream);
+
+	auto u = time - _pts_offset;
+	if (u < ContentTime ()) {
+		u = ContentTime ();
 	}
+	av_seek_frame (
+		_format_context,
+		stream.get(),
+		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
+		AVSEEK_FLAG_BACKWARD
+		);
 
-	av_seek_frame (_format_context, _video_stream, s, 0);
+	{
+		/* Force re-creation of filter graphs to reset them and hence to make sure
+		   they don't have any pre-seek frames knocking about.
+		*/
+		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+		_filter_graphs.clear ();
+	}
 
-	avcodec_flush_buffers (video_codec_context());
-	if (audio_codec_context ()) {
-		avcodec_flush_buffers (audio_codec_context ());
+	if (video_codec_context ()) {
+		avcodec_flush_buffers (video_codec_context());
 	}
+
+	for (auto i: ffmpeg_content()->ffmpeg_audio_streams()) {
+		avcodec_flush_buffers (_codec_context[i->index(_format_context)]);
+	}
+
 	if (subtitle_codec_context ()) {
 		avcodec_flush_buffers (subtitle_codec_context ());
 	}
+
+	_have_current_subtitle = false;
+
+	for (auto& i: _next_time) {
+		i.second = boost::optional<ContentTime>();
+	}
 }
 
-void
-FFmpegDecoder::decode_audio_packet ()
+
+shared_ptr<FFmpegAudioStream>
+FFmpegDecoder::audio_stream_from_index (int index) const
 {
-	/* Audio packets can contain multiple frames, so we may have to call avcodec_decode_audio4
-	   several times.
-	*/
-
-	AVPacket copy_packet = _packet;
-
-	while (copy_packet.size > 0) {
+	/* XXX: inefficient */
+	auto streams = ffmpeg_content()->ffmpeg_audio_streams();
+	auto stream = streams.begin();
+	while (stream != streams.end() && !(*stream)->uses_index(_format_context, index)) {
+		++stream;
+	}
 
-		int frame_finished;
-		int const decode_result = avcodec_decode_audio4 (audio_codec_context(), _frame, &frame_finished, &copy_packet);
+	if (stream == streams.end ()) {
+		return {};
+	}
 
-		if (decode_result < 0) {
-			LOG_ERROR ("avcodec_decode_audio4 failed (%1)", decode_result);
-			return;
+	return *stream;
+}
+
+
+void
+FFmpegDecoder::process_audio_frame (shared_ptr<FFmpegAudioStream> stream)
+{
+	auto frame = audio_frame (stream);
+	auto data = deinterleave_audio (frame);
+
+	ContentTime ct;
+	if (frame->pts == AV_NOPTS_VALUE) {
+		/* In some streams we see not every frame coming through with a timestamp; for those
+		   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
+		   particularly noticeable with TrueHD streams (see #1111).
+		*/
+		if (_next_time[stream]) {
+			ct = *_next_time[stream];
 		}
+	} else {
+		ct = ContentTime::from_seconds (
+			frame->best_effort_timestamp *
+			av_q2d (stream->stream(_format_context)->time_base))
+			+ _pts_offset;
+	}
 
-		if (frame_finished) {
-			ContentTime const ct = ContentTime::from_seconds (
-				av_frame_get_best_effort_timestamp (_frame) *
-				av_q2d (_ffmpeg_content->audio_stream()->stream (_format_context)->time_base))
-				+ _pts_offset;
-
-			int const data_size = av_samples_get_buffer_size (
-				0, audio_codec_context()->channels, _frame->nb_samples, audio_sample_format (), 1
-				);
+	_next_time[stream] = ct + ContentTime::from_frames(data->frames(), stream->frame_rate());
+
+	if (ct < ContentTime()) {
+		/* Discard audio data that comes before time 0 */
+		auto const remove = min (int64_t(data->frames()), (-ct).frames_ceil(double(stream->frame_rate())));
+		data->move (data->frames() - remove, remove, 0);
+		data->set_frames (data->frames() - remove);
+		ct += ContentTime::from_frames (remove, stream->frame_rate());
+	}
 
-			audio (deinterleave_audio (_frame->data, data_size), ct);
+	if (ct < ContentTime()) {
+		LOG_WARNING (
+			"Crazy timestamp %1 for %2 samples in stream %3 (ts=%4 tb=%5, off=%6)",
+			to_string(ct),
+			data->frames(),
+			stream->id(),
+			frame->best_effort_timestamp,
+			av_q2d(stream->stream(_format_context)->time_base),
+			to_string(_pts_offset)
+			);
+	}
+
+	/* Give this data provided there is some, and its time is sane */
+	if (ct >= ContentTime() && data->frames() > 0) {
+		audio->emit (film(), stream, data, ct);
+	}
+}
+
+
+void
+FFmpegDecoder::decode_and_process_audio_packet (AVPacket* packet)
+{
+	auto stream = audio_stream_from_index (packet->stream_index);
+	if (!stream) {
+		return;
+	}
+
+	auto context = _codec_context[stream->index(_format_context)];
+	auto frame = audio_frame (stream);
+
+	int r = avcodec_send_packet (context, packet);
+	if (r < 0) {
+		LOG_WARNING("avcodec_send_packet returned %1 for an audio packet", r);
+	}
+	while (r >= 0) {
+		r = avcodec_receive_frame (context, frame);
+		if (r == AVERROR(EAGAIN)) {
+			/* More input is required */
+			return;
 		}
-
-		copy_packet.data += decode_result;
-		copy_packet.size -= decode_result;
+
+		/* We choose to be relaxed here about other errors; it seems that there may be valid
+		 * data to decode even if an error occurred.  #352 may be related (though this was
+		 * when we were using an old version of the FFmpeg API).
+		 */
+		process_audio_frame (stream);
 	}
 }
 
+
 bool
-FFmpegDecoder::decode_video_packet ()
+FFmpegDecoder::decode_and_process_video_packet (AVPacket* packet)
 {
-	int frame_finished;
-	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
+	DCPOMATIC_ASSERT (_video_stream);
+
+	auto context = video_codec_context();
+
+	int r = avcodec_send_packet (context, packet);
+	if (r < 0) {
+		LOG_WARNING("avcodec_send_packet returned %1 for a video packet", r);
+	}
+
+	r = avcodec_receive_frame (context, _video_frame);
+	if (r == AVERROR(EAGAIN) || r == AVERROR_EOF || (r < 0 && !packet)) {
+		/* More input is required, no more frames are coming, or we are flushing and there was
+		 * some error which we just want to ignore.
+ */ return false; + } else if (r < 0) { + throw DecodeError (N_("avcodec_receive_frame"), N_("FFmpeg::decode_and_process_video_packet"), r); } + /* We assume we'll only get one frame here, which I think is safe */ + boost::mutex::scoped_lock lm (_filter_graphs_mutex); - shared_ptr graph; - - list >::iterator i = _filter_graphs.begin(); - while (i != _filter_graphs.end() && !(*i)->can_process (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)) { + shared_ptr graph; + + auto i = _filter_graphs.begin(); + while (i != _filter_graphs.end() && !(*i)->can_process(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format)) { ++i; } if (i == _filter_graphs.end ()) { - graph.reset (new FilterGraph (_ffmpeg_content, dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format)); + dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000); + graph = make_shared(dcp::Size(_video_frame->width, _video_frame->height), (AVPixelFormat) _video_frame->format, vfr); + graph->setup (_ffmpeg_content->filters ()); _filter_graphs.push_back (graph); - LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format); + LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _video_frame->width, _video_frame->height, _video_frame->format); } else { graph = *i; } - list, int64_t> > images = graph->process (_frame); + auto images = graph->process (_video_frame); + + for (auto const& i: images) { + + auto image = i.first; - for (list, int64_t> >::iterator i = images.begin(); i != images.end(); ++i) { + if (i.second != AV_NOPTS_VALUE) { + double const pts = i.second * av_q2d(_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds(); - shared_ptr image = i->first; - - if (i->second != AV_NOPTS_VALUE) { - double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds (); - video ( - shared_ptr (new RawImageProxy (image, _video_content->film()->log())), - rint (pts * _ffmpeg_content->video_frame_rate ()) + video->emit ( + film(), + make_shared(image), + llrint(pts * _ffmpeg_content->active_video_frame_rate(film())) ); } else { - LOG_WARNING ("Dropping frame without PTS"); + LOG_WARNING_NC ("Dropping frame without PTS"); } } return true; } - + + void -FFmpegDecoder::decode_subtitle_packet () +FFmpegDecoder::decode_and_process_subtitle_packet (AVPacket* packet) { int got_subtitle; AVSubtitle sub; - if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, &_packet) < 0 || !got_subtitle) { + if (avcodec_decode_subtitle2 (subtitle_codec_context(), &sub, &got_subtitle, packet) < 0 || !got_subtitle) { return; } - /* Sometimes we get an empty AVSubtitle, which is used by some codecs to - indicate that the previous subtitle should stop. 
-	*/
+	auto sub_period = subtitle_period (packet, ffmpeg_content()->subtitle_stream()->stream(_format_context), sub);
+
+	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
+	if (_have_current_subtitle) {
+		if (_current_subtitle_to) {
+			only_text()->emit_stop (min(*_current_subtitle_to, sub_period.from + _pts_offset));
+		} else {
+			only_text()->emit_stop (sub_period.from + _pts_offset);
+		}
+		_have_current_subtitle = false;
+	}
+
 	if (sub.num_rects <= 0) {
-		image_subtitle (ContentTimePeriod (), shared_ptr<Image> (), dcpomatic::Rect<double> ());
+		/* Nothing new in this subtitle */
+		avsubtitle_free (&sub);
 		return;
-	} else if (sub.num_rects > 1) {
-		throw DecodeError (_("multi-part subtitles not yet supported"));
 	}
-
+
 	/* Subtitle PTS (within the source, not taking into account any of the
-	   source that we may have chopped off for the DCP)
+	   source that we may have chopped off for the DCP).
 	*/
-	ContentTimePeriod period = subtitle_period (sub) + _pts_offset;
+	ContentTime from;
+	from = sub_period.from + _pts_offset;
+	if (sub_period.to) {
+		_current_subtitle_to = *sub_period.to + _pts_offset;
+	} else {
+		_current_subtitle_to = optional<ContentTime>();
+		_have_current_subtitle = true;
+	}
 
-	AVSubtitleRect const * rect = sub.rects[0];
+	for (unsigned int i = 0; i < sub.num_rects; ++i) {
+		auto const rect = sub.rects[i];
+
+		switch (rect->type) {
+		case SUBTITLE_NONE:
+			break;
+		case SUBTITLE_BITMAP:
+			process_bitmap_subtitle (rect, from);
+			break;
+		case SUBTITLE_TEXT:
+			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
+			break;
+		case SUBTITLE_ASS:
+			process_ass_subtitle (rect->ass, from);
+			break;
+		}
+	}
 
-	if (rect->type != SUBTITLE_BITMAP) {
-		/* XXX */
-		// throw DecodeError (_("non-bitmap subtitles not yet supported"));
-		return;
+	if (_current_subtitle_to) {
+		only_text()->emit_stop (*_current_subtitle_to);
 	}
 
-	/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
-	   G, third B, fourth A.
+	avsubtitle_free (&sub);
+}
+
+
+void
+FFmpegDecoder::process_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
+{
+	/* Note BGRA is expressed little-endian, so the first byte in the word is B, second
+	   G, third R, fourth A.
 	*/
-	shared_ptr<Image> image (new Image (PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
+	auto image = make_shared<Image>(AV_PIX_FMT_BGRA, dcp::Size (rect->w, rect->h), Image::Alignment::PADDED);
 
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 	/* Start of the first line in the subtitle */
-	uint8_t* sub_p = rect->pict.data[0];
-	/* sub_p looks up into a BGRA palette which is here
+	auto sub_p = rect->pict.data[0];
+	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
 	   (i.e. first byte B, second G, third R, fourth A)
 	*/
-	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+	auto const palette = rect->pict.data[1];
+#else
+	/* Start of the first line in the subtitle */
+	auto sub_p = rect->data[0];
+	/* sub_p looks up into a BGRA palette which is at rect->data[1].
+	   (first byte B, second G, third R, fourth A)
+	*/
+	auto const* palette = rect->data[1];
+#endif
+	/* And the stream has a map of those palette colours to colours
+	   chosen by the user; create a `mapped' palette from those settings.
+	*/
+	auto colour_map = ffmpeg_content()->subtitle_stream()->colours();
+	vector<RGBA> mapped_palette (rect->nb_colors);
+	for (int i = 0; i < rect->nb_colors; ++i) {
+		RGBA c (palette[2], palette[1], palette[0], palette[3]);
+		auto j = colour_map.find (c);
+		if (j != colour_map.end ()) {
+			mapped_palette[i] = j->second;
+		} else {
+			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
+			   it is from a project that was created before this stuff was added.  Just use the
+			   colour straight from the original palette.
+			*/
+			mapped_palette[i] = c;
+		}
+		palette += 4;
+	}
+
 	/* Start of the output data */
-	uint32_t* out_p = (uint32_t *) image->data()[0];
+	auto out_p = image->data()[0];
 
 	for (int y = 0; y < rect->h; ++y) {
-		uint8_t* sub_line_p = sub_p;
-		uint32_t* out_line_p = out_p;
+		auto sub_line_p = sub_p;
+		auto out_line_p = out_p;
 		for (int x = 0; x < rect->w; ++x) {
-			uint32_t const p = palette[*sub_line_p++];
-			*out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000);
+			auto const p = mapped_palette[*sub_line_p++];
+			*out_line_p++ = p.b;
+			*out_line_p++ = p.g;
+			*out_line_p++ = p.r;
+			*out_line_p++ = p.a;
 		}
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 		sub_p += rect->pict.linesize[0];
-		out_p += image->stride()[0] / sizeof (uint32_t);
+#else
+		sub_p += rect->linesize[0];
+#endif
+		out_p += image->stride()[0];
 	}
 
-	dcp::Size const vs = _ffmpeg_content->video_size ();
-
-	image_subtitle (
-		period,
-		image,
-		dcpomatic::Rect<double> (
-			static_cast<double> (rect->x) / vs.width,
-			static_cast<double> (rect->y) / vs.height,
-			static_cast<double> (rect->w) / vs.width,
-			static_cast<double> (rect->h) / vs.height
-			)
+	int target_width = subtitle_codec_context()->width;
+	if (target_width == 0 && video_codec_context()) {
+		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
+		   know if it's supposed to mean something from FFmpeg's point of view.
+		*/
+		target_width = video_codec_context()->width;
+	}
+	int target_height = subtitle_codec_context()->height;
+	if (target_height == 0 && video_codec_context()) {
+		target_height = video_codec_context()->height;
+	}
+	DCPOMATIC_ASSERT (target_width);
+	DCPOMATIC_ASSERT (target_height);
+	dcpomatic::Rect<double> const scaled_rect (
+		static_cast<double>(rect->x) / target_width,
+		static_cast<double>(rect->y) / target_height,
+		static_cast<double>(rect->w) / target_width,
+		static_cast<double>(rect->h) / target_height
 		);
-
-	avsubtitle_free (&sub);
+
+	only_text()->emit_bitmap_start (from, image, scaled_rect);
 }
 
-list<ContentTimePeriod>
-FFmpegDecoder::subtitles_during (ContentTimePeriod p, bool starting) const
+
+void
+FFmpegDecoder::process_ass_subtitle (string ass, ContentTime from)
 {
-	return _ffmpeg_content->subtitles_during (p, starting);
+	/* We have no styles and no Format: line, so I'm assuming that FFmpeg
+	   produces a single format of Dialogue: lines...
+	*/
+
+	int commas = 0;
+	string text;
+	for (size_t i = 0; i < ass.length(); ++i) {
+		if (commas < 9 && ass[i] == ',') {
+			++commas;
+		} else if (commas == 9) {
+			text += ass[i];
+		}
+	}
+
+	if (text.empty ()) {
+		return;
+	}
+
+	sub::RawSubtitle base;
+	auto raw = sub::SSAReader::parse_line (
+		base,
+		text,
+		_ffmpeg_content->video->size().width,
+		_ffmpeg_content->video->size().height
+		);
+
+	for (auto const& i: sub::collect<vector<sub::Subtitle>>(raw)) {
+		only_text()->emit_plain_start (from, i);
+	}
}
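
Editor's note: the central change in this patch is the migration from FFmpeg's old one-shot decode calls (avcodec_decode_audio4()/avcodec_decode_video2()) to the send/receive API, where one packet may yield zero or more frames. The following is a minimal standalone sketch of that pattern, not code from the patch; the helper name decode_packet and the skip-on-error policy are illustrative only, though the policy mirrors the patch's choice to log and continue after a failed send.

extern "C" {
#include <libavcodec/avcodec.h>
}

/* Feed one packet (or nullptr to flush) and drain every frame it produces.
   Returns true while the codec may produce more frames given further input,
   false once a flush has fully drained the codec. */
static bool
decode_packet (AVCodecContext* context, AVPacket const* packet, AVFrame* frame)
{
	int r = avcodec_send_packet (context, packet);
	if (r < 0 && r != AVERROR_EOF) {
		/* Treat a failed send as non-fatal, as the patch does */
		return true;
	}

	while (true) {
		r = avcodec_receive_frame (context, frame);
		if (r == AVERROR(EAGAIN)) {
			return true;   /* more input is required */
		} else if (r == AVERROR_EOF) {
			return false;  /* flushed: no more frames will come */
		} else if (r < 0) {
			return true;   /* other errors: skip this frame */
		}
		/* ... hand `frame' on for processing here ... */
		av_frame_unref (frame);
	}
}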
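
For context on deinterleave_audio() above: each integer sample format is converted to per-channel floats by dividing by 2^(bits-1). A self-contained sketch of the interleaved S16 case follows; the function name and the std::vector buffers are mine (the real code fills dcpomatic's AudioBuffers), but the scaling and the L R L R ... sample ordering match the patch.

#include <cstdint>
#include <vector>

/* Convert interleaved signed 16-bit samples to per-channel floats in [-1, 1). */
static std::vector<std::vector<float>>
deinterleave_s16 (int16_t const* in, int channels, int frames)
{
	std::vector<std::vector<float>> out (channels, std::vector<float>(frames));
	for (int frame = 0; frame < frames; ++frame) {
		for (int channel = 0; channel < channels; ++channel) {
			/* Scale by 2^15 so that INT16_MIN maps to -1.0 */
			out[channel][frame] = static_cast<float>(*in++) / (1 << 15);
		}
	}
	return out;
}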
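
process_bitmap_subtitle() expands a palettised subtitle bitmap through a BGRA palette (optionally remapped by user-chosen colours) into a packed BGRA image. A simplified sketch of just the palette expansion, assuming the index rows are contiguous (no linesize padding) and with no user remapping; all names here are hypothetical:

#include <cstdint>
#include <vector>

/* Expand palette indices to packed BGRA bytes. palette_bgra holds 4 bytes
   (B, G, R, A) per palette entry, as in the AVSubtitleRect palette. */
static std::vector<uint8_t>
expand_palette (uint8_t const* indices, uint8_t const* palette_bgra, int width, int height)
{
	std::vector<uint8_t> out;
	out.reserve (width * height * 4);
	for (int i = 0; i < width * height; ++i) {
		uint8_t const* p = palette_bgra + indices[i] * 4;
		out.insert (out.end(), p, p + 4);  /* copy B, G, R, A */
	}
	return out;
}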
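
Finally, process_ass_subtitle() recovers the text field of an FFmpeg-supplied ASS Dialogue payload by skipping the first nine comma-separated fields (Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect), so that commas within the subtitle text itself are preserved. The same logic as a free function, with a name of my choosing:

#include <string>

/* Return everything after the ninth comma of an ASS Dialogue payload. */
static std::string
ass_dialogue_text (std::string const& ass)
{
	int commas = 0;
	std::string text;
	for (char c: ass) {
		if (commas < 9 && c == ',') {
			++commas;
		} else if (commas == 9) {
			text += c;
		}
	}
	return text;
}

/* e.g. ass_dialogue_text("0,0:00:01.00,0:00:02.00,Default,,0,0,0,,Hello, world")
   yields "Hello, world". */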