X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=ae4ab237f344ce8c8fd39613d0589b21475dbec6;hp=ea41acf23fc9be3e969ac2c9fc9d86859bd9ad8a;hb=dd9be86db6cde0afa5da0d1d1ac43b42e05dca26;hpb=7346d89cebb30eb593e5d806bc9296d06532e128

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index ea41acf23..ae4ab237f 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington
+    Copyright (C) 2012-2018 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -27,8 +27,9 @@
 #include "image.h"
 #include "util.h"
 #include "log.h"
+#include "dcpomatic_log.h"
 #include "ffmpeg_decoder.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
 #include "ffmpeg_audio_stream.h"
 #include "ffmpeg_subtitle_stream.h"
 #include "video_filter_graph.h"
@@ -39,8 +40,9 @@
 #include "film.h"
 #include "audio_decoder.h"
 #include "compose.hpp"
-#include "subtitle_content.h"
+#include "text_content.h"
 #include "audio_content.h"
+#include "frame_interval_checker.h"
 #include 
 #include 
 #include 
@@ -58,11 +60,6 @@ extern "C" {
 
 #include "i18n.h"
 
-#define LOG_GENERAL(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_ERROR(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
-#define LOG_WARNING_NC(...) _log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
-#define LOG_WARNING(...) _log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
-
 using std::cout;
 using std::string;
 using std::vector;
@@ -71,21 +68,22 @@ using std::min;
 using std::pair;
 using std::max;
 using std::map;
-using boost::shared_ptr;
+using std::shared_ptr;
 using boost::is_any_of;
 using boost::split;
 using boost::optional;
-using boost::dynamic_pointer_cast;
+using std::dynamic_pointer_cast;
 using dcp::Size;
+using namespace dcpomatic;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
 	: FFmpeg (c)
-	, _log (log)
+	, Decoder (film)
 	, _have_current_subtitle (false)
 {
-	if (c->video) {
-		video.reset (new VideoDecoder (this, c, log));
-		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+	if (c->video && c->video->use()) {
+		video.reset (new VideoDecoder (this, c));
+		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
 		/* It doesn't matter what size or pixel format this is, it just needs to be black */
 		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
 		_black_image->make_black ();
@@ -94,12 +92,12 @@
 	}
 
 	if (c->audio) {
-		audio.reset (new AudioDecoder (this, c->audio, log, fast));
+		audio.reset (new AudioDecoder (this, c->audio, fast));
 	}
 
-	if (c->subtitle) {
+	if (c->only_text()) {
 		/* XXX: this time here should be the time of the first subtitle, not 0 */
-		subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
+		text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, c->only_text(), ContentTime())));
 	}
 
 	_next_time.resize (_format_context->nb_streams);
@@ -115,7 +113,7 @@ FFmpegDecoder::flush ()
 	/* XXX: should we reset _packet.data and size after each
 	   *_decode_* call?
 	*/
-	while (video && decode_video_packet ()) {}
+	while (video && decode_video_packet()) {}
 
 	if (audio) {
 		decode_audio_packet ();
@@ -123,21 +121,21 @@
 
 	/* Make sure all streams are the same length and round up to the next video frame */
 
-	FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
-	ContentTime full_length (_ffmpeg_content->full_length(), frc);
+	FrameRateChange const frc = film()->active_frame_rate_change(_ffmpeg_content->position());
+	ContentTime full_length (_ffmpeg_content->full_length(film()), frc);
 	full_length = full_length.ceil (frc.source);
 	if (video) {
 		double const vfr = _ffmpeg_content->video_frame_rate().get();
 		Frame const f = full_length.frames_round (vfr);
-		Frame v = video->position().frames_round (vfr) + 1;
+		Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
 		while (v < f) {
-			video->emit (shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
+			video->emit (film(), shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
 			++v;
 		}
 	}
 
 	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
-		ContentTime a = audio->stream_position(i);
+		ContentTime a = audio->stream_position(film(), i);
 		/* Unfortunately if a is 0 that really means that we don't know the stream position since
 		   there has been no data on it since the last seek.  In this case we'll just do nothing
 		   here.  I'm not sure if that's the right idea.
@@ -147,7 +145,7 @@ FFmpegDecoder::flush ()
 			ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
 			shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
 			silence->make_silent ();
-			audio->emit (i, silence, a);
+			audio->emit (film(), i, silence, a, true);
 			a += to_do;
 		}
 	}
@@ -182,9 +180,9 @@ FFmpegDecoder::pass ()
 	int const si = _packet.stream_index;
 	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
 
-	if (_video_stream && si == _video_stream.get() && !video->ignore()) {
+	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
 		decode_video_packet ();
-	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !subtitle->ignore()) {
+	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
 		decode_subtitle_packet ();
 	} else {
 		decode_audio_packet ();
@@ -202,9 +200,14 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 {
 	DCPOMATIC_ASSERT (bytes_per_audio_sample (stream));
 
+DCPOMATIC_DISABLE_WARNINGS
 	int const size = av_samples_get_buffer_size (
 		0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
 		);
+DCPOMATIC_ENABLE_WARNINGS
+
+	/* XXX: can't we just use _frame->nb_samples directly here? */
+	/* XXX: can't we use swr_convert() to do the format conversion? */
 
 	/* Deinterleave and convert to float */
@@ -311,6 +314,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 	case AV_SAMPLE_FMT_FLTP:
 	{
 		float** p = reinterpret_cast<float**> (_frame->data);
+		DCPOMATIC_ASSERT (_frame->channels <= channels);
 		/* Sometimes there aren't as many channels in the _frame as in the stream */
 		for (int i = 0; i < _frame->channels; ++i) {
 			memcpy (data[i], p[i], frames * sizeof(float));
@@ -331,7 +335,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 AVSampleFormat
 FFmpegDecoder::audio_sample_format (shared_ptr<FFmpegAudioStream> stream) const
 {
+DCPOMATIC_DISABLE_WARNINGS
 	return stream->stream (_format_context)->codec->sample_fmt;
+DCPOMATIC_ENABLE_WARNINGS
 }
 
 int
@@ -361,6 +367,7 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
 	if (_video_stream) {
 		stream = _video_stream;
 	} else {
+		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
 		shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
 		if (s) {
 			stream = s->index (_format_context);
@@ -380,19 +387,33 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
 		AVSEEK_FLAG_BACKWARD
 		);
 
+	{
+		/* Force re-creation of filter graphs to reset them and hence to make sure
+		   they don't have any pre-seek frames knocking about.
+		*/
+		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+		_filter_graphs.clear ();
+	}
+
 	if (video_codec_context ()) {
 		avcodec_flush_buffers (video_codec_context());
 	}
 
+DCPOMATIC_DISABLE_WARNINGS
 	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
 		avcodec_flush_buffers (i->stream(_format_context)->codec);
 	}
+DCPOMATIC_ENABLE_WARNINGS
 
 	if (subtitle_codec_context ()) {
 		avcodec_flush_buffers (subtitle_codec_context ());
 	}
 
 	_have_current_subtitle = false;
+
+	BOOST_FOREACH (optional<ContentTime>& i, _next_time) {
+		i = optional<ContentTime>();
+	}
 }
 
 void
@@ -417,6 +438,7 @@ FFmpegDecoder::decode_audio_packet ()
 		return;
 	}
 
+DCPOMATIC_DISABLE_WARNINGS
 	while (copy_packet.size > 0) {
 
 		int frame_finished;
@@ -441,12 +463,14 @@
 			shared_ptr<AudioBuffers> data = deinterleave_audio (*stream);
 
 			ContentTime ct;
-			if (_frame->pts == AV_NOPTS_VALUE && _next_time[stream_index]) {
+			if (_frame->pts == AV_NOPTS_VALUE) {
 				/* In some streams we see not every frame coming through with a timestamp; for those
 				   that have AV_NOPTS_VALUE we need to work out the timestamp ourselves.  This is
 				   particularly noticeable with TrueHD streams (see #1111).
 				*/
-				ct = *_next_time[stream_index];
+				if (_next_time[stream_index]) {
+					ct = *_next_time[stream_index];
+				}
 			} else {
 				ct = ContentTime::from_seconds (
 					av_frame_get_best_effort_timestamp (_frame) *
@@ -476,10 +500,11 @@ FFmpegDecoder::decode_audio_packet ()
 					to_string(_pts_offset)
 					);
 			}
+DCPOMATIC_ENABLE_WARNINGS
 
 			/* Give this data provided there is some, and its time is sane */
 			if (ct >= ContentTime() && data->frames() > 0) {
-				audio->emit (*stream, data, ct);
+				audio->emit (film(), *stream, data, ct);
 			}
 		}
@@ -494,9 +519,11 @@ FFmpegDecoder::decode_video_packet ()
 	DCPOMATIC_ASSERT (_video_stream);
 
 	int frame_finished;
+DCPOMATIC_DISABLE_WARNINGS
 	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
 		return false;
 	}
+DCPOMATIC_ENABLE_WARNINGS
 
 	boost::mutex::scoped_lock lm (_filter_graphs_mutex);
 
@@ -508,7 +535,8 @@ FFmpegDecoder::decode_video_packet ()
 	}
 
 	if (i == _filter_graphs.end ()) {
-		graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format));
+		dcp::Fraction vfr (lrint(_ffmpeg_content->video_frame_rate().get() * 1000), 1000);
+		graph.reset (new VideoFilterGraph (dcp::Size (_frame->width, _frame->height), (AVPixelFormat) _frame->format, vfr));
 		graph->setup (_ffmpeg_content->filters ());
 		_filter_graphs.push_back (graph);
 		LOG_GENERAL (N_("New graph for %1x%2, pixel format %3"), _frame->width, _frame->height, _frame->format);
@@ -524,9 +552,11 @@
 		if (i->second != AV_NOPTS_VALUE) {
 			double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
+
 			video->emit (
+				film(),
 				shared_ptr<ImageProxy> (new RawImageProxy (image)),
-				llrint (pts * _ffmpeg_content->active_video_frame_rate ())
+				llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
 				);
 		} else {
 			LOG_WARNING_NC ("Dropping frame without PTS");
 		}
@@ -548,9 +578,9 @@ FFmpegDecoder::decode_subtitle_packet ()
 	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
 	if (_have_current_subtitle) {
 		if (_current_subtitle_to) {
-			subtitle->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
+			only_text()->emit_stop (min(*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
 		} else {
-			subtitle->emit_stop (subtitle_period(sub).from + _pts_offset);
+			only_text()->emit_stop (subtitle_period(sub).from + _pts_offset);
 		}
 		_have_current_subtitle = false;
 	}
@@ -566,11 +596,11 @@
 	FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
 	ContentTime from;
 	from = sub_period.from + _pts_offset;
-	_have_current_subtitle = true;
 	if (sub_period.to) {
 		_current_subtitle_to = *sub_period.to + _pts_offset;
 	} else {
 		_current_subtitle_to = optional<ContentTime>();
+		_have_current_subtitle = true;
 	}
 
 	for (unsigned int i = 0; i < sub.num_rects; ++i) {
@@ -592,7 +622,7 @@
 	}
 
 	if (_current_subtitle_to) {
-		subtitle->emit_stop (*_current_subtitle_to);
+		only_text()->emit_stop (*_current_subtitle_to);
 	}
 
 	avsubtitle_free (&sub);
@@ -609,17 +639,17 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 	/* Start of the first line in the subtitle */
 	uint8_t* sub_p = rect->pict.data[0];
-	/* sub_p looks up into a BGRA palette which is here
+	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
 	   (i.e. first byte B, second G, third R, fourth A)
 	*/
-	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+	uint8_t const * palette = rect->pict.data[1];
 #else
 	/* Start of the first line in the subtitle */
 	uint8_t* sub_p = rect->data[0];
-	/* sub_p looks up into a BGRA palette which is here
-	   (i.e. first byte B, second G, third R, fourth A)
+	/* sub_p looks up into a BGRA palette which is at rect->data[1].
+	   (first byte B, second G, third R, fourth A)
 	*/
-	uint32_t const * palette = (uint32_t *) rect->data[1];
+	uint8_t const * palette = rect->data[1];
 #endif
 	/* And the stream has a map of those palette colours to colours chosen by the user;
 	   created a `mapped' palette from those settings. */
@@ -627,7 +657,7 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
 	map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
 	vector<RGBA> mapped_palette (rect->nb_colors);
 	for (int i = 0; i < rect->nb_colors; ++i) {
-		RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+		RGBA c (palette[2], palette[1], palette[0], palette[3]);
 		map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
 		if (j != colour_map.end ()) {
 			mapped_palette[i] = j->second;
@@ -638,29 +668,43 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
 			*/
 			mapped_palette[i] = c;
 		}
+		palette += 4;
 	}
 
 	/* Start of the output data */
-	uint32_t* out_p = (uint32_t *) image->data()[0];
+	uint8_t* out_p = image->data()[0];
 	for (int y = 0; y < rect->h; ++y) {
 		uint8_t* sub_line_p = sub_p;
-		uint32_t* out_line_p = out_p;
+		uint8_t* out_line_p = out_p;
 		for (int x = 0; x < rect->w; ++x) {
 			RGBA const p = mapped_palette[*sub_line_p++];
-			/* XXX: this seems to be wrong to me (isn't the output image BGRA?) but it looks right on screen */
-			*out_line_p++ = (p.a << 24) | (p.b << 16) | (p.g << 8) | p.r;
+			*out_line_p++ = p.b;
+			*out_line_p++ = p.g;
+			*out_line_p++ = p.r;
+			*out_line_p++ = p.a;
		}
 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 		sub_p += rect->pict.linesize[0];
 #else
 		sub_p += rect->linesize[0];
 #endif
-		out_p += image->stride()[0] / sizeof (uint32_t);
+		out_p += image->stride()[0];
 	}
 
-	int const target_width = subtitle_codec_context()->width;
-	int const target_height = subtitle_codec_context()->height;
+	int target_width = subtitle_codec_context()->width;
+	if (target_width == 0 && video_codec_context()) {
+		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
+		   know if it's supposed to mean something from FFmpeg's point of view.
+		*/
+		target_width = video_codec_context()->width;
+	}
+	int target_height = subtitle_codec_context()->height;
+	if (target_height == 0 && video_codec_context()) {
+		target_height = video_codec_context()->height;
+	}
+	DCPOMATIC_ASSERT (target_width);
+	DCPOMATIC_ASSERT (target_height);
 	dcpomatic::Rect<double> const scaled_rect (
 		static_cast<double> (rect->x) / target_width,
 		static_cast<double> (rect->y) / target_height,
@@ -668,7 +712,7 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
 		static_cast<double> (rect->h) / target_height
 		);
 
-	subtitle->emit_image_start (from, image, scaled_rect);
+	only_text()->emit_bitmap_start (from, image, scaled_rect);
 }
 
 void
@@ -678,21 +722,29 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from)
 	   produces a single format of Dialogue: lines...
 	*/
 
-	vector<string> bits;
-	split (bits, ass, is_any_of (","));
-	if (bits.size() < 10) {
+	int commas = 0;
+	string text;
+	for (size_t i = 0; i < ass.length(); ++i) {
+		if (commas < 9 && ass[i] == ',') {
+			++commas;
+		} else if (commas == 9) {
+			text += ass[i];
+		}
+	}
+
+	if (text.empty ()) {
 		return;
 	}
 
 	sub::RawSubtitle base;
 	list<sub::RawSubtitle> raw = sub::SSAReader::parse_line (
 		base,
-		bits[9],
+		text,
 		_ffmpeg_content->video->size().width,
 		_ffmpeg_content->video->size().height
 		);
 
 	BOOST_FOREACH (sub::Subtitle const & i, sub::collect<list<sub::Subtitle> > (raw)) {
-		subtitle->emit_text_start (from, i);
+		only_text()->emit_plain_start (from, i);
 	}
 }