X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=af309cdbe4aa99715eca949e88b1cb934f3699a5;hb=a8364241532c0c4b064c30d6151f1a248a27e467;hp=a1d90b2ba99af04368b056bd84cede08a9790099;hpb=54d17e98a597334bf1ba2615e3eb6191088f606f;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index a1d90b2ba..af309cdbe 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -1,19 +1,20 @@
 /*
     Copyright (C) 2012-2016 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
@@ -27,15 +28,19 @@
 #include "util.h"
 #include "log.h"
 #include "ffmpeg_decoder.h"
+#include "subtitle_decoder.h"
 #include "ffmpeg_audio_stream.h"
 #include "ffmpeg_subtitle_stream.h"
 #include "video_filter_graph.h"
 #include "audio_buffers.h"
 #include "ffmpeg_content.h"
 #include "raw_image_proxy.h"
+#include "video_decoder.h"
 #include "film.h"
-#include "md5_digester.h"
+#include "audio_decoder.h"
 #include "compose.hpp"
+#include "subtitle_content.h"
+#include "audio_content.h"
 #include <dcp/subtitle_string.h>
 #include <sub/ssa_reader.h>
 #include <sub/subtitle.h>
@@ -65,20 +70,36 @@
 using std::list;
 using std::min;
 using std::pair;
 using std::max;
+using std::map;
 using boost::shared_ptr;
 using boost::is_any_of;
 using boost::split;
+using boost::optional;
+using boost::dynamic_pointer_cast;
 using dcp::Size;
 
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
-	: VideoDecoder (c)
-	, AudioDecoder (c, fast)
-	, SubtitleDecoder (c)
-	, FFmpeg (c)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
+	: FFmpeg (c)
 	, _log (log)
-	, _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video_frame_rate()))
+	, _have_current_subtitle (false)
 {
+	if (c->video) {
+		video.reset (new VideoDecoder (this, c, log));
+		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+		/* It doesn't matter what size or pixel format this is, it just needs to be black */
+		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
+		_black_image->make_black ();
+	} else {
+		_pts_offset = ContentTime ();
+	}
+
+	if (c->audio) {
+		audio.reset (new AudioDecoder (this, c->audio, log));
+	}
+	if (c->subtitle) {
+		subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
+	}
 }
 
 void
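The constructor rewrite above is the heart of this diff: FFmpegDecoder used to inherit from VideoDecoder, AudioDecoder and SubtitleDecoder, and it now owns one of each instead, creating them only when the content actually has that kind of stream. A minimal sketch of the pattern, using illustrative stand-in types rather than the real DCP-o-matic classes:

#include <boost/shared_ptr.hpp>
#include <iostream>

struct Content { bool has_video; bool has_audio; bool has_subtitle; };

struct VideoDecoder { void emit () { std::cout << "video frame\n"; } };
struct AudioDecoder { void emit () { std::cout << "audio block\n"; } };
struct SubtitleDecoder { void emit () { std::cout << "subtitle\n"; } };

class Decoder
{
public:
	explicit Decoder (Content const & c)
	{
		/* Create a sub-decoder only when the content has that kind of
		   stream; callers must test the pointer before use, which is
		   why pass() below checks `video && ...' and `if (audio)'.
		*/
		if (c.has_video) {
			video.reset (new VideoDecoder);
		}
		if (c.has_audio) {
			audio.reset (new AudioDecoder);
		}
		if (c.has_subtitle) {
			subtitle.reset (new SubtitleDecoder);
		}
	}

	boost::shared_ptr<VideoDecoder> video;
	boost::shared_ptr<AudioDecoder> audio;
	boost::shared_ptr<SubtitleDecoder> subtitle;
};

int main ()
{
	Content const c = { true, true, false };
	Decoder d (c);
	if (d.video) {
		d.video->emit ();
	}
	return 0;
}

Much of the rest of the diff follows mechanically: every call that used to go into a base class, such as video (...) or audio (...), becomes a call through the corresponding member, such as video->emit (...) or audio->emit (...).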
@@ -91,14 +112,45 @@ FFmpegDecoder::flush ()
 {
 	/* XXX: should we reset _packet.data and size after each *_decode_* call?
 	*/
 
-	while (decode_video_packet ()) {}
+	while (video && decode_video_packet ()) {}
+
+	if (audio) {
+		decode_audio_packet ();
+	}
 
-	decode_audio_packet ();
-	AudioDecoder::flush ();
+	/* Make sure all streams are the same length and round up to the next video frame */
+
+	FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
+	ContentTime full_length (_ffmpeg_content->full_length(), frc);
+	full_length = full_length.ceil (frc.source);
+	if (video) {
+		double const vfr = _ffmpeg_content->video_frame_rate().get();
+		Frame const f = full_length.frames_round (vfr);
+		Frame v = video->position().frames_round (vfr);
+		while (v < f) {
+			video->emit (shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
+			++v;
+		}
+	}
+
+	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
+		ContentTime a = audio->stream_position(i);
+		while (a < full_length) {
+			ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+			shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+			silence->make_silent ();
+			audio->emit (i, silence, a);
+			a += to_do;
+		}
+	}
+
+	if (audio) {
+		audio->flush ();
+	}
 }
 
 bool
-FFmpegDecoder::pass (PassReason reason, bool accurate)
+FFmpegDecoder::pass ()
 {
 	int r = av_read_frame (_format_context, &_packet);
 
@@ -111,7 +163,7 @@ FFmpegDecoder::pass (PassReason reason, bool accurate)
 		/* Maybe we should fail here, but for now we'll just finish off instead */
 		char buf[256];
 		av_strerror (r, buf, sizeof(buf));
-		LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
+		LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), &buf[0], r);
 	}
 
 	flush ();
@@ -121,15 +173,15 @@ FFmpegDecoder::pass (PassReason reason, bool accurate)
 	int const si = _packet.stream_index;
 	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;
 
-	if (si == _video_stream && !_ignore_video && (accurate || reason != PASS_REASON_SUBTITLE)) {
+	if (_video_stream && si == _video_stream.get() && !video->ignore()) {
 		decode_video_packet ();
-	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index (_format_context, si)) {
+	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !subtitle->ignore()) {
 		decode_subtitle_packet ();
-	} else if (accurate || reason != PASS_REASON_SUBTITLE) {
+	} else {
 		decode_audio_packet ();
 	}
 
-	av_free_packet (&_packet);
+	av_packet_unref (&_packet);
 	return false;
 }
 
@@ -206,7 +258,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 		int sample = 0;
 		int channel = 0;
 		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = static_cast<float>(*p++) / (1 << 31);
+			audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648;
 
 			++channel;
 			if (channel == stream->channels()) {
@@ -217,6 +269,17 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 	}
 	break;
 
+	case AV_SAMPLE_FMT_S32P:
+	{
+		int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+		for (int i = 0; i < stream->channels(); ++i) {
+			for (int j = 0; j < frames; ++j) {
+				audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648;
+			}
+		}
+	}
+	break;
+
 	case AV_SAMPLE_FMT_FLT:
 	{
 		float* p = reinterpret_cast<float*> (_frame->data[0]);
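The new AV_SAMPLE_FMT_S32P case above handles planar audio, where FFmpeg keeps one sample array per channel instead of interleaving channels in a single array, so no channel-stepping is needed. The divisor 2147483648 (2^31) maps the full int32 range onto [-1, 1); it also replaces the old `1 << 31', which shifts into the sign bit of an int and is undefined behaviour. A self-contained sketch of the conversion, with plain arrays standing in for the real AudioBuffers:

#include <cstdint>
#include <cstdio>

/* Convert planar signed-32-bit samples to float in [-1, 1), one plane
   per channel.  2147483648.0f is 2^31, the magnitude of INT32_MIN.
*/
void deinterleave_s32_planar (int32_t const * const * in, float** out, int channels, int frames)
{
	for (int i = 0; i < channels; ++i) {
		for (int j = 0; j < frames; ++j) {
			out[i][j] = static_cast<float> (in[i][j]) / 2147483648.0f;
		}
	}
}

int main ()
{
	int32_t const left[2] = { INT32_MAX, INT32_MIN };
	int32_t const * in[1] = { left };
	float out_left[2];
	float* out[1] = { out_left };
	deinterleave_s32_planar (in, out, 1, 2);
	std::printf ("%f %f\n", out[0][0], out[0][1]);  /* roughly 1.0 and exactly -1.0 */
	return 0;
}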
@@ -269,9 +332,7 @@ FFmpegDecoder::bytes_per_audio_sample (shared_ptr<FFmpegAudioStream> stream) con
 void
 FFmpegDecoder::seek (ContentTime time, bool accurate)
 {
-	VideoDecoder::seek (time, accurate);
-	AudioDecoder::seek (time, accurate);
-	SubtitleDecoder::seek (time, accurate);
+	Decoder::seek (time, accurate);
 
 	/* If we are doing an `accurate' seek, we need to use pre-roll, as we
 	   don't really know what the seek will give us.
@@ -284,19 +345,43 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
 	   http://www.mjbshaw.com/2012/04/seeking-in-ffmpeg-know-your-timestamp.html
 	*/
 
+	optional<int> stream;
+
+	if (_video_stream) {
+		stream = _video_stream;
+	} else {
+		shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
+		if (s) {
+			stream = s->index (_format_context);
+		}
+	}
+
+	DCPOMATIC_ASSERT (stream);
+
 	ContentTime u = time - _pts_offset;
 	if (u < ContentTime ()) {
 		u = ContentTime ();
 	}
-	av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), AVSEEK_FLAG_BACKWARD);
+	av_seek_frame (
+		_format_context,
+		stream.get(),
+		u.seconds() / av_q2d (_format_context->streams[stream.get()]->time_base),
+		AVSEEK_FLAG_BACKWARD
+		);
 
-	avcodec_flush_buffers (video_codec_context());
+	if (video_codec_context ()) {
+		avcodec_flush_buffers (video_codec_context());
+	}
 
-	/* XXX: should be flushing audio buffers? */
+	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, ffmpeg_content()->ffmpeg_audio_streams()) {
+		avcodec_flush_buffers (i->stream(_format_context)->codec);
+	}
 
 	if (subtitle_codec_context ()) {
 		avcodec_flush_buffers (subtitle_codec_context ());
 	}
+
+	_have_current_subtitle = false;
 }
 
 void
@@ -327,7 +412,7 @@ FFmpegDecoder::decode_audio_packet ()
 	if (decode_result < 0) {
 		/* avcodec_decode_audio4 can sometimes return an error even though it has decoded
 		   some valid data; for example dca_subframe_footer can return AVERROR_INVALIDDATA
-		   if it overreads the auxiliary data.  ffplay carries on if frame_finished is true,
+		   if it overreads the auxiliary data. ffplay carries on if frame_finished is true,
 		   even in the face of such an error, so I think we should too.
 
 		   Returning from the method here caused mantis #352.
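The rewritten seek() above no longer assumes a video stream exists: it seeks on the video stream if there is one, otherwise on the first FFmpeg audio stream, and afterwards flushes every audio codec rather than leaving the old `XXX: should be flushing audio buffers?' question open. The timestamp arithmetic is the subtle part: the target is shifted by _pts_offset, clamped at zero and expressed in the chosen stream's time base, and AVSEEK_FLAG_BACKWARD makes the demuxer land on a keyframe at or before the target. A sketch of just that arithmetic (the helper name is illustrative, not part of the diff):

extern "C" {
#include <libavutil/rational.h>
}
#include <cstdint>
#include <cstdio>

/* Express a seek target, given in seconds of content time, in a stream's
   time base, allowing for the PTS offset and never seeking below zero.
*/
int64_t seek_target (double seconds, double pts_offset, AVRational time_base)
{
	double u = seconds - pts_offset;
	if (u < 0) {
		u = 0;
	}
	/* av_q2d() gives the time base as seconds per tick */
	return static_cast<int64_t> (u / av_q2d (time_base));
}

int main ()
{
	AVRational const tb = { 1, 90000 };  /* the common 90 kHz MPEG time base */
	std::printf ("%lld\n", static_cast<long long> (seek_target (10.0, 0.04, tb)));  /* about 896400 ticks */
	return 0;
}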
@@ -351,13 +436,18 @@ FFmpegDecoder::decode_audio_packet ()
 		if (ct < ContentTime ()) {
 			/* Discard audio data that comes before time 0 */
 			Frame const remove = min (int64_t (data->frames()), (-ct).frames_ceil(double((*stream)->frame_rate ())));
-			data->move (remove, 0, data->frames() - remove);
+			data->move (data->frames() - remove, remove, 0);
 			data->set_frames (data->frames() - remove);
 			ct += ContentTime::from_frames (remove, (*stream)->frame_rate ());
 		}
 
-		if (data->frames() > 0) {
-			audio (*stream, data, ct);
+		if (ct < ContentTime()) {
+			LOG_WARNING ("Crazy timestamp %1", to_string (ct));
+		}
+
+		/* Give this data provided there is some, and its time is sane */
+		if (ct >= ContentTime() && data->frames() > 0) {
+			audio->emit (*stream, data, ct);
 		}
 
 	}
@@ -369,6 +459,8 @@ FFmpegDecoder::decode_audio_packet ()
 bool
 FFmpegDecoder::decode_video_packet ()
 {
+	DCPOMATIC_ASSERT (_video_stream);
+
 	int frame_finished;
 	if (avcodec_decode_video2 (video_codec_context(), _frame, &frame_finished, &_packet) < 0 || !frame_finished) {
 		return false;
@@ -399,10 +491,10 @@ FFmpegDecoder::decode_video_packet ()
 		shared_ptr<Image> image = i->first;
 
 		if (i->second != AV_NOPTS_VALUE) {
-			double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
-			video (
+			double const pts = i->second * av_q2d (_format_context->streams[_video_stream.get()]->time_base) + _pts_offset.seconds ();
+			video->emit (
 				shared_ptr<ImageProxy> (new RawImageProxy (image)),
-				llrint (pts * _ffmpeg_content->video_frame_rate ())
+				llrint (pts * _ffmpeg_content->active_video_frame_rate ())
 			);
 		} else {
 			LOG_WARNING_NC ("Dropping frame without PTS");
@@ -421,10 +513,18 @@ FFmpegDecoder::decode_subtitle_packet ()
 		return;
 	}
 
+	/* Stop any current subtitle, either at the time it was supposed to stop, or now if now is sooner */
+	if (_have_current_subtitle) {
+		if (_current_subtitle_to) {
+			subtitle->emit_stop (min (*_current_subtitle_to, subtitle_period(sub).from + _pts_offset));
+		} else {
+			subtitle->emit_stop (subtitle_period(sub).from + _pts_offset);
+		}
+		_have_current_subtitle = false;
+	}
+
 	if (sub.num_rects <= 0) {
-		/* Sometimes we get an empty AVSubtitle, which is used by some codecs to
-		   indicate that the previous subtitle should stop.  We can ignore it here.
-		*/
+		/* Nothing new in this subtitle */
 		return;
 	}
 
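The _have_current_subtitle / _current_subtitle_to pair introduced above replaces the old lookup of a subtitle's `to' time in the stream's records with a small state machine: each arriving packet first stops whatever subtitle is currently showing, at its recorded `to' time or at the new packet's start, whichever is sooner, and an empty packet simply stops it without starting anything new. A sketch of that rule, with plain doubles standing in for ContentTime:

#include <boost/optional.hpp>
#include <algorithm>
#include <cassert>

struct SubtitleState
{
	SubtitleState () : have_current (false) {}

	/* Called for each arriving subtitle packet: returns the time at
	   which the previous subtitle, if any, should be stopped.
	*/
	boost::optional<double> on_packet (double next_from)
	{
		boost::optional<double> stop;
		if (have_current) {
			/* Stop at the recorded `to' time, or now if now is sooner */
			stop = current_to ? std::min (current_to.get(), next_from) : next_from;
			have_current = false;
		}
		return stop;
	}

	bool have_current;
	boost::optional<double> current_to;
};

int main ()
{
	SubtitleState s;
	s.have_current = true;
	s.current_to = 4.0;
	/* A new subtitle arrives at t=2, before the old one's `to' at t=4,
	   so the old one must stop at t=2.
	*/
	assert (s.on_packet (2.0).get() == 2.0);
	return 0;
}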
@@ -432,14 +532,11 @@ FFmpegDecoder::decode_subtitle_packet ()
 	   source that we may have chopped off for the DCP).
 	*/
 	FFmpegSubtitlePeriod sub_period = subtitle_period (sub);
-	ContentTimePeriod period;
-	period.from = sub_period.from + _pts_offset;
+	ContentTime from;
+	from = sub_period.from + _pts_offset;
+	_have_current_subtitle = true;
 	if (sub_period.to) {
-		/* We already know the subtitle period `to' time */
-		period.to = sub_period.to.get() + _pts_offset;
-	} else {
-		/* We have to look up the `to' time in the stream's records */
-		period.to = ffmpeg_content()->subtitle_stream()->find_subtitle_to (subtitle_id (sub));
+		_current_subtitle_to = *sub_period.to + _pts_offset;
 	}
 
 	for (unsigned int i = 0; i < sub.num_rects; ++i) {
@@ -449,13 +546,13 @@
 		case SUBTITLE_NONE:
 			break;
 		case SUBTITLE_BITMAP:
-			decode_bitmap_subtitle (rect, period);
+			decode_bitmap_subtitle (rect, from);
 			break;
 		case SUBTITLE_TEXT:
 			cout << "XXX: SUBTITLE_TEXT " << rect->text << "\n";
 			break;
 		case SUBTITLE_ASS:
-			decode_ass_subtitle (rect->ass, period);
+			decode_ass_subtitle (rect->ass, from);
 			break;
 		}
 	}
 
@@ -463,32 +560,48 @@
 	avsubtitle_free (&sub);
 }
 
-list<ContentTimePeriod>
-FFmpegDecoder::image_subtitles_during (ContentTimePeriod p, bool starting) const
-{
-	return _ffmpeg_content->image_subtitles_during (p, starting);
-}
-
-list<ContentTimePeriod>
-FFmpegDecoder::text_subtitles_during (ContentTimePeriod p, bool starting) const
-{
-	return _ffmpeg_content->text_subtitles_during (p, starting);
-}
-
 void
-FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimePeriod period)
+FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime from)
 {
 	/* Note RGBA is expressed little-endian, so the first byte in the word is R, second
 	   G, third B, fourth A.
 	*/
 
 	shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
 
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 	/* Start of the first line in the subtitle */
 	uint8_t* sub_p = rect->pict.data[0];
 	/* sub_p looks up into a BGRA palette which is here
 	   (i.e. first byte B, second G, third R, fourth A)
 	*/
 	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+#else
+	/* Start of the first line in the subtitle */
+	uint8_t* sub_p = rect->data[0];
+	/* sub_p looks up into a BGRA palette which is here
+	   (i.e. first byte B, second G, third R, fourth A)
+	*/
+	uint32_t const * palette = (uint32_t *) rect->data[1];
+#endif
+	/* And the stream has a map of those palette colours to colours
+	   chosen by the user; create a `mapped' palette from those settings.
+	*/
+	map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
+	vector<RGBA> mapped_palette (rect->nb_colors);
+	for (int i = 0; i < rect->nb_colors; ++i) {
+		RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+		map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
+		if (j != colour_map.end ()) {
+			mapped_palette[i] = j->second;
+		} else {
+			/* This colour was not found in the FFmpegSubtitleStream's colour map; probably because
+			   it is from a project that was created before this stuff was added.  Just use the
+			   colour straight from the original palette.
+			*/
+			mapped_palette[i] = c;
+		}
+	}
+
 	/* Start of the output data */
 	uint32_t* out_p = (uint32_t *) image->data()[0];
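The palette handling added above reads each BGRA palette word, looks it up in the user's colour map from FFmpegSubtitleStream::colours(), and falls back to the original colour for entries the map does not know about (for example, projects created before the colour map existed). A compact, compilable version of that lookup, with a stand-in RGBA struct in place of DCP-o-matic's own:

#include <stdint.h>
#include <map>
#include <vector>

struct RGBA
{
	RGBA (uint8_t r_, uint8_t g_, uint8_t b_, uint8_t a_) : r (r_), g (g_), b (b_), a (a_) {}
	uint8_t r, g, b, a;

	bool operator< (RGBA const & o) const
	{
		if (r != o.r) return r < o.r;
		if (g != o.g) return g < o.g;
		if (b != o.b) return b < o.b;
		return a < o.a;
	}
};

/* Build a palette of user-chosen colours, falling back to the original
   colour where the map has no entry.  Palette words are BGRA, i.e. blue
   in the low byte.
*/
std::vector<RGBA> map_palette (uint32_t const * palette, int n, std::map<RGBA, RGBA> const & colour_map)
{
	std::vector<RGBA> mapped;
	mapped.reserve (n);
	for (int i = 0; i < n; ++i) {
		RGBA const c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
		std::map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
		mapped.push_back (j != colour_map.end() ? j->second : c);
	}
	return mapped;
}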
+ */ + mapped_palette[i] = c; + } + } + /* Start of the output data */ uint32_t* out_p = (uint32_t *) image->data()[0]; @@ -496,26 +609,32 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP uint8_t* sub_line_p = sub_p; uint32_t* out_line_p = out_p; for (int x = 0; x < rect->w; ++x) { - uint32_t const p = palette[*sub_line_p++]; - *out_line_p++ = ((p & 0xff) << 16) | (p & 0xff00) | ((p & 0xff0000) >> 16) | (p & 0xff000000); + RGBA const p = mapped_palette[*sub_line_p++]; + /* XXX: this seems to be wrong to me (isn't the output image RGBA?) but it looks right on screen */ + *out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b; } +#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT sub_p += rect->pict.linesize[0]; +#else + sub_p += rect->linesize[0]; +#endif out_p += image->stride()[0] / sizeof (uint32_t); } - dcp::Size const vs = _ffmpeg_content->video_size (); + int const target_width = subtitle_codec_context()->width; + int const target_height = subtitle_codec_context()->height; dcpomatic::Rect const scaled_rect ( - static_cast (rect->x) / vs.width, - static_cast (rect->y) / vs.height, - static_cast (rect->w) / vs.width, - static_cast (rect->h) / vs.height + static_cast (rect->x) / target_width, + static_cast (rect->y) / target_height, + static_cast (rect->w) / target_width, + static_cast (rect->h) / target_height ); - image_subtitle (period, image, scaled_rect); + subtitle->emit_image_start (from, image, scaled_rect); } void -FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period) +FFmpegDecoder::decode_ass_subtitle (string ass, ContentTime from) { /* We have no styles and no Format: line, so I'm assuming that FFmpeg produces a single format of Dialogue: lines... @@ -528,52 +647,14 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period) } sub::RawSubtitle base; - list raw = sub::SSAReader::parse_line (base, bits[9]); - list subs = sub::collect > (raw); - - /* XXX: lots of this is copied from TextSubtitle; there should probably be some sharing */ - - /* Highest line index in this subtitle */ - int highest = 0; - BOOST_FOREACH (sub::Subtitle i, subs) { - BOOST_FOREACH (sub::Line j, i.lines) { - DCPOMATIC_ASSERT (j.vertical_position.reference && j.vertical_position.reference.get() == sub::TOP_OF_SUBTITLE); - DCPOMATIC_ASSERT (j.vertical_position.line); - highest = max (highest, j.vertical_position.line.get()); - } - } + list raw = sub::SSAReader::parse_line ( + base, + bits[9], + _ffmpeg_content->video->size().width, + _ffmpeg_content->video->size().height + ); - list ss; - - BOOST_FOREACH (sub::Subtitle i, sub::collect > (sub::SSAReader::parse_line (base, bits[9]))) { - BOOST_FOREACH (sub::Line j, i.lines) { - BOOST_FOREACH (sub::Block k, j.blocks) { - ss.push_back ( - dcp::SubtitleString ( - boost::optional (), - k.italic, - dcp::Colour (255, 255, 255), - 60, - 1, - dcp::Time (i.from.seconds(), 1000), - dcp::Time (i.to.seconds(), 1000), - 0, - dcp::HALIGN_CENTER, - /* This 1.015 is an arbitrary value to lift the bottom sub off the bottom - of the screen a bit to a pleasing degree. - */ - 1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22), - dcp::VALIGN_TOP, - k.text, - static_cast (0), - dcp::Colour (255, 255, 255), - dcp::Time (), - dcp::Time () - ) - ); - } - } + BOOST_FOREACH (sub::Subtitle const & i, sub::collect > (raw)) { + subtitle->emit_text_start (from, i); } - - text_subtitle (period, ss); }