X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=cfaf0361b9ab79da14f4f4ae841a5bd8a243cf9d;hb=8dabe7c3d815ff95c2fb3edc3cfce62434711fc4;hp=fbc0ee416b77dcb74db59aba773265a0fddab750;hpb=84012cdd64f451891febd36154b7226ea21a899b;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index fbc0ee416..cfaf0361b 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -27,6 +27,7 @@
 #include "image.h"
 #include "util.h"
 #include "log.h"
+#include "dcpomatic_log.h"
 #include "ffmpeg_decoder.h"
 #include "text_decoder.h"
 #include "ffmpeg_audio_stream.h"
@@ -41,6 +42,7 @@
 #include "compose.hpp"
 #include "text_content.h"
 #include "audio_content.h"
+#include "frame_interval_checker.h"
 #include <...>
 #include <...>
 #include <...>
@@ -58,11 +60,6 @@ extern "C" {

 #include "i18n.h"

-#define LOG_GENERAL(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
-#define LOG_ERROR(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_ERROR);
-#define LOG_WARNING_NC(...) dcpomatic_log->log (__VA_ARGS__, LogEntry::TYPE_WARNING);
-#define LOG_WARNING(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_WARNING);
-
 using std::cout;
 using std::string;
 using std::vector;
@@ -77,13 +74,14 @@ using boost::split;
 using boost::optional;
 using boost::dynamic_pointer_cast;
 using dcp::Size;
+using namespace dcpomatic;

 FFmpegDecoder::FFmpegDecoder (shared_ptr<const Film> film, shared_ptr<const FFmpegContent> c, bool fast)
	: FFmpeg (c)
	, Decoder (film)
	, _have_current_subtitle (false)
 {
-	if (c->video) {
+	if (c->video && c->video->use()) {
		video.reset (new VideoDecoder (this, c));
		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate(film));
		/* It doesn't matter what size or pixel format this is, it just needs to be black */
@@ -129,7 +127,7 @@
	if (video) {
		double const vfr = _ffmpeg_content->video_frame_rate().get();
		Frame const f = full_length.frames_round (vfr);
-		Frame v = video->position(film()).frames_round(vfr) + 1;
+		Frame v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
		while (v < f) {
			video->emit (film(), shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
			++v;
@@ -161,6 +159,12 @@
 bool
 FFmpegDecoder::pass ()
 {
+#ifdef DCPOMATIC_VARIANT_SWAROOP
+	if (_ffmpeg_content->encrypted() && !_ffmpeg_content->kdm()) {
+		return true;
+	}
+#endif
+
	int r = av_read_frame (_format_context, &_packet);

	/* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
@@ -182,7 +186,7 @@
	int const si = _packet.stream_index;
	shared_ptr<const FFmpegContent> fc = _ffmpeg_content;

-	if (_video_stream && si == _video_stream.get() && !video->ignore()) {
+	if (_video_stream && si == _video_stream.get() && video && !video->ignore()) {
		decode_video_packet ();
	} else if (fc->subtitle_stream() && fc->subtitle_stream()->uses_index(_format_context, si) && !only_text()->ignore()) {
		decode_subtitle_packet ();
@@ -206,6 +210,9 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
		0, stream->stream(_format_context)->codec->channels, _frame->nb_samples, audio_sample_format (stream), 1
		);

+	/* XXX: can't we just use _frame->nb_samples directly here? */
+	/* XXX: can't we use swr_convert() to do the format conversion? */
+
	/* Deinterleave and convert to float */

	/* total_samples and frames will be rounded down here, so if there are stray samples at the end
@@ -311,6 +318,7 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
	case AV_SAMPLE_FMT_FLTP:
	{
		float** p = reinterpret_cast<float**> (_frame->data);
+		DCPOMATIC_ASSERT (_frame->channels <= channels);
		/* Sometimes there aren't as many channels in the _frame as in the stream */
		for (int i = 0; i < _frame->channels; ++i) {
			memcpy (data[i], p[i], frames * sizeof(float));
@@ -361,6 +369,7 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
	if (_video_stream) {
		stream = _video_stream;
	} else {
+		DCPOMATIC_ASSERT (_ffmpeg_content->audio);
		shared_ptr<FFmpegAudioStream> s = dynamic_pointer_cast<FFmpegAudioStream> (_ffmpeg_content->audio->stream ());
		if (s) {
			stream = s->index (_format_context);
@@ -380,6 +389,14 @@ FFmpegDecoder::seek (ContentTime time, bool accurate)
		AVSEEK_FLAG_BACKWARD
		);

+	{
+		/* Force re-creation of filter graphs to reset them and hence to make sure
+		   they don't have any pre-seek frames knocking about.
+		*/
+		boost::mutex::scoped_lock lm (_filter_graphs_mutex);
+		_filter_graphs.clear ();
+	}
+
	if (video_codec_context ()) {
		avcodec_flush_buffers (video_codec_context());
	}
@@ -612,17 +629,17 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->pict.data[0];
-	/* sub_p looks up into a BGRA palette which is here
+	/* sub_p looks up into a BGRA palette which is at rect->pict.data[1];
	   (i.e. first byte B, second G, third R, fourth A)
	*/
-	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+	uint8_t const * palette = rect->pict.data[1];
 #else
	/* Start of the first line in the subtitle */
	uint8_t* sub_p = rect->data[0];
-	/* sub_p looks up into a BGRA palette which is here
-	   (i.e. first byte B, second G, third R, fourth A)
+	/* sub_p looks up into a BGRA palette which is at rect->data[1].
+	   (first byte B, second G, third R, fourth A)
	*/
-	uint32_t const * palette = (uint32_t *) rect->data[1];
+	uint8_t const * palette = rect->data[1];
 #endif
	/* And the stream has a map of those palette colours to colours
	   chosen by the user; created a `mapped' palette from those settings.
	*/
@@ -630,7 +647,7 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
	map<RGBA, RGBA> colour_map = ffmpeg_content()->subtitle_stream()->colours ();
	vector<RGBA> mapped_palette (rect->nb_colors);
	for (int i = 0; i < rect->nb_colors; ++i) {
-		RGBA c ((palette[i] & 0xff0000) >> 16, (palette[i] & 0xff00) >> 8, palette[i] & 0xff, (palette[i] & 0xff000000) >> 24);
+		RGBA c (palette[2], palette[1], palette[0], palette[3]);
		map<RGBA, RGBA>::const_iterator j = colour_map.find (c);
		if (j != colour_map.end ()) {
			mapped_palette[i] = j->second;
@@ -641,29 +658,43 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTime
			*/
			mapped_palette[i] = c;
		}
+		palette += 4;
	}

	/* Start of the output data */
-	uint32_t* out_p = (uint32_t *) image->data()[0];
+	uint8_t* out_p = image->data()[0];

	for (int y = 0; y < rect->h; ++y) {
		uint8_t* sub_line_p = sub_p;
-		uint32_t* out_line_p = out_p;
+		uint8_t* out_line_p = out_p;
		for (int x = 0; x < rect->w; ++x) {
			RGBA const p = mapped_palette[*sub_line_p++];
-			/* XXX: this seems to be wrong to me (isn't the output image BGRA?) but it looks right on screen */
-			*out_line_p++ = (p.a << 24) | (p.b << 16) | (p.g << 8) | p.r;
+			*out_line_p++ = p.b;
+			*out_line_p++ = p.g;
+			*out_line_p++ = p.r;
+			*out_line_p++ = p.a;
		}
 #ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
		sub_p += rect->pict.linesize[0];
 #else
		sub_p += rect->linesize[0];
 #endif
-		out_p += image->stride()[0] / sizeof (uint32_t);
+		out_p += image->stride()[0];
	}

-	int const target_width = subtitle_codec_context()->width;
-	int const target_height = subtitle_codec_context()->height;
+	int target_width = subtitle_codec_context()->width;
+	if (target_width == 0 && video_codec_context()) {
+		/* subtitle_codec_context()->width == 0 has been seen in the wild but I don't
+		   know if it's supposed to mean something from FFmpeg's point of view.
+		*/
+		target_width = video_codec_context()->width;
+	}
+	int target_height = subtitle_codec_context()->height;
+	if (target_height == 0 && video_codec_context()) {
+		target_height = video_codec_context()->height;
+	}
+	DCPOMATIC_ASSERT (target_width);
+	DCPOMATIC_ASSERT (target_height);
	dcpomatic::Rect<double> const scaled_rect (
		static_cast<double> (rect->x) / target_width,
		static_cast<double> (rect->y) / target_height,
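
Note on the palette handling changed in the last hunks: the old code read each
four-byte BGRA palette entry as a uint32_t and extracted channels with shifts,
and packed output pixels into a uint32_t the same way.  Both depend on the
host's byte order, which is presumably why the old packing "looked right on
screen" on little-endian machines even though the deleted XXX comment
questioned it; the byte-wise form used by the new code reads and writes the
same bytes on any host.  Below is a minimal standalone sketch of the read
side; the sample palette entry and main() are invented for illustration, while
the two extraction styles mirror the old and new code above:

	#include <cstdint>
	#include <cstring>
	#include <cstdio>

	int main ()
	{
		/* One BGRA palette entry as FFmpeg stores it: B, G, R, A in memory order */
		uint8_t const entry[] = { 0x10, 0x20, 0x30, 0xff };

		/* Byte-wise read, as in the new code: the same on any host */
		std::printf ("bytes:  R=%02x G=%02x B=%02x A=%02x\n", entry[2], entry[1], entry[0], entry[3]);

		/* Word-and-shift read, as in the old code.  On a little-endian host the
		   word is 0xff302010, so the shifts extract the intended bytes; on a
		   big-endian host it is 0x102030ff and they do not.
		*/
		uint32_t word;
		std::memcpy (&word, entry, sizeof (word));
		unsigned const r = (word & 0xff0000) >> 16;
		unsigned const g = (word & 0xff00) >> 8;
		unsigned const b = word & 0xff;
		unsigned const a = (word & 0xff000000) >> 24;
		std::printf ("shifts: R=%02x G=%02x B=%02x A=%02x\n", r, g, b, a);

		return 0;
	}

The write side follows the same reasoning: storing p.b, p.g, p.r and p.a one
byte at a time yields BGRA in memory on every host, where the old single
uint32_t store produced a machine-dependent byte order.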