From: Carl Hetherington
Date: Fri, 23 Jun 2017 14:09:30 +0000 (+0100)
Subject: Attempts to simplify black-filling logic in Player.
X-Git-Tag: v2.11.12~31
X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=commitdiff_plain;h=9824173a79ce723068296b3a44499101408c24f2

Attempts to simplify black-filling logic in Player.
---

diff --git a/src/lib/audio_decoder.cc b/src/lib/audio_decoder.cc
index 4b77a8afb..69d86c57b 100644
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -93,12 +93,21 @@ AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data,
 	_positions[stream] += data->frames();
 }

+/** @return Time just after the last thing that was emitted from a given stream */
+ContentTime
+AudioDecoder::stream_position (AudioStreamPtr stream) const
+{
+	map<AudioStreamPtr, Frame>::const_iterator i = _positions.find (stream);
+	DCPOMATIC_ASSERT (i != _positions.end ());
+	return ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+}
+
 ContentTime
 AudioDecoder::position () const
 {
 	optional<ContentTime> p;
 	for (map<AudioStreamPtr, Frame>::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
-		ContentTime const ct = ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+		ContentTime const ct = stream_position (i->first);
 		if (!p || ct < *p) {
 			p = ct;
 		}
diff --git a/src/lib/audio_decoder.h b/src/lib/audio_decoder.h
index fcbd8267b..624b5c94a 100644
--- a/src/lib/audio_decoder.h
+++ b/src/lib/audio_decoder.h
@@ -51,6 +51,8 @@ public:
 	void seek ();
 	void flush ();

+	ContentTime stream_position (AudioStreamPtr stream) const;
+
 	boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;

 private:
diff --git a/src/lib/dcp_video.cc b/src/lib/dcp_video.cc
index 916563b85..655c37382 100644
--- a/src/lib/dcp_video.cc
+++ b/src/lib/dcp_video.cc
@@ -109,7 +109,7 @@ DCPVideo::convert_to_xyz (shared_ptr<const PlayerVideo> frame, dcp::NoteHandler note)
 			note
 			);
 	} else {
-		xyz = dcp::xyz_to_xyz (image->data()[0], image->size(), image->stride()[0]);
+		xyz.reset (new dcp::OpenJPEGImage (image->data()[0], image->size(), image->stride()[0]));
 	}

 	return xyz;
diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index baec57f3d..a54ac96ad 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -86,6 +86,9 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
 	if (c->video) {
 		video.reset (new VideoDecoder (this, c, log));
 		_pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+		/* It doesn't matter what size or pixel format this is, it just needs to be black */
+		_black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
+		_black_image->make_black ();
 	} else {
 		_pts_offset = ContentTime ();
 	}
@@ -113,6 +116,35 @@ FFmpegDecoder::flush ()

 	if (audio) {
 		decode_audio_packet ();
+	}
+
+	/* Make sure all streams are the same length and round up to the next video frame */
+
+	FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
+	ContentTime full_length (_ffmpeg_content->full_length(), frc);
+	full_length = full_length.ceil (frc.source);
+	if (video) {
+		double const vfr = _ffmpeg_content->video_frame_rate().get();
+		Frame const f = full_length.frames_round (vfr);
+		Frame v = video->position().frames_round (vfr);
+		while (v < f) {
+			video->emit (shared_ptr<ImageProxy> (new RawImageProxy (_black_image)), v);
+			++v;
+		}
+	}
+
+	BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
+		ContentTime a = audio->stream_position(i);
+		while (a < full_length) {
+			ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+			shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+			silence->make_silent ();
+			audio->emit (i, silence, a);
+			a += to_do;
+		}
+	}
+
+	if (audio) {
 		audio->flush ();
 	}
 }
diff --git a/src/lib/ffmpeg_decoder.h b/src/lib/ffmpeg_decoder.h
index bd7ba98b8..3ea5f5809 100644
--- a/src/lib/ffmpeg_decoder.h
+++ b/src/lib/ffmpeg_decoder.h
@@ -36,6 +36,7 @@ class Log;
 class VideoFilterGraph;
 class FFmpegAudioStream;
 class AudioBuffers;
+class Image;
 struct ffmpeg_pts_offset_test;

 /** @class FFmpegDecoder
@@ -75,4 +76,6 @@ private:
 	ContentTime _pts_offset;
 	boost::optional<ContentTime> _current_subtitle_to;
 	bool _have_current_subtitle;
+
+	boost::shared_ptr<Image> _black_image;
 };
diff --git a/src/lib/ffmpeg_examiner.cc b/src/lib/ffmpeg_examiner.cc
index a145ae25c..88b76d04e 100644
--- a/src/lib/ffmpeg_examiner.cc
+++ b/src/lib/ffmpeg_examiner.cc
@@ -87,7 +87,7 @@ FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Job> job)
 	_need_video_length = _format_context->duration == AV_NOPTS_VALUE;
 	if (!_need_video_length) {
-		_video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
+		_video_length = llrint ((double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get());
 	}
 }
diff --git a/src/lib/player.cc b/src/lib/player.cc
index d344aab0a..32c0026b6 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -513,66 +513,49 @@ Player::pass ()
 		setup_pieces ();
 	}

-	shared_ptr<Piece> earliest;
-	DCPTime earliest_content;
-
-	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-		if (!i->done) {
-			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
-			if (!earliest || t < earliest_content) {
-				earliest_content = t;
-				earliest = i;
-			}
-		}
-	}
-
-	/* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
-	   NOT to fill gaps within content (the latter is done in ::video())
-
-	   XXX: can't we just look at content position/end and fill based on that?
-	*/
-	DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
-
 	bool filled = false;
-	/* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
-	   Piece which emits black in spaces (we only emit if we are the earliest thing)
-	*/
-	if (_last_video_time && (!earliest || *_last_video_time < earliest_content) && ((fill_towards - *_last_video_time)) >= one_video_frame()) {
-		list<DCPTimePeriod> p = subtract(DCPTimePeriod(*_last_video_time, *_last_video_time + one_video_frame()), _no_video);
-		if (!p.empty ()) {
-			emit_video (black_player_video_frame(), p.front().from);
-			filled = true;
-		}
+
+	if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
+		/* _last_video_time is the time just after the last video we emitted, and there is no video content
+		   at this time so we need to emit some black.
+		*/
+		emit_video (black_player_video_frame(), *_last_video_time);
+		filled = true;
 	} else if (_playlist->length() == DCPTime()) {
 		/* Special case of an empty Film; just give one black frame */
 		emit_video (black_player_video_frame(), DCPTime());
 		filled = true;
 	}

-	optional<DCPTime> audio_fill_from;
-	if (_last_audio_time) {
-		/* Fill from the last audio or seek time */
-		audio_fill_from = _last_audio_time;
+	if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
+		/* _last_audio_time is the time just after the last audio we emitted.  There is no audio here
+		   so we need to emit some silence.
+		*/
+		shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
+		DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
+		if (period.duration() > one_video_frame()) {
+			period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
+		}
+		fill_audio (period);
+		filled = true;
 	}

-	DCPTime audio_fill_towards = fill_towards;
-	if (earliest && earliest->content->audio) {
-		audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
-	}
+	/* Now pass() the decoder which is farthest behind where we are */

-	if (audio_fill_from && audio_fill_from < audio_fill_towards && ((audio_fill_towards - *audio_fill_from) >= one_video_frame())) {
-		DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
-		if (period.duration() > one_video_frame()) {
-			period.to = period.from + one_video_frame();
-		}
-		list<DCPTimePeriod> p = subtract(period, _no_audio);
-		if (!p.empty ()) {
-			fill_audio (p.front());
-			filled = true;
+	shared_ptr<Piece> earliest;
+	DCPTime earliest_content;
+
+	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+		if (!i->done) {
+			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+			if (!earliest || t < earliest_content) {
+				earliest_content = t;
+				earliest = i;
+			}
 		}
 	}

-	if (earliest) {
+	if (!filled && earliest) {
 		earliest->done = earliest->decoder->pass ();
 	}

@@ -650,14 +633,6 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 	DCPTime const time = content_video_to_dcp (piece, video.frame);
 	DCPTimePeriod const period (time, time + one_video_frame());

-	/* Discard if it's outside the content's period or if it's before the last accurate seek */
-	if (
-		time < piece->content->position() ||
-		time >= piece->content->end() ||
-		(_last_video_time && time < *_last_video_time)) {
-		return;
-	}
-
 	/* Fill gaps that we discover now that we have some video which needs to be emitted */

 	optional<DCPTime> fill_to;
@@ -679,6 +654,14 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
 		}
 	}

+	/* Discard if it's outside the content's period or if it's before the last accurate seek */
+	if (
+		time < piece->content->position() ||
+		time >= piece->content->end() ||
+		(_last_video_time && time < *_last_video_time)) {
+		return;
+	}
+
 	_last_video[wp].reset (
 		new PlayerVideo (
 			video.image,
@@ -902,6 +885,7 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 		pv->set_subtitle (subtitles.get ());
 	}

+	cout << "Player emit @ " << to_string(time) << "\n";
 	Video (pv, time);

 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
diff --git a/src/lib/playlist.cc b/src/lib/playlist.cc
index d37f28783..3609f9eb3 100644
--- a/src/lib/playlist.cc
+++ b/src/lib/playlist.cc
@@ -24,6 +24,7 @@
 #include "ffmpeg_decoder.h"
 #include "ffmpeg_content.h"
 #include "image_decoder.h"
+#include "audio_content.h"
 #include "content_factory.h"
 #include "dcp_content.h"
 #include "job.h"
@@ -559,7 +560,14 @@ bool
 Playlist::audio_content_at (DCPTime time) const
 {
 	BOOST_FOREACH (shared_ptr<Content> i, _content) {
-		if (i->audio && i->position() <= time && time < i->end()) {
+		if (!i->audio) {
+			continue;
+		}
+		DCPTime end = i->end ();
+		if (i->audio->delay() < 0) {
+			end += DCPTime::from_seconds (i->audio->delay() / 1000.0);
+		}
+		if (i->position() <= time && time < end) {
 			return true;
 		}
 	}
@@ -567,6 +575,24 @@ Playlist::audio_content_at (DCPTime time) const
 	return false;
 }

+shared_ptr<Content>
+Playlist::next_audio_content (DCPTime time) const
+{
+	shared_ptr<Content> next;
+	DCPTime next_position;
+	BOOST_FOREACH (shared_ptr<Content> i, _content) {
+		if (!i->audio) {
+			continue;
+		}
+		if (i->position() >= time && (!next || i->position() < next_position)) {
+			next = i;
+			next_position = i->position();
+		}
+	}
+
+	return next;
+}
+
 pair
 Playlist::speed_up_range (int dcp_video_frame_rate) const
 {
diff --git a/src/lib/playlist.h b/src/lib/playlist.h
index 25dcda202..f8f51ac2d 100644
--- a/src/lib/playlist.h
+++ b/src/lib/playlist.h
@@ -57,6 +57,7 @@ public:
 	ContentList content () const;
 	bool video_content_at (DCPTime time) const;
 	bool audio_content_at (DCPTime time) const;
+	boost::shared_ptr<Content> next_audio_content (DCPTime time) const;

 	std::string video_identifier () const;
diff --git a/src/lib/video_decoder.cc b/src/lib/video_decoder.cc
index eb5d2e71f..2bd8d6f51 100644
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -59,41 +59,45 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
 		return;
 	}

-	/* Work out what we are going to emit next */
-	switch (_content->video->frame_type ()) {
-	case VIDEO_FRAME_TYPE_2D:
-		Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
-		break;
-	case VIDEO_FRAME_TYPE_3D:
-	{
-		/* We receive the same frame index twice for 3D; hence we know which
-		   frame this one is.
-		*/
-		bool const same = (_last_emitted && _last_emitted.get() == frame);
-		Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
-		_last_emitted = frame;
-		break;
-	}
-	case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-		Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
-		frame /= 2;
-		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
-		break;
-	case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
-		break;
-	case VIDEO_FRAME_TYPE_3D_LEFT:
-		Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
-		break;
-	case VIDEO_FRAME_TYPE_3D_RIGHT:
-		Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
-		break;
-	default:
-		DCPOMATIC_ASSERT (false);
+	FrameRateChange const frc = _content->film()->active_frame_rate_change (_content->position());
+	for (int i = 0; i < frc.repeat; ++i) {
+		switch (_content->video->frame_type ()) {
+		case VIDEO_FRAME_TYPE_2D:
+			Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+			break;
+		case VIDEO_FRAME_TYPE_3D:
+		{
+			/* We receive the same frame index twice for 3D; hence we know which
+			   frame this one is.
+			*/
+			bool const same = (_last_emitted && _last_emitted.get() == frame);
+			Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+			_last_emitted = frame;
+			break;
+		}
+		case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+			Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+			frame /= 2;
+			break;
+		case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+			Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+			Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+			break;
+		case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+			Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+			Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+			break;
+		case VIDEO_FRAME_TYPE_3D_LEFT:
+			Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+			break;
+		case VIDEO_FRAME_TYPE_3D_RIGHT:
+			Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+			break;
+		default:
+			DCPOMATIC_ASSERT (false);
+		}
+
+		++frame;
 	}

 	_position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
diff --git a/test/data b/test/data
index 83a0643c3..adec5472f 160000
--- a/test/data
+++ b/test/data
@@ -1 +1 @@
-Subproject commit 83a0643c345454864dafabfb9d4703db30dd0bc0
+Subproject commit adec5472fc49df236d9ef7609f54c56c65d09479
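
Note (not part of the patch): the sketch below is a standalone illustration of the decision the reworked Player::pass() makes on each call, with times reduced to plain integer frames and the playlist to a set of [from, to) intervals.  The names Interval, content_at, last_video and the frame numbers are illustrative assumptions only; the real code works in DCPTime against the Playlist and emits via emit_video()/fill_audio().

/* Sketch only: emit black wherever the playlist has no video content, otherwise
   let the decoder that is farthest behind produce the next frame. */

#include <iostream>
#include <vector>

struct Interval
{
	int from;  /* inclusive, in frames */
	int to;    /* exclusive, in frames */
};

/* True if any interval covers time t (stand-in for Playlist::video_content_at()) */
static bool
content_at (std::vector<Interval> const & content, int t)
{
	for (size_t i = 0; i < content.size(); ++i) {
		if (content[i].from <= t && t < content[i].to) {
			return true;
		}
	}
	return false;
}

int
main ()
{
	/* Two pieces of video content with a gap between frames 48 and 96 */
	std::vector<Interval> video;
	video.push_back (Interval { 0, 48 });
	video.push_back (Interval { 96, 144 });
	int const length = 144;

	int last_video = 0;  /* time just after the last frame we emitted */
	while (last_video < length) {
		if (!content_at (video, last_video)) {
			/* No video content here, so pass() would emit a black frame at last_video */
			std::cout << "black\t" << last_video << "\n";
		} else {
			/* Content covers this time, so pass() would instead run the decoder that is
			   farthest behind; gaps *within* content are handled separately in ::video() */
			std::cout << "content\t" << last_video << "\n";
		}
		++last_video;
	}

	return 0;
}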