Attempts to simplify black-filling logic in Player.
author Carl Hetherington <cth@carlh.net>
Fri, 23 Jun 2017 14:09:30 +0000 (15:09 +0100)
committer Carl Hetherington <cth@carlh.net>
Fri, 23 Jun 2017 14:09:30 +0000 (15:09 +0100)
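
Player::pass() no longer computes fill boundaries from the earliest decoder position; instead it asks the Playlist whether any video or audio content covers the time just after the last thing emitted, and emits black or silence there directly.  To support this, FFmpegDecoder::flush() pads video with black frames and each audio stream with silence up to the content's full length, VideoDecoder::emit() applies FrameRateChange repeats itself, and Playlist gains next_audio_content() plus a delay-aware audio_content_at().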
src/lib/audio_decoder.cc
src/lib/audio_decoder.h
src/lib/dcp_video.cc
src/lib/ffmpeg_decoder.cc
src/lib/ffmpeg_decoder.h
src/lib/ffmpeg_examiner.cc
src/lib/player.cc
src/lib/playlist.cc
src/lib/playlist.h
src/lib/video_decoder.cc
test/data

index 4b77a8afbe0a1f56787f110c60c227cdd849b262..69d86c57b6d5ecb5059b32f7ad26708398163b24 100644 (file)
--- a/src/lib/audio_decoder.cc
+++ b/src/lib/audio_decoder.cc
@@ -93,12 +93,21 @@ AudioDecoder::emit (AudioStreamPtr stream, shared_ptr<const AudioBuffers> data,
        _positions[stream] += data->frames();
 }
 
+/** @return Time just after the last thing that was emitted from a given stream */
+ContentTime
+AudioDecoder::stream_position (AudioStreamPtr stream) const
+{
+       map<AudioStreamPtr, Frame>::const_iterator i = _positions.find (stream);
+       DCPOMATIC_ASSERT (i != _positions.end ());
+       return ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+}
+
 ContentTime
 AudioDecoder::position () const
 {
        optional<ContentTime> p;
        for (map<AudioStreamPtr, Frame>::const_iterator i = _positions.begin(); i != _positions.end(); ++i) {
-               ContentTime const ct = ContentTime::from_frames (i->second, _content->resampled_frame_rate());
+               ContentTime const ct = stream_position (i->first);
                if (!p || ct < *p) {
                        p = ct;
                }
index fcbd8267b6f6a7380dc5356551c68a5d8520d79d..624b5c94ab01d3f9c3674a2b5d7f767d52eae012 100644 (file)
--- a/src/lib/audio_decoder.h
+++ b/src/lib/audio_decoder.h
@@ -51,6 +51,8 @@ public:
        void seek ();
        void flush ();
 
+       ContentTime stream_position (AudioStreamPtr stream) const;
+
        boost::signals2::signal<void (AudioStreamPtr, ContentAudio)> Data;
 
 private:
index 916563b856468a421708e31ad7cfc624cd5df40d..655c373822ea750aedda3669a52fcccec4db913c 100644 (file)
--- a/src/lib/dcp_video.cc
+++ b/src/lib/dcp_video.cc
@@ -109,7 +109,7 @@ DCPVideo::convert_to_xyz (shared_ptr<const PlayerVideo> frame, dcp::NoteHandler
                        note
                        );
        } else {
-               xyz = dcp::xyz_to_xyz (image->data()[0], image->size(), image->stride()[0]);
+               xyz.reset (new dcp::OpenJPEGImage (image->data()[0], image->size(), image->stride()[0]));
        }
 
        return xyz;
index baec57f3d24e19473df2dd74788caa7a411e1841..a54ac96addbd7dd158ea6d13f02d784065fd2e2c 100644 (file)
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -86,6 +86,9 @@ FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log>
        if (c->video) {
                video.reset (new VideoDecoder (this, c, log));
                _pts_offset = pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->active_video_frame_rate());
+               /* It doesn't matter what size or pixel format this is, it just needs to be black */
+               _black_image.reset (new Image (AV_PIX_FMT_RGB24, dcp::Size (128, 128), true));
+               _black_image->make_black ();
        } else {
                _pts_offset = ContentTime ();
        }
@@ -113,6 +116,35 @@ FFmpegDecoder::flush ()
 
        if (audio) {
                decode_audio_packet ();
+       }
+
+       /* Make sure all streams are the same length and round up to the next video frame */
+
+       FrameRateChange const frc = _ffmpeg_content->film()->active_frame_rate_change(_ffmpeg_content->position());
+       ContentTime full_length (_ffmpeg_content->full_length(), frc);
+       full_length = full_length.ceil (frc.source);
+       if (video) {
+               double const vfr = _ffmpeg_content->video_frame_rate().get();
+               Frame const f = full_length.frames_round (vfr);
+               Frame v = video->position().frames_round (vfr);
+               while (v < f) {
+                       video->emit (shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)), v);
+                       ++v;
+               }
+       }
+
+       BOOST_FOREACH (shared_ptr<FFmpegAudioStream> i, _ffmpeg_content->ffmpeg_audio_streams ()) {
+               ContentTime a = audio->stream_position(i);
+               while (a < full_length) {
+                       ContentTime to_do = min (full_length - a, ContentTime::from_seconds (0.1));
+                       shared_ptr<AudioBuffers> silence (new AudioBuffers (i->channels(), to_do.frames_ceil (i->frame_rate())));
+                       silence->make_silent ();
+                       audio->emit (i, silence, a);
+                       a += to_do;
+               }
+       }
+
+       if (audio) {
                audio->flush ();
        }
 }
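
The silence padding added to flush() above is chunked into pieces of at most 0.1 seconds.  A minimal standalone sketch of that chunking, with doubles standing in for ContentTime and a print standing in for audio->emit() (assumed values, not the real API):

// Standalone sketch of the chunked silence fill in FFmpegDecoder::flush() above.
// Doubles stand in for ContentTime; printing stands in for audio->emit().
#include <algorithm>
#include <cmath>
#include <iostream>

int main ()
{
	double const full_length = 1.234;   // seconds (assumed value for illustration)
	double a = 0.95;                    // time just after the last audio emitted
	int const frame_rate = 48000;
	while (a < full_length) {
		double const to_do = std::min (full_length - a, 0.1);
		int const frames = static_cast<int> (std::ceil (to_do * frame_rate));
		std::cout << "emit " << frames << " silent frames at " << a << "s\n";
		a += to_do;
	}
	return 0;
}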
index bd7ba98b84fe0f74edd8e50ca195a9873de2ba4b..3ea5f580928773c1a8f681aa782e1e4e61e08def 100644 (file)
--- a/src/lib/ffmpeg_decoder.h
+++ b/src/lib/ffmpeg_decoder.h
@@ -36,6 +36,7 @@ class Log;
 class VideoFilterGraph;
 class FFmpegAudioStream;
 class AudioBuffers;
+class Image;
 struct ffmpeg_pts_offset_test;
 
 /** @class FFmpegDecoder
@@ -75,4 +76,6 @@ private:
        ContentTime _pts_offset;
        boost::optional<ContentTime> _current_subtitle_to;
        bool _have_current_subtitle;
+
+       boost::shared_ptr<Image> _black_image;
 };
index a145ae25c8f2c6fe6ab1d3fde72c8217c37577e4..88b76d04efc9593e1fc3df632e292c4c19189174 100644 (file)
--- a/src/lib/ffmpeg_examiner.cc
+++ b/src/lib/ffmpeg_examiner.cc
@@ -87,7 +87,7 @@ FFmpegExaminer::FFmpegExaminer (shared_ptr<const FFmpegContent> c, shared_ptr<Jo
                /* See if the header has duration information in it */
                _need_video_length = _format_context->duration == AV_NOPTS_VALUE;
                if (!_need_video_length) {
-                       _video_length = (double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get ();
+                       _video_length = llrint ((double (_format_context->duration) / AV_TIME_BASE) * video_frame_rate().get());
                }
        }
 
index d344aab0a13b8902dc80d93a549191740447a6fb..32c0026b610b4596dbde9ee57024ae4985a7d3e3 100644 (file)
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -513,66 +513,49 @@ Player::pass ()
                setup_pieces ();
        }
 
-       shared_ptr<Piece> earliest;
-       DCPTime earliest_content;
-
-       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
-               if (!i->done) {
-                       DCPTime const t = content_time_to_dcp (i, i->decoder->position());
-                       if (!earliest || t < earliest_content) {
-                               earliest_content = t;
-                               earliest = i;
-                       }
-               }
-       }
-
-       /* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
-          NOT to fill gaps within content (the latter is done in ::video())
-
-          XXX: can't we just look at content position/end and fill based on that?
-       */
-       DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
-
        bool filled = false;
-       /* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
-          Piece which emits black in spaces (we only emit if we are the earliest thing)
-       */
-       if (_last_video_time && (!earliest || *_last_video_time < earliest_content) && ((fill_towards - *_last_video_time)) >= one_video_frame()) {
-               list<DCPTimePeriod> p = subtract(DCPTimePeriod(*_last_video_time, *_last_video_time + one_video_frame()), _no_video);
-               if (!p.empty ()) {
-                       emit_video (black_player_video_frame(), p.front().from);
-                       filled = true;
-               }
+
+       if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
+               /* _last_video_time is the time just after the last video we emitted, and there is no video content
+                  at this time so we need to emit some black.
+               */
+               emit_video (black_player_video_frame(), *_last_video_time);
+               filled = true;
        } else if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(), DCPTime());
                filled = true;
        }
 
-       optional<DCPTime> audio_fill_from;
-       if (_last_audio_time) {
-               /* Fill from the last audio or seek time */
-               audio_fill_from = _last_audio_time;
+       if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
+               /* _last_audio_time is the time just after the last audio we emitted.  There is no audio here
+                  so we need to emit some silence.
+               */
+               shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
+               DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
+               if (period.duration() > one_video_frame()) {
+                       period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
+               }
+               fill_audio (period);
+               filled = true;
        }
 
-       DCPTime audio_fill_towards = fill_towards;
-       if (earliest && earliest->content->audio) {
-               audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
-       }
+       /* Now pass() the decoder which is farthest behind where we are */
 
-       if (audio_fill_from && audio_fill_from < audio_fill_towards && ((audio_fill_towards - *audio_fill_from) >= one_video_frame())) {
-               DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
-               if (period.duration() > one_video_frame()) {
-                       period.to = period.from + one_video_frame();
-               }
-               list<DCPTimePeriod> p = subtract(period, _no_audio);
-               if (!p.empty ()) {
-                       fill_audio (p.front());
-                       filled = true;
+       shared_ptr<Piece> earliest;
+       DCPTime earliest_content;
+
+       BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
+               if (!i->done) {
+                       DCPTime const t = content_time_to_dcp (i, i->decoder->position());
+                       if (!earliest || t < earliest_content) {
+                               earliest_content = t;
+                               earliest = i;
+                       }
                }
        }
 
-       if (earliest) {
+       if (!filled && earliest) {
                earliest->done = earliest->decoder->pass ();
        }
 
@@ -650,14 +633,6 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        DCPTimePeriod const period (time, time + one_video_frame());
 
-       /* Discard if it's outside the content's period or if it's before the last accurate seek */
-       if (
-               time < piece->content->position() ||
-               time >= piece->content->end() ||
-               (_last_video_time && time < *_last_video_time)) {
-               return;
-       }
-
        /* Fill gaps that we discover now that we have some video which needs to be emitted */
 
        optional<DCPTime> fill_to;
@@ -679,6 +654,14 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
                }
        }
 
+       /* Discard if it's outside the content's period or if it's before the last accurate seek */
+       if (
+               time < piece->content->position() ||
+               time >= piece->content->end() ||
+               (_last_video_time && time < *_last_video_time)) {
+               return;
+       }
+
        _last_video[wp].reset (
                new PlayerVideo (
                        video.image,
@@ -902,6 +885,7 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
                pv->set_subtitle (subtitles.get ());
        }
 
+       cout << "Player emit @ " << to_string(time) << "\n";
        Video (pv, time);
 
        if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
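
Taken together, the new pass() logic is: emit black one frame at a time wherever the playlist has no video, fill silence up to the next audio content (clamped to one video frame per pass), and otherwise pass() the decoder that is farthest behind.  A toy, self-contained sketch of the video side only; ToyPlaylist and its 1s-2s gap are invented stand-ins for the real Playlist API, and doubles stand in for DCPTime:

// Toy model of the black-filling decision in Player::pass() above.
#include <iostream>

struct ToyPlaylist
{
	double length;
	bool video_content_at (double t) const {
		return t < 1.0 || t >= 2.0;  // assume a gap in the video between 1s and 2s
	}
};

int main ()
{
	ToyPlaylist playlist = { 3.0 };
	double const one_frame = 1.0 / 24;  // one video frame at 24fps
	double t = 0;                       // time just after the last video emitted
	while (t < playlist.length) {
		if (!playlist.video_content_at (t)) {
			std::cout << "emit black at " << t << "s\n";
		}
		t += one_frame;
	}
	return 0;
}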
index d37f287837110a283b8ee56003d58011853fb1f3..3609f9eb3df59d46caf04149f46de1a5faf4aa43 100644 (file)
--- a/src/lib/playlist.cc
+++ b/src/lib/playlist.cc
@@ -24,6 +24,7 @@
 #include "ffmpeg_decoder.h"
 #include "ffmpeg_content.h"
 #include "image_decoder.h"
 #include "ffmpeg_decoder.h"
 #include "ffmpeg_content.h"
 #include "image_decoder.h"
+#include "audio_content.h"
 #include "content_factory.h"
 #include "dcp_content.h"
 #include "job.h"
 #include "content_factory.h"
 #include "dcp_content.h"
 #include "job.h"
@@ -559,7 +560,14 @@ bool
 Playlist::audio_content_at (DCPTime time) const
 {
        BOOST_FOREACH (shared_ptr<Content> i, _content) {
-               if (i->audio && i->position() <= time && time < i->end()) {
+               if (!i->audio) {
+                       continue;
+               }
+               DCPTime end = i->end ();
+               if (i->audio->delay() < 0) {
+                       end += DCPTime::from_seconds (i->audio->delay() / 1000.0);
+               }
+               if (i->position() <= time && time < end) {
                        return true;
                }
        }
@@ -567,6 +575,24 @@ Playlist::audio_content_at (DCPTime time) const
        return false;
 }
 
+shared_ptr<Content>
+Playlist::next_audio_content (DCPTime time) const
+{
+       shared_ptr<Content> next;
+       DCPTime next_position;
+       BOOST_FOREACH (shared_ptr<Content> i, _content) {
+               if (!i->audio) {
+                       continue;
+               }
+               if (i->position() >= time && (!next || i->position() < next_position)) {
+                       next = i;
+                       next_position = i->position();
+               }
+       }
+
+       return next;
+}
+
 pair<double, double>
 Playlist::speed_up_range (int dcp_video_frame_rate) const
 {
index 25dcda202b214e0e9029ac5407cfaf6b4d0e43fb..f8f51ac2da826e2857faeb2746a945378f4bda21 100644 (file)
--- a/src/lib/playlist.h
+++ b/src/lib/playlist.h
@@ -57,6 +57,7 @@ public:
        ContentList content () const;
        bool video_content_at (DCPTime time) const;
        bool audio_content_at (DCPTime time) const;
+       boost::shared_ptr<Content> next_audio_content (DCPTime time) const;
 
        std::string video_identifier () const;
 
index eb5d2e71f8d340b34cf4f8d6e27e51dd4628b1b7..2bd8d6f5170e0c0e824af35b1d236e674538efe6 100644 (file)
--- a/src/lib/video_decoder.cc
+++ b/src/lib/video_decoder.cc
@@ -59,41 +59,45 @@ VideoDecoder::emit (shared_ptr<const ImageProxy> image, Frame frame)
                return;
        }
 
-       /* Work out what we are going to emit next */
-       switch (_content->video->frame_type ()) {
-       case VIDEO_FRAME_TYPE_2D:
-               Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
-               break;
-       case VIDEO_FRAME_TYPE_3D:
-       {
-               /* We receive the same frame index twice for 3D; hence we know which
-                  frame this one is.
-               */
-               bool const same = (_last_emitted && _last_emitted.get() == frame);
-               Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
-               _last_emitted = frame;
-               break;
-       }
-       case VIDEO_FRAME_TYPE_3D_ALTERNATE:
-               Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
-               frame /= 2;
-               break;
-       case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
-               break;
-       case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
-               break;
-       case VIDEO_FRAME_TYPE_3D_LEFT:
-               Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
-               break;
-       case VIDEO_FRAME_TYPE_3D_RIGHT:
-               Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
-               break;
-       default:
-               DCPOMATIC_ASSERT (false);
+       FrameRateChange const frc = _content->film()->active_frame_rate_change (_content->position());
+       for (int i = 0; i < frc.repeat; ++i) {
+               switch (_content->video->frame_type ()) {
+               case VIDEO_FRAME_TYPE_2D:
+                       Data (ContentVideo (image, frame, EYES_BOTH, PART_WHOLE));
+                       break;
+               case VIDEO_FRAME_TYPE_3D:
+               {
+                       /* We receive the same frame index twice for 3D; hence we know which
+                          frame this one is.
+                       */
+                       bool const same = (_last_emitted && _last_emitted.get() == frame);
+                       Data (ContentVideo (image, frame, same ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+                       _last_emitted = frame;
+                       break;
+               }
+               case VIDEO_FRAME_TYPE_3D_ALTERNATE:
+                       Data (ContentVideo (image, frame / 2, (frame % 2) ? EYES_RIGHT : EYES_LEFT, PART_WHOLE));
+                       frame /= 2;
+                       break;
+               case VIDEO_FRAME_TYPE_3D_LEFT_RIGHT:
+                       Data (ContentVideo (image, frame, EYES_LEFT, PART_LEFT_HALF));
+                       Data (ContentVideo (image, frame, EYES_RIGHT, PART_RIGHT_HALF));
+                       break;
+               case VIDEO_FRAME_TYPE_3D_TOP_BOTTOM:
+                       Data (ContentVideo (image, frame, EYES_LEFT, PART_TOP_HALF));
+                       Data (ContentVideo (image, frame, EYES_RIGHT, PART_BOTTOM_HALF));
+                       break;
+               case VIDEO_FRAME_TYPE_3D_LEFT:
+                       Data (ContentVideo (image, frame, EYES_LEFT, PART_WHOLE));
+                       break;
+               case VIDEO_FRAME_TYPE_3D_RIGHT:
+                       Data (ContentVideo (image, frame, EYES_RIGHT, PART_WHOLE));
+                       break;
+               default:
+                       DCPOMATIC_ASSERT (false);
+               }
+
+               ++frame;
        }
 
        _position = ContentTime::from_frames (frame, _content->active_video_frame_rate ());
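
The new frc.repeat loop above emits each source frame repeat times with consecutive frame indices, so e.g. 24fps content in a 48fps DCP is emitted twice per source frame.  A rough standalone illustration; repeat is computed naively here, whereas dcpomatic derives it from FrameRateChange:

// Rough illustration of the frame repetition used in VideoDecoder::emit() above.
// 'repeat' is computed naively here; FrameRateChange handles more cases.
#include <algorithm>
#include <cmath>
#include <iostream>

int main ()
{
	double const content_rate = 24;
	double const dcp_rate = 48;
	int const repeat = std::max (1, static_cast<int> (std::lround (dcp_rate / content_rate)));
	int frame = 0;  // next output frame index
	for (int source = 0; source < 3; ++source) {
		for (int i = 0; i < repeat; ++i) {
			std::cout << "emit source frame " << source << " as output frame " << frame << "\n";
			++frame;
		}
	}
	return 0;
}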
index 83a0643c345454864dafabfb9d4703db30dd0bc0..adec5472fc49df236d9ef7609f54c56c65d09479 160000 (submodule)
--- a/test/data
+++ b/test/data
@@ -1 +1 @@
-Subproject commit 83a0643c345454864dafabfb9d4703db30dd0bc0
+Subproject commit adec5472fc49df236d9ef7609f54c56c65d09479