Change how video timing is done. v2.17.0
authorCarl Hetherington <cth@carlh.net>
Thu, 29 Sep 2022 08:17:50 +0000 (10:17 +0200)
committerCarl Hetherington <cth@carlh.net>
Mon, 3 Oct 2022 15:53:41 +0000 (17:53 +0200)
This commit changes the approach to video timing.  Previously,
we would (more-or-less) try to use every video frame from the content
in the output, hoping that they came at a constant frame rate.

This is not always the case, however.  Here we preserve the PTS
of each video frame, and then when one arrives we output whatever
DCP video frames we can (at the regular DCP frame rate).

Hopefully this will solve a range of sync problems, but it
could also introduce new ones.

21 files changed:
src/lib/content_video.h
src/lib/dcp_decoder.cc
src/lib/ffmpeg_decoder.cc
src/lib/image_decoder.cc
src/lib/player.cc
src/lib/player.h
src/lib/player_video.cc
src/lib/player_video.h
src/lib/shuffler.cc
src/lib/util.cc
src/lib/util.h
src/lib/video_content.cc
src/lib/video_content.h
src/lib/video_decoder.cc
src/lib/video_decoder.h
src/lib/video_mxf_decoder.cc
test/client_server_test.cc
test/content_test.cc
test/ffmpeg_decoder_seek_test.cc
test/low_bitrate_test.cc
test/shuffler_test.cc

index 8ca18576e4e939f9c7b469924b6f5e63e89a8d10..1c145f60294f4e7d1e1523cd8cf3abbe16eb12e5 100644 (file)
 
 */
 
+
 #ifndef DCPOMATIC_CONTENT_VIDEO_H
 #define DCPOMATIC_CONTENT_VIDEO_H
 
+
+#include "dcpomatic_time.h"
 #include "types.h"
 
+
 class ImageProxy;
 
+
 /** @class ContentVideo
  *  @brief A frame of video straight out of some content.
  */
@@ -32,22 +37,22 @@ class ContentVideo
 {
 public:
        ContentVideo ()
-               : frame (0)
-               , eyes (Eyes::LEFT)
+               : eyes (Eyes::LEFT)
                , part (Part::WHOLE)
        {}
 
-       ContentVideo (std::shared_ptr<const ImageProxy> i, Frame f, Eyes e, Part p)
+       ContentVideo (std::shared_ptr<const ImageProxy> i, dcpomatic::ContentTime t, Eyes e, Part p)
                : image (i)
-               , frame (f)
+               , time (t)
                , eyes (e)
                , part (p)
        {}
 
        std::shared_ptr<const ImageProxy> image;
-       Frame frame;
+       dcpomatic::ContentTime time;
        Eyes eyes;
        Part part;
 };
 
+
 #endif
index 9064627ba9cd937a4930631e17f4665837553ee6..3ccb6ccc671ff3ad2335cc31b14f739a375f89bc 100644 (file)
@@ -177,7 +177,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
                } else {
                        video->emit (
@@ -189,7 +189,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
 
                        video->emit (
@@ -201,7 +201,7 @@ DCPDecoder::pass ()
                                        AV_PIX_FMT_XYZ12LE,
                                        _forced_reduction
                                        ),
-                               _offset + frame
+                               ContentTime::from_frames(_offset + frame, vfr)
                                );
                }
        }
index d4a5d6c192884872dd35ca260576be73e4690418..765b9fa62b127976d000ff70ee68b2b8ba514fd8 100644 (file)
@@ -143,11 +143,10 @@ FFmpegDecoder::flush ()
        full_length = full_length.ceil (frc.source);
        if (video) {
                double const vfr = _ffmpeg_content->video_frame_rate().get();
-               auto const f = full_length.frames_round (vfr);
-               auto v = video->position(film()).get_value_or(ContentTime()).frames_round(vfr) + 1;
-               while (v < f) {
-                       video->emit (film(), make_shared<const RawImageProxy>(_black_image), v);
-                       ++v;
+               auto v = video->position(film()).get_value_or(ContentTime()) + ContentTime::from_frames(1, vfr);
+               while (v < full_length) {
+                       video->emit(film(), make_shared<const RawImageProxy>(_black_image), v);
+                       v += ContentTime::from_frames(1, vfr);
                }
        }
 
@@ -611,7 +610,7 @@ FFmpegDecoder::process_video_frame ()
                        video->emit (
                                film(),
                                make_shared<RawImageProxy>(image),
-                               llrint(pts * _ffmpeg_content->active_video_frame_rate(film()))
+                               ContentTime::from_seconds(pts)
                                );
                } else {
                        LOG_WARNING_NC ("Dropping frame without PTS");
index 59dc4e873fd92fcaa5a0723351b6fda862ff51ad..ecbbd5134adc7298ed2c48717151af4b25c0e66e 100644 (file)
@@ -78,7 +78,7 @@ ImageDecoder::pass ()
                }
        }
 
-       video->emit (film(), _image, _frame_video_position);
+       video->emit(film(), _image, dcpomatic::ContentTime::from_frames(_frame_video_position, _image_content->video_frame_rate().get_value_or(24)));
        ++_frame_video_position;
        return false;
 }
index 7e3a1bdcf15e072ce740236fe0c30755aae529d3..ae79a457f428f6330121c77833f38c1a4f512626 100644 (file)
@@ -71,10 +71,8 @@ using std::dynamic_pointer_cast;
 using std::list;
 using std::make_pair;
 using std::make_shared;
-using std::make_shared;
 using std::max;
 using std::min;
-using std::min;
 using std::pair;
 using std::shared_ptr;
 using std::vector;
@@ -297,7 +295,6 @@ Player::setup_pieces ()
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
 
        _next_video_time = boost::none;
-       _next_video_eyes = Eyes::BOTH;
        _next_audio_time = boost::none;
 }
 
@@ -402,7 +399,7 @@ Player::black_player_video_frame (Eyes eyes) const
        boost::mutex::scoped_lock lm(_black_image_mutex);
 
        return std::make_shared<PlayerVideo> (
-               std::make_shared<const RawImageProxy>(_black_image),
+               make_shared<const RawImageProxy>(_black_image),
                Crop(),
                optional<double>(),
                _video_container_size,
@@ -412,7 +409,7 @@ Player::black_player_video_frame (Eyes eyes) const
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                std::weak_ptr<Content>(),
-               boost::optional<Frame>(),
+               boost::optional<dcpomatic::ContentTime>(),
                false
        );
 }
@@ -561,7 +558,7 @@ Player::pass ()
 
        if (_playback_length.load() == DCPTime()) {
                /* Special; just give one black frame */
-               emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
+               use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
                return true;
        }
 
@@ -619,18 +616,23 @@ Player::pass ()
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
-               if (dcp && !_play_referenced && dcp->reference_audio()) {
-                       /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
-                          to `hide' the fact that no audio was emitted during the referenced DCP (though
-                          we need to behave as though it was).
-                       */
-                       _next_audio_time = dcp->end (_film);
+               if (dcp && !_play_referenced) {
+                       if (dcp->reference_video()) {
+                               _next_video_time = dcp->end(_film);
+                       }
+                       if (dcp->reference_audio()) {
+                               /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
+                                  to `hide' the fact that no audio was emitted during the referenced DCP (though
+                                  we need to behave as though it was).
+                               */
+                               _next_audio_time = dcp->end(_film);
+                       }
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
-               emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
+               use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
@@ -726,24 +728,13 @@ Player::pass ()
        }
 
        if (done) {
+               emit_video_until(_film->length());
+
                if (_shuffler) {
                        _shuffler->flush ();
                }
                for (auto const& i: _delay) {
-                       do_emit_video(i.first, i.second);
-               }
-
-               /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
-                * However, if we have L and R video files, and one is shorter than the other,
-                * the fill code in ::video mostly takes care of filling in the gaps.
-                * However, since it fills at the point when it knows there is more video coming
-                * at time t (so it should fill any gap up to t) it can't do anything right at the
-                * end.  This is particularly bad news if the last frame emitted is a LEFT
-                * eye, as the MXF writer will complain about the 3D sequence being wrong.
-                * Here's a hack to workaround that particular case.
-                */
-               if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
-                       do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
+                       emit_video(i.first, i.second);
                }
        }
 
@@ -798,6 +789,57 @@ Player::open_subtitles_for_frame (DCPTime time) const
 }
 
 
+void
+Player::emit_video_until(DCPTime time)
+{
+       auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
+               /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
+                  player before the video that requires them.
+               */
+               _delay.push_back(make_pair(pv, time));
+
+               if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
+                       _next_video_time = time + one_video_frame();
+               }
+
+               if (_delay.size() < 3) {
+                       return;
+               }
+
+               auto to_do = _delay.front();
+               _delay.pop_front();
+               emit_video(to_do.first, to_do.second);
+       };
+
+       auto const age_threshold = one_video_frame() * 2;
+
+       while (_next_video_time.get_value_or({}) < time) {
+               auto left = _last_video[Eyes::LEFT];
+               auto right = _last_video[Eyes::RIGHT];
+               auto both = _last_video[Eyes::BOTH];
+
+               auto const next = _next_video_time.get_value_or({});
+
+               if (
+                       left.first &&
+                       right.first &&
+                       (!both.first || (left.second >= both.second && right.second >= both.second)) &&
+                       (left.second - next) < age_threshold &&
+                       (right.second - next) < age_threshold
+                  ) {
+                       frame(left.first, next);
+                       frame(right.first, next);
+               } else if (both.first && (both.second - next) < age_threshold) {
+                       frame(both.first, next);
+                       LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
+               } else {
+                       frame(black_player_video_frame(Eyes::BOTH), next);
+                       LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
+               }
+       }
+}
+
+
 void
 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
@@ -814,20 +856,25 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       FrameRateChange frc (_film, piece->content);
-       if (frc.skip && (video.frame % 2) == 1) {
-               return;
+       auto const three_d = _film->three_d();
+
+       if (!three_d) {
+               if (video.eyes == Eyes::LEFT) {
+                       /* Use left-eye images for both eyes... */
+                       video.eyes = Eyes::BOTH;
+               } else if (video.eyes == Eyes::RIGHT) {
+                       /* ...and discard the right */
+                       return;
+               }
        }
 
-       /* Time of the first frame we will emit */
-       DCPTime const time = content_video_to_dcp (piece, video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       FrameRateChange frc (_film, piece->content);
 
-       /* Discard if it's before the content's period or the last accurate seek.  We can't discard
-          if it's after the content's period here as in that case we still need to fill any gap between
-          `now' and the end of the content's period.
-       */
-       if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
+       /* Time of the frame we just received within the DCP */
+       auto const time = content_time_to_dcp(piece, video.time);
+       LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
+
+       if (time < piece->content->position()) {
                return;
        }
 
@@ -835,86 +882,43 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
-       /* Fill gaps that we discover now that we have some video which needs to be emitted.
-          This is where we need to fill to.
-       */
-       DCPTime fill_to = min (time, piece->content->end(_film));
-
-       if (_next_video_time) {
-               DCPTime fill_from = max (*_next_video_time, piece->content->position());
-
-               /* Fill if we have more than half a frame to do */
-               if ((fill_to - fill_from) > one_video_frame() / 2) {
-                       auto last = _last_video.find (weak_piece);
-                       if (_film->three_d()) {
-                               auto fill_to_eyes = video.eyes;
-                               if (fill_to_eyes == Eyes::BOTH) {
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               if (fill_to == piece->content->end(_film)) {
-                                       /* Don't fill after the end of the content */
-                                       fill_to_eyes = Eyes::LEFT;
-                               }
-                               auto j = fill_from;
-                               auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
-                               if (eyes == Eyes::BOTH) {
-                                       eyes = Eyes::LEFT;
-                               }
-                               while (j < fill_to || eyes != fill_to_eyes) {
-                                       if (last != _last_video.end()) {
-                                               LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
-                                               auto copy = last->second->shallow_copy();
-                                               copy->set_eyes (eyes);
-                                               emit_video (copy, j);
-                                       } else {
-                                               LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
-                                               emit_video (black_player_video_frame(eyes), j);
-                                       }
-                                       if (eyes == Eyes::RIGHT) {
-                                               j += one_video_frame();
-                                       }
-                                       eyes = increment_eyes (eyes);
-                               }
-                       } else {
-                               for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
-                                       if (last != _last_video.end()) {
-                                               emit_video (last->second, j);
-                                       } else {
-                                               emit_video (black_player_video_frame(Eyes::BOTH), j);
-                                       }
-                               }
-                       }
-               }
+       if (!_next_video_time) {
+               _next_video_time = time.round(_film->video_frame_rate());
        }
 
        auto const content_video = piece->content->video;
-
-       _last_video[weak_piece] = std::make_shared<PlayerVideo>(
-               video.image,
-               content_video->actual_crop(),
-               content_video->fade (_film, video.frame),
-               scale_for_display(
-                       content_video->scaled_size(_film->frame_size()),
+       use_video(
+               std::make_shared<PlayerVideo>(
+                       video.image,
+                       content_video->actual_crop(),
+                       content_video->fade(_film, video.time),
+                       scale_for_display(
+                               content_video->scaled_size(_film->frame_size()),
+                               _video_container_size,
+                               _film->frame_size(),
+                               content_video->pixel_quanta()
+                               ),
                        _video_container_size,
-                       _film->frame_size(),
-                       content_video->pixel_quanta()
+                       video.eyes,
+                       video.part,
+                       content_video->colour_conversion(),
+                       content_video->range(),
+                       piece->content,
+                       video.time,
+                       false
                        ),
-               _video_container_size,
-               video.eyes,
-               video.part,
-               content_video->colour_conversion(),
-               content_video->range(),
-               piece->content,
-               video.frame,
-               false
-               );
+                       time,
+                       piece->content->end(_film)
+                               );
+}
 
-       DCPTime t = time;
-       for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(_film)) {
-                       emit_video (_last_video[weak_piece], t);
-               }
-               t += one_video_frame ();
+
+void
+Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
+{
+       _last_video[pv->eyes()] = { pv, time };
+       if (pv->eyes() != Eyes::LEFT) {
+               emit_video_until(std::min(time + one_video_frame() / 2, end));
        }
 }
 
@@ -1184,56 +1188,22 @@ Player::seek (DCPTime time, bool accurate)
 
        if (accurate) {
                _next_video_time = time;
-               _next_video_eyes = Eyes::LEFT;
                _next_audio_time = time;
        } else {
                _next_video_time = boost::none;
-               _next_video_eyes = boost::none;
                _next_audio_time = boost::none;
        }
 
        _black.set_position (time);
        _silent.set_position (time);
 
-       _last_video.clear ();
+       _last_video[Eyes::LEFT] = {};
+       _last_video[Eyes::RIGHT] = {};
+       _last_video[Eyes::BOTH] = {};
 }
 
-
-void
-Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
-{
-       if (!_film->three_d()) {
-               if (pv->eyes() == Eyes::LEFT) {
-                       /* Use left-eye images for both eyes... */
-                       pv->set_eyes (Eyes::BOTH);
-               } else if (pv->eyes() == Eyes::RIGHT) {
-                       /* ...and discard the right */
-                       return;
-               }
-       }
-
-       /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
-          player before the video that requires them.
-       */
-       _delay.push_back (make_pair (pv, time));
-
-       if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
-               _next_video_time = time + one_video_frame();
-       }
-       _next_video_eyes = increment_eyes (pv->eyes());
-
-       if (_delay.size() < 3) {
-               return;
-       }
-
-       auto to_do = _delay.front();
-       _delay.pop_front();
-       do_emit_video (to_do.first, to_do.second);
-}
-
-
 void
-Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
+Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
 {
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
index b6ee4e9c636d8adacb08da38b0810108c8802694..71daf4b32d0c969ab5a8c48430071c4079b933ea 100644 (file)
@@ -137,6 +137,8 @@ private:
        dcpomatic::ContentTime dcp_to_content_time (std::shared_ptr<const Piece> piece, dcpomatic::DCPTime t) const;
        dcpomatic::DCPTime content_time_to_dcp (std::shared_ptr<const Piece> piece, dcpomatic::ContentTime t) const;
        std::shared_ptr<PlayerVideo> black_player_video_frame (Eyes eyes) const;
+       void emit_video_until(dcpomatic::DCPTime time);
+       void insert_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
 
        void video (std::weak_ptr<Piece>, ContentVideo);
        void audio (std::weak_ptr<Piece>, AudioStreamPtr, ContentAudio);
@@ -151,8 +153,8 @@ private:
                std::shared_ptr<const AudioBuffers> audio, dcpomatic::DCPTime time, dcpomatic::DCPTime discard_to
                ) const;
        boost::optional<PositionImage> open_subtitles_for_frame (dcpomatic::DCPTime time) const;
-       void emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
-       void do_emit_video (std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void emit_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time);
+       void use_video(std::shared_ptr<PlayerVideo> pv, dcpomatic::DCPTime time, dcpomatic::DCPTime end);
        void emit_audio (std::shared_ptr<AudioBuffers> data, dcpomatic::DCPTime time);
        std::shared_ptr<const Playlist> playlist () const;
 
@@ -193,15 +195,12 @@ private:
 
        /** Time of the next video that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_video_time;
-       /** Eyes of the next video that we will emit */
-       boost::optional<Eyes> _next_video_eyes;
        /** Time of the next audio that we will emit, or the time of the last accurate seek */
        boost::optional<dcpomatic::DCPTime> _next_audio_time;
 
        boost::atomic<boost::optional<int>> _dcp_decode_reduction;
 
-       typedef std::map<std::weak_ptr<Piece>, std::shared_ptr<PlayerVideo>, std::owner_less<std::weak_ptr<Piece>>> LastVideoMap;
-       LastVideoMap _last_video;
+       EnumIndexedVector<std::pair<std::shared_ptr<PlayerVideo>, dcpomatic::DCPTime>, Eyes> _last_video;
 
        AudioMerger _audio_merger;
        std::unique_ptr<Shuffler> _shuffler;
index d45bf9f432b90c996feb870baf1580fc3a5796a4..b020ca1cd8172ba1cd5193c5658d42c621166171 100644 (file)
@@ -45,6 +45,7 @@ using std::weak_ptr;
 using boost::optional;
 using dcp::Data;
 using dcp::raw_convert;
+using namespace dcpomatic;
 
 
 PlayerVideo::PlayerVideo (
@@ -58,7 +59,7 @@ PlayerVideo::PlayerVideo (
        optional<ColourConversion> colour_conversion,
        VideoRange video_range,
        weak_ptr<Content> content,
-       optional<Frame> video_frame,
+       optional<ContentTime> video_time,
        bool error
        )
        : _in (in)
@@ -71,7 +72,7 @@ PlayerVideo::PlayerVideo (
        , _colour_conversion (colour_conversion)
        , _video_range (video_range)
        , _content (content)
-       , _video_frame (video_frame)
+       , _video_time(video_time)
        , _error (error)
 {
 
@@ -343,7 +344,7 @@ PlayerVideo::shallow_copy () const
                _colour_conversion,
                _video_range,
                _content,
-               _video_frame,
+               _video_time,
                _error
                );
 }
@@ -356,12 +357,12 @@ bool
 PlayerVideo::reset_metadata (shared_ptr<const Film> film, dcp::Size player_video_container_size)
 {
        auto content = _content.lock();
-       if (!content || !_video_frame) {
+       if (!content || !_video_time) {
                return false;
        }
 
        _crop = content->video->actual_crop();
-       _fade = content->video->fade(film, _video_frame.get());
+       _fade = content->video->fade(film, _video_time.get());
        _inter_size = scale_for_display(
                content->video->scaled_size(film->frame_size()),
                player_video_container_size,
index f2781c1a0c51c3e099dd0009604e04a89ce2c3df..10b2078a00af83e73b1505b97aa2801145c9edde 100644 (file)
@@ -59,7 +59,7 @@ public:
                boost::optional<ColourConversion> colour_conversion,
                VideoRange video_range,
                std::weak_ptr<Content> content,
-               boost::optional<Frame> video_frame,
+               boost::optional<dcpomatic::ContentTime> video_time,
                bool error
                );
 
@@ -141,8 +141,8 @@ private:
        boost::optional<PositionImage> _text;
        /** Content that we came from.  This is so that reset_metadata() can work. */
        std::weak_ptr<Content> _content;
-       /** Video frame that we came from.  Again, this is for reset_metadata() */
-       boost::optional<Frame> _video_frame;
+       /** Video time that we came from.  Again, this is for reset_metadata() */
+       boost::optional<dcpomatic::ContentTime> _video_time;
 
        mutable boost::mutex _mutex;
        mutable std::shared_ptr<Image> _image;
index 5a4faf4d15fa4c0f74617bd2bd03ecd57f1ba114..a4ea0f5dc05a923d00d8da3ea7a34983fc4522c8 100644 (file)
@@ -40,8 +40,8 @@ int const Shuffler::_max_size = 64;
 struct Comparator
 {
        bool operator()(Shuffler::Store const & a, Shuffler::Store const & b) {
-               if (a.second.frame != b.second.frame) {
-                       return a.second.frame < b.second.frame;
+               if (a.second.time != b.second.time) {
+                       return a.second.time < b.second.time;
                }
                return a.second.eyes < b.second.eyes;
        }
@@ -51,7 +51,7 @@ struct Comparator
 void
 Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 {
-       LOG_DEBUG_THREE_D ("Shuffler::video frame=%1 eyes=%2 part=%3", video.frame, static_cast<int>(video.eyes), static_cast<int>(video.part));
+       LOG_DEBUG_THREE_D("Shuffler::video time=%1 eyes=%2 part=%3", to_string(video.time), static_cast<int>(video.eyes), static_cast<int>(video.part));
 
        if (video.eyes != Eyes::LEFT && video.eyes != Eyes::RIGHT) {
                /* Pass through anything that we don't care about */
@@ -79,13 +79,13 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                        !_store.empty() &&
                        _last &&
                        (
-                               (_store.front().second.frame == _last->frame       && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
-                               (_store.front().second.frame >= (_last->frame + 1) && _store.front().second.eyes == Eyes::LEFT  && _last->eyes == Eyes::RIGHT)
+                               (_store.front().second.time == _last->time && _store.front().second.eyes == Eyes::RIGHT && _last->eyes == Eyes::LEFT) ||
+                               (_store.front().second.time > _last->time  && _store.front().second.eyes == Eyes::LEFT  && _last->eyes == Eyes::RIGHT)
                                );
 
                if (!store_front_in_sequence) {
-                       string const store = _store.empty() ? "store empty" : String::compose("store front frame=%1 eyes=%2", _store.front().second.frame, static_cast<int>(_store.front().second.eyes));
-                       string const last = _last ? String::compose("last frame=%1 eyes=%2", _last->frame, static_cast<int>(_last->eyes)) : "no last";
+                       string const store = _store.empty() ? "store empty" : String::compose("store front time=%1 eyes=%2", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes));
+                       string const last = _last ? String::compose("last time=%1 eyes=%2", to_string(_last->time), static_cast<int>(_last->eyes)) : "no last";
                        LOG_DEBUG_THREE_D("Shuffler not in sequence: %1 %2", store, last);
                }
 
@@ -98,10 +98,10 @@ Shuffler::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                }
 
                if (_store.size() > _max_size) {
-                       LOG_WARNING ("Shuffler is full after receiving frame %1; 3D sync may be incorrect.", video.frame);
+                       LOG_WARNING("Shuffler is full after receiving frame at %1; 3D sync may be incorrect.", to_string(video.time));
                }
 
-               LOG_DEBUG_THREE_D("Shuffler emits frame=%1 eyes=%2 store=%3", _store.front().second.frame, static_cast<int>(_store.front().second.eyes), _store.size());
+               LOG_DEBUG_THREE_D("Shuffler emits time=%1 eyes=%2 store=%3", to_string(_store.front().second.time), static_cast<int>(_store.front().second.eyes), _store.size());
                Video (_store.front().first, _store.front().second);
                _last = _store.front().second;
                _store.pop_front ();
index cfeacdb92cece74e3c0049081bba2964ace11fdb..6a353818cb2e26d42e74c1289826df44543b5205 100644 (file)
@@ -873,16 +873,6 @@ remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping m
        return mapped;
 }
 
-Eyes
-increment_eyes (Eyes e)
-{
-       if (e == Eyes::LEFT) {
-               return Eyes::RIGHT;
-       }
-
-       return Eyes::LEFT;
-}
-
 
 size_t
 utf8_strlen (string s)
index 7e7a7b96bcadded154f19d924cfdcd3667e096a7..4fbe09a0a371d3d9c30f14bd6fa2a1d95d242a8c 100644 (file)
@@ -115,7 +115,6 @@ extern float relaxed_string_to_float (std::string);
 extern std::string careful_string_filter (std::string);
 extern std::pair<int, int> audio_channel_types (std::list<int> mapped, int channels);
 extern std::shared_ptr<AudioBuffers> remap (std::shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping map);
-extern Eyes increment_eyes (Eyes e);
 extern size_t utf8_strlen (std::string s);
 extern std::string day_of_week_to_string (boost::gregorian::greg_weekday d);
 extern void emit_subtitle_image (dcpomatic::ContentTimePeriod period, dcp::SubtitleImage sub, dcp::Size size, std::shared_ptr<TextDecoder> decoder);
index 1d1d010a95662193bc52ab84e5dacb2b8746fb10..d173b4607abb7e698568ca7b6564d01c961309e2 100644 (file)
@@ -417,27 +417,29 @@ VideoContent::size_after_crop () const
 }
 
 
-/** @param f Frame index within the whole (untrimmed) content.
+/** @param time Time within the whole (untrimmed) content.
  *  @return Fade factor (between 0 and 1) or unset if there is no fade.
  */
 optional<double>
-VideoContent::fade (shared_ptr<const Film> film, Frame f) const
+VideoContent::fade(shared_ptr<const Film> film, ContentTime time) const
 {
-       DCPOMATIC_ASSERT (f >= 0);
+       DCPOMATIC_ASSERT(time.get() >= 0);
 
        double const vfr = _parent->active_video_frame_rate(film);
 
-       auto const ts = _parent->trim_start().frames_round(vfr);
-       if ((f - ts) < fade_in()) {
-               return double (f - ts) / fade_in();
+       auto const ts = _parent->trim_start();
+       auto const fade_in_time = ContentTime::from_frames(fade_in(), vfr);
+       if ((time - ts) < fade_in_time) {
+               return double(ContentTime(time - ts).get()) / fade_in_time.get();
        }
 
-       auto fade_out_start = length() - _parent->trim_end().frames_round(vfr) - fade_out();
-       if (f >= fade_out_start) {
-               return 1 - double (f - fade_out_start) / fade_out();
+       auto const fade_out_time = ContentTime::from_frames(fade_out(), vfr);
+       auto fade_out_start = ContentTime::from_frames(length(), vfr) - _parent->trim_end() - fade_out_time;
+       if (time >= fade_out_start) {
+               return 1 - double(ContentTime(time - fade_out_start).get()) / fade_out_time.get();
        }
 
-       return optional<double> ();
+       return {};
 }
 
 string
index 7214d35e40d2994b5d525c0a9120cb39a96d1905..5a3aabe8252d17e62bc53d3bf6ae25efd488d300 100644 (file)
@@ -205,7 +205,7 @@ public:
        dcp::Size size_after_crop () const;
        dcp::Size scaled_size (dcp::Size container_size);
 
-       boost::optional<double> fade (std::shared_ptr<const Film> film, Frame) const;
+       boost::optional<double> fade(std::shared_ptr<const Film> film, dcpomatic::ContentTime time) const;
 
        std::string processing_description (std::shared_ptr<const Film> film);
 
index cf21f885a3fbc6af45ba2efde1cfc18ad645eb9b..c628fddd983e2015a81b1bd7d13b52b0d9424a0a 100644 (file)
@@ -20,7 +20,6 @@
 
 
 #include "compose.hpp"
-#include "film.h"
 #include "frame_interval_checker.h"
 #include "image.h"
 #include "j2k_image_proxy.h"
@@ -47,17 +46,9 @@ VideoDecoder::VideoDecoder (Decoder* parent, shared_ptr<const Content> c)
 }
 
 
-/** Called by decoder classes when they have a video frame ready.
- *  @param frame Frame index within the content; this does not take into account 3D
- *  so for 3D_ALTERNATE this value goes:
- *     0: frame 0 left
- *     1: frame 0 right
- *     2: frame 1 left
- *     3: frame 1 right
- *  and so on.
- */
+/** Called by decoder classes when they have a video frame ready */
 void
-VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, Frame decoder_frame)
+VideoDecoder::emit(shared_ptr<const Film> film, shared_ptr<const ImageProxy> image, ContentTime time)
 {
        if (ignore ()) {
                return;
@@ -66,14 +57,12 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
        auto const afr = _content->active_video_frame_rate(film);
        auto const vft = _content->video->frame_type();
 
-       auto frame_time = ContentTime::from_frames (decoder_frame, afr);
-
        /* Do some heuristics to try and spot the case where the user sets content to 3D
         * when it is not.  We try to tell this by looking at the differences in time between
         * the first few frames.  Real 3D content should have two frames for each timestamp.
         */
        if (_frame_interval_checker) {
-               _frame_interval_checker->feed (frame_time, afr);
+               _frame_interval_checker->feed(time, afr);
                if (_frame_interval_checker->guess() == FrameIntervalChecker::PROBABLY_NOT_3D && vft == VideoFrameType::THREE_D) {
                        boost::throw_exception (
                                DecodeError(
@@ -91,94 +80,54 @@ VideoDecoder::emit (shared_ptr<const Film> film, shared_ptr<const ImageProxy> im
                }
        }
 
-       Frame frame;
-       Eyes eyes = Eyes::BOTH;
-       if (!_position) {
-               /* This is the first data we have received since initialisation or seek.  Set
-                  the position based on the frame that was given.  After this first time
-                  we just count frames, since (as with audio) it seems that ContentTimes
-                  are unreliable from FFmpegDecoder.  They are much better than audio times
-                  but still we get the occasional one which is duplicated.  In this case
-                  ffmpeg seems to carry on regardless, processing the video frame as normal.
-                  If we drop the frame with the duplicated timestamp we obviously lose sync.
-               */
-
-               if (vft == VideoFrameType::THREE_D_ALTERNATE) {
-                       frame = decoder_frame / 2;
-                       eyes = (decoder_frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
-               } else {
-                       frame = decoder_frame;
-                       if (vft == VideoFrameType::THREE_D) {
-                               auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
-                               /* At the moment only DCP decoders producers VideoFrameType::THREE_D, so only the J2KImageProxy
-                                * knows which eye it is.
-                                */
-                               if (j2k && j2k->eye()) {
-                                       eyes = j2k->eye().get() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
-                               }
-                       }
-               }
-
-               _position = ContentTime::from_frames (frame, afr);
-       } else {
-               if (vft == VideoFrameType::THREE_D) {
-                       auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
-                       if (j2k && j2k->eye()) {
-                               if (j2k->eye() == dcp::Eye::LEFT) {
-                                       frame = _position->frames_round(afr) + 1;
-                                       eyes = Eyes::LEFT;
-                               } else {
-                                       frame = _position->frames_round(afr);
-                                       eyes = Eyes::RIGHT;
-                               }
-                       } else {
-                               /* This should not happen; see above */
-                               frame = _position->frames_round(afr) + 1;
-                       }
-               } else if (vft == VideoFrameType::THREE_D_ALTERNATE) {
-                       DCPOMATIC_ASSERT (_last_emitted_eyes);
-                       if (_last_emitted_eyes.get() == Eyes::RIGHT) {
-                               frame = _position->frames_round(afr) + 1;
-                               eyes = Eyes::LEFT;
-                       } else {
-                               frame = _position->frames_round(afr);
-                               eyes = Eyes::RIGHT;
-                       }
-               } else {
-                       frame = _position->frames_round(afr) + 1;
-               }
-       }
-
        switch (vft) {
        case VideoFrameType::TWO_D:
+               Data(ContentVideo(image, time, Eyes::BOTH, Part::WHOLE));
+               break;
        case VideoFrameType::THREE_D:
-               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+       {
+               auto eyes = Eyes::LEFT;
+               auto j2k = dynamic_pointer_cast<const J2KImageProxy>(image);
+               if (j2k && j2k->eye()) {
+                       eyes = *j2k->eye() == dcp::Eye::LEFT ? Eyes::LEFT : Eyes::RIGHT;
+               }
+
+               Data(ContentVideo(image, time, eyes, Part::WHOLE));
                break;
+       }
        case VideoFrameType::THREE_D_ALTERNATE:
        {
-               Data (ContentVideo (image, frame, eyes, Part::WHOLE));
+               Eyes eyes;
+               if (_last_emitted_eyes) {
+                       eyes = _last_emitted_eyes.get() == Eyes::LEFT ? Eyes::RIGHT : Eyes::LEFT;
+               } else {
+                       /* We don't know which eye this frame is for, so just guess */
+                       auto frame = time.frames_round(_content->video_frame_rate().get_value_or(24));
+                       eyes = (frame % 2) ? Eyes::RIGHT : Eyes::LEFT;
+               }
+               Data(ContentVideo(image, time, eyes, Part::WHOLE));
                _last_emitted_eyes = eyes;
                break;
        }
        case VideoFrameType::THREE_D_LEFT_RIGHT:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::LEFT_HALF));
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::RIGHT_HALF));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::LEFT_HALF));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::RIGHT_HALF));
                break;
        case VideoFrameType::THREE_D_TOP_BOTTOM:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::TOP_HALF));
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::BOTTOM_HALF));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::TOP_HALF));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::BOTTOM_HALF));
                break;
        case VideoFrameType::THREE_D_LEFT:
-               Data (ContentVideo (image, frame, Eyes::LEFT, Part::WHOLE));
+               Data(ContentVideo(image, time, Eyes::LEFT, Part::WHOLE));
                break;
        case VideoFrameType::THREE_D_RIGHT:
-               Data (ContentVideo (image, frame, Eyes::RIGHT, Part::WHOLE));
+               Data(ContentVideo(image, time, Eyes::RIGHT, Part::WHOLE));
                break;
        default:
                DCPOMATIC_ASSERT (false);
        }
 
-       _position = ContentTime::from_frames (frame, afr);
+       _position = time;
 }
 
 
index 828ac66a20af1cbbff7785f039b9c1fd8a32bddb..0072224044fc5816ef96dbd94d9d48eaa1b73397 100644 (file)
@@ -61,7 +61,7 @@ public:
        }
 
        void seek () override;
-       void emit (std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, Frame frame);
+       void emit(std::shared_ptr<const Film> film, std::shared_ptr<const ImageProxy>, dcpomatic::ContentTime time);
 
        boost::signals2::signal<void (ContentVideo)> Data;
 
index 92cab0259dd8a1697826f2ad5213b221cba7e6e3..911b8c0bb06c290f0bba9ee9a6fa679664437282 100644 (file)
@@ -91,18 +91,18 @@ VideoMXFDecoder::pass ()
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_mono_reader->get_frame(frame), _size, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
        } else {
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::LEFT, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(_stereo_reader->get_frame(frame), _size, dcp::Eye::RIGHT, AV_PIX_FMT_XYZ12LE, optional<int>()),
-                       frame
+                       _next
                        );
        }
 
index 7a99f72274758a1bace8f1e9f637fe7e1c61d502..32af60cbe6c0b4efd755bcdff20f14521f7a1a76 100644 (file)
@@ -51,6 +51,7 @@ using std::weak_ptr;
 using boost::thread;
 using boost::optional;
 using dcp::ArrayData;
+using namespace dcpomatic;
 
 
 void
@@ -105,7 +106,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_rgb)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -188,7 +189,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_yuv)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -258,7 +259,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                ColourConversion(),
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
@@ -283,7 +284,7 @@ BOOST_AUTO_TEST_CASE (client_server_test_j2k)
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                weak_ptr<Content>(),
-               optional<Frame>(),
+               optional<ContentTime>(),
                false
                );
 
index a22be29aac16130183dec3024ade5c117d028637..903f5c1e785b61dcfc9daf60dee30e6a29112cec 100644 (file)
@@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE (content_test6)
                );
 
        make_and_verify_dcp (film);
-       check_dcp (TestPaths::private_data() / "fha", film);
+       check_dcp (TestPaths::private_data() / "v2.18.x" / "fha", film);
 
        cl.run ();
 }
index 4dceae86ba6f6416e7add63be7c0093265fad6e8..f38ef3564a2004a0b49efb2fda7c929563c2fd39 100644 (file)
@@ -64,18 +64,18 @@ store (ContentVideo v)
 
 
 static void
-check (shared_ptr<FFmpegDecoder> decoder, int frame)
+check (shared_ptr<FFmpegDecoder> decoder, ContentTime time)
 {
        BOOST_REQUIRE (decoder->ffmpeg_content()->video_frame_rate ());
-       decoder->seek (ContentTime::from_frames (frame, decoder->ffmpeg_content()->video_frame_rate().get()), true);
+       decoder->seek(time, true);
        stored = optional<ContentVideo> ();
        while (!decoder->pass() && !stored) {}
-       BOOST_CHECK (stored->frame <= frame);
+       BOOST_CHECK(stored->time <= time);
 }
 
 
 static void
-test (boost::filesystem::path file, vector<int> frames)
+test (boost::filesystem::path file, vector<ContentTime> times)
 {
        auto path = TestPaths::private_data() / file;
        BOOST_REQUIRE (boost::filesystem::exists (path));
@@ -87,7 +87,7 @@ test (boost::filesystem::path file, vector<int> frames)
        auto decoder = make_shared<FFmpegDecoder>(film, content, false);
        decoder->video->Data.connect (bind (&store, _1));
 
-       for (auto i: frames) {
+       for (auto i: times) {
                check (decoder, i);
        }
 }
@@ -95,10 +95,43 @@ test (boost::filesystem::path file, vector<int> frames)
 
 BOOST_AUTO_TEST_CASE (ffmpeg_decoder_seek_test)
 {
-       vector<int> frames = { 0, 42, 999, 0 };
-
-       test ("boon_telly.mkv", frames);
-       test ("Sintel_Trailer1.480p.DivX_Plus_HD.mkv", frames);
-       test ("prophet_long_clip.mkv", { 15, 42, 999, 15 });
-       test ("dolby_aurora.vob", { 0, 125, 250, 41 });
+       test(
+               "boon_telly.mkv",
+               {
+                       ContentTime::from_frames(0, 29.97),
+                       ContentTime::from_frames(42, 29.97),
+                       ContentTime::from_frames(999, 29.97),
+                       ContentTime::from_frames(0, 29.97),
+               }
+           );
+
+       test(
+               "Sintel_Trailer1.480p.DivX_Plus_HD.mkv",
+               {
+                       ContentTime::from_frames(0, 24),
+                       ContentTime::from_frames(42, 24),
+                       ContentTime::from_frames(999, 24),
+                       ContentTime::from_frames(0, 24),
+               }
+           );
+
+       test(
+               "prophet_long_clip.mkv",
+               {
+                       ContentTime::from_frames(15, 23.976),
+                       ContentTime::from_frames(42, 23.976),
+                       ContentTime::from_frames(999, 23.976),
+                       ContentTime::from_frames(15, 23.976)
+               }
+           );
+
+       test(
+               "dolby_aurora.vob",
+               {
+                       ContentTime::from_frames(0, 25),
+                       ContentTime::from_frames(125, 25),
+                       ContentTime::from_frames(250, 25),
+                       ContentTime::from_frames(41, 25)
+               }
+           );
 }
index 7050dd771acc6dd74dd8a57f0e30b8d177010470..52b8d54be118c33559b9f1722d58141cbd34c116 100644 (file)
@@ -31,6 +31,7 @@ extern "C" {
 
 
 using std::make_shared;
+using namespace dcpomatic;
 
 
 BOOST_AUTO_TEST_CASE (low_bitrate_test)
@@ -51,7 +52,7 @@ BOOST_AUTO_TEST_CASE (low_bitrate_test)
                boost::optional<ColourConversion>(),
                VideoRange::FULL,
                std::weak_ptr<Content>(),
-               boost::optional<Frame>(),
+               boost::optional<ContentTime>(),
                false
                );
 
index d1c5b85338f53272ead9d19e4388a9cea0dd12dc..0187300569c0a84afb3ac019dc889b8dfc0d1d2d 100644 (file)
@@ -10,14 +10,15 @@ using boost::optional;
 #if BOOST_VERSION >= 106100
 using namespace boost::placeholders;
 #endif
+using namespace dcpomatic;
 
 
 static void
-push (Shuffler& s, int frame, Eyes eyes)
+push(Shuffler& s, int frame, Eyes eyes)
 {
        shared_ptr<Piece> piece (new Piece (shared_ptr<Content>(), shared_ptr<Decoder>(), FrameRateChange(24, 24)));
        ContentVideo cv;
-       cv.frame = frame;
+       cv.time = ContentTime::from_frames(frame, 24);
        cv.eyes = eyes;
        s.video (piece, cv);
 }
@@ -33,8 +34,9 @@ receive (weak_ptr<Piece>, ContentVideo cv)
 static void
 check (int frame, Eyes eyes, int line)
 {
+       auto const time = ContentTime::from_frames(frame, 24);
        BOOST_REQUIRE_MESSAGE (!pending_cv.empty(), "Check at " << line << " failed.");
-       BOOST_CHECK_MESSAGE (pending_cv.front().frame == frame, "Check at " << line << " failed.");
+       BOOST_CHECK_MESSAGE (pending_cv.front().time == time, "Check at " << line << " failed.");
        BOOST_CHECK_MESSAGE (pending_cv.front().eyes == eyes, "Check at " << line << " failed.");
        pending_cv.pop_front();
 }