Update for a small change to the libdcp API.
[dcpomatic.git] / src / lib / dcp_decoder.cc
index 21eb2f7ea48d116f828d9955ff93ca817687636f..ae2f8ef9fe19c2d7d0d5e2509de0240295bdff43 100644 (file)
@@ -59,36 +59,46 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, bool fast)
 }
 
 bool
-DCPDecoder::pass ()
+DCPDecoder::pass (PassReason reason, bool)
 {
        if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
                return true;
        }
 
+       /* Offset of the start of the current reel from the start of the content in frames */
+       int offset = 0;
+       list<shared_ptr<dcp::Reel> >::const_iterator i = _reels.begin();
+       while (i != _reel) {
+               offset += (*i)->main_picture()->duration ();
+               ++i;
+       }
+
        double const vfr = _dcp_content->video_frame_rate ();
+
+       /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = _next.frames_round (vfr);
 
-       if ((*_reel)->main_picture ()) {
+       if ((*_reel)->main_picture () && reason != PASS_REASON_SUBTITLE) {
                shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
                shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
                shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
                int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
                if (mono) {
-                       video (shared_ptr<ImageProxy> (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), frame);
+                       video (shared_ptr<ImageProxy> (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), offset + frame);
                } else {
                        video (
                                shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)),
-                               frame
+                               offset + frame
                                );
 
                        video (
                                shared_ptr<ImageProxy> (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)),
-                               frame
+                               offset + frame
                                );
                }
        }
 
-       if ((*_reel)->main_sound ()) {
+       if ((*_reel)->main_sound () && reason != PASS_REASON_SUBTITLE) {
                int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
                shared_ptr<const dcp::SoundFrame> sf = (*_reel)->main_sound()->asset()->get_frame (entry_point + frame);
                uint8_t const * from = sf->data ();
@@ -103,12 +113,12 @@ DCPDecoder::pass ()
                        }
                }
 
-               audio (_dcp_content->audio_stream(), data, _next);
+               audio (_dcp_content->audio_stream(), data, ContentTime::from_frames (offset, vfr) + _next);
        }
 
        if ((*_reel)->main_subtitle ()) {
                int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-               list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->subtitle_asset()->subtitles_during (
+               list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
                        dcp::Time (entry_point + frame, vfr, vfr),
                        dcp::Time (entry_point + frame + 1, vfr, vfr),
                        true
@@ -118,8 +128,8 @@ DCPDecoder::pass ()
                        /* XXX: assuming that all `subs' are at the same time; maybe this is ok */
                        text_subtitle (
                                ContentTimePeriod (
-                                       ContentTime::from_seconds (subs.front().in().as_seconds ()),
-                                       ContentTime::from_seconds (subs.front().out().as_seconds ())
+                                       ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
+                                       ContentTime::from_frames (offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
                                        ),
                                subs
                                );
@@ -131,6 +141,7 @@ DCPDecoder::pass ()
        if ((*_reel)->main_picture ()) {
                if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
                        ++_reel;
+                       _next = ContentTime ();
                }
        }
 
@@ -175,7 +186,7 @@ DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) cons
 
                int64_t const entry_point = r->main_subtitle()->entry_point ();
 
-               list<dcp::SubtitleString> subs = r->main_subtitle()->subtitle_asset()->subtitles_during (
+               list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
                        dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
                        dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
                        starting