Give DCPDecoder its own ::position which just returns its internal state.
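
The new override simply reports the decoder's own state: the frame offset of the
current reel, converted to a ContentTime, plus the time reached within that reel.
Below is a minimal sketch of the matching declaration for dcp_decoder.h, assuming
it mirrors the other Decoder overrides (the header is not part of this diff):

    ContentTime position () const;
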
diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 2ffe110655d8f676217f6ee6af38d0fab0b95d5a..7af89e84d074b3c791032b96327465068b645e11 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2014-2016 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
 
     This file is part of DCP-o-matic.
 
@@ -24,7 +24,8 @@
 #include "video_decoder.h"
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
+#include "ffmpeg_image_proxy.h"
 #include "image.h"
 #include "config.h"
 #include <dcp/dcp.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
 #include <dcp/mono_picture_frame.h>
 #include <dcp/stereo_picture_frame.h>
 #include <dcp/sound_frame.h>
 #include <dcp/sound_asset_reader.h>
+#include <dcp/subtitle_image.h>
 #include <boost/foreach.hpp>
 #include <iostream>
 
+#include "i18n.h"
+
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
 using boost::optional;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
+DCPDecoder::DCPDecoder (shared_ptr<const Film> film, shared_ptr<const DCPContent> c, bool fast)
        : DCP (c)
+       , Decoder (film)
        , _decode_referenced (false)
 {
-       video.reset (new VideoDecoder (this, c, log));
-       if (c->audio) {
-               audio.reset (new AudioDecoder (this, c->audio, log, fast));
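+       /* Only set up decoders if the DCP can be played; if it cannot, pass() and
+          seek() below will do nothing.
+       */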
+       if (c->can_be_played()) {
+               if (c->video) {
+                       video.reset (new VideoDecoder (this, c));
+               }
+               if (c->audio) {
+                       audio.reset (new AudioDecoder (this, c->audio, fast));
+               }
+               BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
+                       /* XXX: this time here should be the time of the first subtitle, not 0 */
+                       text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, ContentTime())));
+               }
        }
-       if (c->subtitle) {
-               /* XXX: this time here should be the time of the first subtitle, not 0 */
-               subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
+
+       list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+
+       if (cpl_list.empty()) {
+               throw DCPError (_("No CPLs found in DCP."));
        }
 
        shared_ptr<dcp::CPL> cpl;
-       BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
+       BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
                if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
                        cpl = i;
                }
@@ -90,29 +106,39 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
 bool
 DCPDecoder::pass ()
 {
-       if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
+       if (!_dcp_content->can_be_played()) {
+               return true;
+       }
+
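+       /* We have passed the last reel, so flush anything the audio decoder is still
+          holding and report that we have finished.
+       */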
+       if (_reel == _reels.end()) {
+               if (audio) {
+                       audio->flush ();
+               }
                return true;
        }
 
-       double const vfr = _dcp_content->active_video_frame_rate ();
+       double const vfr = _dcp_content->active_video_frame_rate (film());
 
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = _next.frames_round (vfr);
 
-       /* We must emit subtitles first as when we emit the video for this frame
-          it will expect already to have the subs.
+       shared_ptr<dcp::PictureAsset> picture_asset = (*_reel)->main_picture()->asset();
+       DCPOMATIC_ASSERT (picture_asset);
+
+       /* We must emit texts first as when we emit the video for this frame
+          it will expect already to have the texts.
        */
-       pass_subtitles (_next);
+       pass_texts (_next, picture_asset->size());
 
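+       /* Emit video only if we have a picture reader and either we have been asked to
+          decode referenced content or this DCP's video is not being referenced directly.
+       */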
        if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
-               shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
                int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
                if (_mono_reader) {
                        video->emit (
+                               film(),
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _mono_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
                                                )
@@ -121,10 +147,11 @@ DCPDecoder::pass ()
                                );
                } else {
                        video->emit (
+                               film(),
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _stereo_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                dcp::EYE_LEFT,
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
@@ -134,10 +161,11 @@ DCPDecoder::pass ()
                                );
 
                        video->emit (
+                               film(),
                                shared_ptr<ImageProxy> (
                                        new J2KImageProxy (
                                                _stereo_reader->get_frame (entry_point + frame),
-                                               asset->size(),
+                                               picture_asset->size(),
                                                dcp::EYE_RIGHT,
                                                AV_PIX_FMT_XYZ12LE,
                                                _forced_reduction
@@ -164,7 +192,7 @@ DCPDecoder::pass ()
                        }
                }
 
-               audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
+               audio->emit (film(), _dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
        }
 
        _next += ContentTime::from_frames (1, vfr);
@@ -180,30 +208,100 @@ DCPDecoder::pass ()
 }
 
 void
-DCPDecoder::pass_subtitles (ContentTime next)
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
+{
+       list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+       if (decoder == text.end()) {
+               /* It's possible that there is now a main subtitle but no TextDecoders, for example if
+                  the CPL has just changed but the TextContent's texts have not been recreated yet.
+               */
+               return;
+       }
+
+       if ((*_reel)->main_subtitle()) {
+               pass_texts (
+                       next,
+                       (*_reel)->main_subtitle()->asset(),
+                       _dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+                       (*_reel)->main_subtitle()->entry_point(),
+                       *decoder,
+                       size
+                       );
+               ++decoder;
+       }
+
+       BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+               pass_texts (
+                       next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+                       );
+               ++decoder;
+       }
+}
+
+void
+DCPDecoder::pass_texts (
+       ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
+       )
 {
-       double const vfr = _dcp_content->active_video_frame_rate ();
+       double const vfr = _dcp_content->active_video_frame_rate (film());
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = next.frames_round (vfr);
 
-       if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
-               int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-               list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+       if (_decode_referenced || !reference) {
+               list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
                        dcp::Time (entry_point + frame, vfr, vfr),
                        dcp::Time (entry_point + frame + 1, vfr, vfr),
                        true
                        );
 
-               BOOST_FOREACH (dcp::SubtitleString i, subs) {
-                       list<dcp::SubtitleString> s;
-                       s.push_back (i);
-                       subtitle->emit_text (
+               list<dcp::SubtitleString> strings;
+
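+               /* Collect runs of SubtitleStrings which share the same in/out times and
+                  emit each run as a single block; image subtitles are emitted one at a time.
+               */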
+               BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+                       shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+                       if (is) {
+                               if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
+                                       dcp::SubtitleString b = strings.back();
+                                       decoder->emit_plain (
+                                               ContentTimePeriod (
+                                                       ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+                                                       ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
+                                                       ),
+                                               strings
+                                               );
+                                       strings.clear ();
+                               }
+
+                               strings.push_back (*is);
+                       }
+
+                       /* XXX: perhaps these image subs should also be collected together like the string ones are;
+                          this would need to be done both here and in DCPSubtitleDecoder.
+                       */
+
+                       shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+                       if (ii) {
+                               emit_subtitle_image (
+                                       ContentTimePeriod (
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+                                               ),
+                                       *ii,
+                                       size,
+                                       decoder
+                                       );
+                       }
+               }
+
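+               /* Emit whatever is left in the final run of strings. */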
+               if (!strings.empty()) {
+                       dcp::SubtitleString b = strings.back();
+                       decoder->emit_plain (
                                ContentTimePeriod (
-                                       ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i.in().as_seconds ()),
-                                       ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i.out().as_seconds ())
+                                       ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+                                       ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
                                        ),
-                               s
+                               strings
                                );
+                       strings.clear ();
                }
        }
 }
@@ -253,6 +351,10 @@ DCPDecoder::get_readers ()
 void
 DCPDecoder::seek (ContentTime t, bool accurate)
 {
+       if (!_dcp_content->can_be_played ()) {
+               return;
+       }
+
        Decoder::seek (t, accurate);
 
        _reel = _reels.begin ();
@@ -270,23 +372,25 @@ DCPDecoder::seek (ContentTime t, bool accurate)
 
        /* Seek to pre-roll position */
 
-       while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
-               pre -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+       while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film()))) {
+               ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film()));
+               pre -= rd;
+               t -= rd;
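+               /* t is reduced as well so that the seek loop further down starts relative
+                  to the reel we end up in.
+               */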
                next_reel ();
        }
 
-       /* Pass subtitles in the pre-roll */
+       /* Pass texts in the pre-roll */
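+       /* This picks up texts which start before the seek position but are still on screen at it. */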
 
-       double const vfr = _dcp_content->active_video_frame_rate ();
+       double const vfr = _dcp_content->active_video_frame_rate (film());
        for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
-               pass_subtitles (pre);
+               pass_texts (pre, (*_reel)->main_picture()->asset()->size());
                pre += ContentTime::from_frames (1, vfr);
        }
 
        /* Seek to correct position */
 
-       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
-               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film()))) {
+               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film()));
                next_reel ();
        }
 
@@ -298,8 +402,12 @@ DCPDecoder::set_decode_referenced (bool r)
 {
        _decode_referenced = r;
 
-       video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
-       audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
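+       /* These sub-decoders may not exist, for example if the DCP cannot be played. */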
+       if (video) {
+               video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+       }
+       if (audio) {
+               audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
+       }
 }
 
 void
@@ -307,3 +415,9 @@ DCPDecoder::set_forced_reduction (optional<int> reduction)
 {
        _forced_reduction = reduction;
 }
+
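+/* Our position is the frame offset of the current reel within the content plus
+   how far we have got through that reel.
+*/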
+ContentTime
+DCPDecoder::position () const
+{
+       return ContentTime::from_frames(_offset, _dcp_content->active_video_frame_rate(film())) + _next;
+}