Rename Subtitle -> Text
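
Besides renaming SubtitleDecoder to TextDecoder, this change reworks the DCPDecoder interface: the constructor gains a `fast' flag (forwarded to the AudioDecoder) and no longer assumes that video, audio and subtitles are all present; pass() loses its PassReason argument and emits subtitles for each frame via the new pass_subtitles(); seek() pre-rolls two seconds of subtitles; set_decode_referenced() now takes a bool and updates the sub-decoders' ignore flags; and a forced J2K decode reduction can be set. A minimal caller-side sketch of the reworked interface follows; the driver function, its variable names, the chosen values and the exact header names are illustrative assumptions, not dcpomatic's actual Player code:

    #include "dcp_content.h"
    #include "dcp_decoder.h"
    #include "dcpomatic_time.h"
    #include "log.h"
    #include <boost/optional.hpp>
    #include <boost/shared_ptr.hpp>

    /* Hypothetical driver showing the reworked DCPDecoder calls */
    void
    decode_dcp (boost::shared_ptr<const DCPContent> content, boost::shared_ptr<Log> log)
    {
            /* Third argument is the new `fast' flag; may throw DCPError if the DCP has no CPLs */
            boost::shared_ptr<DCPDecoder> decoder (new DCPDecoder (content, log, false));

            /* Decode video/audio even where the project references this DCP's reels */
            decoder->set_decode_referenced (true);

            /* Ask the J2K proxies for a reduced-resolution decode (the value 1 is
               assumed here to be a JPEG2000-style discard level) */
            decoder->set_forced_reduction (boost::optional<int> (1));

            /* Seek (which now also pre-rolls subtitles), then call pass() until the
               decoder reports that there is nothing left to do */
            decoder->seek (ContentTime::from_seconds (10), true);
            while (!decoder->pass ()) {}
    }
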
diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 2e3ed374aaa8c07ef9dffe276ca40ab297aa78cd..03bd95d90f8a7671131e23f108bdf1ab32acdeba 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
 #include "video_decoder.h"
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
 #include "image.h"
 #include "config.h"
 #include <dcp/dcp.h>
-#include <dcp/decrypted_kdm.h>
 #include <dcp/cpl.h>
 #include <dcp/reel.h>
 #include <dcp/mono_picture_asset.h>
 #include <boost/foreach.hpp>
 #include <iostream>
 
+#include "i18n.h"
+
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
+using boost::optional;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
        : DCP (c)
        , _decode_referenced (false)
 {
-       video.reset (new VideoDecoder (this, c, log));
-       audio.reset (new AudioDecoder (this, c->audio, log));
-
-       subtitle.reset (
-               new SubtitleDecoder (
-                       this,
-                       c->subtitle,
-                       bind (&DCPDecoder::image_subtitles_during, this, _1, _2),
-                       bind (&DCPDecoder::text_subtitles_during, this, _1, _2)
-                       )
-               );
+       if (c->video) {
+               video.reset (new VideoDecoder (this, c, log));
+       }
+       if (c->audio) {
+               audio.reset (new AudioDecoder (this, c->audio, log, fast));
+       }
+       if (c->subtitle) {
+               /* XXX: this time here should be the time of the first subtitle, not 0 */
+               subtitle.reset (new TextDecoder (this, c->subtitle, log, ContentTime()));
+       }
+
+       list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+
+       if (cpl_list.empty()) {
+               throw DCPError (_("No CPLs found in DCP."));
+       }
 
        shared_ptr<dcp::CPL> cpl;
-       BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
+       BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
                if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
                        cpl = i;
                }
        }
 
-       DCPOMATIC_ASSERT (cpl);
+       if (!cpl) {
+               /* No CPL found; probably an old file that doesn't specify it;
+                  just use the first one.
+               */
+               cpl = cpl_list.front ();
+       }
+
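+       /* Tell the sub-decoders to ignore any video/audio that is referenced
+          into the output rather than decoded */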
+       set_decode_referenced (false);
+
        _reels = cpl->reels ();
 
        _reel = _reels.begin ();
@@ -81,8 +96,9 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
        get_readers ();
 }
 
+
 bool
-DCPDecoder::pass (PassReason reason, bool)
+DCPDecoder::pass ()
 {
        if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
                return true;
@@ -93,32 +109,56 @@ DCPDecoder::pass (PassReason reason, bool)
        /* Frame within the (played part of the) reel that is coming up next */
        int64_t const frame = _next.frames_round (vfr);
 
-       if ((_mono_reader || _stereo_reader) && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_video())) {
+       /* We must emit subtitles first: whoever receives the video for this
+          frame will expect its subtitles to have arrived already.
+       */
+       pass_subtitles (_next);
+
+       if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
                shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
                int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
                if (_mono_reader) {
-                       video->give (
+                       video->emit (
                                shared_ptr<ImageProxy> (
-                                       new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+                                       new J2KImageProxy (
+                                               _mono_reader->get_frame (entry_point + frame),
+                                               asset->size(),
+                                               AV_PIX_FMT_XYZ12LE,
+                                               _forced_reduction
+                                               )
                                        ),
                                _offset + frame
                                );
                } else {
-                       video->give (
+                       video->emit (
                                shared_ptr<ImageProxy> (
-                                       new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
+                                       new J2KImageProxy (
+                                               _stereo_reader->get_frame (entry_point + frame),
+                                               asset->size(),
+                                               dcp::EYE_LEFT,
+                                               AV_PIX_FMT_XYZ12LE,
+                                               _forced_reduction
+                                               )
+                                       ),
                                _offset + frame
                                );
 
-                       video->give (
+                       video->emit (
                                shared_ptr<ImageProxy> (
-                                       new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
+                                       new J2KImageProxy (
+                                               _stereo_reader->get_frame (entry_point + frame),
+                                               asset->size(),
+                                               dcp::EYE_RIGHT,
+                                               AV_PIX_FMT_XYZ12LE,
+                                               _forced_reduction
+                                               )
+                                       ),
                                _offset + frame
                                );
                }
        }
 
-       if (_sound_reader && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_audio())) {
+       if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
                int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
                shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
                uint8_t const * from = sf->data ();
@@ -134,27 +174,7 @@ DCPDecoder::pass (PassReason reason, bool)
                        }
                }
 
-               audio->give (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
-       }
-
-       if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
-               int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-               list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
-                       dcp::Time (entry_point + frame, vfr, vfr),
-                       dcp::Time (entry_point + frame + 1, vfr, vfr),
-                       true
-                       );
-
-               if (!subs.empty ()) {
-                       /* XXX: assuming that all `subs' are at the same time; maybe this is ok */
-                       subtitle->give_text (
-                               ContentTimePeriod (
-                                       ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
-                                       ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
-                                       ),
-                               subs
-                               );
-               }
+               audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
        }
 
        _next += ContentTime::from_frames (1, vfr);
@@ -169,6 +189,40 @@ DCPDecoder::pass (PassReason reason, bool)
        return false;
 }
 
+void
+DCPDecoder::pass_subtitles (ContentTime next)
+{
+       double const vfr = _dcp_content->active_video_frame_rate ();
+       /* Frame within the (played part of the) reel that is coming up next */
+       int64_t const frame = next.frames_round (vfr);
+
+       if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
+               int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
+               list<shared_ptr<dcp::Subtitle> > subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+                       dcp::Time (entry_point + frame, vfr, vfr),
+                       dcp::Time (entry_point + frame + 1, vfr, vfr),
+                       true
+                       );
+
+               BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+                       shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+                       if (is) {
+                               list<dcp::SubtitleString> s;
+                               s.push_back (*is);
+                               subtitle->emit_text (
+                                       ContentTimePeriod (
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+                                               ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+                                               ),
+                                       s
+                                       );
+                       }
+
+                       /* XXX: image subtitles */
+               }
+       }
+}
+
 void
 DCPDecoder::next_reel ()
 {
@@ -214,65 +268,67 @@ DCPDecoder::get_readers ()
 void
 DCPDecoder::seek (ContentTime t, bool accurate)
 {
-       video->seek (t, accurate);
-       audio->seek (t, accurate);
-       subtitle->seek (t, accurate);
+       if (!_dcp_content->can_be_played ()) {
+               return;
+       }
+
+       Decoder::seek (t, accurate);
 
        _reel = _reels.begin ();
        _offset = 0;
        get_readers ();
 
-       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
-               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
-               next_reel ();
-       }
+       int const pre_roll_seconds = 2;
 
-       _next = t;
-}
+       /* Pre-roll for subs */
 
+       ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+       if (pre < ContentTime()) {
+               pre = ContentTime ();
+       }
 
-list<ContentTimePeriod>
-DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
-{
-       return list<ContentTimePeriod> ();
-}
+       /* Seek to pre-roll position */
 
-list<ContentTimePeriod>
-DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) const
-{
-       /* XXX: inefficient */
+       while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+               ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+               pre -= rd;
+               t -= rd;
+               next_reel ();
+       }
+
+       /* Pass subtitles in the pre-roll */
 
-       list<ContentTimePeriod> ctp;
        double const vfr = _dcp_content->active_video_frame_rate ();
+       for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+               pass_subtitles (pre);
+               pre += ContentTime::from_frames (1, vfr);
+       }
 
-       BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
-               if (!r->main_subtitle ()) {
-                       continue;
-               }
+       /* Seek to correct position */
 
-               int64_t const entry_point = r->main_subtitle()->entry_point ();
+       while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+               t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+               next_reel ();
+       }
 
-               list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
-                       dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
-                       dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
-                       starting
-                       );
+       _next = t;
+}
 
-               BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
-                       ctp.push_back (
-                               ContentTimePeriod (
-                                       ContentTime::from_seconds (s.in().as_seconds ()),
-                                       ContentTime::from_seconds (s.out().as_seconds ())
-                                       )
-                               );
-               }
-       }
+void
+DCPDecoder::set_decode_referenced (bool r)
+{
+       _decode_referenced = r;
 
-       return ctp;
+       if (video) {
+               video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+       }
+       if (audio) {
+               audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
+       }
 }
 
 void
-DCPDecoder::set_decode_referenced ()
+DCPDecoder::set_forced_reduction (optional<int> reduction)
 {
-       _decode_referenced = true;
+       _forced_reduction = reduction;
 }