+void
+DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
+{
+ list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+ if ((*_reel)->main_subtitle()) {
+ DCPOMATIC_ASSERT (decoder != text.end ());
+ pass_texts (
+ next,
+ (*_reel)->main_subtitle()->asset(),
+ _dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+ (*_reel)->main_subtitle()->entry_point(),
+ *decoder,
+ size
+ );
+ ++decoder;
+ }
+ BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+ DCPOMATIC_ASSERT (decoder != text.end ());
+ pass_texts (
+ next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+ );
+ ++decoder;
+ }
+}
+
void
DCPDecoder::pass_texts (
	ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
	)
{
	/* Emit any subtitles/captions from `asset' that are on screen during the next
	   video frame, sending them to `decoder'.  `reference' is true if this text is
	   being referred to (not re-encoded), in which case nothing is emitted unless
	   we have been asked to decode referenced assets.  `entry_point' is the asset's
	   entry point in frames; `size' is the video size used to scale image subtitles.
	*/
	double const vfr = _dcp_content->active_video_frame_rate (film());
	/* Frame within the (played part of the) reel that is coming up next */
	int64_t const frame = next.frames_round (vfr);

	if (_decode_referenced || !reference) {
		/* All subtitles that overlap the one-frame period starting at `frame',
		   expressed in asset time (hence the entry_point offset).
		*/
		list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
			dcp::Time (entry_point + frame, vfr, vfr),
			dcp::Time (entry_point + frame + 1, vfr, vfr),
			true
			);

		/* Text subtitles sharing the same in/out times are accumulated here and
		   emitted together in a single emit_plain() call.
		*/
		list<dcp::SubtitleString> strings;

		BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
			shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
			if (is) {
				/* A new in/out time means the pending group is complete: flush it
				   before starting a new one.
				*/
				if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
					dcp::SubtitleString b = strings.back();
					/* Period is converted from asset time to content time by removing
					   the entry point and adding our frame offset from earlier reels.
					*/
					decoder->emit_plain (
						ContentTimePeriod (
							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
						),
						strings
						);
					strings.clear ();
				}

				strings.push_back (*is);
			}

			/* Image subtitles are emitted one at a time, decoded from their PNG data */
			shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
			if (ii) {
				FFmpegImageProxy proxy (ii->png_image());
				shared_ptr<Image> image = proxy.image().first;
				/* set up rect with height and width */
				dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));

				/* add in position */

				switch (ii->h_align()) {
				case dcp::HALIGN_LEFT:
					rect.x += ii->h_position();
					break;
				case dcp::HALIGN_CENTER:
					rect.x += 0.5 + ii->h_position() - rect.width / 2;
					break;
				case dcp::HALIGN_RIGHT:
					rect.x += 1 - ii->h_position() - rect.width;
					break;
				}

				switch (ii->v_align()) {
				case dcp::VALIGN_TOP:
					rect.y += ii->v_position();
					break;
				case dcp::VALIGN_CENTER:
					rect.y += 0.5 + ii->v_position() - rect.height / 2;
					break;
				case dcp::VALIGN_BOTTOM:
					rect.y += 1 - ii->v_position() - rect.height;
					break;
				}

				decoder->emit_bitmap (
					ContentTimePeriod (
						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
					),
					image, rect
					);
			}
		}

		/* Flush the final group of text subtitles, if any */
		if (!strings.empty()) {
			dcp::SubtitleString b = strings.back();
			decoder->emit_plain (
				ContentTimePeriod (
					ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
					ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
				),
				strings
				);
			strings.clear ();
		}
	}
}
+
+void
+DCPDecoder::next_reel ()
+{
+ _offset += (*_reel)->main_picture()->duration();
+ ++_reel;
+ get_readers ();
+}
+
+void
+DCPDecoder::get_readers ()
+{
+ if (_reel == _reels.end() || !_dcp_content->can_be_played ()) {
+ _mono_reader.reset ();
+ _stereo_reader.reset ();
+ _sound_reader.reset ();
+ return;
+ }
+
+ if ((*_reel)->main_picture()) {
+ shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
+ shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
+ shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
+ DCPOMATIC_ASSERT (mono || stereo);
+ if (mono) {
+ _mono_reader = mono->start_read ();
+ _stereo_reader.reset ();
+ } else {
+ _stereo_reader = stereo->start_read ();
+ _mono_reader.reset ();
+ }
+ } else {
+ _mono_reader.reset ();
+ _stereo_reader.reset ();
+ }
+
+ if ((*_reel)->main_sound()) {
+ _sound_reader = (*_reel)->main_sound()->asset()->start_read ();
+ } else {
+ _sound_reader.reset ();
+ }
+}
+