using boost::shared_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;
+using namespace dcpomatic;
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
- : DCP (c)
+DCPDecoder::DCPDecoder (shared_ptr<const Film> film, shared_ptr<const DCPContent> c, bool fast, bool tolerant, shared_ptr<DCPDecoder> old)
+ : DCP (c, tolerant)
+ , Decoder (film)
, _decode_referenced (false)
{
- if (c->video) {
- video.reset (new VideoDecoder (this, c, log));
- }
- if (c->audio) {
- audio.reset (new AudioDecoder (this, c->audio, log, fast));
- }
- BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
- /* XXX: this time here should be the time of the first subtitle, not 0 */
- text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, log, ContentTime())));
+ if (c->can_be_played()) {
+ if (c->video) {
+ video.reset (new VideoDecoder (this, c));
+ }
+ if (c->audio) {
+ audio.reset (new AudioDecoder (this, c->audio, fast));
+ }
+ BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
+ /* XXX: this time here should be the time of the first subtitle, not 0 */
+ text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, ContentTime())));
+ }
}
- list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+ if (old) {
+ _reels = old->_reels;
+ } else {
- if (cpl_list.empty()) {
- throw DCPError (_("No CPLs found in DCP."));
- }
+ list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
- shared_ptr<dcp::CPL> cpl;
- BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
- if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
- cpl = i;
+ if (cpl_list.empty()) {
+ throw DCPError (_("No CPLs found in DCP."));
}
- }
- if (!cpl) {
- /* No CPL found; probably an old file that doesn't specify it;
- just use the first one.
- */
- cpl = cpls().front ();
+ shared_ptr<dcp::CPL> cpl;
+ BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
+ if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
+ cpl = i;
+ }
+ }
+
+ if (!cpl) {
+ /* No CPL found; probably an old file that doesn't specify it;
+ just use the first one.
+ */
+ cpl = cpls().front ();
+ }
+
+ _reels = cpl->reels ();
}
set_decode_referenced (false);
- _reels = cpl->reels ();
-
_reel = _reels.begin ();
_offset = 0;
get_readers ();
return true;
}
- double const vfr = _dcp_content->active_video_frame_rate ();
+ double const vfr = _dcp_content->active_video_frame_rate (film());
/* Frame within the (played part of the) reel that is coming up next */
int64_t const frame = _next.frames_round (vfr);
pass_texts (_next, picture_asset->size());
if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
- int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
+ int64_t const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0);
if (_mono_reader) {
video->emit (
+ film(),
shared_ptr<ImageProxy> (
new J2KImageProxy (
_mono_reader->get_frame (entry_point + frame),
);
} else {
video->emit (
+ film(),
shared_ptr<ImageProxy> (
new J2KImageProxy (
_stereo_reader->get_frame (entry_point + frame),
);
video->emit (
+ film(),
shared_ptr<ImageProxy> (
new J2KImageProxy (
_stereo_reader->get_frame (entry_point + frame),
}
if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
- int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
+ int64_t const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0);
shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
uint8_t const * from = sf->data ();
}
}
- audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
+ audio->emit (film(), _dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
}
_next += ContentTime::from_frames (1, vfr);
if ((*_reel)->main_subtitle()) {
DCPOMATIC_ASSERT (decoder != text.end ());
pass_texts (
- next, (*_reel)->main_subtitle()->asset(), _dcp_content->reference_text(TEXT_OPEN_SUBTITLE), (*_reel)->main_subtitle()->entry_point(), *decoder, size
+ next,
+ (*_reel)->main_subtitle()->asset(),
+ _dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+ (*_reel)->main_subtitle()->entry_point().get_value_or(0),
+ *decoder,
+ size
);
++decoder;
}
BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
DCPOMATIC_ASSERT (decoder != text.end ());
pass_texts (
- next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+ next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
);
++decoder;
}
}
/* NOTE(review): this span is merged diff residue — lines starting with '-'
   were removed and lines starting with '+' were added by the patch; some
   context lines between hunks are elided (e.g. the query whose trailing
   arguments "true" / ");" appear below without their opening line, and the
   final stray "}" presumably belongs to a following, elided hunk).
*/
/* Emit text (subtitle / closed-caption) content from `asset` around content
   time `next` into `decoder`.  After the patch, consecutive
   dcp::SubtitleString results that share the same in/out times are batched
   into a single emit_plain() call instead of being emitted one at a time,
   and image subtitles are handed to emit_subtitle_image() rather than being
   positioned inline here.
*/
void
-DCPDecoder::pass_texts (ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size)
+DCPDecoder::pass_texts (
+	ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
+	)
{
	/* The frame rate now comes from the Film rather than being cached on the
	   content; presumably film() is supplied by the new Decoder base — confirm
	   against the rest of the patch.
	*/
-	double const vfr = _dcp_content->active_video_frame_rate ();
+	double const vfr = _dcp_content->active_video_frame_rate (film());
	/* Frame within the (played part of the) reel that is coming up next */
	int64_t const frame = next.frames_round (vfr);
		true
		);
	/* Accumulator for string subtitles: filled while successive subtitles share
	   the same in/out times, flushed (emitted as one group) when the times change.
	*/
+	list<dcp::SubtitleString> strings;
+
	BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
		shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
		if (is) {
			/* Old behaviour (removed): emit every SubtitleString on its own,
			   wrapped in a single-element list.
			*/
-			list<dcp::SubtitleString> s;
-			s.push_back (*is);
-			decoder->emit_plain (
-				ContentTimePeriod (
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
-				),
-				s
-			);
			/* New behaviour: if this subtitle's period differs from the batch we
			   are building, flush the batch first, then append this subtitle.
			   The emitted period is taken from the batch's last element; the
			   _offset - entry_point adjustment maps reel-local asset time to
			   content time.
			*/
+			if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
+				dcp::SubtitleString b = strings.back();
+				decoder->emit_plain (
+					ContentTimePeriod (
+						ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+						ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
+					),
+					strings
+				);
+				strings.clear ();
+			}
+
+			strings.push_back (*is);
		}
+		/* XXX: perhaps these image subs should also be collected together like the string ones are;
+		   this would need to be done both here and in DCPSubtitleDecoder.
+		*/
+
		shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
		if (ii) {
			/* Old behaviour (removed): decode the PNG here and compute the
			   on-screen rect from the image size and the subtitle's h/v
			   alignment and position, then emit the bitmap directly.
			*/
-			FFmpegImageProxy proxy (ii->png_image());
-			shared_ptr<Image> image = proxy.image().first;
-			/* set up rect with height and width */
-			dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
-
-			/* add in position */
-
-			switch (ii->h_align()) {
-			case dcp::HALIGN_LEFT:
-				rect.x += ii->h_position();
-				break;
-			case dcp::HALIGN_CENTER:
-				rect.x += 0.5 + ii->h_position() - rect.width / 2;
-				break;
-			case dcp::HALIGN_RIGHT:
-				rect.x += 1 - ii->h_position() - rect.width;
-				break;
-			}
-
-			switch (ii->v_align()) {
-			case dcp::VALIGN_TOP:
-				rect.y += ii->v_position();
-				break;
-			case dcp::VALIGN_CENTER:
-				rect.y += 0.5 + ii->v_position() - rect.height / 2;
-				break;
-			case dcp::VALIGN_BOTTOM:
-				rect.y += 1 - ii->v_position() - rect.height;
-				break;
-			}
-
			/* New behaviour: delegate decode + positioning to a shared helper
			   (emit_subtitle_image), which also serves DCPSubtitleDecoder.
			   Definition not visible here — confirm its signature matches.
			*/
-			decoder->emit_bitmap (
+			emit_subtitle_image (
				ContentTimePeriod (
					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
				),
-				image, rect
+				*ii,
+				size,
+				decoder
			);
		}
	}
+
	/* Flush any batch still pending after the last subtitle. */
+	if (!strings.empty()) {
+		dcp::SubtitleString b = strings.back();
+		decoder->emit_plain (
+			ContentTimePeriod (
+				ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+				ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
+			),
+			strings
+		);
+		strings.clear ();
+	}
}
}
/* Move on to the next reel: accumulate the finished reel's picture frame
   count into _offset, advance the reel iterator and open asset readers for
   the new reel.
   NOTE(review): diff residue — the '-' line was replaced by the '+' line,
   switching from duration() to actual_duration().  The exact difference
   between the two is a libdcp API detail not visible here (presumably
   actual_duration() accounts for the now-optional entry point, matching the
   entry_point().get_value_or(0) changes elsewhere in this patch) — confirm
   against libdcp.
*/
void
DCPDecoder::next_reel ()
{
-	_offset += (*_reel)->main_picture()->duration();
+	_offset += (*_reel)->main_picture()->actual_duration();
	++_reel;
	get_readers ();
}
/* Seek to pre-roll position */
- while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
- ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+ while (
+ _reel != _reels.end() &&
+ pre >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
+ ) {
+
+ ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
pre -= rd;
t -= rd;
next_reel ();
/* Pass texts in the pre-roll */
- double const vfr = _dcp_content->active_video_frame_rate ();
+ double const vfr = _dcp_content->active_video_frame_rate (film());
for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
pass_texts (pre, (*_reel)->main_picture()->asset()->size());
pre += ContentTime::from_frames (1, vfr);
/* Seek to correct position */
- while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
- t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+ while (
+ _reel != _reels.end() &&
+ t >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
+ ) {
+
+ t -= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
next_reel ();
}