X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fdcp_decoder.cc;h=03bd95d90f8a7671131e23f108bdf1ab32acdeba;hp=38c2a7ccfa2bdcab2bb96ecf850eed4de53ba5e4;hb=d7ac100c0eb1b5efdcfbec59be870fd869252840;hpb=97d39f46795af78b84d5f7bc9118a188f2864781

diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index 38c2a7ccf..03bd95d90 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -24,11 +24,10 @@
 #include "video_decoder.h"
 #include "audio_decoder.h"
 #include "j2k_image_proxy.h"
-#include "subtitle_decoder.h"
+#include "text_decoder.h"
 #include "image.h"
 #include "config.h"
 #include
-#include
 #include
 #include
 #include
@@ -45,29 +44,37 @@
 #include
 #include
 
+#include "i18n.h"
+
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
+using boost::optional;
 
-DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
+DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log, bool fast)
 	: DCP (c)
 	, _decode_referenced (false)
 {
-	video.reset (new VideoDecoder (this, c, log));
-	audio.reset (new AudioDecoder (this, c->audio, log));
-
-	subtitle.reset (
-		new SubtitleDecoder (
-			this,
-			c->subtitle,
-			bind (&DCPDecoder::image_subtitles_during, this, _1, _2),
-			bind (&DCPDecoder::text_subtitles_during, this, _1, _2)
-			)
-		);
+	if (c->video) {
+		video.reset (new VideoDecoder (this, c, log));
+	}
+	if (c->audio) {
+		audio.reset (new AudioDecoder (this, c->audio, log, fast));
+	}
+	if (c->subtitle) {
+		/* XXX: this time here should be the time of the first subtitle, not 0 */
+		subtitle.reset (new TextDecoder (this, c->subtitle, log, ContentTime()));
+	}
+
+	list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+
+	if (cpl_list.empty()) {
+		throw DCPError (_("No CPLs found in DCP."));
+	}
 
 	shared_ptr<dcp::CPL> cpl;
-	BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpls ()) {
+	BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
 		if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
 			cpl = i;
 		}
@@ -80,6 +87,8 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
 		cpl = cpls().front ();
 	}
 
+	set_decode_referenced (false);
+
 	_reels = cpl->reels ();
 
 	_reel = _reels.begin ();
@@ -87,8 +96,9 @@ DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, shared_ptr<Log> log)
 	get_readers ();
 }
 
+
 bool
-DCPDecoder::pass (PassReason reason, bool)
+DCPDecoder::pass ()
 {
 	if (_reel == _reels.end () || !_dcp_content->can_be_played ()) {
 		return true;
 	}
@@ -99,32 +109,56 @@
 	/* Frame within the (played part of the) reel that is coming up next */
 	int64_t const frame = _next.frames_round (vfr);
 
-	if ((_mono_reader || _stereo_reader) && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_video())) {
+	/* We must emit subtitles first as when we emit the video for this frame
+	   it will expect already to have the subs.
+	*/
+	pass_subtitles (_next);
+
+	if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
 		shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
 		int64_t const entry_point = (*_reel)->main_picture()->entry_point ();
 		if (_mono_reader) {
-			video->give (
+			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_mono_reader->get_frame (entry_point + frame), asset->size(), AV_PIX_FMT_XYZ12LE)
+					new J2KImageProxy (
+						_mono_reader->get_frame (entry_point + frame),
+						asset->size(),
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
 					),
 				_offset + frame
 				);
 		} else {
-			video->give (
+			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT, AV_PIX_FMT_XYZ12LE)),
+					new J2KImageProxy (
+						_stereo_reader->get_frame (entry_point + frame),
+						asset->size(),
+						dcp::EYE_LEFT,
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
+					),
 				_offset + frame
 				);
 
-			video->give (
+			video->emit (
 				shared_ptr<ImageProxy> (
-					new J2KImageProxy (_stereo_reader->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT, AV_PIX_FMT_XYZ12LE)),
+					new J2KImageProxy (
+						_stereo_reader->get_frame (entry_point + frame),
+						asset->size(),
+						dcp::EYE_RIGHT,
+						AV_PIX_FMT_XYZ12LE,
+						_forced_reduction
+						)
+					),
 				_offset + frame
 				);
 		}
 	}
 
-	if (_sound_reader && reason != PASS_REASON_SUBTITLE && (_decode_referenced || !_dcp_content->reference_audio())) {
+	if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
 		int64_t const entry_point = (*_reel)->main_sound()->entry_point ();
 		shared_ptr<const dcp::SoundFrame> sf = _sound_reader->get_frame (entry_point + frame);
 		uint8_t const * from = sf->data ();
@@ -140,30 +174,9 @@
 			}
 		}
 
-		audio->give (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
+		audio->emit (_dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next);
 	}
 
-	if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
-		int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-		list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
-			dcp::Time (entry_point + frame, vfr, vfr),
-			dcp::Time (entry_point + frame + 1, vfr, vfr),
-			true
-			);
-
-		if (!subs.empty ()) {
-			/* XXX: assuming that all `subs' are at the same time; maybe this is ok */
-			subtitle->give_text (
-				ContentTimePeriod (
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().in().as_seconds ()),
-					ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (subs.front().out().as_seconds ())
-					),
-				subs
-				);
-		}
-	}
-
-	_position = _next;
 	_next += ContentTime::from_frames (1, vfr);
 
 	if ((*_reel)->main_picture ()) {
@@ -176,6 +189,40 @@
 	return false;
 }
 
+void
+DCPDecoder::pass_subtitles (ContentTime next)
+{
+	double const vfr = _dcp_content->active_video_frame_rate ();
+	/* Frame within the (played part of the) reel that is coming up next */
+	int64_t const frame = next.frames_round (vfr);
+
+	if ((*_reel)->main_subtitle() && (_decode_referenced || !_dcp_content->reference_subtitle())) {
+		int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
+		list<shared_ptr<dcp::Subtitle> > subs = (*_reel)->main_subtitle()->asset()->subtitles_during (
+			dcp::Time (entry_point + frame, vfr, vfr),
+			dcp::Time (entry_point + frame + 1, vfr, vfr),
+			true
+			);
+
+		BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+			shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+			if (is) {
+				list<dcp::SubtitleString> s;
+				s.push_back (*is);
+				subtitle->emit_text (
+					ContentTimePeriod (
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+						),
+					s
+					);
+			}
+
+			/* XXX: image subtitles */
+		}
+	}
+}
+
 void
 DCPDecoder::next_reel ()
 {
@@ -221,70 +268,67 @@ DCPDecoder::get_readers ()
 void
 DCPDecoder::seek (ContentTime t, bool accurate)
 {
-	video->seek (t, accurate);
-	audio->seek (t, accurate);
-	subtitle->seek (t, accurate);
+	if (!_dcp_content->can_be_played ()) {
+		return;
+	}
+
+	Decoder::seek (t, accurate);
 
 	_reel = _reels.begin ();
 	_offset = 0;
 	get_readers ();
 
-	while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
-		t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
-		next_reel ();
-	}
+	int const pre_roll_seconds = 2;
 
-	_next = t;
-}
+	/* Pre-roll for subs */
+	ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+	if (pre < ContentTime()) {
+		pre = ContentTime ();
+	}
 
-list<ContentTimePeriod>
-DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
-{
-	return list<ContentTimePeriod> ();
-}
+	/* Seek to pre-roll position */
 
-list<ContentTimePeriod>
-DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) const
-{
-	/* XXX: inefficient */
+	while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+		ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+		pre -= rd;
+		t -= rd;
+		next_reel ();
+	}
 
-	list<ContentTimePeriod> ctp;
-	double const vfr = _dcp_content->active_video_frame_rate ();
+	/* Pass subtitles in the pre-roll */
 
-	int offset = 0;
+	double const vfr = _dcp_content->active_video_frame_rate ();
+	for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+		pass_subtitles (pre);
+		pre += ContentTime::from_frames (1, vfr);
+	}
 
-	BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
-		if (!r->main_subtitle ()) {
-			offset += r->main_picture()->duration();
-			continue;
-		}
+	/* Seek to correct position */
 
-		int64_t const entry_point = r->main_subtitle()->entry_point ();
+	while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ())) {
+		t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate ());
+		next_reel ();
+	}
 
-		list<dcp::SubtitleString> subs = r->main_subtitle()->asset()->subtitles_during (
-			dcp::Time (period.from.seconds(), 1000) - dcp::Time (offset - entry_point, vfr, vfr),
-			dcp::Time (period.to.seconds(), 1000) - dcp::Time (offset - entry_point, vfr, vfr),
-			starting
-			);
+	_next = t;
+}
 
-		BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
-			ctp.push_back (
-				ContentTimePeriod (
-					ContentTime::from_seconds (s.in().as_seconds ()) + ContentTime::from_frames (offset - entry_point, vfr),
-					ContentTime::from_seconds (s.out().as_seconds ()) + ContentTime::from_frames (offset - entry_point, vfr)
-					)
-				);
-		}
+void
+DCPDecoder::set_decode_referenced (bool r)
+{
+	_decode_referenced = r;
 
-		offset += r->main_subtitle()->duration();
+	if (video) {
+		video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+	}
+	if (audio) {
+		audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
 	}
-
-	return ctp;
 }
 
 void
-DCPDecoder::set_decode_referenced ()
+DCPDecoder::set_forced_reduction (optional<int> reduction)
 {
-	_decode_referenced = true;
+	_forced_reduction = reduction;
 }