X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fdcp_decoder.cc;h=a67b244f75392b4803a7d1638372c80e216f1070;hp=ada0d01d1db617dabf66f117f92971e008102776;hb=254b3044d72de6b033d7c584f5abd2b9aa70aad5;hpb=bd709c1e98e7653dafe7dff302440a7890140c7d

diff --git a/src/lib/dcp_decoder.cc b/src/lib/dcp_decoder.cc
index ada0d01d1..a67b244f7 100644
--- a/src/lib/dcp_decoder.cc
+++ b/src/lib/dcp_decoder.cc
@@ -1,195 +1,427 @@
 /*
-    Copyright (C) 2014-2015 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2014-2018 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
 #include "dcp_decoder.h"
 #include "dcp_content.h"
+#include "audio_content.h"
+#include "video_decoder.h"
+#include "audio_decoder.h"
 #include "j2k_image_proxy.h"
+#include "text_decoder.h"
+#include "ffmpeg_image_proxy.h"
 #include "image.h"
 #include "config.h"
 #include <dcp/dcp.h>
-#include <dcp/decrypted_kdm.h>
 #include <dcp/cpl.h>
 #include <dcp/reel.h>
 #include <dcp/mono_picture_asset.h>
+#include <dcp/mono_picture_asset_reader.h>
 #include <dcp/stereo_picture_asset.h>
+#include <dcp/stereo_picture_asset_reader.h>
 #include <dcp/reel_picture_asset.h>
 #include <dcp/reel_sound_asset.h>
 #include <dcp/reel_subtitle_asset.h>
+#include <dcp/reel_closed_caption_asset.h>
 #include <dcp/mono_picture_frame.h>
 #include <dcp/stereo_picture_frame.h>
 #include <dcp/sound_frame.h>
+#include <dcp/sound_asset_reader.h>
+#include <dcp/subtitle_image.h>
 #include <boost/foreach.hpp>
 #include <iostream>
 
+#include "i18n.h"
+
 using std::list;
 using std::cout;
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
+using boost::optional;
 
 DCPDecoder::DCPDecoder (shared_ptr<const DCPContent> c, bool fast)
-	: VideoDecoder (c)
-	, AudioDecoder (c, fast)
-	, SubtitleDecoder (c)
-	, _dcp_content (c)
+	: DCP (c)
+	, _decode_referenced (false)
 {
-	dcp::DCP dcp (c->directory ());
-	dcp.read ();
-	if (c->kdm ()) {
-		dcp.add (dcp::DecryptedKDM (c->kdm().get (), Config::instance()->decryption_chain()->key().get ()));
+	if (c->video) {
+		video.reset (new VideoDecoder (this, c));
+	}
+	if (c->audio) {
+		audio.reset (new AudioDecoder (this, c->audio, fast));
+	}
+	BOOST_FOREACH (shared_ptr<TextContent> i, c->text) {
+		/* XXX: this time here should be the time of the first subtitle, not 0 */
+		text.push_back (shared_ptr<TextDecoder> (new TextDecoder (this, i, ContentTime())));
+	}
+
+	list<shared_ptr<dcp::CPL> > cpl_list = cpls ();
+
+	if (cpl_list.empty()) {
+		throw DCPError (_("No CPLs found in DCP."));
 	}
-	DCPOMATIC_ASSERT (dcp.cpls().size() == 1);
-	_reels = dcp.cpls().front()->reels ();
+
+	shared_ptr<dcp::CPL> cpl;
+	BOOST_FOREACH (shared_ptr<dcp::CPL> i, cpl_list) {
+		if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
+			cpl = i;
+		}
+	}
+
+	if (!cpl) {
+		/* No CPL found; probably an old file that doesn't specify it;
+		   just use the first one.
+ */ + cpl = cpls().front (); + } + + set_decode_referenced (false); + + _reels = cpl->reels (); + _reel = _reels.begin (); + _offset = 0; + get_readers (); } + bool -DCPDecoder::pass (PassReason reason) +DCPDecoder::pass (shared_ptr film) { if (_reel == _reels.end () || !_dcp_content->can_be_played ()) { return true; } - double const vfr = _dcp_content->video_frame_rate (); + double const vfr = _dcp_content->active_video_frame_rate (film); + + /* Frame within the (played part of the) reel that is coming up next */ int64_t const frame = _next.frames_round (vfr); - if ((*_reel)->main_picture () && reason != PASS_REASON_SUBTITLE) { - shared_ptr asset = (*_reel)->main_picture()->asset (); - shared_ptr mono = dynamic_pointer_cast (asset); - shared_ptr stereo = dynamic_pointer_cast (asset); + shared_ptr picture_asset = (*_reel)->main_picture()->asset(); + DCPOMATIC_ASSERT (picture_asset); + + /* We must emit texts first as when we emit the video for this frame + it will expect already to have the texts. + */ + pass_texts (film, _next, picture_asset->size()); + + if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) { int64_t const entry_point = (*_reel)->main_picture()->entry_point (); - if (mono) { - video (shared_ptr (new J2KImageProxy (mono->get_frame (entry_point + frame), asset->size())), frame); + if (_mono_reader) { + video->emit ( + film, + shared_ptr ( + new J2KImageProxy ( + _mono_reader->get_frame (entry_point + frame), + picture_asset->size(), + AV_PIX_FMT_XYZ12LE, + _forced_reduction + ) + ), + _offset + frame + ); } else { - video ( - shared_ptr (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_LEFT)), - frame + video->emit ( + film, + shared_ptr ( + new J2KImageProxy ( + _stereo_reader->get_frame (entry_point + frame), + picture_asset->size(), + dcp::EYE_LEFT, + AV_PIX_FMT_XYZ12LE, + _forced_reduction + ) + ), + _offset + frame ); - video ( - shared_ptr (new J2KImageProxy (stereo->get_frame (entry_point + frame), asset->size(), dcp::EYE_RIGHT)), - frame + video->emit ( + film, + shared_ptr ( + new J2KImageProxy ( + _stereo_reader->get_frame (entry_point + frame), + picture_asset->size(), + dcp::EYE_RIGHT, + AV_PIX_FMT_XYZ12LE, + _forced_reduction + ) + ), + _offset + frame ); } } - if ((*_reel)->main_sound () && reason != PASS_REASON_SUBTITLE) { + if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) { int64_t const entry_point = (*_reel)->main_sound()->entry_point (); - shared_ptr sf = (*_reel)->main_sound()->asset()->get_frame (entry_point + frame); + shared_ptr sf = _sound_reader->get_frame (entry_point + frame); uint8_t const * from = sf->data (); - int const channels = _dcp_content->audio_stream()->channels (); + int const channels = _dcp_content->audio->stream()->channels (); int const frames = sf->size() / (3 * channels); shared_ptr data (new AudioBuffers (channels, frames)); + float** data_data = data->data(); for (int i = 0; i < frames; ++i) { for (int j = 0; j < channels; ++j) { - data->data()[j][i] = static_cast ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast (INT_MAX - 256); + data_data[j][i] = static_cast ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast (INT_MAX - 256); from += 3; } } - audio (_dcp_content->audio_stream(), data, _next); + audio->emit (film, _dcp_content->audio->stream(), data, ContentTime::from_frames (_offset, vfr) + _next); + } + + _next += ContentTime::from_frames (1, vfr); + + if ((*_reel)->main_picture ()) 
+		if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
+			next_reel ();
+			_next = ContentTime ();
+		}
+	}
+
+	return false;
+}
+
+void
+DCPDecoder::pass_texts (shared_ptr<const Film> film, ContentTime next, dcp::Size size)
+{
+	list<shared_ptr<TextDecoder> >::const_iterator decoder = text.begin ();
+	if ((*_reel)->main_subtitle()) {
+		DCPOMATIC_ASSERT (decoder != text.end ());
+		pass_texts (
+			film,
+			next,
+			(*_reel)->main_subtitle()->asset(),
+			_dcp_content->reference_text(TEXT_OPEN_SUBTITLE),
+			(*_reel)->main_subtitle()->entry_point(),
+			*decoder,
+			size
+			);
+		++decoder;
 	}
+	BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> i, (*_reel)->closed_captions()) {
+		DCPOMATIC_ASSERT (decoder != text.end ());
+		pass_texts (
+			film, next, i->asset(), _dcp_content->reference_text(TEXT_CLOSED_CAPTION), i->entry_point(), *decoder, size
+			);
+		++decoder;
+	}
+}
 
-	if ((*_reel)->main_subtitle ()) {
-		int64_t const entry_point = (*_reel)->main_subtitle()->entry_point ();
-		list<dcp::SubtitleString> subs = (*_reel)->main_subtitle()->subtitle_asset()->subtitles_during (
+void
+DCPDecoder::pass_texts (
+	shared_ptr<const Film> film, ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
+	)
+{
+	double const vfr = _dcp_content->active_video_frame_rate (film);
+	/* Frame within the (played part of the) reel that is coming up next */
+	int64_t const frame = next.frames_round (vfr);
+
+	if (_decode_referenced || !reference) {
+		list<shared_ptr<dcp::Subtitle> > subs = asset->subtitles_during (
 			dcp::Time (entry_point + frame, vfr, vfr),
 			dcp::Time (entry_point + frame + 1, vfr, vfr),
 			true
 			);
 
-		if (!subs.empty ()) {
-			/* XXX: assuming that all `subs' are at the same time; maybe this is ok */
-			text_subtitle (
+		list<dcp::SubtitleString> strings;
+
+		BOOST_FOREACH (shared_ptr<dcp::Subtitle> i, subs) {
+			shared_ptr<dcp::SubtitleString> is = dynamic_pointer_cast<dcp::SubtitleString> (i);
+			if (is) {
+				if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
+					dcp::SubtitleString b = strings.back();
+					decoder->emit_plain (
+						ContentTimePeriod (
+							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+							ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
+							),
+						strings
+						);
+					strings.clear ();
+				}
+
+				strings.push_back (*is);
+			}
+
+			shared_ptr<dcp::SubtitleImage> ii = dynamic_pointer_cast<dcp::SubtitleImage> (i);
+			if (ii) {
+				FFmpegImageProxy proxy (ii->png_image());
+				shared_ptr<Image> image = proxy.image().first;
+				/* set up rect with height and width */
+				dcpomatic::Rect<double> rect(0, 0, image->size().width / double(size.width), image->size().height / double(size.height));
+
+				/* add in position */
+
+				switch (ii->h_align()) {
+				case dcp::HALIGN_LEFT:
+					rect.x += ii->h_position();
+					break;
+				case dcp::HALIGN_CENTER:
+					rect.x += 0.5 + ii->h_position() - rect.width / 2;
+					break;
+				case dcp::HALIGN_RIGHT:
+					rect.x += 1 - ii->h_position() - rect.width;
+					break;
+				}
+
+				switch (ii->v_align()) {
+				case dcp::VALIGN_TOP:
+					rect.y += ii->v_position();
+					break;
+				case dcp::VALIGN_CENTER:
+					rect.y += 0.5 + ii->v_position() - rect.height / 2;
+					break;
+				case dcp::VALIGN_BOTTOM:
+					rect.y += 1 - ii->v_position() - rect.height;
+					break;
+				}
+
+				decoder->emit_bitmap (
+					ContentTimePeriod (
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
+						ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
+						),
+					image, rect
+					);
+			}
+		}
+
+		if (!strings.empty()) {
+			dcp::SubtitleString b = strings.back();
+			decoder->emit_plain (
 				ContentTimePeriod (
-					ContentTime::from_seconds (subs.front().in().as_seconds ()),
-					ContentTime::from_seconds (subs.front().out().as_seconds ())
+					ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
+					ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
 					),
-				subs
+				strings
 				);
+			strings.clear ();
 		}
 	}
+}
 
-	_next += ContentTime::from_frames (1, vfr);
+void
+DCPDecoder::next_reel ()
+{
+	_offset += (*_reel)->main_picture()->duration();
+	++_reel;
+	get_readers ();
+}
 
-	if ((*_reel)->main_picture ()) {
-		if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
-			++_reel;
+void
+DCPDecoder::get_readers ()
+{
+	if (_reel == _reels.end() || !_dcp_content->can_be_played ()) {
+		_mono_reader.reset ();
+		_stereo_reader.reset ();
+		_sound_reader.reset ();
+		return;
+	}
+
+	if ((*_reel)->main_picture()) {
+		shared_ptr<dcp::PictureAsset> asset = (*_reel)->main_picture()->asset ();
+		shared_ptr<dcp::MonoPictureAsset> mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
+		shared_ptr<dcp::StereoPictureAsset> stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
+		DCPOMATIC_ASSERT (mono || stereo);
+		if (mono) {
+			_mono_reader = mono->start_read ();
+			_stereo_reader.reset ();
+		} else {
+			_stereo_reader = stereo->start_read ();
+			_mono_reader.reset ();
 		}
+	} else {
+		_mono_reader.reset ();
+		_stereo_reader.reset ();
 	}
 
-	return false;
+	if ((*_reel)->main_sound()) {
+		_sound_reader = (*_reel)->main_sound()->asset()->start_read ();
+	} else {
+		_sound_reader.reset ();
+	}
 }
 
 void
-DCPDecoder::seek (ContentTime t, bool accurate)
+DCPDecoder::seek (shared_ptr<const Film> film, ContentTime t, bool accurate)
 {
-	VideoDecoder::seek (t, accurate);
-	AudioDecoder::seek (t, accurate);
-	SubtitleDecoder::seek (t, accurate);
+	if (!_dcp_content->can_be_played ()) {
+		return;
+	}
+
+	Decoder::seek (film, t, accurate);
 
 	_reel = _reels.begin ();
-	while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->video_frame_rate ())) {
-		t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->video_frame_rate ());
-		++_reel;
+	_offset = 0;
+	get_readers ();
+
+	int const pre_roll_seconds = 2;
+
+	/* Pre-roll for subs */
+
+	ContentTime pre = t - ContentTime::from_seconds (pre_roll_seconds);
+	if (pre < ContentTime()) {
+		pre = ContentTime ();
 	}
 
-	_next = t;
-}
+	/* Seek to pre-roll position */
 
+	while (_reel != _reels.end() && pre >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film))) {
+		ContentTime rd = ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film));
+		pre -= rd;
+		t -= rd;
+		next_reel ();
+	}
 
-list<ContentTimePeriod>
-DCPDecoder::image_subtitles_during (ContentTimePeriod, bool) const
-{
-	return list<ContentTimePeriod> ();
-}
+	/* Pass texts in the pre-roll */
 
-list<ContentTimePeriod>
-DCPDecoder::text_subtitles_during (ContentTimePeriod period, bool starting) const
-{
-	/* XXX: inefficient */
+	double const vfr = _dcp_content->active_video_frame_rate (film);
+	for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
+		pass_texts (film, pre, (*_reel)->main_picture()->asset()->size());
+		pre += ContentTime::from_frames (1, vfr);
+	}
 
-	list<ContentTimePeriod> ctp;
-	double const vfr = _dcp_content->video_frame_rate ();
+	/* Seek to correct position */
 
-	BOOST_FOREACH (shared_ptr<dcp::Reel> r, _reels) {
-		if (!r->main_subtitle ()) {
-			continue;
-		}
+	while (_reel != _reels.end() && t >= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film))) {
+		t -= ContentTime::from_frames ((*_reel)->main_picture()->duration(), _dcp_content->active_video_frame_rate(film));
+		next_reel ();
+	}
 
-		int64_t const entry_point = r->main_subtitle()->entry_point ();
+	_next = t;
+}
 
-		list<dcp::SubtitleString> subs = r->main_subtitle()->subtitle_asset()->subtitles_during (
-			dcp::Time (period.from.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
-			dcp::Time (period.to.seconds(), 1000) - dcp::Time (entry_point, vfr, vfr),
-			starting
-			);
+void
+DCPDecoder::set_decode_referenced (bool r)
+{
+	_decode_referenced = r;
 
-		BOOST_FOREACH (dcp::SubtitleString const & s, subs) {
-			ctp.push_back (
-				ContentTimePeriod (
-					ContentTime::from_seconds (s.in().as_seconds ()),
-					ContentTime::from_seconds (s.out().as_seconds ())
-					)
-				);
-		}
+	if (video) {
+		video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
+	}
+	if (audio) {
+		audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
 	}
+}
 
-	return ctp;
+void
+DCPDecoder::set_forced_reduction (optional<int> reduction)
+{
+	_forced_reduction = reduction;
 }
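For reference, the 24-bit PCM handling in DCPDecoder::pass() above can be illustrated in isolation. The following is a minimal standalone sketch, not part of DCP-o-matic (the function name convert_24bit_to_float and the test data are illustrative): it packs each little-endian 3-byte sample into the top 24 bits of a signed 32-bit value and scales by INT_MAX - 256, the same arithmetic as the diff, except that it uses unsigned intermediates for the shifts.

	// Standalone sketch: convert packed little-endian 24-bit PCM to floats in
	// roughly [-1, 1], mirroring the scaling used by the decoder above.
	#include <climits>
	#include <cstdint>
	#include <iostream>
	#include <vector>

	static std::vector<float>
	convert_24bit_to_float (uint8_t const * from, int frames, int channels)
	{
		std::vector<float> out;
		out.reserve (frames * channels);
		for (int i = 0; i < frames; ++i) {
			for (int j = 0; j < channels; ++j) {
				/* Bytes are least-significant first; place them in bits 8..31 so the
				   sample occupies the top of a signed 32-bit integer (bits 0..7 stay zero).
				*/
				uint32_t const u = (uint32_t (from[0]) << 8) | (uint32_t (from[1]) << 16) | (uint32_t (from[2]) << 24);
				out.push_back (static_cast<float> (static_cast<int32_t> (u)) / static_cast<float> (INT_MAX - 256));
				from += 3;
			}
		}
		return out;
	}

	int
	main ()
	{
		/* One channel, two frames: near full-scale positive, then full-scale negative */
		uint8_t const samples[] = { 0xff, 0xff, 0x7f, 0x00, 0x00, 0x80 };
		std::vector<float> f = convert_24bit_to_float (samples, 2, 1);
		std::cout << f[0] << " " << f[1] << "\n";   /* prints approximately 1 and -1 */
		return 0;
	}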