X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=2b65fd54e393d5c252fecb67a564115d45bb3630;hb=6f344b876689a1234a5eb75041882f06f5d9fe5c;hp=361f41c92cf07263a083bdd872136cfb68e4ce3c;hpb=89ee4cc6019036fa4fc0a6e07e052ffdc3b136ac;p=dcpomatic.git diff --git a/src/lib/player.cc b/src/lib/player.cc index 361f41c92..2b65fd54e 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -27,8 +27,8 @@ #include "sndfile_decoder.h" #include "sndfile_content.h" #include "subtitle_content.h" -#include "subrip_decoder.h" -#include "subrip_content.h" +#include "text_subtitle_decoder.h" +#include "text_subtitle_content.h" #include "dcp_content.h" #include "job.h" #include "image.h" @@ -45,13 +45,20 @@ #include "dcp_subtitle_content.h" #include "dcp_subtitle_decoder.h" #include "audio_processor.h" +#include "playlist.h" +#include "referenced_reel_asset.h" +#include +#include +#include +#include #include #include #include +#include #include "i18n.h" -#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL); +#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL); using std::list; using std::cout; @@ -67,15 +74,21 @@ using boost::shared_ptr; using boost::weak_ptr; using boost::dynamic_pointer_cast; using boost::optional; +using boost::scoped_ptr; -Player::Player (shared_ptr film) +Player::Player (shared_ptr film, shared_ptr playlist) : _film (film) + , _playlist (playlist) , _have_valid_pieces (false) , _ignore_video (false) + , _ignore_audio (false) , _always_burn_subtitles (false) + , _fast (false) + , _play_referenced (false) { - _film_content_changed_connection = _film->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3)); _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1)); + _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this)); + _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3)); set_video_container_size (_film->frame_size ()); film_changed (Film::AUDIO_PROCESSOR); @@ -87,56 +100,30 @@ Player::setup_pieces () list > old_pieces = _pieces; _pieces.clear (); - ContentList content = _film->content (); + BOOST_FOREACH (shared_ptr i, _playlist->content ()) { - for (ContentList::iterator i = content.begin(); i != content.end(); ++i) { - - if (!(*i)->paths_valid ()) { + if (!i->paths_valid ()) { continue; } shared_ptr decoder; optional frc; - /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */ - DCPTime best_overlap_t; - shared_ptr best_overlap; - for (ContentList::iterator j = content.begin(); j != content.end(); ++j) { - shared_ptr vc = dynamic_pointer_cast (*j); - if (!vc) { - continue; - } - - DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end()); - if (overlap > best_overlap_t) { - best_overlap = vc; - best_overlap_t = overlap; - } - } - - optional best_overlap_frc; - if (best_overlap) { - best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ()); - } else { - /* No video overlap; e.g. 
if the DCP is just audio */ - best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ()); - } - /* FFmpeg */ - shared_ptr fc = dynamic_pointer_cast (*i); + shared_ptr fc = dynamic_pointer_cast (i); if (fc) { - decoder.reset (new FFmpegDecoder (fc, _film->log())); - frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate()); + decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast)); + frc = FrameRateChange (fc->video->video_frame_rate(), _film->video_frame_rate()); } - shared_ptr dc = dynamic_pointer_cast (*i); + shared_ptr dc = dynamic_pointer_cast (i); if (dc) { - decoder.reset (new DCPDecoder (dc)); - frc = FrameRateChange (dc->video_frame_rate(), _film->video_frame_rate()); + decoder.reset (new DCPDecoder (dc, _film->log(), _fast)); + frc = FrameRateChange (dc->video->video_frame_rate(), _film->video_frame_rate()); } /* ImageContent */ - shared_ptr ic = dynamic_pointer_cast (*i); + shared_ptr ic = dynamic_pointer_cast (i); if (ic) { /* See if we can re-use an old ImageDecoder */ for (list >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) { @@ -147,31 +134,57 @@ Player::setup_pieces () } if (!decoder) { - decoder.reset (new ImageDecoder (ic)); + decoder.reset (new ImageDecoder (ic, _film->log())); } - frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate()); + frc = FrameRateChange (ic->video->video_frame_rate(), _film->video_frame_rate()); } /* SndfileContent */ - shared_ptr sc = dynamic_pointer_cast (*i); + shared_ptr sc = dynamic_pointer_cast (i); if (sc) { - decoder.reset (new SndfileDecoder (sc)); - frc = best_overlap_frc; + decoder.reset (new SndfileDecoder (sc, _fast)); + + /* Work out a FrameRateChange for the best overlap video for this content */ + DCPTime best_overlap_t; + shared_ptr best_overlap; + BOOST_FOREACH (shared_ptr j, _playlist->content ()) { + if (!j->video) { + continue; + } + + DCPTime const overlap = min (j->end(), i->end()) - max (j->position(), i->position()); + if (overlap > best_overlap_t) { + best_overlap = j; + best_overlap_t = overlap; + } + } + + if (best_overlap) { + frc = FrameRateChange (best_overlap->video->video_frame_rate(), _film->video_frame_rate ()); + } else { + /* No video overlap; e.g. if the DCP is just audio */ + frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ()); + } } - /* SubRipContent */ - shared_ptr rc = dynamic_pointer_cast (*i); + /* It's questionable whether subtitle content should have a video frame rate; perhaps + it should be assumed that any subtitle content has been prepared at the same rate + as simultaneous video content (like we do with audio). 
+ */ + + /* TextSubtitleContent */ + shared_ptr rc = dynamic_pointer_cast (i); if (rc) { - decoder.reset (new SubRipDecoder (rc)); - frc = best_overlap_frc; + decoder.reset (new TextSubtitleDecoder (rc)); + frc = FrameRateChange (rc->subtitle_video_frame_rate(), _film->video_frame_rate()); } /* DCPSubtitleContent */ - shared_ptr dsc = dynamic_pointer_cast (*i); + shared_ptr dsc = dynamic_pointer_cast (i); if (dsc) { decoder.reset (new DCPSubtitleDecoder (dsc)); - frc = best_overlap_frc; + frc = FrameRateChange (dsc->subtitle_video_frame_rate(), _film->video_frame_rate()); } shared_ptr vd = dynamic_pointer_cast (decoder); @@ -179,14 +192,19 @@ Player::setup_pieces () vd->set_ignore_video (); } - _pieces.push_back (shared_ptr (new Piece (*i, decoder, frc.get ()))); + shared_ptr ad = dynamic_pointer_cast (decoder); + if (ad && _ignore_audio) { + ad->set_ignore_audio (); + } + + _pieces.push_back (shared_ptr (new Piece (i, decoder, frc.get ()))); } _have_valid_pieces = true; } void -Player::content_changed (weak_ptr w, int property, bool frequent) +Player::playlist_content_changed (weak_ptr w, int property, bool frequent) { shared_ptr c = w.lock (); if (!c) { @@ -200,7 +218,11 @@ Player::content_changed (weak_ptr w, int property, bool frequent) property == ContentProperty::TRIM_END || property == ContentProperty::PATH || property == VideoContentProperty::VIDEO_FRAME_TYPE || - property == DCPContentProperty::CAN_BE_PLAYED + property == DCPContentProperty::CAN_BE_PLAYED || + property == TextSubtitleContentProperty::TEXT_SUBTITLE_COLOUR || + property == TextSubtitleContentProperty::TEXT_SUBTITLE_OUTLINE || + property == TextSubtitleContentProperty::TEXT_SUBTITLE_OUTLINE_COLOUR || + property == FFmpegContentProperty::SUBTITLE_STREAM ) { _have_valid_pieces = false; @@ -212,11 +234,13 @@ Player::content_changed (weak_ptr w, int property, bool frequent) property == SubtitleContentProperty::SUBTITLE_Y_OFFSET || property == SubtitleContentProperty::SUBTITLE_X_SCALE || property == SubtitleContentProperty::SUBTITLE_Y_SCALE || + property == SubtitleContentProperty::FONTS || property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE || property == VideoContentProperty::VIDEO_FRAME_RATE || property == VideoContentProperty::VIDEO_FADE_IN || - property == VideoContentProperty::VIDEO_FADE_OUT + property == VideoContentProperty::VIDEO_FADE_OUT || + property == VideoContentProperty::COLOUR_CONVERSION ) { Changed (frequent); @@ -228,10 +252,17 @@ Player::set_video_container_size (dcp::Size s) { _video_container_size = s; - _black_image.reset (new Image (PIX_FMT_RGB24, _video_container_size, true)); + _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true)); _black_image->make_black (); } +void +Player::playlist_changed () +{ + _have_valid_pieces = false; + Changed (false); +} + void Player::film_changed (Film::Property p) { @@ -240,10 +271,13 @@ Player::film_changed (Film::Property p) last time we were run. */ - if (p == Film::CONTENT) { - _have_valid_pieces = false; + if (p == Film::CONTAINER) { Changed (false); - } else if (p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) { + } else if (p == Film::VIDEO_FRAME_RATE) { + /* Pieces contain a FrameRateChange which contains the DCP frame rate, + so we need new pieces here. 
+ */ + _have_valid_pieces = false; Changed (false); } else if (p == Film::AUDIO_PROCESSOR) { if (_film->audio_processor ()) { @@ -283,11 +317,12 @@ Player::transform_image_subtitles (list subs) const scaled_size, dcp::YUV_TO_RGB_REC601, i->image->pixel_format (), - true + true, + _fast ), Position ( - rint (_video_container_size.width * i->rectangle.x), - rint (_video_container_size.height * i->rectangle.y) + lrint (_video_container_size.width * i->rectangle.x), + lrint (_video_container_size.height * i->rectangle.y) ) ) ); @@ -304,7 +339,7 @@ Player::black_player_video_frame (DCPTime time) const shared_ptr (new RawImageProxy (_black_image)), time, Crop (), - optional (), + optional (), _video_container_size, _video_container_size, EYES_BOTH, @@ -314,7 +349,10 @@ Player::black_player_video_frame (DCPTime time) const ); } -/** @return All PlayerVideos at the given time (there may be two frames for 3D) */ +/** @return All PlayerVideos at the given time. There may be none if the content + * at `time' is a DCP which we are passing through (i.e. referring to by reference) + * or 2 if we have 3D. + */ list > Player::get_video (DCPTime time, bool accurate) { @@ -324,7 +362,7 @@ Player::get_video (DCPTime time, bool accurate) /* Find subtitles for possible burn-in */ - PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true); + PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate); list sub_images; @@ -334,7 +372,7 @@ Player::get_video (DCPTime time, bool accurate) /* Text subtitles (rendered to an image) */ if (!ps.text.empty ()) { - list s = render_subtitles (ps.text, _video_container_size); + list s = render_subtitles (ps.text, ps.fonts, _video_container_size); copy (s.begin (), s.end (), back_inserter (sub_images)); } @@ -343,11 +381,11 @@ Player::get_video (DCPTime time, bool accurate) subtitles = merge (sub_images); } - /* Find video */ + /* Find pieces containing video which is happening now */ list > ov = overlaps ( time, - time + DCPTime::from_frames (1, _film->video_frame_rate ()) - DCPTime::delta() + time + DCPTime::from_frames (1, _film->video_frame_rate ()) ); list > pvf; @@ -356,38 +394,60 @@ Player::get_video (DCPTime time, bool accurate) /* No video content at this time */ pvf.push_back (black_player_video_frame (time)); } else { - /* Create a PlayerVideo from the content's video at this time */ + /* Some video content at this time */ + shared_ptr last = *(ov.rbegin ()); + VideoFrameType const last_type = last->content->video->video_frame_type (); - shared_ptr piece = ov.back (); - shared_ptr decoder = dynamic_pointer_cast (piece->decoder); - DCPOMATIC_ASSERT (decoder); - shared_ptr video_content = dynamic_pointer_cast (piece->content); - DCPOMATIC_ASSERT (video_content); + /* Get video from appropriate piece(s) */ + BOOST_FOREACH (shared_ptr piece, ov) { - list content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate); - if (content_video.empty ()) { - pvf.push_back (black_player_video_frame (time)); - return pvf; - } + shared_ptr decoder = dynamic_pointer_cast (piece->decoder); + DCPOMATIC_ASSERT (decoder); - dcp::Size image_size = video_content->scale().size (video_content, _video_container_size, _film->frame_size ()); - - for (list::const_iterator i = content_video.begin(); i != content_video.end(); ++i) { - pvf.push_back ( - shared_ptr ( - new PlayerVideo ( - i->image, - content_video_to_dcp (piece, i->frame), - video_content->crop (), - 
video_content->fade (i->frame), - image_size, - _video_container_size, - i->eyes, - i->part, - video_content->colour_conversion () - ) - ) - ); + shared_ptr dcp_content = dynamic_pointer_cast (piece->content); + if (dcp_content && dcp_content->reference_video () && !_play_referenced) { + continue; + } + + bool const use = + /* always use the last video */ + piece == last || + /* with a corresponding L/R eye if appropriate */ + (last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->video_frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) || + (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->video_frame_type() == VIDEO_FRAME_TYPE_3D_LEFT); + + if (use) { + /* We want to use this piece */ + list content_video = decoder->get_video (dcp_to_content_video (piece, time), accurate); + if (content_video.empty ()) { + pvf.push_back (black_player_video_frame (time)); + } else { + dcp::Size image_size = piece->content->video->scale().size ( + piece->content->video, _video_container_size, _film->frame_size () + ); + + for (list::const_iterator i = content_video.begin(); i != content_video.end(); ++i) { + pvf.push_back ( + shared_ptr ( + new PlayerVideo ( + i->image, + content_video_to_dcp (piece, i->frame), + piece->content->video->crop (), + piece->content->video->fade (i->frame), + image_size, + _video_container_size, + i->eyes, + i->part, + piece->content->video->colour_conversion () + ) + ) + ); + } + } + } else { + /* Discard unused video */ + decoder->get_video (dcp_to_content_video (piece, time), accurate); + } } } @@ -400,6 +460,7 @@ Player::get_video (DCPTime time, bool accurate) return pvf; } +/** @return Audio data or 0 if the only audio data here is referenced DCP data */ shared_ptr Player::get_audio (DCPTime time, DCPTime length, bool accurate) { @@ -407,7 +468,7 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) setup_pieces (); } - Frame const length_frames = length.frames (_film->audio_frame_rate ()); + Frame const length_frames = length.frames_round (_film->audio_frame_rate ()); shared_ptr audio (new AudioBuffers (_film->audio_channels(), length_frames)); audio->make_silent (); @@ -417,11 +478,25 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) return audio; } - for (list >::iterator i = ov.begin(); i != ov.end(); ++i) { + bool all_referenced = true; + BOOST_FOREACH (shared_ptr i, ov) { + shared_ptr audio_content = dynamic_pointer_cast (i->content); + shared_ptr dcp_content = dynamic_pointer_cast (i->content); + if (audio_content && (!dcp_content || !dcp_content->reference_audio ())) { + /* There is audio content which is not from a DCP or not set to be referenced */ + all_referenced = false; + } + } + + if (all_referenced && !_play_referenced) { + return shared_ptr (); + } + + BOOST_FOREACH (shared_ptr i, ov) { - shared_ptr content = dynamic_pointer_cast ((*i)->content); + shared_ptr content = dynamic_pointer_cast (i->content); DCPOMATIC_ASSERT (content); - shared_ptr decoder = dynamic_pointer_cast ((*i)->decoder); + shared_ptr decoder = dynamic_pointer_cast (i->decoder); DCPOMATIC_ASSERT (decoder); /* The time that we should request from the content */ @@ -433,17 +508,22 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) the stuff we get back. 
*/ offset = -request; - request_frames += request.frames (_film->audio_frame_rate ()); + request_frames += request.frames_round (_film->audio_frame_rate ()); if (request_frames < 0) { request_frames = 0; } request = DCPTime (); } - Frame const content_frame = dcp_to_content_audio (*i, request); + Frame const content_frame = dcp_to_resampled_audio (i, request); BOOST_FOREACH (AudioStreamPtr j, content->audio_streams ()) { + if (j->channels() == 0) { + /* Some content (e.g. DCPs) can have streams with no channels */ + continue; + } + /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */ ContentAudio all = decoder->get_audio (j, content_frame, request_frames, accurate); @@ -472,7 +552,7 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) } if (_audio_processor) { - dcp_mapped = _audio_processor->run (dcp_mapped); + dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ()); } all.audio = dcp_mapped; @@ -480,7 +560,7 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) audio->accumulate_frames ( all.audio.get(), content_frame - all.frame, - offset.frames (_film->audio_frame_rate()), + offset.frames_round (_film->audio_frame_rate()), min (Frame (all.audio->frames()), request_frames) ); } @@ -492,60 +572,51 @@ Player::get_audio (DCPTime time, DCPTime length, bool accurate) Frame Player::dcp_to_content_video (shared_ptr piece, DCPTime t) const { - /* s is the offset of t from the start position of this content */ + shared_ptr vc = dynamic_pointer_cast (piece->content); DCPTime s = t - piece->content->position (); - s = DCPTime (max (DCPTime::Type (0), s.get ())); - s = DCPTime (min (piece->content->length_after_trim().get(), s.get())); + s = min (piece->content->length_after_trim(), s); + s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc)); - /* Convert this to the content frame */ - return DCPTime (s + piece->content->trim_start()).frames (_film->video_frame_rate()) / piece->frc.factor (); + /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange) + then convert that ContentTime to frames at the content's rate. However this fails for + situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not + enough to distinguish between the two with low values of time (e.g. 3200 in Time units). + + Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat. 
+ */ + return s.frames_floor (piece->frc.dcp) / piece->frc.factor (); } DCPTime Player::content_video_to_dcp (shared_ptr piece, Frame f) const { - DCPTime t = DCPTime::from_frames (f * piece->frc.factor (), _film->video_frame_rate()) - piece->content->trim_start () + piece->content->position (); - if (t < DCPTime ()) { - t = DCPTime (); - } - - return t; + shared_ptr vc = dynamic_pointer_cast (piece->content); + /* See comment in dcp_to_content_video */ + DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc); + return max (DCPTime (), d + piece->content->position ()); } Frame -Player::dcp_to_content_audio (shared_ptr piece, DCPTime t) const +Player::dcp_to_resampled_audio (shared_ptr piece, DCPTime t) const { - /* s is the offset of t from the start position of this content */ DCPTime s = t - piece->content->position (); - s = DCPTime (max (DCPTime::Type (0), s.get ())); - s = DCPTime (min (piece->content->length_after_trim().get(), s.get())); - - /* Convert this to the content frame */ - return DCPTime (s + piece->content->trim_start()).frames (_film->audio_frame_rate()); + s = min (piece->content->length_after_trim(), s); + /* See notes in dcp_to_content_video */ + return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ()); } ContentTime Player::dcp_to_content_subtitle (shared_ptr piece, DCPTime t) const { - /* s is the offset of t from the start position of this content */ DCPTime s = t - piece->content->position (); - s = DCPTime (max (DCPTime::Type (0), s.get ())); - s = DCPTime (min (piece->content->length_after_trim().get(), s.get())); - - return ContentTime (s + piece->content->trim_start(), piece->frc); -} - -void -PlayerStatistics::dump (shared_ptr log) const -{ - log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat), Log::TYPE_GENERAL); - log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence.seconds()), Log::TYPE_GENERAL); + s = min (piece->content->length_after_trim(), s); + return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start()); } -PlayerStatistics const & -Player::statistics () const +DCPTime +Player::content_subtitle_to_dcp (shared_ptr piece, ContentTime t) const { - return _statistics; + return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position()); } /** @param burnt true to return only subtitles to be burnt, false to return only @@ -553,7 +624,7 @@ Player::statistics () const * _always_burn_subtitles is true; in this case, all subtitles will be returned. 
*/ PlayerSubtitles -Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt) +Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate) { list > subs = overlaps (time, time + length); @@ -565,12 +636,17 @@ Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt) continue; } + shared_ptr dcp_content = dynamic_pointer_cast (subtitle_content); + if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) { + continue; + } + shared_ptr subtitle_decoder = dynamic_pointer_cast ((*j)->decoder); ContentTime const from = dcp_to_content_subtitle (*j, time); /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */ ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ()); - list image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting); + list image = subtitle_decoder->get_image_subtitles (ContentTimePeriod (from, to), starting, accurate); for (list::iterator i = image.begin(); i != image.end(); ++i) { /* Apply content's subtitle offsets */ @@ -588,19 +664,31 @@ Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt) ps.image.push_back (i->sub); } - list text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting); + list text = subtitle_decoder->get_text_subtitles (ContentTimePeriod (from, to), starting, accurate); BOOST_FOREACH (ContentTextSubtitle& ts, text) { - BOOST_FOREACH (dcp::SubtitleString& s, ts.subs) { + BOOST_FOREACH (dcp::SubtitleString s, ts.subs) { s.set_h_position (s.h_position() + subtitle_content->subtitle_x_offset ()); s.set_v_position (s.v_position() + subtitle_content->subtitle_y_offset ()); float const xs = subtitle_content->subtitle_x_scale(); float const ys = subtitle_content->subtitle_y_scale(); - float const average = s.size() * (xs + ys) / 2; - s.set_size (average); + float size = s.size(); + + /* Adjust size to express the common part of the scaling; + e.g. if xs = ys = 0.5 we scale size by 2. + */ + if (xs > 1e-5 && ys > 1e-5) { + size *= 1 / min (1 / xs, 1 / ys); + } + s.set_size (size); + + /* Then express aspect ratio changes */ if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) { s.set_aspect_adjust (xs / ys); } + s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000)); + s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000)); ps.text.push_back (s); + ps.add_fonts (subtitle_content->fonts ()); } } } @@ -637,6 +725,13 @@ Player::set_ignore_video () _ignore_video = true; } +/** Set this player never to produce any audio data */ +void +Player::set_ignore_audio () +{ + _ignore_audio = true; +} + /** Set whether or not this player should always burn text subtitles into the image, * regardless of the content settings. * @param burn true to always burn subtitles, false to obey content settings. @@ -646,3 +741,74 @@ Player::set_always_burn_subtitles (bool burn) { _always_burn_subtitles = burn; } + +void +Player::set_fast () +{ + _fast = true; + _have_valid_pieces = false; +} + +void +Player::set_play_referenced () +{ + _play_referenced = true; + _have_valid_pieces = false; +} + +list +Player::get_reel_assets () +{ + list a; + + BOOST_FOREACH (shared_ptr i, _playlist->content ()) { + shared_ptr j = dynamic_pointer_cast (i); + if (!j) { + continue; + } + + scoped_ptr decoder; + try { + decoder.reset (new DCPDecoder (j, _film->log(), false)); + } catch (...) 
{
+			return a;
+		}
+
+		int64_t offset = 0;
+		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
+			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
+			if (j->reference_video ()) {
+				a.push_back (
+					ReferencedReelAsset (
+						k->main_picture (),
+						DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
+					)
+				);
+			}
+
+			if (j->reference_audio ()) {
+				a.push_back (
+					ReferencedReelAsset (
+						k->main_sound (),
+						DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
+					)
+				);
+			}
+
+			if (j->reference_subtitle ()) {
+				DCPOMATIC_ASSERT (k->main_subtitle ());
+				a.push_back (
+					ReferencedReelAsset (
+						k->main_subtitle (),
+						DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
+					)
+				);
+			}
+
+			/* Assume that main picture duration is the length of the reel */
+			offset += k->main_picture()->duration ();
+		}
+	}
+
+	return a;
+}
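
A note on the overlap search in setup_pieces() above: for audio-only content the patch picks the video content with the greatest temporal overlap (computed as min(end) - max(position); the removed code had those operands the other way round) and takes the FrameRateChange from it. Below is a minimal standalone sketch of that search in plain C++; the Span struct and the example times are invented for illustration and are not part of the patch.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

/* Hypothetical stand-in for a piece of content on the DCP timeline */
struct Span {
	int64_t position;
	int64_t end;
};

int main ()
{
	Span const audio = { 100, 500 };                                              /* the audio-only content */
	std::vector<Span> const video = { { 0, 150 }, { 120, 700 }, { 600, 900 } };   /* video candidates */

	int64_t best_overlap = 0;
	int best_index = -1;

	for (size_t i = 0; i < video.size(); ++i) {
		/* Overlap of the two spans, as in the patch: min(end) - max(position); negative means disjoint */
		int64_t const overlap = std::min (video[i].end, audio.end) - std::max (video[i].position, audio.position);
		if (overlap > best_overlap) {
			best_overlap = overlap;
			best_index = int (i);
		}
	}

	/* Prints "1 380": the 120..700 video overlaps the audio most, so its frame rate would drive
	   the FrameRateChange; if best_index stayed -1 the DCP's own rate would be used instead
	   (the "DCP is just audio" case). */
	std::cout << best_index << " " << best_overlap << "\n";
	return 0;
}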
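
A note on the conversion in dcp_to_content_video() above: the comment's point is that rounding a small DCP time into a content-rate time loses the distinction between 29.9978733 fps content and a 30 fps DCP, so the patch counts frames at the DCP rate and then applies the skip/repeat factor. The sketch below reproduces that with the figures from the comment; the 96000 units-per-second Time resolution, the rounding calls and all names are assumptions for illustration, not DCPOmatic's actual types.

#include <cmath>
#include <cstdint>
#include <iostream>

int main ()
{
	int64_t const HZ = 96000;               /* assumed resolution of the Time type, in units per second */
	double const content_rate = 29.9978733; /* content video frame rate, from the comment */
	double const dcp_rate = 30.0;           /* DCP video frame rate */
	int const factor = 1;                   /* stand-in for FrameRateChange::factor(); no skip/repeat here */

	int64_t const s = 3200;                 /* a small DCP time, in Time units, as in the comment */

	/* Rejected approach: convert the DCP time to a content-rate time first, then to frames.
	   The intermediate value rounds back to 3200 units, so the frame index comes out as 0. */
	int64_t const content_time = std::llround (double (s) * dcp_rate / content_rate);
	int64_t const frame_via_content_time = int64_t (std::floor (double (content_time) * content_rate / HZ));

	/* Approach the patch keeps: count frames at the DCP rate, then apply the skip/repeat
	   factor, which gives frame 1 as expected. */
	int64_t const frame_via_dcp_rate = int64_t (std::floor (double (s) * dcp_rate / HZ)) / factor;

	std::cout << frame_via_content_time << " vs " << frame_via_dcp_rate << "\n";  /* prints "0 vs 1" */
	return 0;
}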