X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=df10ec14612f0d348cc27b70eea3d0402bbd7f39;hp=36d3fde67109ca51fe59aa8acdac8445a672b65e;hb=b56ca8250b8eae123c0992a50f5cabe99e655763;hpb=42a6003535d3153224da33b973bb79662d296e02 diff --git a/src/lib/player.cc b/src/lib/player.cc index 36d3fde67..df10ec146 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -70,7 +70,6 @@ using std::dynamic_pointer_cast; using std::list; using std::make_pair; using std::make_shared; -using std::map; using std::max; using std::min; using std::min; @@ -95,11 +94,12 @@ int const PlayerProperty::DCP_DECODE_REDUCTION = 704; int const PlayerProperty::PLAYBACK_LENGTH = 705; -Player::Player (shared_ptr film) +Player::Player (shared_ptr film, Image::Alignment subtitle_alignment) : _film (film) , _suspended (0) , _tolerant (film->tolerant()) , _audio_merger (_film->audio_frame_rate()) + , _subtitle_alignment (subtitle_alignment) { construct (); } @@ -134,12 +134,6 @@ Player::construct () } -Player::~Player () -{ - delete _shuffler; -} - - void Player::setup_pieces () { @@ -170,8 +164,7 @@ Player::setup_pieces_unlocked () auto old_pieces = _pieces; _pieces.clear (); - delete _shuffler; - _shuffler = new Shuffler(); + _shuffler.reset (new Shuffler()); _shuffler->Video.connect(bind(&Player::video, this, _1, _2)); for (auto i: playlist()->content()) { @@ -226,7 +219,7 @@ Player::setup_pieces_unlocked () if (decoder->video) { if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) { /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */ - decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr(piece), _1)); + decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr(piece), _1)); } else { decoder->video->Data.connect (bind(&Player::video, this, weak_ptr(piece), _1)); } @@ -285,9 +278,9 @@ Player::setup_pieces_unlocked () 
_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length); _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length); - _last_video_time = {}; + _last_video_time = boost::optional(); _last_video_eyes = Eyes::BOTH; - _last_audio_time = {}; + _last_audio_time = boost::optional(); } @@ -338,7 +331,7 @@ Player::set_video_container_size (dcp::Size s) _video_container_size = s; - _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true)); + _black_image = make_shared(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED); _black_image->make_black (); } @@ -564,61 +557,61 @@ Player::get_reel_assets () { /* Does not require a lock on _mutex as it's only called from DCPEncoder */ - list a; + list reel_assets; - for (auto i: playlist()->content()) { - auto j = dynamic_pointer_cast (i); - if (!j) { + for (auto content: playlist()->content()) { + auto dcp = dynamic_pointer_cast(content); + if (!dcp) { continue; } scoped_ptr decoder; try { - decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr())); + decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr())); } catch (...) 
{ - return a; + return reel_assets; } - DCPOMATIC_ASSERT (j->video_frame_rate ()); - double const cfr = j->video_frame_rate().get(); - Frame const trim_start = j->trim_start().frames_round (cfr); - Frame const trim_end = j->trim_end().frames_round (cfr); + DCPOMATIC_ASSERT (dcp->video_frame_rate()); + double const cfr = dcp->video_frame_rate().get(); + Frame const trim_start = dcp->trim_start().frames_round(cfr); + Frame const trim_end = dcp->trim_end().frames_round(cfr); int const ffr = _film->video_frame_rate (); /* position in the asset from the start */ int64_t offset_from_start = 0; - /* position in the asset from the end */ + /* position in the asset from the end */ int64_t offset_from_end = 0; for (auto k: decoder->reels()) { /* Assume that main picture duration is the length of the reel */ offset_from_end += k->main_picture()->actual_duration(); } - for (auto k: decoder->reels()) { + for (auto reel: decoder->reels()) { /* Assume that main picture duration is the length of the reel */ - int64_t const reel_duration = k->main_picture()->actual_duration(); + int64_t const reel_duration = reel->main_picture()->actual_duration(); /* See doc/design/trim_reels.svg */ Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start)); Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end))); - auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate()); - if (j->reference_video ()) { - maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr); + auto const from = content->position() + DCPTime::from_frames(offset_from_start, ffr); + if (dcp->reference_video()) { + maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, ffr); } - if (j->reference_audio ()) { - maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr); + if (dcp->reference_audio()) { + maybe_add_asset 
(reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, ffr); } - if (j->reference_text (TextType::OPEN_SUBTITLE)) { - maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr); + if (dcp->reference_text(TextType::OPEN_SUBTITLE)) { + maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr); } - if (j->reference_text (TextType::CLOSED_CAPTION)) { - for (auto l: k->closed_captions()) { - maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr); + if (dcp->reference_text(TextType::CLOSED_CAPTION)) { + for (auto caption: reel->closed_captions()) { + maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, ffr); } } @@ -627,7 +620,7 @@ Player::get_reel_assets () } } - return a; + return reel_assets; } @@ -750,10 +743,38 @@ Player::pass () /* Emit any audio that is ready */ /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one - of our streams, or the position of the _silent. + of our streams, or the position of the _silent. First, though we choose only streams that are less than + ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far + behind it has finished). This is so that we don't withhold audio indefinitely awaiting data from a stream + that will never come, causing bugs like #2101. */ - auto pull_to = _playback_length; + constexpr int ignore_streams_behind = 5; + + using state_pair = std::pair; + + /* Find the 'leading' stream (i.e. 
the one that pushed data most recently) */ + auto latest_last_push_end = std::max_element( + _stream_states.begin(), + _stream_states.end(), + [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; } + ); + + if (latest_last_push_end != _stream_states.end()) { + LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end)); + } + + /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */ + std::map alive_stream_states; for (auto const& i: _stream_states) { + if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) { + alive_stream_states.insert(i); + } else { + LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0)); + } + } + + auto pull_to = _playback_length; + for (auto const& i: alive_stream_states) { if (!i.second.piece->done && i.second.last_push_end < pull_to) { pull_to = i.second.last_push_end; } @@ -834,13 +855,17 @@ Player::open_subtitles_for_frame (DCPTime time) const return {}; } - return merge (captions); + return merge (captions, _subtitle_alignment); } void Player::video (weak_ptr wp, ContentVideo video) { + if (_suspended) { + return; + } + auto piece = wp.lock (); if (!piece) { return; @@ -923,16 +948,23 @@ Player::video (weak_ptr wp, ContentVideo video) } } + auto const content_video = piece->content->video; + _last_video[wp] = std::make_shared( video.image, - piece->content->video->crop (), - piece->content->video->fade (_film, video.frame), - scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()), + content_video->actual_crop(), + content_video->fade (_film, video.frame), + scale_for_display( + content_video->scaled_size(_film->frame_size()), + _video_container_size, + 
_film->frame_size(), + content_video->pixel_quanta() + ), _video_container_size, video.eyes, video.part, - piece->content->video->colour_conversion(), - piece->content->video->range(), + content_video->colour_conversion(), + content_video->range(), piece->content, video.frame, false @@ -951,6 +983,10 @@ Player::video (weak_ptr wp, ContentVideo video) void Player::audio (weak_ptr wp, AudioStreamPtr stream, ContentAudio content_audio) { + if (_suspended) { + return; + } + DCPOMATIC_ASSERT (content_audio.audio->frames() > 0); auto piece = wp.lock (); @@ -1021,6 +1057,10 @@ Player::audio (weak_ptr wp, AudioStreamPtr stream, ContentAudio content_a void Player::bitmap_text_start (weak_ptr wp, weak_ptr wc, ContentBitmapText subtitle) { + if (_suspended) { + return; + } + auto piece = wp.lock (); auto text = wc.lock (); if (!piece || !text) { @@ -1050,7 +1090,7 @@ Player::bitmap_text_start (weak_ptr wp, weak_ptr wc, C } dcp::Size scaled_size (width, height); - ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle)); + ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle)); DCPTime from (content_time_to_dcp (piece, subtitle.from())); _active_texts[static_cast(text->type())].add_from (wc, ps, from); @@ -1060,6 +1100,10 @@ Player::bitmap_text_start (weak_ptr wp, weak_ptr wc, C void Player::plain_text_start (weak_ptr wp, weak_ptr wc, ContentStringText subtitle) { + if (_suspended) { + return; + } + auto piece = wp.lock (); auto text = wc.lock (); if (!piece || !text) { @@ -1105,6 +1149,10 @@ Player::plain_text_start (weak_ptr wp, weak_ptr wc, Co void Player::subtitle_stop (weak_ptr wp, weak_ptr wc, ContentTime to) { + if (_suspended) { + return; + } + auto text = wc.lock (); if (!text) { return; @@ -1199,6 +1247,16 @@ Player::seek (DCPTime time, bool accurate) void Player::emit_video 
(shared_ptr pv, DCPTime time) { + if (!_film->three_d()) { + if (pv->eyes() == Eyes::LEFT) { + /* Use left-eye images for both eyes... */ + pv->set_eyes (Eyes::BOTH); + } else if (pv->eyes() == Eyes::RIGHT) { + /* ...and discard the right */ + return; + } + } + /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the player before the video that requires them. */ @@ -1344,6 +1402,10 @@ Player::playlist () const void Player::atmos (weak_ptr, ContentAtmos data) { + if (_suspended) { + return; + } + Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata); }