Cleanup: use a variable we already made.
[dcpomatic.git] / src / lib / player.cc
index b93bf3f4a7c2756643e47b4cc8e8f0d2a9bcc6b1..df10ec14612f0d348cc27b70eea3d0402bbd7f39 100644 (file)
@@ -557,61 +557,61 @@ Player::get_reel_assets ()
 {
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */
 
-       list<ReferencedReelAsset> a;
+       list<ReferencedReelAsset> reel_assets;
 
-       for (auto i: playlist()->content()) {
-               auto j = dynamic_pointer_cast<DCPContent> (i);
-               if (!j) {
+       for (auto content: playlist()->content()) {
+               auto dcp = dynamic_pointer_cast<DCPContent>(content);
+               if (!dcp) {
                        continue;
                }
 
                scoped_ptr<DCPDecoder> decoder;
                try {
-                       decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
+                       decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
-                       return a;
+                       return reel_assets;
                }
 
-               DCPOMATIC_ASSERT (j->video_frame_rate ());
-               double const cfr = j->video_frame_rate().get();
-               Frame const trim_start = j->trim_start().frames_round (cfr);
-               Frame const trim_end = j->trim_end().frames_round (cfr);
+               DCPOMATIC_ASSERT (dcp->video_frame_rate());
+               double const cfr = dcp->video_frame_rate().get();
+               Frame const trim_start = dcp->trim_start().frames_round(cfr);
+               Frame const trim_end = dcp->trim_end().frames_round(cfr);
                int const ffr = _film->video_frame_rate ();
 
                /* position in the asset from the start */
                int64_t offset_from_start = 0;
-               /* position in the asset from the end */
+       /* position in the asset from the end */
                int64_t offset_from_end = 0;
                for (auto k: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }
 
-               for (auto k: decoder->reels()) {
+               for (auto reel: decoder->reels()) {
 
                        /* Assume that main picture duration is the length of the reel */
-                       int64_t const reel_duration = k->main_picture()->actual_duration();
+                       int64_t const reel_duration = reel->main_picture()->actual_duration();
 
                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
 
-                       auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
-                       if (j->reference_video ()) {
-                               maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
+                       auto const from = content->position() + DCPTime::from_frames(offset_from_start, ffr);
+                       if (dcp->reference_video()) {
+                               maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_audio ()) {
-                               maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_audio()) {
+                               maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TextType::OPEN_SUBTITLE)) {
-                               maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+                               maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }
 
-                       if (j->reference_text (TextType::CLOSED_CAPTION)) {
-                               for (auto l: k->closed_captions()) {
-                                       maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
+                       if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+                               for (auto caption: reel->closed_captions()) {
+                                       maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }
 
@@ -620,7 +620,7 @@ Player::get_reel_assets ()
                }
        }
 
-       return a;
+       return reel_assets;
 }
 
 
@@ -743,10 +743,38 @@ Player::pass ()
        /* Emit any audio that is ready */
 
        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
-          of our streams, or the position of the _silent.
+          of our streams, or the position of the _silent.  First, though, we choose only streams that are less than
+          ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
+          behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
+          that will never come, causing bugs like #2101.
        */
-       auto pull_to = _playback_length;
+       constexpr int ignore_streams_behind = 5;
+
+       using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+       /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+       auto latest_last_push_end = std::max_element(
+               _stream_states.begin(),
+               _stream_states.end(),
+               [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+               );
+
+       if (latest_last_push_end != _stream_states.end()) {
+               LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+       }
+
+       /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+       std::map<AudioStreamPtr, StreamState> alive_stream_states;
        for (auto const& i: _stream_states) {
+               if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+                       alive_stream_states.insert(i);
+               } else {
+                       LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+               }
+       }
+
+       auto pull_to = _playback_length;
+       for (auto const& i: alive_stream_states) {
                if (!i.second.piece->done && i.second.last_push_end < pull_to) {
                        pull_to = i.second.last_push_end;
                }