{
/* Does not require a lock on _mutex as it's only called from DCPEncoder */
- list<ReferencedReelAsset> a;
+ list<ReferencedReelAsset> reel_assets;
- for (auto i: playlist()->content()) {
- auto j = dynamic_pointer_cast<DCPContent> (i);
- if (!j) {
+ for (auto content: playlist()->content()) {
+ auto dcp = dynamic_pointer_cast<DCPContent>(content);
+ if (!dcp) {
+ continue;
+ }
+
+ if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
continue;
}
scoped_ptr<DCPDecoder> decoder;
try {
- decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
+ decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
} catch (...) {
- return a;
+ return reel_assets;
}
- DCPOMATIC_ASSERT (j->video_frame_rate ());
- double const cfr = j->video_frame_rate().get();
- Frame const trim_start = j->trim_start().frames_round (cfr);
- Frame const trim_end = j->trim_end().frames_round (cfr);
- int const ffr = _film->video_frame_rate ();
+ auto const frame_rate = _film->video_frame_rate();
+ DCPOMATIC_ASSERT (dcp->video_frame_rate());
+ /* We should only be referencing if the DCP rate is the same as the film rate */
+ DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);
+
+ Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
+ Frame const trim_end = dcp->trim_end().frames_round(frame_rate);
/* position in the asset from the start */
int64_t offset_from_start = 0;
- /* position in the asset from the end */
+ /* position in the asset from the end */
int64_t offset_from_end = 0;
- for (auto k: decoder->reels()) {
+ for (auto reel: decoder->reels()) {
/* Assume that main picture duration is the length of the reel */
- offset_from_end += k->main_picture()->actual_duration();
+ offset_from_end += reel->main_picture()->actual_duration();
}
- for (auto k: decoder->reels()) {
+ for (auto reel: decoder->reels()) {
/* Assume that main picture duration is the length of the reel */
- int64_t const reel_duration = k->main_picture()->actual_duration();
+ int64_t const reel_duration = reel->main_picture()->actual_duration();
/* See doc/design/trim_reels.svg */
Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
- auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
- if (j->reference_video ()) {
- maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
+ auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
+ if (dcp->reference_video()) {
+ maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
}
- if (j->reference_audio ()) {
- maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
+ if (dcp->reference_audio()) {
+ maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
}
- if (j->reference_text (TextType::OPEN_SUBTITLE)) {
- maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
+ if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
+ maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
}
- if (j->reference_text (TextType::CLOSED_CAPTION)) {
- for (auto l: k->closed_captions()) {
- maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
+ if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
+ for (auto caption: reel->closed_captions()) {
+ maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
}
}
}
}
- return a;
+ return reel_assets;
}
/* Emit any audio that is ready */
/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
- of our streams, or the position of the _silent.
+ of our streams, or the position of the _silent. First, though, we choose only streams that are less than
+ ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
+ behind it has finished). This is so that we don't withhold audio indefinitely awaiting data from a stream
+ that will never come, causing bugs like #2101.
*/
- auto pull_to = _playback_length;
+ constexpr int ignore_streams_behind = 5;
+
+ using state_pair = std::pair<AudioStreamPtr, StreamState>;
+
+ /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
+ auto latest_last_push_end = std::max_element(
+ _stream_states.begin(),
+ _stream_states.end(),
+ [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
+ );
+
+ if (latest_last_push_end != _stream_states.end()) {
+ LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
+ }
+
+ /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
+ std::map<AudioStreamPtr, StreamState> alive_stream_states;
for (auto const& i: _stream_states) {
+ if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
+ alive_stream_states.insert(i);
+ } else {
+ LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
+ }
+ }
+
+ auto pull_to = _playback_length;
+ for (auto const& i: alive_stream_states) {
if (!i.second.piece->done && i.second.last_push_end < pull_to) {
pull_to = i.second.last_push_end;
}
}
}
+ auto const content_video = piece->content->video;
+
_last_video[wp] = std::make_shared<PlayerVideo>(
video.image,
- piece->content->video->crop (),
- piece->content->video->fade (_film, video.frame),
- scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
+ content_video->actual_crop(),
+ content_video->fade (_film, video.frame),
+ scale_for_display(
+ content_video->scaled_size(_film->frame_size()),
+ _video_container_size,
+ _film->frame_size(),
+ content_video->pixel_quanta()
+ ),
_video_container_size,
video.eyes,
video.part,
- piece->content->video->colour_conversion(),
- piece->content->video->range(),
+ content_video->colour_conversion(),
+ content_video->range(),
piece->content,
video.frame,
false