2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
36 #include "frame_rate_change.h"
38 #include "image_decoder.h"
41 #include "piece_video.h"
43 #include "player_video.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
53 #include "video_decoder.h"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
68 using std::dynamic_pointer_cast;
71 using std::make_shared;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
86 using namespace dcpomatic;
/* Property identifiers emitted with the Player's Change signal so that
 * observers can tell which aspect of the player's output has changed.
 * Values 700+ presumably avoid clashing with other property id ranges
 * used elsewhere in the project — TODO confirm.
 */
89 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
90 int const PlayerProperty::PLAYLIST = 701;
91 int const PlayerProperty::FILM_CONTAINER = 702;
92 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
93 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
94 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Tolerance used when comparing audio gain values (and audio mappings) for
 * equality — see the `same_settings` comparison in collect() below.
 * A typed constexpr constant is preferred over an object-like macro: it is
 * scoped, visible to the debugger, and participates in overload resolution
 * without surprise textual substitution.  Usages are unchanged.
 */
static constexpr double AUDIO_GAIN_EPSILON = 0.001;
/* Constructors.
 * The first builds a Player over the Film's own playlist; the second over an
 * explicitly-supplied playlist.  NOTE(review): this excerpt is missing lines,
 * so the initialiser lists and constructor bodies shown here are incomplete.
 */
101 Player::Player (shared_ptr<const Film> film)
104 , _tolerant (film->tolerant())
105 , _audio_merger (_film->audio_frame_rate())
111 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
113 , _playlist (playlist_)
115 , _tolerant (film->tolerant())
116 , _audio_merger (_film->audio_frame_rate())
/* Subscribe to change notifications from the film and the playlist. */
125 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
126 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front so that the Player's handler runs before other subscribers. */
129 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
130 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
131 set_video_container_size (_film->frame_size ());
/* Force initial creation of the audio processor, if the film has one. */
133 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
/* Start from the beginning with an accurate seek. */
136 seek (DCPTime (), true);
/* Public entry point: take the player mutex and rebuild the Piece list. */
141 Player::setup_pieces ()
143 boost::mutex::scoped_lock lm (_mutex);
144 setup_pieces_unlocked ();
/* @return true if this content has video and that video is in use. */
149 have_video (shared_ptr<const Content> content)
151 return static_cast<bool>(content->video) && content->video->use();
/* @return true if this content has any audio. */
156 have_audio (shared_ptr<const Content> content)
158 return static_cast<bool>(content->audio);
/* Group a content list into runs of audio-only content with identical audio
 * settings (so that, presumably, each run can be handled by one decoder) —
 * content with video, no audio, or text is passed through ungrouped.
 * NOTE(review): excerpt is missing lines; the comparison and grouping logic
 * shown here is incomplete.
 */
162 vector<vector<shared_ptr<Content>>>
163 collect (shared_ptr<const Film> film, ContentList content)
165 vector<shared_ptr<Content>> ungrouped;
166 vector<vector<shared_ptr<Content>>> grouped;
/* Two pieces of audio content are compatible if their streams, gain, delay,
 * language, resampled rate and channel names all match (within epsilon on gain).
 */
168 auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
170 auto a_streams = a->streams();
171 auto b_streams = b->streams();
173 if (a_streams.size() != b_streams.size()) {
177 for (size_t i = 0; i < a_streams.size(); ++i) {
178 auto a_stream = a_streams[i];
179 auto b_stream = b_streams[i];
181 !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
182 a_stream->frame_rate() != b_stream->frame_rate() ||
183 a_stream->channels() != b_stream->channels()) {
189 fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
190 a->delay() == b->delay() &&
191 a->language() == b->language() &&
192 a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
193 a->channel_names() == b->channel_names()
/* Anything with video or text (or without audio) cannot be grouped. */
197 for (auto i: content) {
198 if (i->video || !i->audio || !i->text.empty()) {
199 ungrouped.push_back (i);
/* Append to an existing group when settings match and the content abuts it. */
202 for (auto& g: grouped) {
203 if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
209 grouped.push_back ({i});
/* Ungrouped content becomes a singleton group. */
214 for (auto i: ungrouped) {
215 grouped.push_back({i});
/* Rebuild the list of Pieces (content + decoder pairs) from the playlist.
 * Caller must hold _mutex.  NOTE(review): excerpt is missing lines; several
 * conditionals and braces here are incomplete.
 */
223 Player::setup_pieces_unlocked ()
225 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces around so their decoders can be re-used below. */
227 auto old_pieces = _pieces;
230 _shuffler.reset (new Shuffler());
231 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
233 for (auto i: playlist()->content()) {
235 if (!i->paths_valid ()) {
239 if (_ignore_video && _ignore_audio && i->text.empty()) {
240 /* We're only interested in text and this content has none */
/* Re-use a decoder from the previous setup for this content, if any. */
244 shared_ptr<Decoder> old_decoder;
245 for (auto j: old_pieces) {
246 auto decoder = j->decoder_for(i);
248 old_decoder = decoder;
253 auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
254 DCPOMATIC_ASSERT (decoder);
256 FrameRateChange frc (_film, i);
/* Apply the player's ignore flags to the new decoder. */
258 if (decoder->video && _ignore_video) {
259 decoder->video->set_ignore (true);
262 if (decoder->audio && _ignore_audio) {
263 decoder->audio->set_ignore (true);
267 for (auto i: decoder->text) {
268 i->set_ignore (true);
/* DCP decoders need to know about referenced-DCP playback settings. */
272 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
274 dcp->set_decode_referenced (_play_referenced);
275 if (_play_referenced) {
276 dcp->set_forced_reduction (_dcp_decode_reduction);
280 auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
281 _pieces.push_back (piece);
284 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
285 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
286 piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
288 piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
293 piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
/* Wire up each text decoder's start/stop signals. */
296 auto j = decoder->text.begin();
298 while (j != decoder->text.end()) {
299 (*j)->BitmapStart.connect (
300 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
302 (*j)->PlainStart.connect (
303 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
306 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
312 if (decoder->atmos) {
313 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Where two pieces of 2D video overlap in time, ignore the earlier one's video. */
317 for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
318 if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
319 /* Look for content later in the content list with in-use video that overlaps this */
320 for (auto j = std::next(i); j != _pieces.end(); ++j) {
321 if ((*j)->use_video()) {
322 (*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
/* Track the gaps where there is no video (black) and no audio (silence). */
328 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
329 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset the emission bookkeeping. */
331 _last_video_time = boost::optional<dcpomatic::DCPTime>();
332 _last_video_eyes = Eyes::BOTH;
333 _last_audio_time = boost::optional<dcpomatic::DCPTime>();
/* Convert a time within some content to DCP time, by asking each Piece in
 * turn; returns nothing if the content is not found (e.g. mid-change).
 */
338 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
340 boost::mutex::scoped_lock lm (_mutex);
342 for (auto i: _pieces) {
343 auto dcp = i->content_time_to_dcp(content, t);
349 /* We couldn't find this content; perhaps things are being changed over */
/* Handle a change to some piece of content in the playlist.
 * PENDING suspends pass()/seek() until the change completes; DONE rebuilds
 * the pieces; CANCELLED resumes.  The Change signal is then re-emitted for
 * our own observers.  NOTE(review): excerpt is missing lines here.
 */
355 Player::playlist_content_change (ChangeType type, int property, bool frequent)
357 if (property == VideoContentProperty::CROP) {
358 if (type == ChangeType::DONE) {
/* A crop change only requires updating metadata on the delayed frames. */
359 auto const vcs = video_container_size();
360 boost::mutex::scoped_lock lm (_mutex);
361 for (auto const& i: _delay) {
362 i.first->reset_metadata (_film, vcs);
366 if (type == ChangeType::PENDING) {
367 /* The player content is probably about to change, so we can't carry on
368 until that has happened and we've rebuilt our pieces. Stop pass()
369 and seek() from working until then.
372 } else if (type == ChangeType::DONE) {
373 /* A change in our content has gone through. Re-build our pieces. */
376 } else if (type == ChangeType::CANCELLED) {
381 Change (type, property, frequent);
/* Set the size of the "container" into which output video is placed,
 * re-creating the cached black frame at the new size.  Emits PENDING then
 * DONE (or CANCELLED if the size is unchanged).
 */
386 Player::set_video_container_size (dcp::Size s)
388 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
391 boost::mutex::scoped_lock lm (_mutex);
393 if (s == _video_container_size) {
395 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
399 _video_container_size = s;
/* Cache a black frame at the new container size for gap filling. */
401 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
402 _black_image->make_black ();
405 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist itself; re-emit it as a PLAYLIST change. */
410 Player::playlist_change (ChangeType type)
412 if (type == ChangeType::DONE) {
415 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property that affects our output. */
420 Player::film_change (ChangeType type, Film::Property p)
422 /* Here we should notice Film properties that affect our output, and
423 alert listeners that our output now would be different to how it was
424 last time we were run.
427 if (p == Film::Property::CONTAINER) {
428 Change (type, PlayerProperty::FILM_CONTAINER, false);
429 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
430 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
431 so we need new pieces here.
433 if (type == ChangeType::DONE) {
436 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
437 } else if (p == Film::Property::AUDIO_PROCESSOR) {
/* Re-clone the film's audio processor at the film's audio rate. */
438 if (type == ChangeType::DONE && _film->audio_processor ()) {
439 boost::mutex::scoped_lock lm (_mutex);
440 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
442 } else if (p == Film::Property::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid. */
443 if (type == ChangeType::DONE) {
444 boost::mutex::scoped_lock lm (_mutex);
445 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, sized to the
 * current video container, for the given eye(s).
 */
451 shared_ptr<PlayerVideo>
452 Player::black_player_video_frame (Eyes eyes) const
454 return std::make_shared<PlayerVideo> (
455 std::make_shared<const RawImageProxy>(_black_image),
458 _video_container_size,
459 _video_container_size,
462 PresetColourConversion::all().front().conversion,
/* No originating content for a synthesised black frame. */
464 std::weak_ptr<Content>(),
465 boost::optional<Frame>(),
/* Collect the subtitle fonts from all pieces into one list. */
472 Player::get_subtitle_fonts ()
474 boost::mutex::scoped_lock lm (_mutex);
476 vector<FontData> fonts;
477 for (auto i: _pieces) {
478 /* XXX: things may go wrong if there are duplicate font IDs
479 with different font files.
481 auto f = i->fonts ();
482 copy (f.begin(), f.end(), back_inserter(fonts));
489 /** Set this player never to produce any video data */
491 Player::set_ignore_video ()
493 boost::mutex::scoped_lock lm (_mutex);
494 _ignore_video = true;
/* Pieces must be rebuilt so the new flag reaches the decoders. */
495 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
500 Player::set_ignore_audio ()
502 boost::mutex::scoped_lock lm (_mutex);
503 _ignore_audio = true;
504 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data. */
509 Player::set_ignore_text ()
511 boost::mutex::scoped_lock lm (_mutex);
513 setup_pieces_unlocked ();
517 /** Set the player to always burn open texts into the image regardless of the content settings */
519 Player::set_always_burn_open_subtitles ()
521 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: the flag is consulted at emission time. */
522 _always_burn_open_subtitles = true;
526 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is missing from this excerpt; presumably
 * this is Player::set_fast(), setting _fast before rebuilding pieces — confirm.
 */
530 boost::mutex::scoped_lock lm (_mutex);
532 setup_pieces_unlocked ();
/* Make this player decode and play content from referenced DCPs,
 * rather than skipping over it.
 */
537 Player::set_play_referenced ()
539 boost::mutex::scoped_lock lm (_mutex);
540 _play_referenced = true;
541 setup_pieces_unlocked ();
/* Trim the reel asset r by the given start/end frame counts and, if anything
 * remains, append it to a as a ReferencedReelAsset positioned at `from'.
 * @param ffr  Film frame rate, used to convert the asset duration to DCPTime.
 */
546 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
548 DCPOMATIC_ASSERT (r);
549 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
550 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
551 if (r->actual_duration() > 0) {
553 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets (picture/sound/subtitle/caption) of any DCP content
 * whose assets are referenced rather than re-encoded, trimmed appropriately.
 * NOTE(review): excerpt is missing lines; some control flow is incomplete.
 */
559 list<ReferencedReelAsset>
560 Player::get_reel_assets ()
562 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
564 list<ReferencedReelAsset> a;
566 for (auto i: playlist()->content()) {
567 auto j = dynamic_pointer_cast<DCPContent> (i);
/* A fresh decoder just to enumerate this DCP's reels. */
572 unique_ptr<DCPDecoder> decoder;
574 decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
579 DCPOMATIC_ASSERT (j->video_frame_rate ());
580 double const cfr = j->video_frame_rate().get();
581 Frame const trim_start = j->trim_start().frames_round (cfr);
582 Frame const trim_end = j->trim_end().frames_round (cfr);
583 int const ffr = _film->video_frame_rate ();
585 /* position in the asset from the start */
586 int64_t offset_from_start = 0;
587 /* position in the asset from the end */
588 int64_t offset_from_end = 0;
589 for (auto k: decoder->reels()) {
590 /* Assume that main picture duration is the length of the reel */
591 offset_from_end += k->main_picture()->actual_duration();
594 for (auto k: decoder->reels()) {
596 /* Assume that main picture duration is the length of the reel */
597 int64_t const reel_duration = k->main_picture()->actual_duration();
599 /* See doc/design/trim_reels.svg */
600 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
601 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
603 auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add whichever asset types this DCP content references. */
604 if (j->reference_video ()) {
605 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
608 if (j->reference_audio ()) {
609 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
612 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
613 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
616 if (j->reference_text (TextType::CLOSED_CAPTION)) {
617 for (auto l: k->closed_captions()) {
618 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
622 offset_from_start += reel_duration;
623 offset_from_end -= reel_duration;
/* NOTE(review): the signature line is missing from this excerpt; presumably
 * this is Player::pass() — make one unit of progress, emitting video/audio.
 * Excerpt is missing lines; several conditionals are incomplete.
 */
634 boost::mutex::scoped_lock lm (_mutex);
637 /* We can't pass in this state */
638 LOG_DEBUG_PLAYER_NC ("Player is suspended");
642 if (_playback_length == DCPTime()) {
643 /* Special; just give one black frame */
644 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
648 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
650 shared_ptr<Piece> earliest_content;
651 optional<DCPTime> earliest_time;
653 for (auto i: _pieces) {
654 auto time = i->decoder_before(earliest_time);
656 earliest_time = *time;
657 earliest_content = i;
670 if (earliest_content) {
/* Black/silence gaps also compete to be the next thing emitted. */
674 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
675 earliest_time = _black.position ();
679 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
680 earliest_time = _silent.position ();
687 earliest_content->pass();
688 if (!_play_referenced && earliest_content->reference_dcp_audio()) {
689 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
690 to `hide' the fact that no audio was emitted during the referenced DCP (though
691 we need to behave as though it was).
693 _last_audio_time = earliest_content->end ();
/* Fill a video gap with one black frame. */
698 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
699 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
700 _black.set_position (_black.position() + one_video_frame());
/* Fill an audio gap with up to one frame's worth of silence. */
704 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
705 DCPTimePeriod period (_silent.period_at_position());
706 if (_last_audio_time) {
707 /* Sometimes the thing that happened last finishes fractionally before
708 or after this silence. Bodge the start time of the silence to fix it.
709 I think this is nothing to worry about since we will just add or
710 remove a little silence at the end of some content.
712 int64_t const error = labs(period.from.get() - _last_audio_time->get());
713 /* Let's not worry about less than a frame at 24fps */
714 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
715 if (error >= too_much_error) {
716 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
718 DCPOMATIC_ASSERT (error < too_much_error);
719 period.from = *_last_audio_time;
721 if (period.duration() > one_video_frame()) {
722 period.to = period.from + one_video_frame();
725 _silent.set_position (period.to);
733 /* Emit any audio that is ready */
735 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
736 of our streams, or the position of the _silent.
738 auto pull_to = _playback_length;
739 for (auto i: _pieces) {
740 i->update_pull_to (pull_to);
742 if (!_silent.done() && _silent.position() < pull_to) {
743 pull_to = _silent.position();
746 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
747 auto audio = _audio_merger.pull (pull_to);
748 for (auto i = audio.begin(); i != audio.end(); ++i) {
749 if (_last_audio_time && i->second < *_last_audio_time) {
750 /* This new data comes before the last we emitted (or the last seek); discard it */
751 auto cut = discard_audio (i->first, i->second, *_last_audio_time);
756 } else if (_last_audio_time && i->second > *_last_audio_time) {
757 /* There's a gap between this data and the last we emitted; fill with silence */
758 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
761 emit_audio (i->first, i->second);
/* Flush any delayed video frames that are ready. */
766 for (auto const& i: _delay) {
767 do_emit_video(i.first, i.second);
775 /** @return Open subtitles for the frame at the given time, converted to images */
776 optional<PositionImage>
777 Player::open_subtitles_for_frame (DCPTime time) const
779 list<PositionImage> captions;
780 int const vfr = _film->video_frame_rate();
/* Fetch the open subtitles active during this one-frame period that should be burnt in. */
784 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
787 /* Bitmap subtitles */
788 for (auto i: j.bitmap) {
793 /* i.image will already have been scaled to fit _video_container_size */
794 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a proportion of the container size. */
800 lrint(_video_container_size.width * i.rectangle.x),
801 lrint(_video_container_size.height * i.rectangle.y)
807 /* String subtitles (rendered to an image) */
808 if (!j.string.empty()) {
809 auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
810 copy (s.begin(), s.end(), back_inserter (captions));
814 if (captions.empty()) {
/* Combine everything into a single image for burning in. */
818 return merge (captions);
/* Handler for video arriving from a Piece's decoder.  Converts to DCP time,
 * discards out-of-range frames, fills any gap since the last emitted frame
 * (repeating the previous frame or using black), then emits.
 * NOTE(review): excerpt is missing lines; some conditionals are incomplete.
 */
823 Player::video (weak_ptr<Piece> wp, PieceVideo video)
825 auto piece = wp.lock ();
830 if (!piece->use_video()) {
/* When skipping (e.g. 50 -> 25fps) drop every other frame. */
834 auto frc = piece->frame_rate_change();
835 if (frc.skip && (video.frame % 2) == 1) {
839 /* Time of the first frame we will emit */
840 DCPTime const time = piece->content_video_to_dcp (video.frame);
841 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
843 /* Discard if it's before the content's period or the last accurate seek. We can't discard
844 if it's after the content's period here as in that case we still need to fill any gap between
845 `now' and the end of the content's period.
847 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
851 if (piece->ignore_video_at(time)) {
855 /* Fill gaps that we discover now that we have some video which needs to be emitted.
856 This is where we need to fill to.
858 DCPTime fill_to = min (time, piece->end());
860 if (_last_video_time) {
861 DCPTime fill_from = max (*_last_video_time, piece->position());
863 /* Fill if we have more than half a frame to do */
864 if ((fill_to - fill_from) > one_video_frame() / 2) {
865 auto last = _last_video.find (wp);
866 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, stopping at the correct eye. */
867 auto fill_to_eyes = video.eyes;
868 if (fill_to_eyes == Eyes::BOTH) {
869 fill_to_eyes = Eyes::LEFT;
871 if (fill_to == piece->end()) {
872 /* Don't fill after the end of the content */
873 fill_to_eyes = Eyes::LEFT;
876 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
877 if (eyes == Eyes::BOTH) {
880 while (j < fill_to || eyes != fill_to_eyes) {
881 if (last != _last_video.end()) {
882 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
883 auto copy = last->second->shallow_copy();
884 copy->set_eyes (eyes);
885 emit_video (copy, j);
887 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
888 emit_video (black_player_video_frame(eyes), j);
/* Only advance time once both eyes have been emitted. */
890 if (eyes == Eyes::RIGHT) {
891 j += one_video_frame();
893 eyes = increment_eyes (eyes);
/* 2D fill: repeat the last frame, or black if we have none. */
896 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
897 if (last != _last_video.end()) {
898 emit_video (last->second, j);
900 emit_video (black_player_video_frame(Eyes::BOTH), j);
/* Cache this frame (for gap filling) and emit it, repeating per FrameRateChange. */
907 _last_video[wp] = piece->player_video (video, _video_container_size);
910 for (int i = 0; i < frc.repeat; ++i) {
911 if (t < piece->end()) {
912 emit_video (_last_video[wp], t);
914 t += one_video_frame ();
/* Handler for audio arriving from a Piece's decoder.  Trims to the content's
 * period, applies gain, remapping and the audio processor, then pushes into
 * the merger.  NOTE(review): excerpt is missing lines; some branches are incomplete.
 */
920 Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
922 DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
924 auto piece = wp.lock ();
929 int const rfr = piece->resampled_audio_frame_rate ();
931 /* Compute time in the DCP */
932 auto time = piece->resampled_audio_to_dcp (piece_audio.frame);
933 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", piece_audio.frame, to_string(time));
935 /* And the end of this block in the DCP */
936 auto end = time + DCPTime::from_frames(piece_audio.audio->frames(), rfr);
938 /* Remove anything that comes before the start or after the end of the content */
939 if (time < piece->position()) {
940 auto cut = discard_audio (piece_audio.audio, time, piece->position());
942 /* This audio is entirely discarded */
945 piece_audio.audio = cut.first;
947 } else if (time > piece->end()) {
950 } else if (end > piece->end()) {
/* Block overlaps the content's end: keep only the frames inside it. */
951 Frame const remaining_frames = DCPTime(piece->end() - time).frames_round(rfr);
952 if (remaining_frames == 0) {
955 piece_audio.audio = make_shared<AudioBuffers>(piece_audio.audio, remaining_frames, 0);
958 DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
/* Gain: copy before applying so we don't modify the decoder's buffers. */
962 if (piece->audio_gain() != 0) {
963 auto gain = make_shared<AudioBuffers>(piece_audio.audio);
964 gain->apply_gain (piece->audio_gain());
965 piece_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
970 piece_audio.audio = remap (piece_audio.audio, _film->audio_channels(), piece_audio.stream->mapping());
974 if (_audio_processor) {
975 piece_audio.audio = _audio_processor->run (piece_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has got. */
980 _audio_merger.push (piece_audio.audio, time);
981 piece->set_last_push_end (piece_audio.stream, time + DCPTime::from_frames(piece_audio.audio->frames(), _film->audio_frame_rate()));
/* Handler for a bitmap subtitle starting.  Applies the content's offset and
 * scale settings, scales the image to the container, and records the active text.
 */
986 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
988 auto piece = wp.lock ();
989 auto content = wc.lock ();
990 auto text = wt.lock ();
991 if (!piece || !content || !text) {
995 /* Apply content's subtitle offsets */
996 subtitle.sub.rectangle.x += text->x_offset ();
997 subtitle.sub.rectangle.y += text->y_offset ();
999 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1000 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
1001 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1003 /* Apply content's subtitle scale */
1004 subtitle.sub.rectangle.width *= text->x_scale ();
1005 subtitle.sub.rectangle.height *= text->y_scale ();
1008 auto image = subtitle.sub.image;
1010 /* We will scale the subtitle up to fit _video_container_size */
1011 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1012 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* A zero-sized subtitle has nothing to show. */
1013 if (width == 0 || height == 0) {
1017 dcp::Size scaled_size (width, height);
1018 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1019 auto from = piece->content_time_to_dcp(content, subtitle.from());
1020 DCPOMATIC_ASSERT (from);
1022 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handler for a string (plain-text) subtitle starting.  Applies the content's
 * position/scale settings to each string and records the active text.
 */
1027 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
1029 auto piece = wp.lock ();
1030 auto content = wc.lock ();
1031 auto text = wt.lock ();
1032 if (!piece || !content || !text) {
1037 auto const from = piece->content_time_to_dcp(content, subtitle.from());
1038 DCPOMATIC_ASSERT (from);
/* Ignore subtitles that start after the content has finished. */
1040 if (from > piece->end()) {
1044 for (auto s: subtitle.subs) {
1045 s.set_h_position (s.h_position() + text->x_offset ());
1046 s.set_v_position (s.v_position() + text->y_offset ());
1047 float const xs = text->x_scale();
1048 float const ys = text->y_scale();
1049 float size = s.size();
1051 /* Adjust size to express the common part of the scaling;
1052 e.g. if xs = ys = 0.5 we scale size by 2.
1054 if (xs > 1e-5 && ys > 1e-5) {
1055 size *= 1 / min (1 / xs, 1 / ys);
1059 /* Then express aspect ratio changes */
1060 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1061 s.set_aspect_adjust (xs / ys);
/* dcp::Time here is constructed with an editable-units rate of 1000. */
1064 s.set_in (dcp::Time(from->seconds(), 1000));
1065 ps.string.push_back (StringText (s, text->outline_width()));
1066 ps.add_fonts (text->fonts ());
1069 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handler for a subtitle finishing: close the active text and, if it is not
 * being burnt in, emit it via the Text signal.
 */
1074 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1076 auto content = wc.lock ();
1077 auto text = wt.lock ();
/* Nothing to stop if we never saw this text start. */
1082 if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1086 shared_ptr<Piece> piece = wp.lock ();
1091 auto const dcp_to = piece->content_time_to_dcp(content, to);
1092 DCPOMATIC_ASSERT (dcp_to);
1094 if (*dcp_to > piece->end()) {
1098 auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted as text. */
1100 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1101 if (text->use() && !always && !text->burn()) {
1102 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
/* Seek all pieces to the given time, flushing per-seek state (merger,
 * processor, active texts, cached frames).
 * @param accurate  true for a frame-accurate seek; then the emission clocks
 *                  are pinned to `time', otherwise they are reset.
 */
1108 Player::seek (DCPTime time, bool accurate)
1110 boost::mutex::scoped_lock lm (_mutex);
1111 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1114 /* We can't seek in this state */
1119 _shuffler->clear ();
1124 if (_audio_processor) {
1125 _audio_processor->flush ();
1128 _audio_merger.clear ();
1129 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1130 _active_texts[i].clear ();
1133 for (auto i: _pieces) {
1134 i->seek (time, accurate);
/* Accurate seek: next emissions must start exactly at `time'. */
1138 _last_video_time = time;
1139 _last_video_eyes = Eyes::LEFT;
1140 _last_audio_time = time;
/* Inaccurate seek: forget the clocks; they restart on the next emission. */
1142 _last_video_time = optional<DCPTime>();
1143 _last_video_eyes = optional<Eyes>();
1144 _last_audio_time = optional<DCPTime>();
1147 _black.set_position (time);
1148 _silent.set_position (time);
1150 _last_video.clear ();
/* Queue a video frame for emission, collapsing 3D eyes to BOTH for 2D films.
 * Frames are buffered briefly in _delay so subtitles can arrive first.
 */
1155 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1157 if (!_film->three_d()) {
1158 if (pv->eyes() == Eyes::LEFT) {
1159 /* Use left-eye images for both eyes... */
1160 pv->set_eyes (Eyes::BOTH);
1161 } else if (pv->eyes() == Eyes::RIGHT) {
1162 /* ...and discard the right */
1167 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1168 player before the video that requires them.
1170 _delay.push_back (make_pair (pv, time));
/* Advance the video clock once a full frame (both eyes) has been queued. */
1172 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1173 _last_video_time = time + one_video_frame();
1175 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to three frames buffered before actually emitting. */
1177 if (_delay.size() < 3) {
1181 auto to_do = _delay.front();
1183 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) video frame, burning in any open
 * subtitles active at its time and expiring old active texts.
 */
1188 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1190 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1191 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1192 _active_texts[i].clear_before (time);
1196 auto subtitles = open_subtitles_for_frame (time);
1198 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance the audio clock.
 * Asserts that audio is contiguous to within one sample (times in DCPTime units).
 */
1206 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1208 /* Log if the assert below is about to fail */
1209 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1210 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1213 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1214 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1215 Audio (data, time, _film->audio_frame_rate());
1216 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most 0.5s. */
1221 Player::fill_audio (DCPTimePeriod period)
1223 if (period.from == period.to) {
1227 DCPOMATIC_ASSERT (period.from < period.to);
1229 DCPTime t = period.from;
1230 while (t < period.to) {
1231 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1232 Frame const samples = block.frames_round(_film->audio_frame_rate());
1234 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1235 silence->make_silent ();
1236 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1244 Player::one_video_frame () const
1246 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (starting at `time') which falls before
 * `discard_to'.  @return the remaining audio and its new start time; a null
 * buffer if everything was discarded.
 */
1250 pair<shared_ptr<AudioBuffers>, DCPTime>
1251 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1253 auto const discard_time = discard_to - time;
1254 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1255 auto remaining_frames = audio->frames() - discard_frames;
1256 if (remaining_frames <= 0) {
1257 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1259 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1260 return make_pair(cut, time + discard_time);
/* Set the resolution reduction used when decoding DCP content, rebuilding
 * pieces if it changed.  Emits PENDING then DONE (or CANCELLED on no-op).
 */
1265 Player::set_dcp_decode_reduction (optional<int> reduction)
1267 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1270 boost::mutex::scoped_lock lm (_mutex);
1272 if (reduction == _dcp_decode_reduction) {
1274 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1278 _dcp_decode_reduction = reduction;
1279 setup_pieces_unlocked ();
1282 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* @return the explicit playlist if one was supplied, else the film's own. */
1286 shared_ptr<const Playlist>
1287 Player::playlist () const
1289 return _playlist ? _playlist : _film->playlist();
/* Handler for Atmos data from a decoder: re-emit via the Atmos signal,
 * converting the frame index to DCP time at the film's video frame rate.
 */
1294 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1296 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);