2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
31 #include "raw_image_proxy.h"
34 #include "render_text.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
69 using std::dynamic_pointer_cast;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
87 using namespace dcpomatic;
/* Codes passed with the Change signal so listeners can tell which Player
 * property altered.  Values 700+ keep them distinct from other property codes.
 */
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Player constructors: one driven by the Film's own playlist, one given an
 * explicit Playlist.  NOTE(review): this view of the file elides lines — the
 * init-lists and the shared construction body below are fragments.
 */
98 Player::Player (shared_ptr<const Film> film)
101 , _tolerant (film->tolerant())
102 , _audio_merger (_film->audio_frame_rate())
108 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
110 , _playlist (playlist_)
112 , _tolerant (film->tolerant())
113 , _audio_merger (_film->audio_frame_rate())
/* Wire up change notifications from film and playlist. */
122 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
123 /* The butler must hear about this first, so since we are proxying this through to the butler we must
126 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
127 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
128 set_video_container_size (_film->frame_size ());
/* Pretend the audio-processor property just changed so _audio_processor is set up. */
130 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
/* Accurate seek to zero to put all decoders into a known starting state. */
133 seek (DCPTime (), true);
144 Player::setup_pieces ()
146 boost::mutex::scoped_lock lm (_mutex);
147 setup_pieces_unlocked ();
152 have_video (shared_ptr<const Content> content)
154 return static_cast<bool>(content->video) && content->video->use();
159 have_audio (shared_ptr<const Content> content)
161 return static_cast<bool>(content->audio);
/* Rebuild _pieces (one Piece per usable playlist content item, each with its
 * own decoder), reconnect all decoder signals, and reset playback state.
 * Caller must hold _mutex (see setup_pieces()).
 */
166 Player::setup_pieces_unlocked ()
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so decoders can be re-used for unchanged content. */
170 auto old_pieces = _pieces;
/* NOTE(review): raw new for _shuffler — deletion presumably happens elsewhere; confirm no leak. */
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 for (auto i: playlist()->content()) {
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Re-use the decoder from the previous setup for this content, if any. */
188 shared_ptr<Decoder> old_decoder;
189 for (auto j: old_pieces) {
190 if (j->content == i) {
191 old_decoder = j->decoder;
196 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197 DCPOMATIC_ASSERT (decoder);
199 FrameRateChange frc (_film, i);
201 if (decoder->video && _ignore_video) {
202 decoder->video->set_ignore (true);
205 if (decoder->audio && _ignore_audio) {
206 decoder->audio->set_ignore (true);
210 for (auto i: decoder->text) {
211 i->set_ignore (true);
/* DCP content may decode its referenced assets, optionally with a forced reduction. */
215 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
217 dcp->set_decode_referenced (_play_referenced);
218 if (_play_referenced) {
219 dcp->set_forced_reduction (_dcp_decode_reduction);
223 auto piece = make_shared<Piece>(i, decoder, frc);
224 _pieces.push_back (piece);
226 if (decoder->video) {
227 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
228 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
231 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
235 if (decoder->audio) {
236 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect the text decoders' start/stop signals to our subtitle handlers. */
239 auto j = decoder->text.begin();
241 while (j != decoder->text.end()) {
242 (*j)->BitmapStart.connect (
243 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
245 (*j)->PlainStart.connect (
246 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
249 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
255 if (decoder->atmos) {
256 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track each audio stream's piece and last-push position for pull-based merging. */
260 _stream_states.clear ();
261 for (auto i: _pieces) {
262 if (i->content->audio) {
263 for (auto j: i->content->audio->streams()) {
264 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the gaps with no video/audio that we must fill. */
269 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
272 _last_video_time = {};
273 _last_video_eyes = Eyes::BOTH;
274 _last_audio_time = {};
/* React to a change in some piece of playlist content, then re-emit the
 * change to our own listeners.
 */
279 Player::playlist_content_change (ChangeType type, int property, bool frequent)
/* A completed crop change only needs the delayed frames' metadata refreshed. */
281 if (property == VideoContentProperty::CROP) {
282 if (type == ChangeType::DONE) {
283 auto const vcs = video_container_size();
284 boost::mutex::scoped_lock lm (_mutex);
285 for (auto const& i: _delay) {
286 i.first->reset_metadata (_film, vcs);
290 if (type == ChangeType::PENDING) {
291 /* The player content is probably about to change, so we can't carry on
292 until that has happened and we've rebuilt our pieces. Stop pass()
293 and seek() from working until then.
296 } else if (type == ChangeType::DONE) {
297 /* A change in our content has gone through. Re-build our pieces. */
300 } else if (type == ChangeType::CANCELLED) {
/* Forward the notification to our own observers. */
305 Change (type, property, frequent);
/* Set the size of the "container" into which video is placed, rebuilding the
 * black filler image to match.  Emits PENDING, then either CANCELLED (no
 * change) or DONE.
 */
310 Player::set_video_container_size (dcp::Size s)
312 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
315 boost::mutex::scoped_lock lm (_mutex);
/* No-op: tell listeners the pending change was cancelled. */
317 if (s == _video_container_size) {
319 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
323 _video_container_size = s;
/* Rebuild the cached black frame at the new size. */
325 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
326 _black_image->make_black ();
329 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
334 Player::playlist_change (ChangeType type)
336 if (type == ChangeType::DONE) {
339 Change (type, PlayerProperty::PLAYLIST, false);
/* Respond to a change of a Film property that affects our output, forwarding
 * an appropriate PlayerProperty change to listeners.
 */
344 Player::film_change (ChangeType type, Film::Property p)
346 /* Here we should notice Film properties that affect our output, and
347 alert listeners that our output now would be different to how it was
348 last time we were run.
351 if (p == Film::Property::CONTAINER) {
352 Change (type, PlayerProperty::FILM_CONTAINER, false);
353 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
354 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
355 so we need new pieces here.
357 if (type == ChangeType::DONE) {
360 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
361 } else if (p == Film::Property::AUDIO_PROCESSOR) {
/* Clone the film's processor at our audio rate for use during playback. */
362 if (type == ChangeType::DONE && _film->audio_processor ()) {
363 boost::mutex::scoped_lock lm (_mutex);
364 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
366 } else if (p == Film::Property::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid. */
367 if (type == ChangeType::DONE) {
368 boost::mutex::scoped_lock lm (_mutex);
369 _audio_merger.clear ();
/* Build a PlayerVideo wrapping the cached black image, sized to the video
 * container, for filling gaps where there is no real video.
 * NOTE(review): several constructor arguments are elided in this view.
 */
375 shared_ptr<PlayerVideo>
376 Player::black_player_video_frame (Eyes eyes) const
378 return std::make_shared<PlayerVideo> (
379 std::make_shared<const RawImageProxy>(_black_image),
382 _video_container_size,
383 _video_container_size,
/* Any preset conversion will do for an all-black frame. */
386 PresetColourConversion::all().front().conversion,
/* No originating content and no content frame index for synthetic black. */
388 std::weak_ptr<Content>(),
389 boost::optional<Frame>(),
396 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
398 auto s = t - piece->content->position ();
399 s = min (piece->content->length_after_trim(_film), s);
400 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
402 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
403 then convert that ContentTime to frames at the content's rate. However this fails for
404 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
405 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
407 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
409 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
414 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
416 /* See comment in dcp_to_content_video */
417 auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
418 return d + piece->content->position();
423 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
425 auto s = t - piece->content->position ();
426 s = min (piece->content->length_after_trim(_film), s);
427 /* See notes in dcp_to_content_video */
428 return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
433 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
435 /* See comment in dcp_to_content_video */
436 return DCPTime::from_frames (f, _film->audio_frame_rate())
437 - DCPTime (piece->content->trim_start(), piece->frc)
438 + piece->content->position();
443 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
445 auto s = t - piece->content->position ();
446 s = min (piece->content->length_after_trim(_film), s);
447 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
452 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
454 return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
459 Player::get_subtitle_fonts ()
461 boost::mutex::scoped_lock lm (_mutex);
463 vector<FontData> fonts;
464 for (auto i: _pieces) {
465 /* XXX: things may go wrong if there are duplicate font IDs
466 with different font files.
468 auto f = i->decoder->fonts ();
469 copy (f.begin(), f.end(), back_inserter(fonts));
476 /** Set this player never to produce any video data */
478 Player::set_ignore_video ()
480 boost::mutex::scoped_lock lm (_mutex);
481 _ignore_video = true;
482 setup_pieces_unlocked ();
487 Player::set_ignore_audio ()
489 boost::mutex::scoped_lock lm (_mutex);
490 _ignore_audio = true;
491 setup_pieces_unlocked ();
496 Player::set_ignore_text ()
498 boost::mutex::scoped_lock lm (_mutex);
500 setup_pieces_unlocked ();
504 /** Set the player to always burn open texts into the image regardless of the content settings */
506 Player::set_always_burn_open_subtitles ()
508 boost::mutex::scoped_lock lm (_mutex);
509 _always_burn_open_subtitles = true;
513 /** Sets up the player to be faster, possibly at the expense of quality */
517 boost::mutex::scoped_lock lm (_mutex);
519 setup_pieces_unlocked ();
524 Player::set_play_referenced ()
526 boost::mutex::scoped_lock lm (_mutex);
527 _play_referenced = true;
528 setup_pieces_unlocked ();
/* Trim a reel asset by the given start/end frame counts and, if anything
 * remains, append it (with its DCP period) to the list a.
 * NOTE(review): the push_back wrapper around the last line is elided in this view.
 */
533 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
535 DCPOMATIC_ASSERT (r);
/* Apply the trims directly to the asset's entry point and duration. */
536 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
537 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
538 if (r->actual_duration() > 0) {
540 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Gather the reel assets of any DCP content which is referenced (rather than
 * re-encoded), trimmed to the content's trim settings and positioned in the
 * output DCP's timeline.
 */
546 list<ReferencedReelAsset>
547 Player::get_reel_assets ()
549 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
551 list<ReferencedReelAsset> a;
553 for (auto i: playlist()->content()) {
/* Only DCP content can be referenced. */
554 auto j = dynamic_pointer_cast<DCPContent> (i);
559 scoped_ptr<DCPDecoder> decoder;
561 decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
566 DCPOMATIC_ASSERT (j->video_frame_rate ());
567 double const cfr = j->video_frame_rate().get();
568 Frame const trim_start = j->trim_start().frames_round (cfr);
569 Frame const trim_end = j->trim_end().frames_round (cfr);
570 int const ffr = _film->video_frame_rate ();
572 /* position in the asset from the start */
573 int64_t offset_from_start = 0;
574 /* position in the asset from the end */
575 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down from the end. */
576 for (auto k: decoder->reels()) {
577 /* Assume that main picture duration is the length of the reel */
578 offset_from_end += k->main_picture()->actual_duration();
581 for (auto k: decoder->reels()) {
583 /* Assume that main picture duration is the length of the reel */
584 int64_t const reel_duration = k->main_picture()->actual_duration();
586 /* See doc/design/trim_reels.svg */
587 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
588 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
590 auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each asset type this content references. */
591 if (j->reference_video ()) {
592 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
595 if (j->reference_audio ()) {
596 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
599 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
600 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
603 if (j->reference_text (TextType::CLOSED_CAPTION)) {
604 for (auto l: k->closed_captions()) {
605 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
609 offset_from_start += reel_duration;
610 offset_from_end -= reel_duration;
/* Body of Player::pass(): make the decoder (or black/silence filler) which is
 * farthest behind emit some data, then flush any merged audio that is
 * definitely complete.  NOTE(review): the signature and several branch
 * headers are elided in this view of the file.
 */
621 boost::mutex::scoped_lock lm (_mutex);
624 /* We can't pass in this state */
625 LOG_DEBUG_PLAYER_NC ("Player is suspended");
629 if (_playback_length == DCPTime()) {
630 /* Special; just give one black frame */
631 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
635 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
637 shared_ptr<Piece> earliest_content;
638 optional<DCPTime> earliest_time;
640 for (auto i: _pieces) {
645 auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
646 if (t > i->content->end(_film)) {
650 /* Given two choices at the same time, pick the one with texts so we see it before
653 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
655 earliest_content = i;
669 if (earliest_content) {
/* Black/silence fillers take priority if they are behind the earliest decoder. */
673 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
674 earliest_time = _black.position ();
678 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
679 earliest_time = _silent.position ();
686 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
687 earliest_content->done = earliest_content->decoder->pass ();
688 auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
689 if (dcp && !_play_referenced && dcp->reference_audio()) {
690 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
691 to `hide' the fact that no audio was emitted during the referenced DCP (though
692 we need to behave as though it was).
694 _last_audio_time = dcp->end (_film);
699 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
700 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
701 _black.set_position (_black.position() + one_video_frame());
705 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
706 DCPTimePeriod period (_silent.period_at_position());
707 if (_last_audio_time) {
708 /* Sometimes the thing that happened last finishes fractionally before
709 or after this silence. Bodge the start time of the silence to fix it.
710 I think this is nothing to worry about since we will just add or
711 remove a little silence at the end of some content.
713 int64_t const error = labs(period.from.get() - _last_audio_time->get());
714 /* Let's not worry about less than a frame at 24fps */
715 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
716 if (error >= too_much_error) {
717 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
719 DCPOMATIC_ASSERT (error < too_much_error);
720 period.from = *_last_audio_time;
/* Emit silence at most one video frame at a time. */
722 if (period.duration() > one_video_frame()) {
723 period.to = period.from + one_video_frame();
726 _silent.set_position (period.to);
734 /* Emit any audio that is ready */
736 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
737 of our streams, or the position of the _silent.
739 auto pull_to = _playback_length;
740 for (auto const& i: _stream_states) {
741 if (!i.second.piece->done && i.second.last_push_end < pull_to) {
742 pull_to = i.second.last_push_end;
745 if (!_silent.done() && _silent.position() < pull_to) {
746 pull_to = _silent.position();
749 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
750 auto audio = _audio_merger.pull (pull_to);
751 for (auto i = audio.begin(); i != audio.end(); ++i) {
752 if (_last_audio_time && i->second < *_last_audio_time) {
753 /* This new data comes before the last we emitted (or the last seek); discard it */
754 auto cut = discard_audio (i->first, i->second, *_last_audio_time);
759 } else if (_last_audio_time && i->second > *_last_audio_time) {
760 /* There's a gap between this data and the last we emitted; fill with silence */
761 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
764 emit_audio (i->first, i->second);
/* Flush delayed video that is now safe to emit. */
769 for (auto const& i: _delay) {
770 do_emit_video(i.first, i.second);
778 /** @return Open subtitles for the frame at the given time, converted to images */
779 optional<PositionImage>
780 Player::open_subtitles_for_frame (DCPTime time) const
782 list<PositionImage> captions;
783 int const vfr = _film->video_frame_rate();
/* Collect the open subtitles active during this one-frame period. */
787 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
790 /* Bitmap subtitles */
791 for (auto i: j.bitmap) {
796 /* i.image will already have been scaled to fit _video_container_size */
797 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportional; convert to pixel positions. */
803 lrint(_video_container_size.width * i.rectangle.x),
804 lrint(_video_container_size.height * i.rectangle.y)
810 /* String subtitles (rendered to an image) */
811 if (!j.string.empty()) {
812 auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
813 copy (s.begin(), s.end(), back_inserter (captions));
/* Nothing to burn in for this frame. */
817 if (captions.empty()) {
/* Combine all captions into a single positioned image. */
821 return merge (captions);
/* Handle a frame of video arriving from a piece's decoder: discard
 * out-of-range frames, fill any gap since the last emitted frame (with the
 * previous frame or black, handling 3D eyes), then emit this frame,
 * repeating it if the frame-rate change requires.
 */
826 Player::video (weak_ptr<Piece> wp, ContentVideo video)
828 auto piece = wp.lock ();
833 if (!piece->content->video->use()) {
/* Skip every other frame when downconverting frame rate. */
837 FrameRateChange frc (_film, piece->content);
838 if (frc.skip && (video.frame % 2) == 1) {
842 /* Time of the first frame we will emit */
843 DCPTime const time = content_video_to_dcp (piece, video.frame);
844 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
846 /* Discard if it's before the content's period or the last accurate seek. We can't discard
847 if it's after the content's period here as in that case we still need to fill any gap between
848 `now' and the end of the content's period.
850 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
854 /* Fill gaps that we discover now that we have some video which needs to be emitted.
855 This is where we need to fill to.
857 DCPTime fill_to = min (time, piece->content->end(_film));
859 if (_last_video_time) {
860 DCPTime fill_from = max (*_last_video_time, piece->content->position());
862 /* Fill if we have more than half a frame to do */
863 if ((fill_to - fill_from) > one_video_frame() / 2) {
864 auto last = _last_video.find (wp);
/* In 3D we must fill eye-by-eye, stopping at the correct eye. */
865 if (_film->three_d()) {
866 auto fill_to_eyes = video.eyes;
867 if (fill_to_eyes == Eyes::BOTH) {
868 fill_to_eyes = Eyes::LEFT;
870 if (fill_to == piece->content->end(_film)) {
871 /* Don't fill after the end of the content */
872 fill_to_eyes = Eyes::LEFT;
875 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
876 if (eyes == Eyes::BOTH) {
879 while (j < fill_to || eyes != fill_to_eyes) {
880 if (last != _last_video.end()) {
881 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
882 auto copy = last->second->shallow_copy();
883 copy->set_eyes (eyes);
884 emit_video (copy, j);
886 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
887 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye of a pair has been emitted. */
889 if (eyes == Eyes::RIGHT) {
890 j += one_video_frame();
892 eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time. */
895 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
896 if (last != _last_video.end()) {
897 emit_video (last->second, j);
899 emit_video (black_player_video_frame(Eyes::BOTH), j);
/* Wrap the decoded frame with its presentation metadata; cached for gap-filling. */
906 _last_video[wp] = std::make_shared<PlayerVideo>(
908 piece->content->video->crop (),
909 piece->content->video->fade (_film, video.frame),
910 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
911 _video_container_size,
914 piece->content->video->colour_conversion(),
915 piece->content->video->range(),
/* Emit the frame, repeated as the frame-rate change requires, while inside the content. */
922 for (int i = 0; i < frc.repeat; ++i) {
923 if (t < piece->content->end(_film)) {
924 emit_video (_last_video[wp], t);
926 t += one_video_frame ();
/* Handle a block of audio arriving from a piece's decoder: trim it to the
 * content's period, apply gain/remap/processing, then push it into the merger
 * and record how far this stream has got.
 */
932 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
934 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
936 auto piece = wp.lock ();
941 auto content = piece->content->audio;
942 DCPOMATIC_ASSERT (content);
944 int const rfr = content->resampled_frame_rate (_film);
946 /* Compute time in the DCP */
947 auto time = resampled_audio_to_dcp (piece, content_audio.frame);
948 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
950 /* And the end of this block in the DCP */
951 auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
953 /* Remove anything that comes before the start or after the end of the content */
954 if (time < piece->content->position()) {
955 auto cut = discard_audio (content_audio.audio, time, piece->content->position());
957 /* This audio is entirely discarded */
960 content_audio.audio = cut.first;
962 } else if (time > piece->content->end(_film)) {
965 } else if (end > piece->content->end(_film)) {
/* Truncate the block where it overruns the end of the content. */
966 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
967 if (remaining_frames == 0) {
970 content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
973 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, copying first so the decoder's buffer is untouched. */
977 if (content->gain() != 0) {
978 auto gain = make_shared<AudioBuffers>(content_audio.audio);
979 gain->apply_gain (content->gain());
980 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
985 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
989 if (_audio_processor) {
990 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
995 _audio_merger.push (content_audio.audio, time);
996 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
997 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle from a text decoder: apply the
 * TextContent's offsets and scales, rasterise at container size and record it
 * as active from its DCP start time.
 */
1002 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
1004 auto piece = wp.lock ();
1005 auto text = wc.lock ();
1006 if (!piece || !text) {
1010 /* Apply content's subtitle offsets */
1011 subtitle.sub.rectangle.x += text->x_offset ();
1012 subtitle.sub.rectangle.y += text->y_offset ();
1014 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1015 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
1016 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1018 /* Apply content's subtitle scale */
1019 subtitle.sub.rectangle.width *= text->x_scale ();
1020 subtitle.sub.rectangle.height *= text->y_scale ();
1023 auto image = subtitle.sub.image;
1025 /* We will scale the subtitle up to fit _video_container_size */
1026 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1027 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to show. */
1028 if (width == 0 || height == 0) {
1032 dcp::Size scaled_size (width, height);
1033 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1034 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Register as active for this text type from its start time; stop comes via subtitle_stop(). */
1036 _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
/* Handle the start of a string (plain) subtitle from a text decoder: apply
 * the TextContent's offsets, scale and fonts, then record it as active from
 * its DCP start time.
 */
1041 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1043 auto piece = wp.lock ();
1044 auto text = wc.lock ();
1045 if (!piece || !text) {
1050 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content finishes: ignore. */
1052 if (from > piece->content->end(_film)) {
1056 for (auto s: subtitle.subs) {
1057 s.set_h_position (s.h_position() + text->x_offset ());
1058 s.set_v_position (s.v_position() + text->y_offset ());
1059 float const xs = text->x_scale();
1060 float const ys = text->y_scale();
1061 float size = s.size();
1063 /* Adjust size to express the common part of the scaling;
1064 e.g. if xs = ys = 0.5 we scale size by 2.
1066 if (xs > 1e-5 && ys > 1e-5) {
1067 size *= 1 / min (1 / xs, 1 / ys);
1071 /* Then express aspect ratio changes */
1072 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1073 s.set_aspect_adjust (xs / ys);
/* Timecode within the subtitle is relative; in-point is the DCP start time. */
1076 s.set_in (dcp::Time(from.seconds(), 1000));
1077 ps.string.push_back (StringText (s, text->outline_width()));
1078 ps.add_fonts (text->fonts ());
/* Register as active; stop comes via subtitle_stop(). */
1081 _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the active text for this content and,
 * if the text is not being burnt in, emit it via the Text signal.
 */
1086 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1088 auto text = wc.lock ();
/* No matching start was recorded; nothing to stop. */
1093 if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1097 shared_ptr<Piece> piece = wp.lock ();
1102 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the content finishes: ignore. */
1104 if (dcp_to > piece->content->end(_film)) {
1108 auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted here. */
1110 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1111 if (text->use() && !always && !text->burn()) {
1112 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the whole player to a DCP time.  @param accurate true to get
 * frame-accurate output from the decoders, false to accept whatever comes
 * out after the nearest seekable point.
 */
1118 Player::seek (DCPTime time, bool accurate)
1120 boost::mutex::scoped_lock lm (_mutex);
1121 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1124 /* We can't seek in this state */
/* Discard all buffered/in-flight state before moving the decoders. */
1129 _shuffler->clear ();
1134 if (_audio_processor) {
1135 _audio_processor->flush ();
1138 _audio_merger.clear ();
1139 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1140 _active_texts[i].clear ();
1143 for (auto i: _pieces) {
1144 if (time < i->content->position()) {
1145 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1146 we must seek this (following) content accurately, otherwise when we come to the end of the current
1147 content we may not start right at the beginning of the next, causing a gap (if the next content has
1148 been trimmed to a point between keyframes, or something).
1150 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1152 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1153 /* During; seek to position */
1154 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1157 /* After; this piece is done */
/* For accurate seeks the emit clocks restart exactly at the target; otherwise they are unknown. */
1163 _last_video_time = time;
1164 _last_video_eyes = Eyes::LEFT;
1165 _last_audio_time = time;
1167 _last_video_time = optional<DCPTime>();
1168 _last_video_eyes = optional<Eyes>();
1169 _last_audio_time = optional<DCPTime>();
1172 _black.set_position (time);
1173 _silent.set_position (time);
1175 _last_video.clear ();
/* Queue a video frame for emission, advancing the video clock; frames are
 * held in _delay briefly so subtitles can arrive before the video they
 * belong to.
 */
1180 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1182 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1183 player before the video that requires them.
1185 _delay.push_back (make_pair (pv, time));
/* The clock only advances once both eyes (or a 2D frame) have been queued. */
1187 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1188 _last_video_time = time + one_video_frame();
1190 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a couple of frames buffered before actually emitting. */
1192 if (_delay.size() < 3) {
1196 auto to_do = _delay.front();
1198 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) video frame: expire stale active texts,
 * burn in any open subtitles for this time, then signal the frame.
 * NOTE(review): the final Video emission is elided in this view.
 */
1203 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only prune once per complete frame (not between a L/R pair). */
1205 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1206 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1207 _active_texts[i].clear_before (time);
1211 auto subtitles = open_subtitles_for_frame (time);
1213 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance the audio clock.
 * Audio must be contiguous: we assert the new block starts within one Time
 * unit of where the previous one ended.
 */
1221 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1223 /* Log if the assert below is about to fail */
1224 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1225 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1228 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1229 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1230 Audio (data, time, _film->audio_frame_rate());
1231 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a second. */
1236 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to fill. */
1238 if (period.from == period.to) {
1242 DCPOMATIC_ASSERT (period.from < period.to);
1244 DCPTime t = period.from;
1245 while (t < period.to) {
1246 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1247 Frame const samples = block.frames_round(_film->audio_frame_rate());
1249 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1250 silence->make_silent ();
1251 emit_audio (silence, t);
1259 Player::one_video_frame () const
1261 return DCPTime::from_frames (1, _film->video_frame_rate ());
1265 pair<shared_ptr<AudioBuffers>, DCPTime>
1266 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1268 auto const discard_time = discard_to - time;
1269 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1270 auto remaining_frames = audio->frames() - discard_frames;
1271 if (remaining_frames <= 0) {
1272 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1274 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1275 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode reduction used for DCP content (boost::none for
 * full resolution), rebuilding the pieces.  Emits PENDING, then CANCELLED
 * (no change) or DONE.
 */
1280 Player::set_dcp_decode_reduction (optional<int> reduction)
1282 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1285 boost::mutex::scoped_lock lm (_mutex);
/* No-op: tell listeners the pending change was cancelled. */
1287 if (reduction == _dcp_decode_reduction) {
1289 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1293 _dcp_decode_reduction = reduction;
1294 setup_pieces_unlocked ();
1297 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1302 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1304 boost::mutex::scoped_lock lm (_mutex);
1306 for (auto i: _pieces) {
1307 if (i->content == content) {
1308 return content_time_to_dcp (i, t);
1312 /* We couldn't find this content; perhaps things are being changed over */
1317 shared_ptr<const Playlist>
1318 Player::playlist () const
1320 return _playlist ? _playlist : _film->playlist();
1325 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1327 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);