2 Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
22 #include "atmos_decoder.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
31 #include "raw_image_proxy.h"
34 #include "render_text.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
69 using std::dynamic_pointer_cast;
72 using std::make_shared;
78 using std::shared_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
87 using namespace dcpomatic;
/* Property identifiers emitted through the Player's Change signal so that
   observers can tell which aspect of the player's output has changed. */
90 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
91 int const PlayerProperty::PLAYLIST = 701;
92 int const PlayerProperty::FILM_CONTAINER = 702;
93 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
94 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
95 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player over the whole of the given film's own playlist.
   Copies the film's tolerant flag and initialises the audio merger at the
   film's audio frame rate.
   NOTE(review): the initializer list appears truncated in this listing;
   other members are presumably initialised in the elided lines. */
98 Player::Player (shared_ptr<const Film> film)
101 , _tolerant (film->tolerant())
102 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player over an explicit playlist (rather than the film's own).
   The visible construction code wires up change signals:
   - film_change() for Film::Change
   - playlist_change() / playlist_content_change() for the playlist, connected
     at_front so the butler (proxying through us) hears about changes first,
   then sets the video container size from the film's frame size, applies the
   AUDIO_PROCESSOR film property and seeks to the start accurately.
   NOTE(review): lines appear elided between the initializer list and the
   signal connections in this listing. */
108 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
110 , _playlist (playlist_)
112 , _tolerant (film->tolerant())
113 , _audio_merger (_film->audio_frame_rate())
122 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
123 /* The butler must hear about this first, so since we are proxying this through to the butler we must
126 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
127 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
128 set_video_container_size (_film->frame_size ());
130 film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);
133 seek (DCPTime (), true);
/* Thread-safe wrapper: take the player mutex, then rebuild the pieces. */
144 Player::setup_pieces ()
146 boost::mutex::scoped_lock lm (_mutex);
147 setup_pieces_unlocked ();
/* Predicate used when building the _black Empty: true if the content has
   video and that video is marked for use. */
152 have_video (shared_ptr<const Content> content)
154 return static_cast<bool>(content->video) && content->video->use();
/* Predicate used when building the _silent Empty: true if the content has audio. */
159 have_audio (shared_ptr<const Content> content)
161 return static_cast<bool>(content->audio);
/* Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
   playlist.  Caller must hold _mutex.  Keeps old decoders around so that a
   matching decoder can be reused for unchanged content, wires every decoder's
   output signals into this Player, then rebuilds the _black/_silent gap
   trackers and resets the last-emitted video/audio positions.
   NOTE(review): several lines (braces, continues) appear elided in this
   listing; comments below describe only the visible code. */
166 Player::setup_pieces_unlocked ()
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
170 auto old_pieces = _pieces;
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 for (auto i: playlist()->content()) {
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Reuse the decoder from an old piece for this content, if one exists. */
188 shared_ptr<Decoder> old_decoder;
189 for (auto j: old_pieces) {
190 auto decoder = j->decoder_for(i);
192 old_decoder = decoder;
197 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
198 DCPOMATIC_ASSERT (decoder);
200 FrameRateChange frc (_film, i);
/* Honour the ignore flags by silencing the respective decoder parts. */
202 if (decoder->video && _ignore_video) {
203 decoder->video->set_ignore (true);
206 if (decoder->audio && _ignore_audio) {
207 decoder->audio->set_ignore (true);
211 for (auto i: decoder->text) {
212 i->set_ignore (true);
/* DCP content may reference assets from another DCP; pass our settings on. */
216 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
218 dcp->set_decode_referenced (_play_referenced);
219 if (_play_referenced) {
220 dcp->set_forced_reduction (_dcp_decode_reduction);
224 auto piece = make_shared<Piece>(i, decoder, frc);
225 _pieces.push_back (piece);
227 if (decoder->video) {
228 if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
229 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
230 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
232 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
236 if (decoder->audio) {
237 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our text handlers. */
240 auto j = decoder->text.begin();
242 while (j != decoder->text.end()) {
243 (*j)->BitmapStart.connect (
244 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
246 (*j)->PlainStart.connect (
247 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
250 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
256 if (decoder->atmos) {
257 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Mark video that is completely covered by later-in-list video as ignorable. */
261 for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
262 if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
263 /* Look for content later in the content list with in-use video that overlaps this */
264 auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
267 for (; j != _pieces.end(); ++j) {
268 if ((*j)->use_video()) {
269 (*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
/* Rebuild the trackers that fill playback gaps with black/silence. */
275 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
276 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
278 _last_video_time = boost::optional<dcpomatic::DCPTime>();
279 _last_video_eyes = Eyes::BOTH;
280 _last_audio_time = boost::optional<dcpomatic::DCPTime>();
/* Convert a time within a piece of content to the corresponding DCP time,
   by asking each piece in turn.  Takes the player mutex.
   NOTE(review): the return statements are elided in this listing; an empty
   result presumably indicates the content was not found (see comment below). */
285 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
287 boost::mutex::scoped_lock lm (_mutex);
289 for (auto i: _pieces) {
290 auto dcp = i->content_time_to_dcp(content, t);
296 /* We couldn't find this content; perhaps things are being changed over */
/* Handle a change to some content in the playlist.
   - CROP changes when DONE: push fresh metadata to any video frames waiting
     in the _delay queue, so they are re-cropped before emission.
   - Other PENDING changes: suspend (pass()/seek() stop working) until the
     pieces have been rebuilt; DONE rebuilds; CANCELLED resumes.
   Finally re-emit the change to our own observers. */
302 Player::playlist_content_change (ChangeType type, int property, bool frequent)
304 if (property == VideoContentProperty::CROP) {
305 if (type == ChangeType::DONE) {
306 auto const vcs = video_container_size();
307 boost::mutex::scoped_lock lm (_mutex);
308 for (auto const& i: _delay) {
309 i.first->reset_metadata (_film, vcs);
313 if (type == ChangeType::PENDING) {
314 /* The player content is probably about to change, so we can't carry on
315 until that has happened and we've rebuilt our pieces. Stop pass()
316 and seek() from working until then.
319 } else if (type == ChangeType::DONE) {
320 /* A change in our content has gone through. Re-build our pieces. */
323 } else if (type == ChangeType::CANCELLED) {
328 Change (type, property, frequent);
/* Set the size that video will be scaled/padded to.  Uses the
   PENDING / CANCELLED / DONE change protocol: CANCELLED if the size is
   unchanged, otherwise stores the size, rebuilds the black frame image
   and signals DONE.  The signals are emitted outside the mutex. */
333 Player::set_video_container_size (dcp::Size s)
335 Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
338 boost::mutex::scoped_lock lm (_mutex);
340 if (s == _video_container_size) {
342 Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
346 _video_container_size = s;
/* Pre-render a black frame at the new size for gap filling. */
348 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
349 _black_image->make_black ();
352 Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist as a whole: rebuild pieces when the change
   is DONE (rebuild call elided in this listing) and re-emit as PLAYLIST. */
357 Player::playlist_change (ChangeType type)
359 if (type == ChangeType::DONE) {
362 Change (type, PlayerProperty::PLAYLIST, false);
367 Player::film_change (ChangeType type, Film::Property p)
369 /* Here we should notice Film properties that affect our output, and
370 alert listeners that our output now would be different to how it was
371 last time we were run.
374 if (p == Film::Property::CONTAINER) {
375 Change (type, PlayerProperty::FILM_CONTAINER, false);
376 } else if (p == Film::Property::VIDEO_FRAME_RATE) {
377 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
378 so we need new pieces here.
380 if (type == ChangeType::DONE) {
383 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
384 } else if (p == Film::Property::AUDIO_PROCESSOR) {
/* Clone the film's audio processor so we have our own instance running
   at the film's audio frame rate. */
385 if (type == ChangeType::DONE && _film->audio_processor ()) {
386 boost::mutex::scoped_lock lm (_mutex);
387 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
389 } else if (p == Film::Property::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid. */
390 if (type == ChangeType::DONE) {
391 boost::mutex::scoped_lock lm (_mutex);
392 _audio_merger.clear ();
/* Make a PlayerVideo of a plain black frame (using the pre-rendered
   _black_image) for the given eye(s), at the current container size.
   Used to fill gaps where no content video is available. */
398 shared_ptr<PlayerVideo>
399 Player::black_player_video_frame (Eyes eyes) const
401 return std::make_shared<PlayerVideo> (
402 std::make_shared<const RawImageProxy>(_black_image),
405 _video_container_size,
406 _video_container_size,
409 PresetColourConversion::all().front().conversion,
411 std::weak_ptr<Content>(),
412 boost::optional<Frame>(),
/* Collect the fonts used by all our decoders' subtitles into one list.
   Takes the player mutex. */
419 Player::get_subtitle_fonts ()
421 boost::mutex::scoped_lock lm (_mutex);
423 vector<FontData> fonts;
424 for (auto i: _pieces) {
425 /* XXX: things may go wrong if there are duplicate font IDs
426 with different font files.
428 auto f = i->decoder->fonts ();
429 copy (f.begin(), f.end(), back_inserter(fonts));
436 /** Set this player never to produce any video data */
438 Player::set_ignore_video ()
440 boost::mutex::scoped_lock lm (_mutex);
441 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag. */
442 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
447 Player::set_ignore_audio ()
449 boost::mutex::scoped_lock lm (_mutex);
450 _ignore_audio = true;
/* Rebuild pieces so decoders pick up the new ignore flag. */
451 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting the ignore flag appears elided here. */
456 Player::set_ignore_text ()
458 boost::mutex::scoped_lock lm (_mutex);
460 setup_pieces_unlocked ();
464 /** Set the player to always burn open texts into the image regardless of the content settings */
466 Player::set_always_burn_open_subtitles ()
468 boost::mutex::scoped_lock lm (_mutex);
469 _always_burn_open_subtitles = true;
473 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the line setting the fast flag
   appear elided in this listing; presumably this is Player::set_fast(). */
477 boost::mutex::scoped_lock lm (_mutex);
479 setup_pieces_unlocked ();
/* Make this player decode and play assets that DCP content references
   from other DCPs, rather than skipping them. */
484 Player::set_play_referenced ()
486 boost::mutex::scoped_lock lm (_mutex);
487 _play_referenced = true;
488 setup_pieces_unlocked ();
/* Apply reel trims to a reel asset (adjusting its entry point and duration)
   and, if anything remains, append it to the list of referenced reel assets
   with its period in the DCP computed from `from' at frame rate `ffr'. */
493 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
495 DCPOMATIC_ASSERT (r);
496 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
497 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
498 if (r->actual_duration() > 0) {
500 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
506 list<ReferencedReelAsset>
507 Player::get_reel_assets ()
509 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
511 list<ReferencedReelAsset> a;
513 for (auto i: playlist()->content()) {
/* Only DCP content can reference reel assets. */
514 auto j = dynamic_pointer_cast<DCPContent> (i);
519 scoped_ptr<DCPDecoder> decoder;
521 decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
526 DCPOMATIC_ASSERT (j->video_frame_rate ());
527 double const cfr = j->video_frame_rate().get();
528 Frame const trim_start = j->trim_start().frames_round (cfr);
529 Frame const trim_end = j->trim_end().frames_round (cfr);
530 int const ffr = _film->video_frame_rate ();
532 /* position in the asset from the start */
533 int64_t offset_from_start = 0;
534 /* position in the asset from the end */
535 int64_t offset_from_end = 0;
536 for (auto k: decoder->reels()) {
537 /* Assume that main picture duration is the length of the reel */
538 offset_from_end += k->main_picture()->actual_duration();
541 for (auto k: decoder->reels()) {
543 /* Assume that main picture duration is the length of the reel */
544 int64_t const reel_duration = k->main_picture()->actual_duration();
546 /* See doc/design/trim_reels.svg */
547 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
548 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
550 auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each asset type that the content says should be referenced. */
551 if (j->reference_video ()) {
552 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
555 if (j->reference_audio ()) {
556 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
559 if (j->reference_text (TextType::OPEN_SUBTITLE)) {
560 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
563 if (j->reference_text (TextType::CLOSED_CAPTION)) {
564 for (auto l: k->closed_captions()) {
565 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
569 offset_from_start += reel_duration;
570 offset_from_end -= reel_duration;
/* Body of the player's main pump (presumably Player::pass(); the signature
   is elided in this listing).  Each call makes the furthest-behind source
   (a piece, the black-gap filler, or the silence-gap filler) emit some data,
   then flushes whatever merged audio and delayed video is ready. */
581 boost::mutex::scoped_lock lm (_mutex);
584 /* We can't pass in this state */
585 LOG_DEBUG_PLAYER_NC ("Player is suspended");
589 if (_playback_length == DCPTime()) {
590 /* Special; just give one black frame */
591 emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
595 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
597 shared_ptr<Piece> earliest_content;
598 optional<DCPTime> earliest_time;
600 for (auto i: _pieces) {
601 auto time = i->decoder_before(_film, earliest_time);
603 earliest_time = *time;
604 earliest_content = i;
617 if (earliest_content) {
/* A black (video) gap may be even further behind than the earliest piece. */
621 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
622 earliest_time = _black.position ();
/* Likewise a silent (audio) gap. */
626 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
627 earliest_time = _silent.position ();
634 earliest_content->pass();
635 if (!_play_referenced && earliest_content->reference_dcp_audio()) {
636 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
637 to `hide' the fact that no audio was emitted during the referenced DCP (though
638 we need to behave as though it was).
640 _last_audio_time = earliest_content->end (_film);
645 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
646 emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
647 _black.set_position (_black.position() + one_video_frame());
651 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
652 DCPTimePeriod period (_silent.period_at_position());
653 if (_last_audio_time) {
654 /* Sometimes the thing that happened last finishes fractionally before
655 or after this silence. Bodge the start time of the silence to fix it.
656 I think this is nothing to worry about since we will just add or
657 remove a little silence at the end of some content.
659 int64_t const error = labs(period.from.get() - _last_audio_time->get());
660 /* Let's not worry about less than a frame at 24fps */
661 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
662 if (error >= too_much_error) {
663 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
665 DCPOMATIC_ASSERT (error < too_much_error);
666 period.from = *_last_audio_time;
/* Emit silence in at most one-video-frame chunks. */
668 if (period.duration() > one_video_frame()) {
669 period.to = period.from + one_video_frame();
672 _silent.set_position (period.to);
680 /* Emit any audio that is ready */
682 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
683 of our streams, or the position of the _silent.
685 auto pull_to = _playback_length;
686 for (auto i: _pieces) {
687 i->update_pull_to (pull_to);
689 if (!_silent.done() && _silent.position() < pull_to) {
690 pull_to = _silent.position();
693 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
694 auto audio = _audio_merger.pull (pull_to);
695 for (auto i = audio.begin(); i != audio.end(); ++i) {
696 if (_last_audio_time && i->second < *_last_audio_time) {
697 /* This new data comes before the last we emitted (or the last seek); discard it */
698 auto cut = discard_audio (i->first, i->second, *_last_audio_time);
703 } else if (_last_audio_time && i->second > *_last_audio_time) {
704 /* There's a gap between this data and the last we emitted; fill with silence */
705 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
708 emit_audio (i->first, i->second);
/* Flush any video frames still sitting in the delay queue. */
713 for (auto const& i: _delay) {
714 do_emit_video(i.first, i.second);
722 /** @return Open subtitles for the frame at the given time, converted to images */
723 optional<PositionImage>
724 Player::open_subtitles_for_frame (DCPTime time) const
726 list<PositionImage> captions;
727 int const vfr = _film->video_frame_rate();
/* Gather the active open subtitles that should be burnt in over the
   one-frame period starting at `time'. */
731 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
734 /* Bitmap subtitles */
735 for (auto i: j.bitmap) {
740 /* i.image will already have been scaled to fit _video_container_size */
741 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a proportion of the container size. */
747 lrint(_video_container_size.width * i.rectangle.x),
748 lrint(_video_container_size.height * i.rectangle.y)
754 /* String subtitles (rendered to an image) */
755 if (!j.string.empty()) {
756 auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
757 copy (s.begin(), s.end(), back_inserter (captions));
761 if (captions.empty()) {
765 return merge (captions);
/* Handle a video frame arriving from a piece's decoder.  Discards frames
   that are out of range, fills any gap since the last emitted frame (with
   the last frame again, or black), then emits this frame (repeated as the
   frame-rate change requires).
   NOTE(review): some lines (returns, braces) appear elided in this listing. */
770 Player::video (weak_ptr<Piece> wp, ContentVideo video)
772 auto piece = wp.lock ();
777 if (!piece->use_video()) {
781 auto frc = piece->frame_rate_change();
/* When skipping (e.g. 50fps content into a 25fps DCP) drop odd frames. */
782 if (frc.skip && (video.frame % 2) == 1) {
786 /* Time of the first frame we will emit */
787 DCPTime const time = piece->content_video_to_dcp (video.frame);
788 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
790 /* Discard if it's before the content's period or the last accurate seek. We can't discard
791 if it's after the content's period here as in that case we still need to fill any gap between
792 `now' and the end of the content's period.
794 if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
/* Video covered by later overlapping content (see setup_pieces) is dropped. */
798 if (piece->ignore_video && piece->ignore_video->contains(time)) {
802 /* Fill gaps that we discover now that we have some video which needs to be emitted.
803 This is where we need to fill to.
805 DCPTime fill_to = min (time, piece->end(_film));
807 if (_last_video_time) {
808 DCPTime fill_from = max (*_last_video_time, piece->position());
810 /* Fill if we have more than half a frame to do */
811 if ((fill_to - fill_from) > one_video_frame() / 2) {
812 auto last = _last_video.find (wp);
813 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, stopping at the eye this frame is for. */
814 auto fill_to_eyes = video.eyes;
815 if (fill_to_eyes == Eyes::BOTH) {
816 fill_to_eyes = Eyes::LEFT;
818 if (fill_to == piece->end(_film)) {
819 /* Don't fill after the end of the content */
820 fill_to_eyes = Eyes::LEFT;
823 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
824 if (eyes == Eyes::BOTH) {
827 while (j < fill_to || eyes != fill_to_eyes) {
828 if (last != _last_video.end()) {
829 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
830 auto copy = last->second->shallow_copy();
831 copy->set_eyes (eyes);
832 emit_video (copy, j);
834 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
835 emit_video (black_player_video_frame(eyes), j);
837 if (eyes == Eyes::RIGHT) {
838 j += one_video_frame();
840 eyes = increment_eyes (eyes);
/* 2D: simply repeat the last frame (or black) for each missing frame. */
843 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
844 if (last != _last_video.end()) {
845 emit_video (last->second, j);
847 emit_video (black_player_video_frame(Eyes::BOTH), j);
854 _last_video[wp] = piece->player_video (video, _film, _video_container_size);
/* Emit the new frame, repeated as required by the frame rate change. */
857 for (int i = 0; i < frc.repeat; ++i) {
858 if (t < piece->end(_film)) {
859 emit_video (_last_video[wp], t);
861 t += one_video_frame ();
/* Handle an audio block arriving from a piece's decoder: trim it to the
   content's period, apply gain/remap/processing, then push it into the
   audio merger and record how far this stream has got. */
867 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
869 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
871 auto piece = wp.lock ();
876 int const rfr = piece->resampled_audio_frame_rate (_film);
878 /* Compute time in the DCP */
879 auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
880 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
882 /* And the end of this block in the DCP */
883 auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
885 /* Remove anything that comes before the start or after the end of the content */
886 if (time < piece->position()) {
887 auto cut = discard_audio (content_audio.audio, time, piece->position());
889 /* This audio is entirely discarded */
892 content_audio.audio = cut.first;
894 } else if (time > piece->end(_film)) {
897 } else if (end > piece->end(_film)) {
/* Block overlaps the end of the content: keep only the part inside. */
898 Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
899 if (remaining_frames == 0) {
902 content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
905 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any. */
909 if (piece->audio_gain() != 0) {
910 auto gain = make_shared<AudioBuffers>(content_audio.audio);
911 gain->apply_gain (piece->audio_gain());
912 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
917 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
921 if (_audio_processor) {
922 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
927 _audio_merger.push (content_audio.audio, time);
928 piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
/* Handle the start of a bitmap subtitle: apply the content's offsets and
   scale, render the bitmap at the container size and register it as an
   active text starting at the corresponding DCP time. */
933 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
935 auto piece = wp.lock ();
936 auto content = wc.lock ();
937 auto text = wt.lock ();
938 if (!piece || !content || !text) {
942 /* Apply content's subtitle offsets */
943 subtitle.sub.rectangle.x += text->x_offset ();
944 subtitle.sub.rectangle.y += text->y_offset ();
946 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
947 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
948 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
950 /* Apply content's subtitle scale */
951 subtitle.sub.rectangle.width *= text->x_scale ();
952 subtitle.sub.rectangle.height *= text->y_scale ();
955 auto image = subtitle.sub.image;
957 /* We will scale the subtitle up to fit _video_container_size */
958 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
959 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
960 if (width == 0 || height == 0) {
964 dcp::Size scaled_size (width, height);
965 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
966 auto from = piece->content_time_to_dcp(content, subtitle.from());
967 DCPOMATIC_ASSERT (from);
969 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the start of a plain (string) subtitle: apply the content's
   position offsets, scale and outline settings to each string, then
   register it as an active text starting at the corresponding DCP time. */
974 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
976 auto piece = wp.lock ();
977 auto content = wc.lock ();
978 auto text = wt.lock ();
979 if (!piece || !content || !text) {
984 auto const from = piece->content_time_to_dcp(content, subtitle.from());
985 DCPOMATIC_ASSERT (from);
/* Ignore subtitles that start after the content has ended. */
987 if (from > piece->end(_film)) {
991 for (auto s: subtitle.subs) {
992 s.set_h_position (s.h_position() + text->x_offset ());
993 s.set_v_position (s.v_position() + text->y_offset ());
994 float const xs = text->x_scale();
995 float const ys = text->y_scale();
996 float size = s.size();
998 /* Adjust size to express the common part of the scaling;
999 e.g. if xs = ys = 0.5 we scale size by 2.
1001 if (xs > 1e-5 && ys > 1e-5) {
1002 size *= 1 / min (1 / xs, 1 / ys);
1006 /* Then express aspect ratio changes */
1007 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1008 s.set_aspect_adjust (xs / ys);
1011 s.set_in (dcp::Time(from->seconds(), 1000));
1012 ps.string.push_back (StringText (s, text->outline_width()));
1013 ps.add_fonts (text->fonts ());
1016 _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
/* Handle the end of a subtitle: close the matching active text at the
   corresponding DCP time and, if the text is being passed through rather
   than burnt in, emit it via the Text signal. */
1021 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1023 auto content = wc.lock ();
1024 auto text = wt.lock ();
1029 if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1033 shared_ptr<Piece> piece = wp.lock ();
1038 auto const dcp_to = piece->content_time_to_dcp(content, to);
1039 DCPOMATIC_ASSERT (dcp_to);
/* Ignore stops that fall after the content has ended. */
1041 if (*dcp_to > piece->end(_film)) {
1045 auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1047 bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1048 if (text->use() && !always && !text->burn()) {
1049 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
/* Seek the player to `time'.  If `accurate' is true, playback restarts
   exactly at `time' (with last-emitted positions set accordingly);
   otherwise the last positions are cleared so the first data emitted after
   the seek defines them.  Flushes all buffered state first. */
1055 Player::seek (DCPTime time, bool accurate)
1057 boost::mutex::scoped_lock lm (_mutex);
1058 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1061 /* We can't seek in this state */
1066 _shuffler->clear ();
1071 if (_audio_processor) {
1072 _audio_processor->flush ();
1075 _audio_merger.clear ();
1076 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1077 _active_texts[i].clear ();
1080 for (auto i: _pieces) {
1081 i->seek (_film, time, accurate);
/* Accurate branch: the next emissions must start exactly at `time'. */
1085 _last_video_time = time;
1086 _last_video_eyes = Eyes::LEFT;
1087 _last_audio_time = time;
/* Inaccurate branch: forget the last positions entirely. */
1089 _last_video_time = optional<DCPTime>();
1090 _last_video_eyes = optional<Eyes>();
1091 _last_audio_time = optional<DCPTime>();
1094 _black.set_position (time);
1095 _silent.set_position (time);
1097 _last_video.clear ();
/* Queue a video frame for emission.  In 2D, left-eye frames are promoted to
   BOTH and right-eye frames discarded.  Frames are buffered briefly in
   _delay so that subtitles have time to arrive before the frame they
   belong to, then emitted via do_emit_video(). */
1102 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1104 if (!_film->three_d()) {
1105 if (pv->eyes() == Eyes::LEFT) {
1106 /* Use left-eye images for both eyes... */
1107 pv->set_eyes (Eyes::BOTH);
1108 } else if (pv->eyes() == Eyes::RIGHT) {
1109 /* ...and discard the right */
1114 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1115 player before the video that requires them.
1117 _delay.push_back (make_pair (pv, time));
/* Advance our last-emitted bookkeeping once the frame is complete. */
1119 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1120 _last_video_time = time + one_video_frame();
1122 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames in the delay queue before releasing the oldest. */
1124 if (_delay.size() < 3) {
1128 auto to_do = _delay.front();
1130 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished active texts, burn any open
   subtitles for this frame into the image, then signal it out. */
1135 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1137 if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1138 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1139 _active_texts[i].clear_before (time);
1143 auto subtitles = open_subtitles_for_frame (time);
1145 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance _last_audio_time.
   Audio must follow on contiguously from the previous emission (within half
   a sample at 48kHz); out-of-sequence data is logged before the assert. */
1153 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1155 /* Log if the assert below is about to fail */
1156 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1157 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1160 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1161 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1162 Audio (data, time, _film->audio_frame_rate());
1163 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a
   second.  A zero-length period is a no-op. */
1168 Player::fill_audio (DCPTimePeriod period)
1170 if (period.from == period.to) {
1174 DCPOMATIC_ASSERT (period.from < period.to);
1176 DCPTime t = period.from;
1177 while (t < period.to) {
1178 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1179 Frame const samples = block.frames_round(_film->audio_frame_rate());
1181 auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1182 silence->make_silent ();
1183 emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate. */
1191 Player::one_video_frame () const
1193 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Cut the part of `audio' (starting at `time') which falls before
   `discard_to'.  Returns the remaining buffers and their new start time;
   returns an empty pair if everything was discarded. */
1197 pair<shared_ptr<AudioBuffers>, DCPTime>
1198 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1200 auto const discard_time = discard_to - time;
1201 auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1202 auto remaining_frames = audio->frames() - discard_frames;
1203 if (remaining_frames <= 0) {
1204 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1206 auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1207 return make_pair(cut, time + discard_time);
/* Set the resolution reduction used when decoding DCP content.  Uses the
   PENDING / CANCELLED / DONE change protocol: CANCELLED when the value is
   unchanged, otherwise stores it, rebuilds pieces and signals DONE. */
1212 Player::set_dcp_decode_reduction (optional<int> reduction)
1214 Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1217 boost::mutex::scoped_lock lm (_mutex);
1219 if (reduction == _dcp_decode_reduction) {
1221 Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1225 _dcp_decode_reduction = reduction;
1226 setup_pieces_unlocked ();
1229 Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* The playlist this player is using: the explicitly-supplied one if any,
   otherwise the film's own. */
1233 shared_ptr<const Playlist>
1234 Player::playlist () const
1236 return _playlist ? _playlist : _film->playlist();
/* Pass Atmos data straight through via the Atmos signal, converting its
   frame number to a DCP time at the film's video frame rate. */
1241 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1243 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);