2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
84 using namespace dcpomatic;
/* Unique IDs passed with the Change signal so listeners can tell which
   aspect of the player's output has changed. */
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player that plays the film's own playlist.
   NOTE(review): this extract is incomplete -- parts of the member-initialiser
   list and the constructor body are missing (gaps in embedded numbering);
   confirm against the full source. */
93 Player::Player (shared_ptr<const Film> film)
96 , _ignore_video (false)
97 , _ignore_audio (false)
98 , _ignore_text (false)
99 , _always_burn_open_subtitles (false)
101 , _tolerant (film->tolerant())
102 , _play_referenced (false)
103 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player that plays an explicit playlist rather than the film's
   own one.  Wires up film/playlist change signals, sets the video container
   size and does an initial accurate seek to time zero.
   NOTE(review): extract incomplete -- initialiser list and parts of the body
   are missing; a comment starting at the line numbered 130 appears to be
   truncated. */
109 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
111 , _playlist (playlist_)
113 , _ignore_video (false)
114 , _ignore_audio (false)
115 , _ignore_text (false)
116 , _always_burn_open_subtitles (false)
118 , _tolerant (film->tolerant())
119 , _play_referenced (false)
120 , _audio_merger (_film->audio_frame_rate())
129 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
130 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures the Player's handler runs before other slots on the
   playlist Change signal. */
133 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
134 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
135 set_video_container_size (_film->frame_size ());
137 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() starts from a known state. */
140 seek (DCPTime (), true);
/* Public, locking wrapper around setup_pieces_unlocked(). */
149 Player::setup_pieces ()
151 boost::mutex::scoped_lock lm (_mutex);
152 setup_pieces_unlocked ();
/* @return true if this content has video that is marked for use. */
157 have_video (shared_ptr<const Content> content)
159 return static_cast<bool>(content->video) && content->video->use();
/* @return true if this content has any audio. */
163 have_audio (shared_ptr<const Content> content)
165 return static_cast<bool>(content->audio);
/* Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, re-using decoders from the old pieces where the content matches.
   Caller must hold _mutex.
   NOTE(review): extract incomplete -- several closing braces / statements are
   missing (gaps in embedded numbering); confirm against the full source. */
169 Player::setup_pieces_unlocked ()
171 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so their decoders can be re-used below. */
173 list<shared_ptr<Piece> > old_pieces = _pieces;
177 _shuffler = new Shuffler();
178 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
180 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files are missing. */
182 if (!i->paths_valid ()) {
186 if (_ignore_video && _ignore_audio && i->text.empty()) {
187 /* We're only interested in text and this content has none */
/* Re-use the old decoder for this content, if there was one. */
191 shared_ptr<Decoder> old_decoder;
192 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
193 if (j->content == i) {
194 old_decoder = j->decoder;
199 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
200 DCPOMATIC_ASSERT (decoder);
202 FrameRateChange frc (_film, i);
204 if (decoder->video && _ignore_video) {
205 decoder->video->set_ignore (true);
208 if (decoder->audio && _ignore_audio) {
209 decoder->audio->set_ignore (true);
/* presumably guarded by _ignore_text in a missing line -- TODO confirm */
213 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
214 i->set_ignore (true);
218 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
220 dcp->set_decode_referenced (_play_referenced);
221 if (_play_referenced) {
222 dcp->set_forced_reduction (_dcp_decode_reduction);
226 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
227 _pieces.push_back (piece);
/* Connect decoder outputs through to the Player's handlers; weak_ptr so a
   dropped Piece does not keep signals alive. */
229 if (decoder->video) {
230 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
231 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
232 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
234 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
238 if (decoder->audio) {
239 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
242 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
244 while (j != decoder->text.end()) {
245 (*j)->BitmapStart.connect (
246 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
248 (*j)->PlainStart.connect (
249 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
252 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
258 if (decoder->atmos) {
259 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track, per audio stream, the piece it belongs to and how far it has pushed. */
263 _stream_states.clear ();
264 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
265 if (i->content->audio) {
266 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
267 _stream_states[j] = StreamState (i, i->content->position ());
/* _black / _silent track the periods with no video / no audio so pass() can
   fill them with black frames / silence. */
272 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
273 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
275 _last_video_time = DCPTime ();
276 _last_video_eyes = EYES_BOTH;
277 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist: refresh delayed frames'
   metadata on a crop change, suspend/rebuild/resume around content changes,
   and re-emit the change to our own listeners. */
281 Player::playlist_content_change (ChangeType type, int property, bool frequent)
283 if (property == VideoContentProperty::CROP) {
284 if (type == CHANGE_TYPE_DONE) {
285 dcp::Size const vcs = video_container_size();
286 boost::mutex::scoped_lock lm (_mutex);
/* Update frames that are sitting in the _delay queue so they pick up the
   new crop before being emitted. */
287 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
288 i->first->reset_metadata (_film, vcs);
292 if (type == CHANGE_TYPE_PENDING) {
293 /* The player content is probably about to change, so we can't carry on
294 until that has happened and we've rebuilt our pieces. Stop pass()
295 and seek() from working until then.
298 } else if (type == CHANGE_TYPE_DONE) {
299 /* A change in our content has gone through. Re-build our pieces. */
302 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Propagate the change notification to our own observers. */
307 Change (type, property, frequent);
/* Set the size of the "container" into which video is placed, regenerating
   the cached black frame.  Emits PENDING, then CANCELLED (no-op change) or
   DONE around the update. */
311 Player::set_video_container_size (dcp::Size s)
313 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
316 boost::mutex::scoped_lock lm (_mutex);
/* Nothing to do if the size is unchanged. */
318 if (s == _video_container_size) {
320 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
324 _video_container_size = s;
/* Re-make the cached black frame at the new size. */
326 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
327 _black_image->make_black ();
330 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: rebuild pieces when the change completes and
   notify listeners. */
334 Player::playlist_change (ChangeType type)
336 if (type == CHANGE_TYPE_DONE) {
339 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a Film property change that affects our output, translating it to
   the corresponding PlayerProperty change for listeners. */
343 Player::film_change (ChangeType type, Film::Property p)
345 /* Here we should notice Film properties that affect our output, and
346 alert listeners that our output now would be different to how it was
347 last time we were run.
350 if (p == Film::CONTAINER) {
351 Change (type, PlayerProperty::FILM_CONTAINER, false);
352 } else if (p == Film::VIDEO_FRAME_RATE) {
353 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
354 so we need new pieces here.
356 if (type == CHANGE_TYPE_DONE) {
359 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
360 } else if (p == Film::AUDIO_PROCESSOR) {
/* Get a fresh clone of the processor at the film's audio rate. */
361 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
362 boost::mutex::scoped_lock lm (_mutex);
363 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
365 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid. */
366 if (type == CHANGE_TYPE_DONE) {
367 boost::mutex::scoped_lock lm (_mutex);
368 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, for the given eyes,
   sized to the current video container.
   NOTE(review): some constructor arguments are missing from this extract. */
373 shared_ptr<PlayerVideo>
374 Player::black_player_video_frame (Eyes eyes) const
376 return shared_ptr<PlayerVideo> (
378 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
381 _video_container_size,
382 _video_container_size,
385 PresetColourConversion::all().front().conversion,
/* No originating content/frame for synthetic black. */
387 boost::weak_ptr<Content>(),
388 boost::optional<Frame>(),
/* Convert a DCP time to a frame index within the given piece's content,
   clamping to the content's trimmed extent. */
395 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
397 DCPTime s = t - piece->content->position ();
398 s = min (piece->content->length_after_trim(_film), s);
399 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
401 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
402 then convert that ContentTime to frames at the content's rate. However this fails for
403 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
404 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
406 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
408 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content frame index to DCP time. */
412 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
414 /* See comment in dcp_to_content_video */
415 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
416 return d + piece->content->position();
/* Convert a DCP time to a frame index in the piece's resampled audio,
   at the film's audio frame rate. */
420 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
422 DCPTime s = t - piece->content->position ();
423 s = min (piece->content->length_after_trim(_film), s);
424 /* See notes in dcp_to_content_video */
425 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled-audio frame index
   back to DCP time. */
429 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
431 /* See comment in dcp_to_content_video */
432 return DCPTime::from_frames (f, _film->audio_frame_rate())
433 - DCPTime (piece->content->trim_start(), piece->frc)
434 + piece->content->position();
/* Convert a DCP time to a ContentTime within the piece, accounting for
   position, trim and frame-rate change. */
438 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
440 DCPTime s = t - piece->content->position ();
441 s = min (piece->content->length_after_trim(_film), s);
442 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the piece to DCP time (clamped at zero). */
446 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
448 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all text content in all pieces. */
451 list<shared_ptr<Font> >
452 Player::get_subtitle_fonts ()
454 boost::mutex::scoped_lock lm (_mutex);
456 list<shared_ptr<Font> > fonts;
457 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
458 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
459 /* XXX: things may go wrong if there are duplicate font IDs
460 with different font files.
462 list<shared_ptr<Font> > f = j->fonts ();
463 copy (f.begin(), f.end(), back_inserter (fonts));
470 /** Set this player never to produce any video data */
472 Player::set_ignore_video ()
474 boost::mutex::scoped_lock lm (_mutex);
475 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag. */
476 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
480 Player::set_ignore_audio ()
482 boost::mutex::scoped_lock lm (_mutex);
483 _ignore_audio = true;
484 setup_pieces_unlocked ();
/* Set this player never to produce any text data.
   NOTE(review): the line setting the _ignore_text flag is missing from this
   extract -- confirm against the full source. */
488 Player::set_ignore_text ()
490 boost::mutex::scoped_lock lm (_mutex);
492 setup_pieces_unlocked ();
495 /** Set the player to always burn open texts into the image regardless of the content settings */
497 Player::set_always_burn_open_subtitles ()
499 boost::mutex::scoped_lock lm (_mutex);
500 _always_burn_open_subtitles = true;
503 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and flag-setting line (presumably
   `_fast = true`) are missing from this extract -- confirm. */
507 boost::mutex::scoped_lock lm (_mutex);
509 setup_pieces_unlocked ();
/* Make the player decode content inside referenced DCPs rather than skipping it. */
513 Player::set_play_referenced ()
515 boost::mutex::scoped_lock lm (_mutex);
516 _play_referenced = true;
517 setup_pieces_unlocked ();
/* Apply reel-level trims to a reel asset and, if anything remains, append it
   to @p a as a ReferencedReelAsset positioned at @p from.  @p ffr is the film
   video frame rate used to compute the asset's DCP period. */
521 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
523 DCPOMATIC_ASSERT (r);
/* Shift the entry point and shorten the duration by the trims. */
524 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
525 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
526 if (r->actual_duration() > 0) {
528 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Build the list of reel assets from DCP content that is marked as
   "referenced" (i.e. re-used directly rather than re-encoded).
   NOTE(review): extract incomplete -- some guard/brace lines are missing. */
533 list<ReferencedReelAsset>
534 Player::get_reel_assets ()
536 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
538 list<ReferencedReelAsset> a;
540 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced. */
541 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
546 scoped_ptr<DCPDecoder> decoder;
548 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
553 DCPOMATIC_ASSERT (j->video_frame_rate ());
554 double const cfr = j->video_frame_rate().get();
/* Content trims expressed in content-rate frames. */
555 Frame const trim_start = j->trim_start().frames_round (cfr);
556 Frame const trim_end = j->trim_end().frames_round (cfr);
557 int const ffr = _film->video_frame_rate ();
559 /* position in the asset from the start */
560 int64_t offset_from_start = 0;
561 /* position in the asset from the end */
562 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down. */
563 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
564 /* Assume that main picture duration is the length of the reel */
565 offset_from_end += k->main_picture()->actual_duration();
568 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
570 /* Assume that main picture duration is the length of the reel */
571 int64_t const reel_duration = k->main_picture()->actual_duration();
573 /* See doc/design/trim_reels.svg */
574 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
575 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
577 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
578 if (j->reference_video ()) {
579 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
582 if (j->reference_audio ()) {
583 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
586 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
587 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
590 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
591 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
592 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
596 offset_from_start += reel_duration;
597 offset_from_end -= reel_duration;
/* Player::pass(): emit one "chunk" of output -- ask the furthest-behind
   decoder for data, or emit black/silence for gaps, then flush any audio that
   is definitely complete.
   NOTE(review): the function signature and a number of interior lines are
   missing from this extract -- confirm all control flow against the full
   source. */
607 boost::mutex::scoped_lock lm (_mutex);
610 /* We can't pass in this state */
611 LOG_DEBUG_PLAYER_NC ("Player is suspended");
615 if (_playback_length == DCPTime()) {
616 /* Special; just give one black frame */
617 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
621 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
623 shared_ptr<Piece> earliest_content;
624 optional<DCPTime> earliest_time;
626 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
631 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
632 if (t > i->content->end(_film)) {
636 /* Given two choices at the same time, pick the one with texts so we see it before
639 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
641 earliest_content = i;
655 if (earliest_content) {
/* A gap tracked by _black / _silent may be even further behind. */
659 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
660 earliest_time = _black.position ();
664 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
665 earliest_time = _silent.position ();
672 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
673 earliest_content->done = earliest_content->decoder->pass ();
674 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
675 if (dcp && !_play_referenced && dcp->reference_audio()) {
676 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
677 to `hide' the fact that no audio was emitted during the referenced DCP (though
678 we need to behave as though it was).
680 _last_audio_time = dcp->end (_film);
685 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
686 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
687 _black.set_position (_black.position() + one_video_frame());
691 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
692 DCPTimePeriod period (_silent.period_at_position());
693 if (_last_audio_time) {
694 /* Sometimes the thing that happened last finishes fractionally before
695 or after this silence. Bodge the start time of the silence to fix it.
696 I think this is nothing to worry about since we will just add or
697 remove a little silence at the end of some content.
699 int64_t const error = labs(period.from.get() - _last_audio_time->get());
700 /* Let's not worry about less than a frame at 24fps */
701 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
702 if (error >= too_much_error) {
703 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
705 DCPOMATIC_ASSERT (error < too_much_error);
706 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass. */
708 if (period.duration() > one_video_frame()) {
709 period.to = period.from + one_video_frame();
712 _silent.set_position (period.to);
720 /* Emit any audio that is ready */
722 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
723 of our streams, or the position of the _silent.
725 DCPTime pull_to = _playback_length;
726 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
727 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
728 pull_to = i->second.last_push_end;
731 if (!_silent.done() && _silent.position() < pull_to) {
732 pull_to = _silent.position();
735 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
736 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
737 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
738 if (_last_audio_time && i->second < *_last_audio_time) {
739 /* This new data comes before the last we emitted (or the last seek); discard it */
740 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
745 } else if (_last_audio_time && i->second > *_last_audio_time) {
746 /* There's a gap between this data and the last we emitted; fill with silence */
747 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
750 emit_audio (i->first, i->second);
/* Flush the video delay queue when playback is complete -- presumably guarded
   by a `done` condition in a missing line; TODO confirm. */
755 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
756 do_emit_video(i->first, i->second);
763 /** @return Open subtitles for the frame at the given time, converted to images */
764 optional<PositionImage>
765 Player::open_subtitles_for_frame (DCPTime time) const
767 list<PositionImage> captions;
768 int const vfr = _film->video_frame_rate();
/* Gather active open subtitles overlapping this one-frame period. */
772 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
775 /* Bitmap subtitles */
776 BOOST_FOREACH (BitmapText i, j.bitmap) {
781 /* i.image will already have been scaled to fit _video_container_size */
782 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Convert the fractional rectangle position to pixels in the container. */
788 lrint (_video_container_size.width * i.rectangle.x),
789 lrint (_video_container_size.height * i.rectangle.y)
795 /* String subtitles (rendered to an image) */
796 if (!j.string.empty ()) {
797 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
798 copy (s.begin(), s.end(), back_inserter (captions));
802 if (captions.empty ()) {
803 return optional<PositionImage> ();
/* Merge all caption images into a single positioned image. */
806 return merge (captions);
/* Handle a frame of video arriving from a decoder: discard it if it is
   before the seek point / content start, fill any preceding gap with
   repeated or black frames (handling 3D eye sequencing), then wrap it in a
   PlayerVideo and emit it, repeating as required by the frame-rate change.
   NOTE(review): extract incomplete -- some lines (e.g. parts of the
   PlayerVideo construction and the declaration of `t`) are missing. */
810 Player::video (weak_ptr<Piece> wp, ContentVideo video)
812 shared_ptr<Piece> piece = wp.lock ();
817 if (!piece->content->video->use()) {
821 FrameRateChange frc (_film, piece->content);
/* When skipping (content rate > DCP rate) drop every other frame. */
822 if (frc.skip && (video.frame % 2) == 1) {
826 /* Time of the first frame we will emit */
827 DCPTime const time = content_video_to_dcp (piece, video.frame);
828 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
830 /* Discard if it's before the content's period or the last accurate seek. We can't discard
831 if it's after the content's period here as in that case we still need to fill any gap between
832 `now' and the end of the content's period.
834 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
838 /* Fill gaps that we discover now that we have some video which needs to be emitted.
839 This is where we need to fill to.
841 DCPTime fill_to = min (time, piece->content->end(_film));
843 if (_last_video_time) {
844 DCPTime fill_from = max (*_last_video_time, piece->content->position());
846 /* Fill if we have more than half a frame to do */
847 if ((fill_to - fill_from) > one_video_frame() / 2) {
848 LastVideoMap::const_iterator last = _last_video.find (wp);
849 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, keeping L/R alternation intact. */
850 Eyes fill_to_eyes = video.eyes;
851 if (fill_to_eyes == EYES_BOTH) {
852 fill_to_eyes = EYES_LEFT;
854 if (fill_to == piece->content->end(_film)) {
855 /* Don't fill after the end of the content */
856 fill_to_eyes = EYES_LEFT;
858 DCPTime j = fill_from;
859 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
860 if (eyes == EYES_BOTH) {
863 while (j < fill_to || eyes != fill_to_eyes) {
864 if (last != _last_video.end()) {
865 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
/* Repeat the last frame with the eye changed rather than going black. */
866 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
867 copy->set_eyes (eyes);
868 emit_video (copy, j);
870 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
871 emit_video (black_player_video_frame(eyes), j);
/* Only advance time after emitting the right eye. */
873 if (eyes == EYES_RIGHT) {
874 j += one_video_frame();
876 eyes = increment_eyes (eyes);
879 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
880 if (last != _last_video.end()) {
881 emit_video (last->second, j);
883 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame (per piece) so future gaps can repeat it. */
890 _last_video[wp].reset (
893 piece->content->video->crop (),
894 piece->content->video->fade (_film, video.frame),
895 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
896 _video_container_size,
899 piece->content->video->colour_conversion(),
900 piece->content->video->range(),
/* Emit the frame frc.repeat times (repeat > 1 when DCP rate > content rate). */
908 for (int i = 0; i < frc.repeat; ++i) {
909 if (t < piece->content->end(_film)) {
910 emit_video (_last_video[wp], t);
912 t += one_video_frame ();
/* Handle a block of audio arriving from a decoder: trim it to the content's
   period, apply gain, remap to film channels, run the audio processor, then
   push it into the merger and record how far this stream has pushed.
   NOTE(review): extract incomplete -- some guard/brace lines are missing. */
917 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
919 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
921 shared_ptr<Piece> piece = wp.lock ();
926 shared_ptr<AudioContent> content = piece->content->audio;
927 DCPOMATIC_ASSERT (content);
929 int const rfr = content->resampled_frame_rate (_film);
931 /* Compute time in the DCP */
932 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
933 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
935 /* And the end of this block in the DCP */
936 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
938 /* Remove anything that comes before the start or after the end of the content */
939 if (time < piece->content->position()) {
940 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
942 /* This audio is entirely discarded */
945 content_audio.audio = cut.first;
947 } else if (time > piece->content->end(_film)) {
950 } else if (end > piece->content->end(_film)) {
/* Truncate the block so it does not run past the content's end. */
951 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
952 if (remaining_frames == 0) {
955 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
958 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply content gain on a copy so the decoder's buffer is untouched. */
962 if (content->gain() != 0) {
963 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
964 gain->apply_gain (content->gain ());
965 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
970 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
974 if (_audio_processor) {
975 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
980 _audio_merger.push (content_audio.audio, time);
981 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
982 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle: apply the content's offsets and
   scaling, scale the image to the video container size and register it as an
   active text. */
986 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
988 shared_ptr<Piece> piece = wp.lock ();
989 shared_ptr<const TextContent> text = wc.lock ();
990 if (!piece || !text) {
994 /* Apply content's subtitle offsets */
995 subtitle.sub.rectangle.x += text->x_offset ();
996 subtitle.sub.rectangle.y += text->y_offset ();
998 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
999 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
1000 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1002 /* Apply content's subtitle scale */
1003 subtitle.sub.rectangle.width *= text->x_scale ();
1004 subtitle.sub.rectangle.height *= text->y_scale ();
1007 shared_ptr<Image> image = subtitle.sub.image;
1009 /* We will scale the subtitle up to fit _video_container_size */
1010 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1011 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* A zero-sized subtitle cannot be scaled; drop it. */
1012 if (width == 0 || height == 0) {
1016 dcp::Size scaled_size (width, height);
1017 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1018 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1020 _active_texts[text->type()].add_from (wc, ps, from);
/* Handle the start of a plain (string) subtitle: apply the content's
   position/scale settings to each SubtitleString and register the result as
   an active text. */
1024 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1026 shared_ptr<Piece> piece = wp.lock ();
1027 shared_ptr<const TextContent> text = wc.lock ();
1028 if (!piece || !text) {
1033 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished. */
1035 if (from > piece->content->end(_film)) {
1039 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1040 s.set_h_position (s.h_position() + text->x_offset ());
1041 s.set_v_position (s.v_position() + text->y_offset ());
1042 float const xs = text->x_scale();
1043 float const ys = text->y_scale();
1044 float size = s.size();
1046 /* Adjust size to express the common part of the scaling;
1047 e.g. if xs = ys = 0.5 we scale size by 2.
1049 if (xs > 1e-5 && ys > 1e-5) {
1050 size *= 1 / min (1 / xs, 1 / ys);
1054 /* Then express aspect ratio changes */
1055 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1056 s.set_aspect_adjust (xs / ys);
/* Subtitle in-time is expressed with editable-unit 1000 (milliseconds). */
1059 s.set_in (dcp::Time(from.seconds(), 1000));
1060 ps.string.push_back (StringText (s, text->outline_width()));
1061 ps.add_fonts (text->fonts ());
1064 _active_texts[text->type()].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the matching active text and, if it is
   not being burnt in, emit it via the Text signal. */
1068 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1070 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw this subtitle start. */
1075 if (!_active_texts[text->type()].have(wc)) {
1079 shared_ptr<Piece> piece = wp.lock ();
1084 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops after the content has finished. */
1086 if (dcp_to > piece->content->end(_film)) {
1090 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1092 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in subtitles are composited in do_emit_video, not emitted here. */
1093 if (text->use() && !always && !text->burn()) {
1094 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @p time.  @p accurate true to get frames at exactly
   @p time onwards; false permits nearby (e.g. keyframe) positions.  Clears
   all buffered state and seeks every piece's decoder.
   NOTE(review): extract incomplete -- the suspended-state early return and
   some braces are missing. */
1099 Player::seek (DCPTime time, bool accurate)
1101 boost::mutex::scoped_lock lm (_mutex);
1102 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1105 /* We can't seek in this state */
1110 _shuffler->clear ();
1115 if (_audio_processor) {
1116 _audio_processor->flush ();
/* Drop any buffered audio and active texts; they pre-date the seek. */
1119 _audio_merger.clear ();
1120 for (int i = 0; i < TEXT_COUNT; ++i) {
1121 _active_texts[i].clear ();
1124 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1125 if (time < i->content->position()) {
1126 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1127 we must seek this (following) content accurately, otherwise when we come to the end of the current
1128 content we may not start right at the beginning of the next, causing a gap (if the next content has
1129 been trimmed to a point between keyframes, or something).
1131 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1133 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1134 /* During; seek to position */
1135 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1138 /* After; this piece is done */
/* For an accurate seek we know exactly where output resumes; otherwise the
   next emitted data defines the new times. */
1144 _last_video_time = time;
1145 _last_video_eyes = EYES_LEFT;
1146 _last_audio_time = time;
1148 _last_video_time = optional<DCPTime>();
1149 _last_video_eyes = optional<Eyes>();
1150 _last_audio_time = optional<DCPTime>();
1153 _black.set_position (time);
1154 _silent.set_position (time);
1156 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so that subtitles
   for the same time can arrive first; update the last-video bookkeeping. */
1160 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1162 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1163 player before the video that requires them.
1165 _delay.push_back (make_pair (pv, time));
/* Advance the expected next-video time only once both eyes are done. */
1167 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1168 _last_video_time = time + one_video_frame();
1170 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a couple of frames in the queue before emitting the oldest. */
1172 if (_delay.size() < 3) {
1176 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1178 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, burn in any open
   subtitles for this time, then signal Video (signal line not visible in
   this extract). */
1182 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1184 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1185 for (int i = 0; i < TEXT_COUNT; ++i) {
1186 _active_texts[i].clear_before (time);
1190 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1192 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance _last_audio_time;
   asserts that the block follows directly on from the previous one. */
1199 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1201 /* Log if the assert below is about to fail */
1202 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1203 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1206 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1207 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1208 Audio (data, time, _film->audio_frame_rate());
1209 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @p period, in blocks of at most half a second. */
1213 Player::fill_audio (DCPTimePeriod period)
1215 if (period.from == period.to) {
1219 DCPOMATIC_ASSERT (period.from < period.to);
1221 DCPTime t = period.from;
1222 while (t < period.to) {
1223 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1224 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* presumably guarded by samples > 0 in a missing line -- TODO confirm */
1226 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1227 silence->make_silent ();
1228 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate. */
1235 Player::one_video_frame () const
1237 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Cut the part of @p audio (starting at @p time) that falls before
   @p discard_to.  @return the remaining audio and its new start time; an
   empty pointer if nothing remains. */
1240 pair<shared_ptr<AudioBuffers>, DCPTime>
1241 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1243 DCPTime const discard_time = discard_to - time;
1244 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1245 Frame remaining_frames = audio->frames() - discard_frames;
1246 if (remaining_frames <= 0) {
1247 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1249 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1250 return make_pair(cut, time + discard_time);
/* Set (or clear, with none) the resolution reduction used when decoding
   DCPs, rebuilding pieces if it actually changed.  Emits PENDING then
   CANCELLED (no-op) or DONE. */
1254 Player::set_dcp_decode_reduction (optional<int> reduction)
1256 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1259 boost::mutex::scoped_lock lm (_mutex);
1261 if (reduction == _dcp_decode_reduction) {
1263 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1267 _dcp_decode_reduction = reduction;
1268 setup_pieces_unlocked ();
1271 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime in @p content to DCP time, or
   nothing if the content is not currently in our pieces. */
1275 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1277 boost::mutex::scoped_lock lm (_mutex);
1279 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1280 if (i->content == content) {
1281 return content_time_to_dcp (i, t);
1285 /* We couldn't find this content; perhaps things are being changed over */
1286 return optional<DCPTime>();
/* @return the playlist we are playing: the explicit one if set, otherwise
   the film's own playlist. */
1290 shared_ptr<const Playlist>
1291 Player::playlist () const
1293 return _playlist ? _playlist : _film->playlist();
/* Handle Atmos data from a decoder: pass it straight through on the Atmos
   signal with its time converted to DCP time. */
1298 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1300 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);